diff --git a/docs/about-us/beta-and-experimental-features.md b/docs/about-us/beta-and-experimental-features.md index cb310d08f4b..311d7d9ed0b 100644 --- a/docs/about-us/beta-and-experimental-features.md +++ b/docs/about-us/beta-and-experimental-features.md @@ -49,18 +49,6 @@ Please note: no additional experimental features are allowed to be enabled in Cl | Name | Default | |------|--------| -| [shared_merge_tree_activate_coordinated_merges_tasks](/operations/settings/merge-tree-settings#shared_merge_tree_activate_coordinated_merges_tasks) | `0` | -| [shared_merge_tree_enable_coordinated_merges](/operations/settings/merge-tree-settings#shared_merge_tree_enable_coordinated_merges) | `0` | -| [shared_merge_tree_enable_keeper_parts_extra_data](/operations/settings/merge-tree-settings#shared_merge_tree_enable_keeper_parts_extra_data) | `0` | -| [shared_merge_tree_merge_coordinator_election_check_period_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_election_check_period_ms) | `30000` | -| [shared_merge_tree_merge_coordinator_factor](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_factor) | `1.1` | -| [shared_merge_tree_merge_coordinator_fetch_fresh_metadata_period_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_fetch_fresh_metadata_period_ms) | `10000` | -| [shared_merge_tree_merge_coordinator_max_merge_request_size](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_max_merge_request_size) | `20` | -| [shared_merge_tree_merge_coordinator_max_period_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_max_period_ms) | `10000` | -| [shared_merge_tree_merge_coordinator_merges_prepare_count](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_merges_prepare_count) | `100` | -| 
[shared_merge_tree_merge_coordinator_min_period_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_min_period_ms) | `1` | -| [shared_merge_tree_merge_worker_fast_timeout_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_worker_fast_timeout_ms) | `100` | -| [shared_merge_tree_merge_worker_regular_timeout_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_worker_regular_timeout_ms) | `10000` | | [geotoh3_argument_order](/operations/settings/settings#geotoh3_argument_order) | `lat_lon` | | [enable_lightweight_update](/operations/settings/settings#enable_lightweight_update) | `1` | | [allow_experimental_correlated_subqueries](/operations/settings/settings#allow_experimental_correlated_subqueries) | `1` | @@ -90,25 +78,24 @@ Please note: no additional experimental features are allowed to be enabled in Cl | [low_priority_query_wait_time_ms](/operations/settings/settings#low_priority_query_wait_time_ms) | `1000` | | [allow_statistics_optimize](/operations/settings/settings#allow_statistics_optimize) | `1` | | [allow_experimental_delta_kernel_rs](/operations/settings/settings#allow_experimental_delta_kernel_rs) | `1` | +| [shared_merge_tree_activate_coordinated_merges_tasks](/operations/settings/merge-tree-settings#shared_merge_tree_activate_coordinated_merges_tasks) | `0` | +| [shared_merge_tree_enable_coordinated_merges](/operations/settings/merge-tree-settings#shared_merge_tree_enable_coordinated_merges) | `0` | +| [shared_merge_tree_enable_keeper_parts_extra_data](/operations/settings/merge-tree-settings#shared_merge_tree_enable_keeper_parts_extra_data) | `0` | +| [shared_merge_tree_merge_coordinator_election_check_period_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_election_check_period_ms) | `30000` | +| [shared_merge_tree_merge_coordinator_factor](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_factor) | `1.1` | +| 
[shared_merge_tree_merge_coordinator_fetch_fresh_metadata_period_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_fetch_fresh_metadata_period_ms) | `10000` | +| [shared_merge_tree_merge_coordinator_max_merge_request_size](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_max_merge_request_size) | `20` | +| [shared_merge_tree_merge_coordinator_max_period_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_max_period_ms) | `10000` | +| [shared_merge_tree_merge_coordinator_merges_prepare_count](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_merges_prepare_count) | `100` | +| [shared_merge_tree_merge_coordinator_min_period_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_coordinator_min_period_ms) | `1` | +| [shared_merge_tree_merge_worker_fast_timeout_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_worker_fast_timeout_ms) | `100` | +| [shared_merge_tree_merge_worker_regular_timeout_ms](/operations/settings/merge-tree-settings#shared_merge_tree_merge_worker_regular_timeout_ms) | `10000` | ## Experimental settings {#experimental-settings} | Name | Default | |------|--------| -| [allow_experimental_replacing_merge_with_cleanup](/operations/settings/merge-tree-settings#allow_experimental_replacing_merge_with_cleanup) | `0` | -| [allow_experimental_reverse_key](/operations/settings/merge-tree-settings#allow_experimental_reverse_key) | `0` | -| [allow_remote_fs_zero_copy_replication](/operations/settings/merge-tree-settings#allow_remote_fs_zero_copy_replication) | `0` | -| [enable_replacing_merge_with_cleanup_for_min_age_to_force_merge](/operations/settings/merge-tree-settings#enable_replacing_merge_with_cleanup_for_min_age_to_force_merge) | `0` | -| [force_read_through_cache_for_merges](/operations/settings/merge-tree-settings#force_read_through_cache_for_merges) | `0` | -| 
[merge_selector_algorithm](/operations/settings/merge-tree-settings#merge_selector_algorithm) | `Simple` | -| [notify_newest_block_number](/operations/settings/merge-tree-settings#notify_newest_block_number) | `0` | -| [part_moves_between_shards_delay_seconds](/operations/settings/merge-tree-settings#part_moves_between_shards_delay_seconds) | `30` | -| [part_moves_between_shards_enable](/operations/settings/merge-tree-settings#part_moves_between_shards_enable) | `0` | -| [remote_fs_zero_copy_path_compatible_mode](/operations/settings/merge-tree-settings#remote_fs_zero_copy_path_compatible_mode) | `0` | -| [remote_fs_zero_copy_zookeeper_path](/operations/settings/merge-tree-settings#remote_fs_zero_copy_zookeeper_path) | `/clickhouse/zero_copy` | -| [remove_rolled_back_parts_immediately](/operations/settings/merge-tree-settings#remove_rolled_back_parts_immediately) | `1` | -| [shared_merge_tree_virtual_parts_discovery_batch](/operations/settings/merge-tree-settings#shared_merge_tree_virtual_parts_discovery_batch) | `1` | | [allow_experimental_kafka_offsets_storage_in_keeper](/operations/settings/settings#allow_experimental_kafka_offsets_storage_in_keeper) | `0` | | [allow_experimental_delta_lake_writes](/operations/settings/settings#allow_experimental_delta_lake_writes) | `0` | | [allow_experimental_materialized_postgresql_table](/operations/settings/settings#allow_experimental_materialized_postgresql_table) | `0` | @@ -165,4 +152,17 @@ Please note: no additional experimental features are allowed to be enabled in Cl | [promql_evaluation_time](/operations/settings/settings#promql_evaluation_time) | `auto` | | [allow_experimental_alias_table_engine](/operations/settings/settings#allow_experimental_alias_table_engine) | `0` | | [use_paimon_partition_pruning](/operations/settings/settings#use_paimon_partition_pruning) | `0` | +| [allow_experimental_replacing_merge_with_cleanup](/operations/settings/merge-tree-settings#allow_experimental_replacing_merge_with_cleanup) | 
`0` | +| [allow_experimental_reverse_key](/operations/settings/merge-tree-settings#allow_experimental_reverse_key) | `0` | +| [allow_remote_fs_zero_copy_replication](/operations/settings/merge-tree-settings#allow_remote_fs_zero_copy_replication) | `0` | +| [enable_replacing_merge_with_cleanup_for_min_age_to_force_merge](/operations/settings/merge-tree-settings#enable_replacing_merge_with_cleanup_for_min_age_to_force_merge) | `0` | +| [force_read_through_cache_for_merges](/operations/settings/merge-tree-settings#force_read_through_cache_for_merges) | `0` | +| [merge_selector_algorithm](/operations/settings/merge-tree-settings#merge_selector_algorithm) | `Simple` | +| [notify_newest_block_number](/operations/settings/merge-tree-settings#notify_newest_block_number) | `0` | +| [part_moves_between_shards_delay_seconds](/operations/settings/merge-tree-settings#part_moves_between_shards_delay_seconds) | `30` | +| [part_moves_between_shards_enable](/operations/settings/merge-tree-settings#part_moves_between_shards_enable) | `0` | +| [remote_fs_zero_copy_path_compatible_mode](/operations/settings/merge-tree-settings#remote_fs_zero_copy_path_compatible_mode) | `0` | +| [remote_fs_zero_copy_zookeeper_path](/operations/settings/merge-tree-settings#remote_fs_zero_copy_zookeeper_path) | `/clickhouse/zero_copy` | +| [remove_rolled_back_parts_immediately](/operations/settings/merge-tree-settings#remove_rolled_back_parts_immediately) | `1` | +| [shared_merge_tree_virtual_parts_discovery_batch](/operations/settings/merge-tree-settings#shared_merge_tree_virtual_parts_discovery_batch) | `1` | diff --git a/gt-lock.json b/gt-lock.json index 0addd1db0eb..d1f86fde02e 100644 --- a/gt-lock.json +++ b/gt-lock.json @@ -49,169 +49,208 @@ }, "a8710471f3f4af77c39b30a518f52a8deb6584069b4bfb8e64158f30627b4792": { "zh": { - "updatedAt": "2025-12-02T22:57:44.582Z" + "updatedAt": "2025-12-04T20:16:57.006Z", + "postProcessHash": "dea80e8633a01661415a8f657db721e366107e6372f47980f87efcebec57d84a" }, 
"ru": { - "updatedAt": "2025-12-02T22:57:44.583Z" + "updatedAt": "2025-12-04T20:16:57.006Z", + "postProcessHash": "80b4843e916f7b1e577f8e4b72a589432fbe5af00d6591dc500c49a69d00de25" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.583Z" + "updatedAt": "2025-12-04T20:16:57.006Z", + "postProcessHash": "cb5973f7e5a67bc20467cd92ce8a5b231d0fd34ebf05b10d80ea2cde4e65241c" } } }, "24bb0ca99917fdfda706556c75c640db16b12f966ea7bd58e1e9a8bdf4be5146": { "40c867ec4bd9ff53ca41f19ef2fb11bce1cd4d6f82211f50a350bacfd56350a1": { "jp": { - "updatedAt": "2025-12-02T22:57:44.598Z" + "updatedAt": "2025-12-04T20:16:57.020Z", + "postProcessHash": "c2b159034204cbe9194f226815c59581e141b4b1d9a0888360d2731c9c4135b6" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.599Z" + "updatedAt": "2025-12-04T20:16:57.020Z", + "postProcessHash": "b4503160878c2254ab49f0007d23321ddb2c942c64c94db664bf33828e16dd60" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.592Z" + "updatedAt": "2025-12-04T20:16:56.997Z", + "postProcessHash": "0b1764d2530de07a912c6cd1024488b3c16f566eeb57af1c8868eeefc7807027" } } }, "2f81498e8b60c281ca710a3a25f611bf79424982fa85bce630e1d4182f252536": { "e5431d96bed4f0f93b507ffa84836d28b1d715ac31c199864a10370ec3b6f040": { "jp": { - "updatedAt": "2025-12-02T22:57:44.586Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "3f686d2c47341d014efae7f9d6f33a64509cfb1ba3ca22f872a5c370b6a3a358" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.567Z" + "updatedAt": "2025-12-04T20:16:56.997Z", + "postProcessHash": "375db763f436ad6553e921b026e7c0fa04329540ffc9e80499eb82e8c8ea0663" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.589Z" + "updatedAt": "2025-12-04T20:16:57.008Z", + "postProcessHash": "7507cfcbed4ebac25a56ae9e0e5a92a886049a5887ffc57eed724ccb32af8ec4" } } }, "37e1e1dcfe884bd88e97aa22d6ed7fc14323b326449d12f0a5644f15bd4ba087": { "bd75344d33495d82bb1ddbeeb77d5b1f53a6ecb5f788cb9eadaa606a67b5ba96": { "jp": { - "updatedAt": "2025-12-02T22:57:44.596Z" + "updatedAt": "2025-12-04T20:16:57.017Z", + 
"postProcessHash": "e5e905d07be28050aa66dd55c4896d833e812f116fe9120eac294829456f1242" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.597Z" + "updatedAt": "2025-12-04T20:16:57.017Z", + "postProcessHash": "14fd8bc4cbc9f61c5655f504fcb346fba3a806f8c548844bcee5bfcd9af0a0d8" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.600Z" + "updatedAt": "2025-12-04T20:16:57.020Z", + "postProcessHash": "9612a6ad02b86e775c0c30373bfa9bb267b079c3b296aa1f0e67771a11e06161" } } }, "49041ac358e6a0f1cdae73923da607add5f9d37fe3320250b5457924d09bcecc": { "d61c6739096f5de9a1f340500324926cc206fe878ab16df77def05d0ba746d3c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.600Z" + "updatedAt": "2025-12-04T20:16:57.020Z", + "postProcessHash": "149a40e0765161d19187178d9990fd73e946694b6e51ef1bceeb283543a9878d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.597Z" + "updatedAt": "2025-12-04T20:16:57.017Z", + "postProcessHash": "cd0d063215a756503aad7cc467ee44c6563646559dd30c885f70e8bec46f5f9b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.593Z" + "updatedAt": "2025-12-04T20:16:56.998Z", + "postProcessHash": "e699a728dbcdf02df448087277a49c8f5dc1a48548622121ebcb3031f826f9e9" } } }, "4abc97ebd23c7b3dacc0e18e77499272b51b908bd0c2a7a823d153d3c00f7613": { "7817d141aff4e4b1ceaca87c554c551bc1add23bd534611e2704fba56223fbfe": { "jp": { - "updatedAt": "2025-12-02T22:57:44.596Z" + "updatedAt": "2025-12-04T20:16:57.017Z", + "postProcessHash": "527ce7cbe58aa319ab05e565a6fe49cb82ea6cd4e17366423cbd60e886732393" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.598Z" + "updatedAt": "2025-12-04T20:16:57.019Z", + "postProcessHash": "983ce7e98f025ebbe40cef5c1c243223acbc01c78d010ead9d3f0320f8a35209" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.592Z" + "updatedAt": "2025-12-04T20:16:56.997Z", + "postProcessHash": "42c1f2e1ab6e097cce135c282ebaed5580447772d65fa6a2cf17df13d6c0b4da" } } }, "4e7333f7ff430819ccfae5b1f2b2ee97508f58db11c3e67c31430385b0618503": { "1a899ad20af5d3dc3c495e6ddc0c3ff5aacc9df838675e487a6910da0a531675": { "jp": 
{ - "updatedAt": "2025-12-02T22:57:44.585Z" + "updatedAt": "2025-12-04T20:16:57.006Z", + "postProcessHash": "22b50687ded9f30d3b3dd7d449be0e485310b09ec26eace2b0a9c83e43e7ce00" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.590Z" + "updatedAt": "2025-12-04T20:16:57.008Z", + "postProcessHash": "a5492416744168520dbb0eb9fb77b1d38f8227898a20e1119fbbd9ec5c7ea8fa" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.589Z" + "updatedAt": "2025-12-04T20:16:57.008Z", + "postProcessHash": "463d50cacf516dab41081e1c1ff87fab243ae437c66f1eaf34b5d2666c9f8200" } } }, "6f13745927dfcaff0a5b759cdfc9dc47aba26e811ab26776ee363cd821f7d585": { "be6c5629590606c77cd44d60b8cb153a6e8b1ae6d9f710967b3ea692cfc8cb6d": { "jp": { - "updatedAt": "2025-12-02T22:57:44.595Z" + "updatedAt": "2025-12-04T20:16:57.015Z", + "postProcessHash": "41ac3d39aa09bac8cde829d981d3feafcc427ae50b12a5ddab4f526608a02d23" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.593Z" + "updatedAt": "2025-12-04T20:16:56.998Z", + "postProcessHash": "049ba85e4c87c348a3e669ed4b331203275570c47be8b31481a7a35435f561a7" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.594Z" + "updatedAt": "2025-12-04T20:16:57.015Z", + "postProcessHash": "0d87f7d349ae6e58126c41a418eefa79b58acb3ea6d84c86755369bd25696c27" } } }, "8ad40f5399ed36401edb12df869b1d441ff2d635581938c63d4f0a611fb977ae": { "16565c6a0928275a3a601a45f18823227dc886a00aad5531244bec633d3e8af4": { "jp": { - "updatedAt": "2025-12-02T22:57:44.613Z" + "updatedAt": "2025-12-04T20:16:57.047Z", + "postProcessHash": "8bcf3ff641396e2d8c5da52dc58e751584d107f6d1d805c90988dec22c114a38" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.613Z" + "updatedAt": "2025-12-04T20:16:57.047Z", + "postProcessHash": "e625c4b6b53d8b1fa5873b15f2f5c571c9d49edf455c2dd316216095bd77654d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.591Z" + "updatedAt": "2025-12-04T20:16:56.996Z", + "postProcessHash": "f170d3dd3162329051742b675cbd0ffe6828303ed835195e7fe76a8adcc5f156" } } }, 
"a3ea3f0c344313a1d3ad7969f1c82ef13af419e6eec98da91153c8735fd46730": { "df3510130e5bdcdacd162718bb228e62987c548fea96f8a9e94123cc6b9a78d5": { "jp": { - "updatedAt": "2025-12-02T22:57:44.585Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "2c670901840457d43631271d798d948ad1808ea3e3a0768832448fa5291a14fb" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.586Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "34866960754235413e8855a940c78ea424a1dcba2dc6cbd96e6a8e4c3a88aef5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.585Z" + "updatedAt": "2025-12-04T20:16:57.006Z", + "postProcessHash": "ad7050ee90633a3a2cea8936df2c98ec9f52bde0487b44993cd64636741aa3dd" } } }, "b4b7e3ea48cb57c88168d17bf4d4f7d74e58a613803386d3229332939508c542": { "67faf8569421939ba33d4c9fdc3b64f28fcc3bc298cc8c8b43a29bf3499a6898": { "jp": { - "updatedAt": "2025-12-02T22:57:44.614Z" + "updatedAt": "2025-12-04T20:16:57.048Z", + "postProcessHash": "9d1fff2335500fea94d3724975766ef29f9d2969dcaa6b1b263acae18dd2a6a4" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.591Z" + "updatedAt": "2025-12-04T20:16:56.997Z", + "postProcessHash": "c4560376ecf7b29d362fc413002dcaa4fe08ac89e477221c6ce3d767912d8a1d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.613Z" + "updatedAt": "2025-12-04T20:16:57.047Z", + "postProcessHash": "f7be08bff036a4fcae66b8f5cb941e26a0dc9df82fec5705e5f308daf214123a" } } }, "bed5256b181dbcf92c02187749ebbf45c60b6bbfdee1789c1848984b6be1d78d": { "614647c380ff18e7b1672f19190809fcf15ba05429ff7f93a33f6c77255ba9ba": { "jp": { - "updatedAt": "2025-12-02T22:57:44.595Z" + "updatedAt": "2025-12-04T20:16:57.015Z", + "postProcessHash": "1ab88928e6d5bd19ba5c78dbc583c3a0f5dd74ef8024b486b6fe7a1b8961794b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.599Z" + "updatedAt": "2025-12-04T20:16:57.020Z", + "postProcessHash": "0bf7b55d68e8c32760083a4f0974e76e1788681abe8d2621dcc0328bf6e01fba" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.594Z" + "updatedAt": "2025-12-04T20:16:56.998Z", + 
"postProcessHash": "9fa6e7df16ec5380e9bc1597b072fdde81dc77cbbbf6923dd05c8c922e4b41e1" } } }, "c2811557e4f56ffd6e37b0f9f6558971e9d45005c22c3c19ebaef586f1591687": { "b9aea39ae1b4e63fef7a92d27750dfc746ac0ac174e77a895050ed0d24ff1ea7": { "jp": { - "updatedAt": "2025-12-02T22:57:44.568Z" + "updatedAt": "2025-12-04T20:16:56.997Z", + "postProcessHash": "e1b1ce689496d060b5172a3fbdb59cadf7145e23b1e6514e0010d0971196cae4" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.587Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "edba1c32a4523dbb9c9973228fcc228187dd6bc5987d3e75be5cf6b0c075b62d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.584Z" + "updatedAt": "2025-12-04T20:16:57.006Z", + "postProcessHash": "a16d69788eb2d608156b070abd071392f4921ce1d8a8358ed8bf4691082f3cf2" } } }, @@ -240,403 +279,496 @@ }, "a4c073207b34a9e6e51079c57f0e06190c406d676367e982df527e7379cf105d": { "jp": { - "updatedAt": "2025-12-02T22:57:44.579Z" + "updatedAt": "2025-12-04T20:16:57.005Z", + "postProcessHash": "2b5837009e3da50041488124f7177f54786a6875323a348ea17e8786c3156ca5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.581Z" + "updatedAt": "2025-12-04T20:16:57.005Z", + "postProcessHash": "99e0f428ecd7fb29b7cf7a2eb9db047a31f6c71041dbbb69678e602e4e19b995" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.580Z" + "updatedAt": "2025-12-04T20:16:57.005Z", + "postProcessHash": "d81afbdb3706b5f0a3db2cad06e018bfbd6cba8679de0b94d1130cd60b05bc8b" } } }, "d59e7e7594ae44f151cb9c65dc0cf67dc9998af7e3a974cffc3d0f0dabce2e18": { "7f90a5a780c1bb26935f70fb9cdd36714ca975e36d84b530b0b75f565410ba0a": { "jp": { - "updatedAt": "2025-12-02T22:57:44.586Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "1da247e74d18cd435ac6aacdc4fdd1bf3360964523a624d93f3ee3243e283723" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.588Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "2677a15eafb3267407541742b099d35f807853ba50698ab34c19c2ec970e44c3" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.589Z" 
+ "updatedAt": "2025-12-04T20:16:57.008Z", + "postProcessHash": "c563990ef0a3d1b09005841332cb571cd67f96f856e0dc395925105b3ab7fe6b" } } }, "d89bc73ed23da882f0c45593180a3989cb6844bd38d6496ab6cb5ab328d51083": { "42fe50c1e729beb1bfa14d29e80c4f579a068ebbfa39aa1ffe25b2bb963a815a": { "jp": { - "updatedAt": "2025-12-02T22:57:44.615Z" + "updatedAt": "2025-12-04T20:16:57.048Z", + "postProcessHash": "12f93ea3d3825950e2a35a701ba23d5b5980e8037ee63d9b1ff2aa3d53099049" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.615Z" + "updatedAt": "2025-12-04T20:16:57.048Z", + "postProcessHash": "b60b4485e60c8f6adc554d776d4af299bd72a7a8d51d6a27533f2e91f527ed1d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.614Z" + "updatedAt": "2025-12-04T20:16:57.047Z", + "postProcessHash": "0e9d71f429e1c54a12237acf88146995423cfc66ebb3f75a38f5d87d6affccd0" } } }, "e8ae18c3678baf91b2255e5eab22effc78193c605230851316718cfb95063b2c": { "b8eaf5b30dc66a5bf4e27198f07863a95cd60a2e8b15d9fe7e86cc6f6eb603a7": { "jp": { - "updatedAt": "2025-12-02T22:57:44.615Z" + "updatedAt": "2025-12-04T20:16:57.048Z", + "postProcessHash": "ba385665bc7c8e1e7c7638d1b5b2387ec5b153e6e0b8ef275ce9b212301ef043" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.614Z" + "updatedAt": "2025-12-04T20:16:57.048Z", + "postProcessHash": "9fc70f3ff9c8ad4b4369433925b69776b42e153c032445df547301e9216926be" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.615Z" + "updatedAt": "2025-12-04T20:16:57.048Z", + "postProcessHash": "81b02e67e07018e5fd60381abbb70402f7fc8dbabd8c16cdc0ecdf987078434c" } } }, "e92405c74b1c19a280775296a5640f2c7646bfabd9d6af48d6359d9a4f09c9d8": { "c9015dfa533bb72f0fe4f1f5a455b0a5497c12b645e908ee88d9686adff07027": { "jp": { - "updatedAt": "2025-12-02T22:57:44.595Z" + "updatedAt": "2025-12-04T20:16:57.016Z", + "postProcessHash": "3e7c51f6cf75f0c9024617ce473a061249536a1e27d3108362f9e644f167a533" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.594Z" + "updatedAt": "2025-12-04T20:16:56.998Z", + "postProcessHash": 
"65c70bfa5c1066d61400754e3ecc5dd1c4e562f91e8c9218778f1727353a4b95" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.600Z" + "updatedAt": "2025-12-04T20:16:57.020Z", + "postProcessHash": "5a3f0ee08f15587cd66d919be56646cc7b54ff0d7cf1c9649472a45344eac53a" } } }, "ea04f6329e37f5487414c9b64a5e1602d705f1fc914807a5e16d95932f4ded16": { "c2794c8cfb2c5d8f3ad408c1a6ee6d92accd0948ff2682cca78897d7cef83daf": { "jp": { - "updatedAt": "2025-12-02T22:57:44.587Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "278a28af1f274ab6625dd842747e00021283b5ae861342f04c48c8ba169da45c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.588Z" + "updatedAt": "2025-12-04T20:16:57.008Z", + "postProcessHash": "2570c882c0329da070f76c503ff135daf236f53628c8e56655b165f3b6b0258f" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.587Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "d558af907d6f102c55c61793e4f1b1dc0d4694633689c2e51feec34c3f305282" } } }, "ed828a4311942b25614d0fd962b572a6dc329c0d92a3891dce42290c1d8324f1": { "78977a9c19b7aa2ba08361a0d6ca3390d032f6997a67d280a40d8974f768bb52": { "jp": { - "updatedAt": "2025-12-02T22:57:44.590Z" + "updatedAt": "2025-12-04T20:16:57.008Z", + "postProcessHash": "3550ec99549bdfc84c6cc828ed70f0ad383d91c7600c157c2506641ba0aa0edb" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.584Z" + "updatedAt": "2025-12-04T20:16:57.006Z", + "postProcessHash": "5b0c3157e8a4ea3131973c3209e2abe9c6d1d254187df96eaa61a9f45038c69c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.591Z" + "updatedAt": "2025-12-04T20:16:57.008Z", + "postProcessHash": "2e3abb38ddf37601a006893161ff5c32d0ad5cf5c0624701b09eb32c43a1fd24" } } }, "ef555d903b99c706a7fbc048a6888f3d3743693968bc76912f338d53af846b0c": { "c84825f7cf888bad7b7b5ec57d4a3941f8dc40c7526398600864fd18a77516ef": { "zh": { - "updatedAt": "2025-12-02T22:57:44.596Z" + "updatedAt": "2025-12-04T20:16:57.016Z", + "postProcessHash": "5e8fbc5025e237e43a50093550836e281b05664d90bfc7aca7c95ad3f88f75cd" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:44.597Z" + "updatedAt": "2025-12-04T20:16:57.018Z", + "postProcessHash": "9de8b7fc3937676407d7db0f252d6cd511a8673a3934d3470e8181db3e3271c9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.595Z" + "updatedAt": "2025-12-04T20:16:57.015Z", + "postProcessHash": "c1a2637d3b784bb1961182c0efed5a3e6c20c514f1fcee3d69b6bb993f387238" } } }, "fb2a4bdb7f2883fa7ac9878a6d4e978def652c408e4ef95784547eef9e313dbb": { "20e4763f0f7057430907de10bf00a918aa2e762becf34af686b125a9da4fe458": { "jp": { - "updatedAt": "2025-12-02T22:57:44.566Z" + "updatedAt": "2025-12-04T20:16:56.996Z", + "postProcessHash": "066c343903fc8f105322a8b7d2482f81cf1d3b4db8c650f4095546602c9998d7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.566Z" + "updatedAt": "2025-12-04T20:16:56.997Z", + "postProcessHash": "b3cb903f920b0efabfd2d2b8989b14439fc616dd236f7385108983931d029bf9" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.588Z" + "updatedAt": "2025-12-04T20:16:57.007Z", + "postProcessHash": "2447d25dc0f1696c6aaf1c6ae2d72db63375857928f0a721731f9c1facd8b816" } } }, "22760d417a52c66f14bee182587d458d0737a616dd14cb09748b4c725fc5809f": { "c6ca08107fa6822548ad3adc5de4b6fdf1d9860224c2cd62047f42bce72b1c12": { "jp": { - "updatedAt": "2025-12-02T22:57:12.861Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": "f6deea422207d967b6461e97071734f39e34beaf512cd5b27858f885d8645481" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.861Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": "f2dbb352b7da80b2bc1a94c2977e4417a93f0135b68a7b16bc9af6387847e01d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.681Z" + "updatedAt": "2025-12-04T20:16:57.097Z", + "postProcessHash": "be639ec494ec47de6646af8f5303f5a9cb200978f8f07fca02e5557a8a99d9df" } } }, "3e38c1623307fe1538f034436996c45b6ce42cebe6a35b146ba34a354e7b226a": { "7d9c49d88230712b6849bcab6640651373295cad7888223291eb46da868626e3": { "jp": { - "updatedAt": "2025-12-02T22:57:44.698Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": 
"adbf3470ec4df46143250be3ba2cd128011d0cc073401f47468887ef78f214bb" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.694Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "e5b3c3f5ea30fa4103f558f85732c4c901802a7b6b272cdd2800d3183e33172a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.694Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "42a5cca9d425a028e47cc62bff77488d51a89f7c1aa4217fc17fa7ff4a57d2c6" } } }, "434f73c99193146064105db19ce337122de0d78915918472d56f5393dc41a913": { "07acf0a2f2bf2cdedbe6696ce78f98b197df5722eecc5a214cf1d15173619bb2": { "jp": { - "updatedAt": "2025-12-02T22:57:12.869Z" + "updatedAt": "2025-12-04T20:16:57.142Z", + "postProcessHash": "65c42797c8bacabc72143be09683b3ee3d27393d6e7c506507ed5938e1babfff" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.681Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "aec977aa13a5a2eb5c23d9e8b2ce1ed9a48327e4b15fe80fb3f7bd7c1a4a374e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.682Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "7a6423ef2bb0984dca8cb73fc2323e4e2714e2a451d85242835158f40e519e66" } } }, "4fa6a5d68016ad855e418c2e88b5a37793256913a0caceaf33014edf61107509": { "1ed9748c6ebe33e1898f694a866a318e321540cc9186ac29b7621da0715118c5": { "jp": { - "updatedAt": "2025-12-02T22:57:44.699Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "4b28de374793ac2ccec45b9f32a3d9415caa8b274547ca0c9cb4a128aaaabec8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.701Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "9fe0d9723b9570ba1b159dd3ca7bb497d138ecb29e10eb2a09663e445b759bdf" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.692Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "fa14cb3b1b996a1b7c8ac10227955be037bb2067631054e18dd2880e6eb9d045" } } }, "5dcaaaf5a4d53dc33da6680731d152b4a88a8d4e9c6058a18e521c7629865fb2": { "11c49d7827257644d730176fb691cb3d9705b0b2caafb1ee0ef7b70e70446275": { "jp": { - "updatedAt": 
"2025-12-02T22:57:44.676Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "98de1fa0f824db18057222b5c3af7114a783cac728dddd4f73421c62d47d6c93" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.677Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "5e754240f300dc95396bed0fcf36a5e03b697e79a19e592bbad674b373a2e49e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.678Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "7923bc53b3575bb76eb9cae1385c1f78de1e8366a87855a32ba5dc36877f0009" } } }, "6687b017941230cd05da5ec3d3b0b3a66c8da07927fdb43f62a2732581460749": { "df9979ccd3ace6a4ab1b704d9d4b233c9092cf3e747331f0d940179f918f015b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.619Z" + "updatedAt": "2025-12-04T20:16:57.013Z", + "postProcessHash": "b918e5a07461ebb5820764580d62ebf62353a05901ae7a27cdff1fc07d53ff1a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.617Z" + "updatedAt": "2025-12-04T20:16:57.012Z", + "postProcessHash": "9490f1a5d7e664e812a2cde9c40859a12338b1f8853ae3525c733c62f9b82fda" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.619Z" + "updatedAt": "2025-12-04T20:16:57.013Z", + "postProcessHash": "c29bc86f48335dfbc7c483094c8b80b116c4b3c974df4f07a1b8966667f6264b" } } }, "69aa1e22d1867f2dd9082998e597234169f92ed3ba4c3d6af26b34ffa82e4a48": { "aea97333102d80bfe523bef5b3932706938c1ab2307337cf20451a0633f0d7a0": { "jp": { - "updatedAt": "2025-12-02T22:57:44.690Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": "3ca330ec7d12194cd8d43155f7aeb26c13a9b53e3f683fd8a6732f0be0319a76" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.681Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "0756612fabd749f0c52b94a55937f26d5083fc138d02b2c62e504e6281b3aa1e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.676Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "0f12b8f141c59d2672d963aa025bede85dba80176b0e9610ebc70db0068ed783" } } }, "78ae413a62554c8c5ae5ac8301b68726066573d500bb6c8caabdecefd781bb3f": { 
"754657766dba43bf89b81e0a5c15e318411e3d1782280b5ae5d185edc97b8e9b": { "jp": { - "updatedAt": "2025-12-02T22:57:12.860Z" + "updatedAt": "2025-12-04T20:16:57.074Z", + "postProcessHash": "53a0569a9815ae65ff106f265d782eb6543645c88bf9cabe68b832336d36bc0b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.860Z" + "updatedAt": "2025-12-04T20:16:57.074Z", + "postProcessHash": "db12a99a5062b8df14e0c4633c3c17ddf97099362f1147ea64b710385502f58c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.680Z" + "updatedAt": "2025-12-04T20:16:57.097Z", + "postProcessHash": "5a0e46753a5fdb69ba0782fe8b2525c81cd7db7c2cd654a9a77a8d3788bc0959" } } }, "7c0a1f3dadfd423b5f66993109c1e8bde0a0b4ff555067d9e6cd275bdaa7a391": { "ef65d65a01188f23ada0aa6a4be2fd11257542de621c6ad17c666a4b0b2aabf4": { "jp": { - "updatedAt": "2025-12-02T22:57:44.704Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "09086c56127640ebe12e8aa8ad1e2d645bd2e150a27a746af186a02a46b010a6" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.702Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "5bfd8e6d0803b938007d4729dc649988358f04c7a8442517bdbe1ffafe5005ff" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.703Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "f3ca08cf06d7795a53250b53a8b2c35e4d5b347cc42ade0c3f97a79e56a158b3" } } }, "8d9acd4ed372d08f28519dfb01b6900545df9f42502ac21f0ef6bd86b724c724": { "3ca361084040c6efbaef261b3b4c88e38d022539f3e58645a4023be45b9ed7f2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.701Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "6953961a2646a8ba9086b4de8c85bf1675ce1e9fbaa756900ca53568a50b942a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.693Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "0d3169e3982c50a1af0140de3dbe66b0a27f9c549609525ea3f3eeca871789db" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.687Z" + "updatedAt": "2025-12-04T20:16:57.088Z", + "postProcessHash": 
"1ee4ad34a8c447f681641d606c9f7f7cc555af7de87f35a1570190b32797a165" } } }, "902fba8b39d6b163ee66698d8bd12433740962a53ec93d756ebdc9d11cc5c531": { "dc72366dcf698c0d7f7b5eed229fd9a7dbb9776362cc9399cf927769376a9098": { "jp": { - "updatedAt": "2025-12-02T22:57:44.675Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": "1f1ec41d23e63bd76de22c2cf8888b283f10a1b8b787d63b0b9d0495709e9d49" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.677Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "69c4f5372057ca46e67c0ffbd84fc38f696c6441ef42b3690952dd20899f8b3c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.676Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "ce58780af5fcd724a09d992cdad2a8b119596ea6f092bcadd56d6d3210a87474" } } }, "9414d7ed4d45271d4677e8083e81b7732a750c4dcd8dc182b14ce11749a9ec63": { "55348a4fc23db936f15885093540b12ab2d7159c2a91617e4058fef961a3c4ea": { "jp": { - "updatedAt": "2025-12-02T22:57:44.618Z" + "updatedAt": "2025-12-04T20:16:57.012Z", + "postProcessHash": "00c3b1540fa6b88c22c04dfbc9e7d2d1f50bffe6a7e3e9f0726ed52aeae6a787" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.617Z" + "updatedAt": "2025-12-04T20:16:57.012Z", + "postProcessHash": "5aa56dccdcf822ec62df20d2363b87bf99f276dfbc642ef9bd797b3b7090d6b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.618Z" + "updatedAt": "2025-12-04T20:16:57.012Z", + "postProcessHash": "9def9b4b3c4afd8b4109b84d07bdc30cd4f5f72b12a2a3554d059f2000b94be4" } } }, "9ac02f1c2289520aeb58725683781053f5ba6bf828b2e8585540596060f1f416": { "ae1a2a308feb0c5c6d10c67047a4b5867fd643296e4e816743b7e2e297fa0f5b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.679Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "17bcf1b7b92caa5357895d1fe299a7195e4dcc288cf97e34a16d5c4be926abb3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.680Z" + "updatedAt": "2025-12-04T20:16:57.097Z", + "postProcessHash": "77c88ac8a3adf0e84d1e0891ff5f88da4f197d65483428fb9a5c792fae5ef0ec" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:12.862Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "e2d5bf177b2d670016e09b59f5c61d8b513761dbc971b8329c0f04a64ee3f5c0" } } }, "9c7cf003973e27e4edaecd818b57b1e653cdfc8e936c45c67e314eb7123327be": { "1bca9e04eb1cf2d68b948ebb6ff7b813d50c8faada3f1ee2a8c561e9d96d6882": { "jp": { - "updatedAt": "2025-12-02T22:57:44.619Z" + "updatedAt": "2025-12-04T20:16:57.018Z", + "postProcessHash": "7643936e64d9421b1100b3e03f96f1d86566f298b013156b2294fa0e8090765d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.620Z" + "updatedAt": "2025-12-04T20:16:57.019Z", + "postProcessHash": "9771d7374750e9391c577cc6ab4783afc3355df8bd5cac0e3e6d811fad3c77ad" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.598Z" + "updatedAt": "2025-12-04T20:16:57.018Z", + "postProcessHash": "88267ee69616a7f62783d0cdf785584ff6f8c4d6b4fa617cd7985731fdd30fbd" } } }, "9d1a4cd9443c6b88ad09c86205631318b2554cff25df4445681bef27a63f1923": { "6349c4d8161b7c14d291e4b3a1c44b280c0eb9f067739c2cbeccc19c6800a2de": { "ru": { - "updatedAt": "2025-12-02T22:57:44.703Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "300e689f73b8a314d09765196d8f22817166c2c0be5d9d474f7c8dcfec8efda6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.704Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "da1498753a9d75cb8be1bdcaa4431aee6a9e087dacf436c3075302444831acf4" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.701Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "7650f2586dc8a669367419b9d6295857d800f3e06528e2d19d40dc6564a0028c" } } }, "abe1e09771cc949a765515b0f1ae2d0c4a6ab90fceae133c7ea3efdc49fb65a6": { "3a86b5256b89e144630a5cab1fb1ee8cee76bb30374fd9a861dacc440a7b8bd9": { "jp": { - "updatedAt": "2025-12-02T22:57:12.862Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "daf74820f0de47bc47018b5965c3c3e1b9b205b5df0b7d93dc3366562ad94f6f" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.859Z" + "updatedAt": "2025-12-04T20:16:57.074Z", + "postProcessHash": 
"0ea3c19c670f88e2ca010fff3a85ad6cf2711580f42e99d7d6f71b2fc0c392a8" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.859Z" + "updatedAt": "2025-12-04T20:16:57.074Z", + "postProcessHash": "5fa2dc4f89cfd91fac4b7d62704897ceddc9e47ff517a3e9a861ab016edb9543" } } }, "ad7599fbe857ed33f8231ed240a179e73c2e77cfa5e658ffca7502e66d2eeb8d": { "fadc1396d5e8d2ef81e08c49dd8a08b01468ff70c1b1463a692904b2403b88dc": { "jp": { - "updatedAt": "2025-12-02T22:57:44.677Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "ea2263030c46ca417add124667ce70d069b5ab67b2f1061a41caade774a77ba0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.678Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "d92c93d69333bb3bc7555b943cf57dfcdefa5f958a6c86775462296fe9a1698e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.678Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "030ce23797d2e95f531ec7e07a29de63efc5fb6fbba23c40bfb49efd37ee36b6" } } }, "b299c52ac5ef4f3399408a89f700027a4da61547b988cd85ef190b1cd544d809": { "ab3b1379019677d4f056b27b40d79c7a1d368792ecee4e8e04d9224e7f40f825": { "jp": { - "updatedAt": "2025-12-02T22:57:44.697Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "124a3e2ef9f22284438c43dadd8d8f66148b610bd88b59533d43ce92b8c23711" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.691Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": "444d74d94170cb1e152ad87a84dd84afbce8201e1e4c2e6389ce4aae13d84440" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.698Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "f9b7a65de0c7b9d0a4fad0b5b6cd6e3d20a42ae97318ac5c3ec1c3be6cfc8995" } } }, "b776eb4f7e91ceadeb1cd902e4a72be31912c8f40357421634f01d720427d7cf": { "a157eb7d7ffd46e8626bd3b8ed555fd32deed480e675bf80cec9536c2cc53b70": { "jp": { - "updatedAt": "2025-12-02T22:57:44.693Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "8f62360a1333252926b93344e084faf3a2d313d1f86c675edcf721a34d00b4d9" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:44.689Z" + "updatedAt": "2025-12-04T20:16:57.090Z", + "postProcessHash": "0d157bcf10f26887dfd385a982438b02933943ea782b88f143a3ea53b8bcf340" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.699Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "96dfbb21076c6bd7e79b4aeefcb9f48571db2fc947d54a63f61d2f146a877ad1" } } }, "d294fb78df318396bfabb26180e94ed0286f348799a54a338dbcac4df2d501a8": { "2c1ad0e8f79ff31317243d7b0ba63abc05a794bb4cf50ddf3ab6a05a73136433": { "jp": { - "updatedAt": "2025-12-02T22:57:44.679Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "e8e6ab8458fb1da965dc5a46290ef2e645b4a28d79b3f59095615f2a2bfb19fb" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.678Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "f48414f092c0b0e2d48295cc68d8679eda689bf7d92013346cfc3160c2ad7b9a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.679Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "a78267e9d1a30ab7c698f1df3d04b0753a081c2835c79436caf02ff3829e5762" } } }, "fb88079d51f44c0605f104c400878c73b1676f5d7360de0915e1f533962516d7": { "83b74506a046cca4bef2d9a75f263d66bc8cbdf6902a726a083fb24ba240c90a": { "jp": { - "updatedAt": "2025-12-02T22:57:44.703Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "2ac607a2533a70b8792e43f0a1967f61d5f78f79c31628ec48ea5134914d98c3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.695Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "46f61667c4ff4c09edf454cf74c1401a772b97fcfe093dafbbdaa8181583927e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.691Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": "48581e1edb533b6f96df47a93d6cc56a60dda895f85b0bad2cf5783ce348cb56" } } }, "08c0c301774aaa88b81ec6aa095f55e7824eafa1cbace5b623dc7c79a65127d2": { "69fd950d01a73a4628cd2ff26fd88bc864432af7ec9c2a0b214e105e41696130": { "jp": { - "updatedAt": "2025-12-02T22:57:44.690Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": 
"e6b95fe15b73769c8c942b025e34bc6c9b796b7e3023974439110c46303b35b8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.700Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "c2cc203d1b4acc4f016c9abe4519f61634fafa96714c3f0e7b1ee5cee96564c1" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.692Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "283cdfce3f052ca1b4058952ad56aa7bec051d8a2ab543cf07880fd7987ec784" } } }, @@ -687,13 +819,16 @@ }, "10ec0d04defc231968ae844734eecc3c0222a9f4c3d83e43404e025a224ce4c4": { "zh": { - "updatedAt": "2025-12-01T17:59:44.580Z" + "updatedAt": "2025-12-04T20:16:57.148Z", + "postProcessHash": "b3fd8d1b7eee787b31092cdd119ca4f2a73beccc27b59ccbdcd3f460a86ab95d" }, "ru": { - "updatedAt": "2025-12-01T17:59:44.580Z" + "updatedAt": "2025-12-04T20:16:57.090Z", + "postProcessHash": "effd6680bcd12f09d304a38a5731f6edeae0ab9624ed1bf0081dc446f3b99426" }, "jp": { - "updatedAt": "2025-12-01T17:59:44.580Z" + "updatedAt": "2025-12-04T20:16:57.090Z", + "postProcessHash": "9a17aa83bd7f4ecb388a30277b06c42ca2f82f9deb94834bcf76f6155e9fc3af" } }, "2b15529df6d1f9ca7f3c4170f39e449eaee89387e571a3fed2827059a441d581": { @@ -711,13 +846,16 @@ "3a3e4cf73cd863c0103607437eb8b4f6836337cfd7e83bdd562015c4ed9cdd6d": { "086e3e89b5951923ddf12df84d937ba158991125876b5f6d842de358bbe8b3fe": { "jp": { - "updatedAt": "2025-12-02T22:57:12.874Z" + "updatedAt": "2025-12-04T20:16:57.106Z", + "postProcessHash": "fb971b3f7abda0479ca6b446d10fa45e563e53e4b533193cdf47ac7601de08ca" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.874Z" + "updatedAt": "2025-12-04T20:16:57.106Z", + "postProcessHash": "ea00f78ee37041e965faf918712a683b8ffcaa68a84b666531ed38ba7cd8cf1b" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.878Z" + "updatedAt": "2025-12-04T20:16:57.121Z", + "postProcessHash": "b2465a5cd96f04ec09cdfeccabc659965d1af02e9db24576f14e357ca80834c8" } } }, @@ -735,273 +873,336 @@ }, "4d6402165019f36eba8ac335e666c3d5bc688e1cc2afc71d462b4b0a95607cb0": { "zh": { - "updatedAt": 
"2025-12-02T22:57:44.683Z" + "updatedAt": "2025-12-04T20:16:57.086Z", + "postProcessHash": "a38469346493ab3007e1e162d5e194fb2d686a1d88334495f903e002130ebfef" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.685Z" + "updatedAt": "2025-12-04T20:16:57.087Z", + "postProcessHash": "4d620b86b7816aae52d912ce7829f03c76baa70534045919bf05211460b37d1f" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.687Z" + "updatedAt": "2025-12-04T20:16:57.088Z", + "postProcessHash": "f53e1e5a026dbef96c38fafea45bc14c07fe2ad4859a3ad749188b2c3d227c31" } } }, "490be0352814516ee6591ee5f8e07875e2139020d864d540140e0fa494298d5d": { "d23d41d10643691da14255ad0f85c7b97475432325af1c17be68df9efc12be5a": { "jp": { - "updatedAt": "2025-12-02T22:57:44.705Z" + "updatedAt": "2025-12-04T20:16:57.097Z", + "postProcessHash": "8067b52c02b63003be76e6b33fb7da40b43bc3d4d036161d2059640dd1f7c426" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.869Z" + "updatedAt": "2025-12-04T20:16:57.097Z", + "postProcessHash": "a7b6da3a1fae0ce2ab8dba21352ca1f6e76dcea50cc11b69175b377d04c0497a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.705Z" + "updatedAt": "2025-12-04T20:16:57.097Z", + "postProcessHash": "a30a1b3a4adccbce7509c49e15ce0ce238efa1f7c4e1bd77864f105717b4c8e3" } } }, "55b28fab1ba94c3606d033461bcc70376b43d613080d008d80ef6eeee311b377": { "256a3209f20639b3de6006d270d351fa95df57bd7f581ffda6773fd8eba690c7": { "jp": { - "updatedAt": "2025-12-02T22:57:12.882Z" + "updatedAt": "2025-12-04T20:16:57.128Z", + "postProcessHash": "864a98275b08744b06c98dd96014565189f79d571df205cbf75e2482e49f18f3" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.873Z" + "updatedAt": "2025-12-04T20:16:57.101Z", + "postProcessHash": "de4bbf322e2a336a64395c4b9adfd73dc2380b715500e20cbc8d1df8009779bc" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.875Z" + "updatedAt": "2025-12-04T20:16:57.114Z", + "postProcessHash": "2b10d06d558ee85575fe42ac6386781077fa938da76da985a80d47c58486843b" } } }, "617059ab9b90c50e356730de729f0ae69ee3763a1e279dd764ff91a7fb180dcc": { 
"d57355b7ce2374ff50888d99d345884771d8478a28a50565e264c7183444541e": { "jp": { - "updatedAt": "2025-12-02T22:57:12.884Z" + "updatedAt": "2025-12-04T20:16:57.130Z", + "postProcessHash": "a2e2c2b5c704ccd6ece06ee9d5de9f716d887648e5d242eb1864757aa7670740" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.878Z" + "updatedAt": "2025-12-04T20:16:57.122Z", + "postProcessHash": "17d04cb1084f646a47034ba5b26f1bc9ee6d5f4c7ea407edacbc763f0ec11189" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.876Z" + "updatedAt": "2025-12-04T20:16:57.118Z", + "postProcessHash": "e709f25f06cf9da27b50f7dae1c6ea0f330312eda3c0f55fa460bc3faa274d5a" } } }, "752a92de3795a78c42039024533716b1bebd226dc5c16f6d9e6c32db92868aa9": { "3a70208f4d63a66f6cafa72e823a27c94a0b217c643d65060e75846cf03db29d": { "jp": { - "updatedAt": "2025-12-02T22:57:12.887Z" + "updatedAt": "2025-12-04T20:16:57.136Z", + "postProcessHash": "6e9e6bbb0e0d89d7a1bb04df5588a1c39524ed19ebdcc06119d60b39a0e5dbe8" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.890Z" + "updatedAt": "2025-12-04T20:16:57.140Z", + "postProcessHash": "5c7a2e0de6f9562fd9496dcfcece54f6dba9da7b9076282d990354147d737f23" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.890Z" + "updatedAt": "2025-12-04T20:16:57.141Z", + "postProcessHash": "52e53d389359b8b1fcbd073375a4782c90d17150c0edaee3f985ddb1a12b8924" } } }, "7c8202b183dd3bd51127bf5cff1d877fc101a710d10076050d3769cec7237315": { "cce8610caf1b6ee18be42bc4b4573a409a2178a60d7e7fdf9aa312bb9a0e96af": { "jp": { - "updatedAt": "2025-12-02T22:57:12.878Z" + "updatedAt": "2025-12-04T20:16:57.122Z", + "postProcessHash": "f942e6305b149dd731e797e01bde5f1ab8e88df370fed65c3d0497890c5516b5" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.698Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "a19886bf8c9f0c0646ad12adc41bb64d3c7253c6909609d07e0a306d0a96748a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.702Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": 
"331e6c38ae2460c3e45d3a23c7cf005cb95fb6ef3424d039d294c25fbdf3f71a" } } }, "7d8c9d047aa047d949a0099bf7badab51bf4cbb1242283616136def6a2087241": { "ae00c1636361dff35e6ca1fc517dd76ec664cbc4f992d5bcfebb7e2a76f626c4": { "jp": { - "updatedAt": "2025-12-02T22:57:44.696Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "1140797eccd2bdff578f889659f5a7faa9917b5a999010e3f3dbfafc0abad6e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.699Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "7c3488ad862fb019112eaf4a05daea9c1795baa8474d66894fbd6a79b0e4c8c4" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.695Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "cdcef18737de614827b984003f3c66a5a41cae0524bf8474b294737f97151794" } } }, "828d49017ac2a72d1fb53055bb4787df9014bcdf6914a82ba88ded05b27ec9d4": { "9d68c2d46ac27369e5a5becf238948336518cad4fd978e7648cd41b1f743b1b1": { "jp": { - "updatedAt": "2025-12-02T22:57:12.894Z" + "updatedAt": "2025-12-04T20:16:57.145Z", + "postProcessHash": "9c5bbd0fe25c8f6740a5b35d9b7938247d54cc72d464c493e5ca7b7b989c5de0" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.896Z" + "updatedAt": "2025-12-04T20:16:57.146Z", + "postProcessHash": "28e5606bf2fee21b52036f1b9746ba5db3168e107f14b5de1ed5b86b81f9faa7" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.893Z" + "updatedAt": "2025-12-04T20:16:57.144Z", + "postProcessHash": "dd1c221b3a6bc8fdcab0733ea347562aaec1f2d22578317da5d0e391257b95e3" } } }, "9970d9a6d501f36cf179c0419231b9d795a4c633dddeb9b278e8ba7a601a3f30": { "5509618b18f9e3d905b42bcca3ca87b185e363a986c08a3c7adaa67ea9d4602e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.700Z" + "updatedAt": "2025-12-04T20:16:57.094Z", + "postProcessHash": "88b708b0f77fcec267f2c025dd1c928486bf0b760faf4c69634066d911a0e58d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.702Z" + "updatedAt": "2025-12-04T20:16:57.095Z", + "postProcessHash": "2e44e3e9746c2a9b5c84a9a6b1b9220fc6966e84565dfc41889d6ad9341cc42b" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.689Z" + "updatedAt": "2025-12-04T20:16:57.090Z", + "postProcessHash": "eb912abd25d2f977b73da0d25660c208c65e77e7a2b652dc80d7a067ed5c83f2" } } }, "a1bad3f4a716dc84c050e5be3e8486b6c74375173ac25b4b6faa1e07928f68dc": { "2ea331fabd4829ebc7e1af163a669bd7da7ebae75dc79796126ab275fd4d3c95": { "jp": { - "updatedAt": "2025-12-02T22:57:44.693Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "15485de315d5836a55c258e03b15354c0613cba0e4b50e218228c71fa8926665" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.692Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": "8956ae6ba4a61c654d671ce6ea354ac3327c88d6fd2f32e94a595ced7846e7b4" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.704Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": "fdb406fd6ef9bae12b9029650e02d374aa31031a8dc29af43b762daa733d58b5" } } }, "b5fa886fabf17ebc48a6ca47fc6a8992d00da4b99785793543e0d888695a2688": { "259e7cbd211ad2a2649e5a8f0da300650ca51664a447e45289d100bfcdfc34d1": { "jp": { - "updatedAt": "2025-12-02T22:57:12.872Z" + "updatedAt": "2025-12-04T20:16:57.099Z", + "postProcessHash": "392ce7fc8f69851e0b74593ee40d704af892aba27e5cfefc5571bd48a1bab488" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.871Z" + "updatedAt": "2025-12-04T20:16:57.099Z", + "postProcessHash": "dc453455d77c5f0e8af320ec0d4550479d5e38bc253e197679facc75c6115e26" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.886Z" + "updatedAt": "2025-12-04T20:16:57.134Z", + "postProcessHash": "54c2bfdc07afd285cf23a8d05dc8cd24ef11df06708c5904cec766fde88a0025" } } }, "c1a7d6a20f956b50b1038cee0a820dd57189fc686d14660b023d1fc67ab2e1e9": { "dbc44ae26a03c1b8c3405262a6dd56a831c655163c2cd640d1e27879c8e4aead": { "jp": { - "updatedAt": "2025-12-02T22:57:12.870Z" + "updatedAt": "2025-12-04T20:16:57.145Z", + "postProcessHash": "c3b9a0ba0fb2dcb23f5a30f1b78ebbc26af8e4345e52147c7098140629174cc4" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.704Z" + "updatedAt": "2025-12-04T20:16:57.096Z", + "postProcessHash": 
"550637b0d2ab249b59a184a09d12bacfc2c5c02fd356ca1ab78d168cc55bd2cb" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.870Z" + "updatedAt": "2025-12-04T20:16:57.145Z", + "postProcessHash": "e05ebdd4142296471fe42fc06addf839728917f42325780ede43b7a45d41d6fe" } } }, "c369c0aa928f8264daf73b2cb8b5d20b0f760cd84c596ca63fb6e80bf182b3ac": { "081e5ae543866b5886ecf7decd8d4a80af7f854626b8b8136631cf04a6c7a9f8": { "jp": { - "updatedAt": "2025-12-02T22:57:12.880Z" + "updatedAt": "2025-12-04T20:16:57.127Z", + "postProcessHash": "970e83a15a6ea3eeacbb8e156d00618857f8824b5ffa4ade4a97f5b2c71dc576" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.875Z" + "updatedAt": "2025-12-04T20:16:57.117Z", + "postProcessHash": "8dd0323d81e8cff6d565d213896df667a459a80ca058470ccb4b010826d9b69e" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.876Z" + "updatedAt": "2025-12-04T20:16:57.117Z", + "postProcessHash": "27da05977f37b3b0642b34cd8d30808105faa5a644fc01060797966fc7be5568" } } }, "c6c433287e0c8c560d8ccfdb9dab1b764948e7aad08d8083787ea5a2ba4ffa25": { "3fa66f5214cb83c0d151b0adefad829fdec772c62100ad8be67b2c2d29a51136": { "jp": { - "updatedAt": "2025-12-02T22:57:12.892Z" + "updatedAt": "2025-12-04T20:16:57.143Z", + "postProcessHash": "8b1398508f10cdeb86c00cb818471954141f01a758d55a57cb17fa3082d4701b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.883Z" + "updatedAt": "2025-12-04T20:16:57.129Z", + "postProcessHash": "5f8b242bba96cd66db0532e53a0a9d959583f49b014ead8057bbbd3e92c54503" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.889Z" + "updatedAt": "2025-12-04T20:16:57.139Z", + "postProcessHash": "c70c64d0ae2c6767c915bfcb7ed4ff4051c915e8030a95fc8923003ad30b873a" } } }, "cee9dba64ce2a735e188505d45af71b74c5cd69ece6c6e7512832d84898157a2": { "ba71fb39dc5dad8ed0cc9385af226ee8ccfe87b891afdfae44d4b68d6a6800ce": { "jp": { - "updatedAt": "2025-12-02T22:57:12.877Z" + "updatedAt": "2025-12-04T20:16:57.121Z", + "postProcessHash": "f4ad7f6c868b6615ad22f62bc10bc830b03740782c2b55cdf4fe2f7a2c976fad" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:12.879Z" + "updatedAt": "2025-12-04T20:16:57.126Z", + "postProcessHash": "e2bf1e77d33b8cdc25c733b697ad11ebb0128882f0dbc3e434f8c4e8543d9f48" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.872Z" + "updatedAt": "2025-12-04T20:16:57.099Z", + "postProcessHash": "e804064451ea2242eecfc50247c86f54a7294c78da07731a6cfc77cce90cfdf2" } } }, "f03ea3286759068addb766b5b98317ea84803343105fd081b75322828bf9d201": { "8049194481456bef5558bf7d7d6cc3b522680055cc050dd06c21001990efaa95": { "jp": { - "updatedAt": "2025-12-02T22:57:44.694Z" + "updatedAt": "2025-12-04T20:16:57.092Z", + "postProcessHash": "578eaff567044b576bfd8e4bab251b77439e84c2038ec37a9981859489b68663" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.689Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": "7004c9e5a784d3881e88638381c33eac1f1e70b4a183c6efa6640fd1a8185e6c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.697Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "39936badc6a3c1a9d3a1c20b66495c2366523a180756d3890407f4c7c3d1aead" } } }, "f820ad66299aa0044ecdcc3298f5727903d52ea9ce19686054f70d9df707a8ec": { "1c6d8e151f574eb1c808a7932e470939d01ddf3adbd9a088012790d70765d510": { "jp": { - "updatedAt": "2025-12-02T22:57:44.695Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "e74f323e0326c245608db3f2977f269016ebc0ad867ad3a7fe42fb8081c1d74e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.696Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "639113873896772a7ce29e932d1c1706206f6f84d81c704584b8f6ce26658ce4" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.690Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": "08b2cccc240dff88ad8696e144fa64727998f5662fc9ed54b39af7050a7dc5c2" } } }, "fe8dc3e8a42089566aa7dbdc1b163232b94dab755cd8f716e5050a20b9da71be": { "8e5a24d4923c146d3ff29a36c8d08b801a6681568d413d11ee21ab25c5a588ff": { "jp": { - "updatedAt": "2025-12-02T22:57:44.695Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": 
"c38419d127f58261f3e08988e08753b83587585af775b381c711ed39c7ec2e0b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.691Z" + "updatedAt": "2025-12-04T20:16:57.091Z", + "postProcessHash": "adcdedc2aceff68a729528a8dee3fe66024070cd52de7cdade2f21d1c0e5e64c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.696Z" + "updatedAt": "2025-12-04T20:16:57.093Z", + "postProcessHash": "9b922dbb18d534fe3d92fe2d786df964c99b1189e9002fe919f5dadf38737d02" } } }, "1468ae293b5d12d0ded8668dbb023988cbdb44ac496923a1ef6653864352d921": { "99c4f7270820d4fdcb92c4d24d5487f3eaa377c46e721e913d45645dba75a74f": { "jp": { - "updatedAt": "2025-12-02T22:57:12.905Z" + "updatedAt": "2025-12-04T20:16:57.105Z", + "postProcessHash": "9fd2de53de1f48cd56068767fd1572f66212224c1bcdd71437981e589cda9b49" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.906Z" + "updatedAt": "2025-12-04T20:16:57.117Z", + "postProcessHash": "ecd43e08bc35775fbbd12e8ac1f372595ff680cd108a6b8d4b856e863d413004" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.906Z" + "updatedAt": "2025-12-04T20:16:57.111Z", + "postProcessHash": "56e0652c1236a636df1304de76ccd7d483b65ebd2d106a39a0a4db329b353f6c" } } }, "1c112808a954d78a709e3ae05703950bc5804f9e55e3e98efd93efb0f0f879e0": { "a0ef058ccb99a1b138a4a98ffca0037cb2b496f227c55108b8beef337ba82d66": { "jp": { - "updatedAt": "2025-12-02T22:57:12.889Z" + "updatedAt": "2025-12-04T20:16:57.139Z", + "postProcessHash": "c51dade10787b3550b39945c4f896b99e4c967418b320eb077d13d6fff71abd5" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.880Z" + "updatedAt": "2025-12-04T20:16:57.127Z", + "postProcessHash": "c7f7865cf2cf2b97d1528d8926991bcbd00f38929d3bb038cf583d204a14ff4f" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.879Z" + "updatedAt": "2025-12-04T20:16:57.123Z", + "postProcessHash": "ae82e92ca42886e6eabe46821c07a5b219b17885d6847647373603c310bbc5ca" } } }, "2317505b4b1b1557458b6ec9caf09937e43cf133543d04e2637e9cd6e0693bc2": { "8b6d58a1ca1a770a40180a524a20350aef1a747a1a0f59ef6bd9eb53764a7d1b": { "jp": { - "updatedAt": 
"2025-12-02T22:57:12.907Z" + "updatedAt": "2025-12-04T20:16:57.127Z", + "postProcessHash": "d8ab4ecb6fe54078f4763fd8850ec52cde8160317f417733f818d874a548596a" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.904Z" + "updatedAt": "2025-12-04T20:16:57.104Z", + "postProcessHash": "8e983f50852b2821d89b3751447864cdc10162d51b470ba644d5fbb021696fb7" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.906Z" + "updatedAt": "2025-12-04T20:16:57.126Z", + "postProcessHash": "90b00cb44383bba4ee1315d3cac296b62540c822afde78e2dcb60be44fdd5c74" } } }, @@ -1019,702 +1220,864 @@ }, "8f390179712abdfde1d16a03f079c6ebbbd781d3f020e59b2ef3af3be4bb0205": { "ru": { - "updatedAt": "2025-12-02T22:57:53.407Z" + "updatedAt": "2025-12-04T20:16:57.149Z", + "postProcessHash": "2bbd5586bf08f4e6b473d782963fac871a1e0c2d0b6161010580251d99a9a8ac" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.408Z" + "updatedAt": "2025-12-04T20:16:57.149Z", + "postProcessHash": "c6fa718553de9e42f3e784658fd2f1e5c3b6a985b20706164fbdf97fbe529803" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.408Z" + "updatedAt": "2025-12-04T20:16:57.149Z", + "postProcessHash": "9bb9b50a947f9ee702259aa8b717616c65ca7cf7fa67ed2486d636ff116aa632" } } }, "3371d95238c92603c162eaed8138395ca44e47b22ad969c5099f7e599ec16c22": { "2a161bba41a266518443feea5a759cf299dbc3fdeb7b00fd74b546abae68dff0": { "jp": { - "updatedAt": "2025-12-02T22:57:12.891Z" + "updatedAt": "2025-12-04T20:16:57.141Z", + "postProcessHash": "1f5febc845026f669bdc79503175f4992a144a463c0d30b4884c3d5c738a6d40" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.892Z" + "updatedAt": "2025-12-04T20:16:57.143Z", + "postProcessHash": "60699833b3145a56fed180989c614e26463ede4b3ada0b655b1a3ee5d5333759" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.890Z" + "updatedAt": "2025-12-04T20:16:57.140Z", + "postProcessHash": "9c615fbf99635f39ce649ca26fd069f9a53ac8b781c8a5fc59a8f62ce58af6d9" } } }, "34148aef91a7ca42367acb2003b2045d6893d713fd20c6ef4a4a8fe6b505125c": { 
"0df15707cc19ce74ec40c00d884f8f77eb33786d03f5831e131804575fce02b5": { "jp": { - "updatedAt": "2025-12-02T22:57:12.898Z" + "updatedAt": "2025-12-04T20:16:57.147Z", + "postProcessHash": "8dcea106834ae5cf1be4b98a7e844b865a4492e275154f094062238b61149124" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.897Z" + "updatedAt": "2025-12-04T20:16:57.146Z", + "postProcessHash": "f14476558d7aca0830f51e95a7a3b85046650665241343955715c76ce1950c4f" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.897Z" + "updatedAt": "2025-12-04T20:16:57.147Z", + "postProcessHash": "1daa36adc4b8391fa9e918440cab755f22f2e377e79070a0a0795f9f51bea6ba" } } }, "5c385581f9c65edaaae75a74b6646a142de547cd3f20a408953b75ba33586e2c": { "8dc4eb869f4a048ed04d5883545cce095cb2df351eba54b486a29c615fe29cb3": { "jp": { - "updatedAt": "2025-12-02T22:57:12.886Z" + "updatedAt": "2025-12-04T20:16:57.134Z", + "postProcessHash": "868bf288f73fd9c330c2a129e0a49ec0e123f0e87d78e00163b314ea885cfcd0" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.873Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": "a70feb21a77120bff840967047fa293b23977599dfb58c8c950c53b6ed941394" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.885Z" + "updatedAt": "2025-12-04T20:16:57.134Z", + "postProcessHash": "400634f79b530331c42717f4122c416a77541de2ec36b6dad36b31594278fd1e" } } }, "650a560be33079fccc4800a89f7ceabf7333009b1cfb0105d0e77b22a9afd9c8": { "609636eeb62cf3a4bd5ce284becb34bda3b97c2382d2dfd89320e13d69bf22d7": { "jp": { - "updatedAt": "2025-12-02T22:57:12.899Z" + "updatedAt": "2025-12-04T20:16:57.099Z", + "postProcessHash": "11f4d2ca99c385d901022447ea87b600bf427da630599ef9fa49c58fde29ea7d" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.395Z" + "updatedAt": "2025-12-04T20:16:57.147Z", + "postProcessHash": "f372f100bd225ca674d6ed59531e82c4e6d86bcefe83240f4ca1014ab6651f7c" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.909Z" + "updatedAt": "2025-12-04T20:16:57.145Z", + "postProcessHash": 
"cba45047f1b75cb54924f441db6c4057b9c440dadfcaa13e37bf7e6099b90009" } } }, "66edaa5c8dc32a1f831593b8a49a8f90c9de66304dbe8e78969217a73f2200c0": { "3a20ac6682c2e8633f0f56d7c381698dc85b1777367c924c9a05d2c329c4fda0": { "jp": { - "updatedAt": "2025-12-02T22:57:12.888Z" + "updatedAt": "2025-12-04T20:16:57.148Z", + "postProcessHash": "51b6904d82e3c6c0d79453bc5131557272656270f4b3adcbf25f06431f4e7393" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.882Z" + "updatedAt": "2025-12-04T20:16:57.128Z", + "postProcessHash": "42b4ded2d79f28d4d60b1cf1abfd683ff7afd47089777aeea09279975a3ff67d" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.886Z" + "updatedAt": "2025-12-04T20:16:57.134Z", + "postProcessHash": "eee7ebeabe5d1137c48637ce61c45b205c447e17786bfd0ad325c70f01a414f9" } } }, "67113cbc50d80beb99c25a836c1c97bf312030d10537561666f2d9afcf9f3145": { "bc5d1e200e64a767369cc0ffad68cd1dc62da9a6230b0c00c0c10c90dcbef298": { "jp": { - "updatedAt": "2025-12-02T22:57:12.883Z" + "updatedAt": "2025-12-04T20:16:57.130Z", + "postProcessHash": "f3551ef18bf29175e59380efe0c4b13dfaac38578d2b5462e1f3356ba5d373ca" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.885Z" + "updatedAt": "2025-12-04T20:16:57.133Z", + "postProcessHash": "11f56bfab3f8737fd34feb5c7ecf1ac75a3851e2558892ccf036bddf5caf60d4" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.882Z" + "updatedAt": "2025-12-04T20:16:57.129Z", + "postProcessHash": "ff5c17aa24858202338ab317fe23e766b0805af1a9187dfd9c5022fd143d41cb" } } }, "6986025ddfdb6e69c9d68bae98e09599b7bd5252a433fe1c14839522e57376a7": { "6a07a797478a7c19aa592d19f3fd5211e2bae00db7fd3cef33b175016a1b1b29": { "jp": { - "updatedAt": "2025-12-02T22:57:12.887Z" + "updatedAt": "2025-12-04T20:16:57.136Z", + "postProcessHash": "1016da6c8c867056e7d6033693e4e4fa89b90f0d4ffa1ff47ef25c442a9a23f6" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.889Z" + "updatedAt": "2025-12-04T20:16:57.139Z", + "postProcessHash": "d5ac21b59e7710e775098bf87cea59702b80bb383f0c43eadb507cdb37016a44" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:12.888Z" + "updatedAt": "2025-12-04T20:16:57.138Z", + "postProcessHash": "6c0ccf9d7c01e430b2b8b648bd66962bd5b33a4e91f034124a0534fea515b7f1" } } }, "8e1acaa9709e95b6354d4bb719b069fee08bc3794641756333aba5003eb9475d": { "e8f0f6277f744012426a53a6027257e33c4b16cb2ca45dda3d90d4b73b3d4c5b": { "jp": { - "updatedAt": "2025-12-02T22:57:12.891Z" + "updatedAt": "2025-12-04T20:16:57.142Z", + "postProcessHash": "737751524ffe0c92aece74373fbcc59c2ce15b687b95a9a41b0fb434c8c49cf6" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.887Z" + "updatedAt": "2025-12-04T20:16:57.137Z", + "postProcessHash": "e5a096760ee4b7b14e4a8e4ed8771753e165493fbfd45971d03ec20331c88571" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.884Z" + "updatedAt": "2025-12-04T20:16:57.130Z", + "postProcessHash": "b16ea9375be145c8ca13f89ed03601e8dbfdd112d588e5b566432989f3dbc6e7" } } }, "a5aac8ce0e37bc2df7af5f69708607c2c9b46cbe068e3172847b3191394faffe": { "38d2828e9bd727652c3233af76ea089e954aba2db55328f8cf1f43ca609f19ff": { "jp": { - "updatedAt": "2025-12-02T22:57:12.904Z" + "updatedAt": "2025-12-04T20:16:57.101Z", + "postProcessHash": "b1a5911a91703322d9d3b5a72b5a833d7ee1a016b989b4229fa14ff34190bd6a" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.905Z" + "updatedAt": "2025-12-04T20:16:57.109Z", + "postProcessHash": "9670319b7ddd379f085337a31ad61066cdc9669060a88e89f573d71eb30fc4a5" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.905Z" + "updatedAt": "2025-12-04T20:16:57.105Z", + "postProcessHash": "f7d78abc526a955a9cb24a12823ddab302b52b101c47f6ca76ac2c2175a3f106" } } }, "af5c9aba153f2323766f5c2833f6dfb1a669b295e319d579f4546ea448e8d7e7": { "0d9634f2d0d51799480d3e5d225d816eb09fdf75e544bf3b04b2fe1385fb9619": { "zh": { - "updatedAt": "2025-12-02T22:57:53.411Z" + "updatedAt": "2025-12-04T20:16:57.151Z", + "postProcessHash": "549861229c41e6730d3fe68f0ca940375b7924a113603ae96cb24fb1ee35219c" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.410Z" + "updatedAt": "2025-12-04T20:16:57.151Z", + "postProcessHash": 
"dca8fc7c0460e90fb3d623a878e1afc9d42a86a4729c272fa9444d7ab400b85e" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.409Z" + "updatedAt": "2025-12-04T20:16:57.150Z", + "postProcessHash": "bb8b20123e0604ec76131e06e6087485fe385a69de6f845bf142f2fb2ddf3144" } } }, "b01e9e50dff0d52b1c86ddcce64d477f77a182599c27ebb6752763a0c4cf1884": { "4bf15471d437e48ecaf706869ad9127730c8b915f392e00ca4b38372ff596b01": { "jp": { - "updatedAt": "2025-12-02T22:57:12.895Z" + "updatedAt": "2025-12-04T20:16:57.145Z", + "postProcessHash": "bb540971c451f21cc686edcfe417cb7661f896b37c7459d1fbf6317c45bf7743" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.893Z" + "updatedAt": "2025-12-04T20:16:57.144Z", + "postProcessHash": "fb4f3b72b7be74c0f52a746cb58187909cdfaab70884701032186d2c635fccce" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.894Z" + "updatedAt": "2025-12-04T20:16:57.144Z", + "postProcessHash": "1ae46f504a1ded1d92eddda10817313a8067eae62c3d397374474628d6745048" } } }, "b721aaf83ea7701a82587311ffcd215fa0fddd0ac9d459193fd26188e0680183": { "906c00a6ef80e7715d21aae24374b2b2d044fcdc7b9d5c6c2c7341ecd0753821": { "jp": { - "updatedAt": "2025-12-02T22:57:12.872Z" + "updatedAt": "2025-12-04T20:16:57.099Z", + "postProcessHash": "f8fccdad4355400866118dde3418a4741e52620861b6c3d527f60d3ba9865414" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.877Z" + "updatedAt": "2025-12-04T20:16:57.121Z", + "postProcessHash": "f0e12311b51ca73e132406cdb80aace0fde45bfb42f8c3c1d3d7b4f193ecbb8c" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.876Z" + "updatedAt": "2025-12-04T20:16:57.121Z", + "postProcessHash": "6a5c9221d66d6a1175ddaa96d31ee0ce0bc070aefb56cd177cbe7eba92614895" } } }, "de08ffcb57e92eb891276970020672bdbe190e2ad13861a7a5a14fe04f7eff24": { "b11091547782b23a3e69aa42aa789855dc525b51b00a033b1cffebdd4f69711f": { "jp": { - "updatedAt": "2025-12-02T22:57:12.908Z" + "updatedAt": "2025-12-04T20:16:57.144Z", + "postProcessHash": "3fb3e911afa53e08fa65144da6486ede34cbe6ba503f6c7faa7912d44381f4a0" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:12.907Z" + "updatedAt": "2025-12-04T20:16:57.141Z", + "postProcessHash": "2ad58de060ac4af2de3e37cc4916ae4f12bc7e22831b55bdc48e1efd202d18c9" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.907Z" + "updatedAt": "2025-12-04T20:16:57.137Z", + "postProcessHash": "de158925d12dde6f2142aa7f357c2f179cc9dd982c3280274b0ae26b204d4461" } } }, "ea2d038b6989e3982d873c583fb3c15212b691b2e747de62d4d28c3e4b11a23d": { "68f32501aba4af446aa28658a6859e797a66b66f975249f4a21ec435c8e2e471": { "jp": { - "updatedAt": "2025-12-02T22:57:12.910Z" + "updatedAt": "2025-12-04T20:16:57.146Z", + "postProcessHash": "f9444e37956cf6f3b52a570c49e5e0d16ff927f05eaede0c6d295c4ce71a6196" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.909Z" + "updatedAt": "2025-12-04T20:16:57.145Z", + "postProcessHash": "b69f6a04ab63dbd1a0f153e1bfc3fe578ef19433882d01c18525bcdc1b6c2cee" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.908Z" + "updatedAt": "2025-12-04T20:16:57.142Z", + "postProcessHash": "fb49844c0784a68c306942bcb10fac5ce563ed032f48dc8fe2c849bf2267cbf4" } } }, "f56b183aebaa9c102a1630d41b724bdd0ef7984c2f5be9f15f51bb83994e0265": { "0e4b6a498cb6259a81c3b89b57fc27d109c9f7c4517473e5f6371c0a4d14e7e7": { "jp": { - "updatedAt": "2025-12-02T22:57:53.415Z" + "updatedAt": "2025-12-04T20:16:57.153Z", + "postProcessHash": "c176556ad4a151b62d018d6d5b312029f1f14f3b7237a0c7d140505d6b589d51" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.415Z" + "updatedAt": "2025-12-04T20:16:57.153Z", + "postProcessHash": "6112a4a4ba935f968ddc8ac9a885a151927d834b0f0eb5abd09dfb1dd582120f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.415Z" + "updatedAt": "2025-12-04T20:16:57.153Z", + "postProcessHash": "403a8c7ac0aec29e2b71d08436d381b04fb84264393e14c673169ae8376fcff8" } } }, "f80606d0e748135944fda4f0b2bd5df8b58807fb2f4c06c85b06e12fca82e935": { "2aa54fbd8a8eef1da3872abeaa7ad8858d0e7a55684ee9afd514540bcb055f29": { "jp": { - "updatedAt": "2025-12-02T22:57:12.896Z" + "updatedAt": "2025-12-04T20:16:57.146Z", + "postProcessHash": 
"ec7459d80e21533182e82ffc942e9b571fb02a8ffb0d538fd141404ff1048c53" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.895Z" + "updatedAt": "2025-12-04T20:16:57.146Z", + "postProcessHash": "0604b47020a2e62eddd30dde224cee6d77bb6c94335c42fb162b0c622ed8e876" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.896Z" + "updatedAt": "2025-12-04T20:16:57.146Z", + "postProcessHash": "653db3a67de9f963efeb42e7abd4dcb57999a2703af29fb865ef8c0fa6206a69" } } }, "f90130006ab67f0f1f9729094d7e71d602684a6c03306792b40387ebeda24cbd": { "044f9d08748a2a48a556c183ed0bada874cc4ce848cad6b1bf87fba782fe7d9b": { "jp": { - "updatedAt": "2025-12-02T22:57:53.410Z" + "updatedAt": "2025-12-04T20:16:57.151Z", + "postProcessHash": "aa01c7221a2e95022b44dbd371f21586d23683555d375749c61daa4ff195acdd" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.408Z" + "updatedAt": "2025-12-04T20:16:57.149Z", + "postProcessHash": "4d8f95b6e449ec2a5b0bcb452a03d0fd3ecb4b4db93079982ee11df37415efa6" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.409Z" + "updatedAt": "2025-12-04T20:16:57.150Z", + "postProcessHash": "422b789245f141eb48d14e2d8bb7c081181b5229e0661c1abb2ea2e872dda4e4" } } }, "fff1cff77ce23873924a1766144be6a0a4bc145a4beaf1c7902459c008cbd536": { "6b16dc8b034758efca2a7dec7fe695e186e4ef2f750e4a6ba872d28a906012b3": { "jp": { - "updatedAt": "2025-12-02T22:57:12.880Z" + "updatedAt": "2025-12-04T20:16:57.127Z", + "postProcessHash": "f8d1686e959624443c8a39ba2fc187294556dd22492676745e46bf6df0acf308" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.881Z" + "updatedAt": "2025-12-04T20:16:57.128Z", + "postProcessHash": "948d2775b001c3b85aaeef3e6faa353c8fb892076ad921a48b83ca302ae5200f" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.881Z" + "updatedAt": "2025-12-04T20:16:57.127Z", + "postProcessHash": "46efaaae764138b2b63c17ef9aa23157386b320c7f70b1d24e1a5d1d750a1086" } } }, "0007ef5eb0fc8520aeab373a05b58e2db16ece5be3074e20646fd984e7bb2153": { "534ae688e369810666e881d18767610a7df7671083edd5debe450d3827e074c5": { "jp": { - "updatedAt": 
"2025-12-02T22:57:44.719Z" + "updatedAt": "2025-12-04T20:16:57.168Z", + "postProcessHash": "92f4b19c5a92f7d812366ce8d236584202ca5f735d0e3ebfe1a15efe403b315d" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.428Z" + "updatedAt": "2025-12-04T20:16:57.148Z", + "postProcessHash": "6fd15ebfee3832acf1faaa51a5861d21ac0497c5637d1f6d767c7af89b0e01fa" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.721Z" + "updatedAt": "2025-12-04T20:16:57.169Z", + "postProcessHash": "130d9480e1f47edc73846eb99b8c5acc8dbf423dbb4957d2cc591ef6ba100ca8" } } }, "05fa5078290d9319b91e520b8d624cd018e97d963be0d0e1cd22ca7e37e899e9": { "4a4c4d4b7b75c17db47caabac407cb6678d38f795ad11e688adfe6762b928d79": { "jp": { - "updatedAt": "2025-12-02T22:57:53.413Z" + "updatedAt": "2025-12-04T20:16:57.152Z", + "postProcessHash": "76019731d4cdd4d8176d273df256b186999d82ad183ea905a7e5deb828f8e460" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.413Z" + "updatedAt": "2025-12-04T20:16:57.152Z", + "postProcessHash": "09269f2c2f312ba4210b07157e3dcdce2c118162e6309e9534f2345233c611f7" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.412Z" + "updatedAt": "2025-12-04T20:16:57.152Z", + "postProcessHash": "1f04e24e98dcb482fbe1af59f1dcbe70346ba26e7e6c21267090188ee0ccf730" } } }, "16a9baec9aea4c6dd78355c05288783f630be08b0af1a257fb205b45c7adc066": { "b1a72f898456e3c08b49f6f0e73a4fc33fa3bad39fab513c1db89294a3fb923a": { "jp": { - "updatedAt": "2025-12-02T22:57:44.719Z" + "updatedAt": "2025-12-04T20:16:57.168Z", + "postProcessHash": "3bb1ce0ee462e950fdad9930985525b75615acf2eb9db4b73fdbe415fd337263" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.708Z" + "updatedAt": "2025-12-04T20:16:57.166Z", + "postProcessHash": "bde953bae180e37cd3d4591a46d91dcdf6e5667e1089333f6adbc59e98f8f9cc" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.709Z" + "updatedAt": "2025-12-04T20:16:57.167Z", + "postProcessHash": "2e8e1c1206ef52519a54a87a4e3d8fc21493f4ec8cb1d80d5927cecd8af8e2b9" } } }, "24203d0280dc684588776442ac330a354e834de5789e13b7f7068042627350dc": { 
"19fc846b48f319f018e4f670ace8976874e318a091bb09940eed158a6c8e8569": { "ru": { - "updatedAt": "2025-12-02T22:57:12.937Z" + "updatedAt": "2025-12-04T20:16:57.185Z", + "postProcessHash": "62e7436a79c3071a1a2fe80e3460731e603184e17db6ed0e982a747f053e7906" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.938Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "60feead5f1e26c8c8724eab4ddc10e80d2d9c4ce3b22a80ebee6e9960e933cf7" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.925Z" + "updatedAt": "2025-12-04T20:16:57.183Z", + "postProcessHash": "91ade27d2cd9d1433a867dabfde1a17107ef9c1be2f98f5f64db35474c18cfa1" } } }, "2fa693bc37b3a10adc8d79217e3b09168dc83b1d1e169414c8ff196815fec6f9": { "9e33b9e6995d58fab1e0c61f6a5436f2184d7c49af88577359d93f178ead07d6": { "jp": { - "updatedAt": "2025-12-02T22:57:12.934Z" + "updatedAt": "2025-12-04T20:16:57.184Z", + "postProcessHash": "0969c06e893321491d7e9431c66bba91e0f62a041106124f0c0c44592bd7fc7b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.940Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "415b8e2ea42b190c995781c0858c50bb6e62a65c85f9b9ff4fa2b4d29cd856ad" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.946Z" + "updatedAt": "2025-12-04T20:16:57.188Z", + "postProcessHash": "f09f13324ecf40b49df15222b42ee46aab32ac3679e1684085b90177b511bbcb" } } }, "437aab42be9088c95b44d049c562b541333adb34c7167b0341f25eeb6f1da633": { "673a9dec5d05173b117bf71c194bcbd9250ea1e8e6162c76a5ac07819b4a0314": { "jp": { - "updatedAt": "2025-12-02T22:57:53.413Z" + "updatedAt": "2025-12-04T20:16:57.152Z", + "postProcessHash": "9f19c04ba7523eae636b7bab379863d1dd053e51947e12c386129879bbf905af" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.414Z" + "updatedAt": "2025-12-04T20:16:57.152Z", + "postProcessHash": "70af063c3d3a53cc274f22beca4893c3a72050e35dd066fc5a745e4ae55b40ff" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.414Z" + "updatedAt": "2025-12-04T20:16:57.152Z", + "postProcessHash": 
"83112c13fe296a83b78d78aff6b9a79955661453c41eaa10090581928ffa6ae8" } } }, "4d14e175d2ad5b7f1f59197782ca672764811be0a7694da0d93c40a71707c218": { "2f6f3975ac07a17d2e6c12809f029b5fcecdc238f96cab5409c924b908db77fe": { "jp": { - "updatedAt": "2025-12-02T22:57:44.709Z" + "updatedAt": "2025-12-04T20:16:57.167Z", + "postProcessHash": "f09bc8ee67d610de44e74ae0a1aa2eadd45ccfb7c4d7a83e2728cdacba96c989" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.710Z" + "updatedAt": "2025-12-04T20:16:57.168Z", + "postProcessHash": "587029793f5bbc1b9f6d5e77f18133301b36b71b4474e3ad55f0faf2464d09ad" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.710Z" + "updatedAt": "2025-12-04T20:16:57.168Z", + "postProcessHash": "99b0848f9c83e4eb4da4a2a4287e691cc501b449988eb2df9cbacd35d8eed00a" } } }, "57e4e9dfa0451001fd8054b08c62e1b7e7899bf69d75440b300be4c4a727b99e": { "37f3dda8e8d9a3dd2ccbec3bdd564d2de4200f5a0108f14e3cb3cbe1f05fbe96": { "jp": { - "updatedAt": "2025-12-02T22:57:44.720Z" + "updatedAt": "2025-12-04T20:16:57.168Z", + "postProcessHash": "0fbc96092fc9ab88199e3b6799b6aa4c702e8ffabd509f5e3755eb59225f22f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.709Z" + "updatedAt": "2025-12-04T20:16:57.167Z", + "postProcessHash": "d229910f7b06b1d993409bbffb9597ca86056d34e0395b96f32a4787ea302346" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.720Z" + "updatedAt": "2025-12-04T20:16:57.168Z", + "postProcessHash": "7d26332b638cc80811279dfb302004d5671ab8b8f4b0118eb4946fba460dc317" } } }, "60e6b2c95bf2975a8ad16addf92ca2f2b8ef9b6f0267eedb1b1609cf83bd7bf0": { "8b24d0eebf933022f5b7646dbd76005a200ed0eb134c91ef2ce37429b92f838e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.742Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "7bee01b2ae04b2649b1d5ca2db06a8faf376e5b1c9a3a0086891396807ef77c6" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.743Z" + "updatedAt": "2025-12-04T20:16:57.188Z", + "postProcessHash": "8a95e5de4f61495a9fb36a7c0e0ae6b4b5bc067c650d0c6650866312184696f1" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.742Z" + "updatedAt": "2025-12-04T20:16:57.169Z", + "postProcessHash": "bc0e58cc0fddf4f5be9404da3e736004a2e1bd77f3db4de71e40108505bc0128" } } }, "666059b00db591c1a56ce4963af6165fb3c9b12689bc7bd2d002ad9f8261acdb": { "60035e65e48fd5fdb3a14661c3ac4811bb8496f2b211e4fe284e3d6b420921c0": { "jp": { - "updatedAt": "2025-12-02T22:57:12.938Z" + "updatedAt": "2025-12-04T20:16:57.185Z", + "postProcessHash": "09fe5213c66c05e3a526de2504115c5d6adb8a70827ec04f061bdf06f3139c91" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.943Z" + "updatedAt": "2025-12-04T20:16:57.187Z", + "postProcessHash": "cf25e8f2a9c55dca6797ee7b87363615179048099e8738c9c943dfcbc8a65871" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.944Z" + "updatedAt": "2025-12-04T20:16:57.187Z", + "postProcessHash": "d45205a9ded23299dba0c78af8974e0f99a5a3c145c4ea2779b9e17b437501ce" } } }, "7431d11049418c30c908694305424392c5e608ecfdf0bd5bb5e82ff877dd01f3": { "3c1e299227977efd8ca6ccf93ac2673c11fbfdfe441a0d0784400200278822ac": { "jp": { - "updatedAt": "2025-12-02T22:57:44.741Z" + "updatedAt": "2025-12-04T20:16:57.167Z", + "postProcessHash": "f860074eadece8a0f5607c7098565c3d20757a4dfe4c47004f946c59b2a58004" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.741Z" + "updatedAt": "2025-12-04T20:16:57.167Z", + "postProcessHash": "ad1fae46f1320142cc7b57cbc6660819b52d09b873350c8ee5b302a2f812fae1" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.741Z" + "updatedAt": "2025-12-04T20:16:57.166Z", + "postProcessHash": "5551242ba57397f0238cbbb0a8214d2afb215b7363732b62b562e8c8ba5c451b" } } }, "7cad50f4cd617547f24613bf26b7d92863268b13a23a167f7afafe1105d9b80d": { "fc4b5c37a2e9cd403b127f9b0e95af107c0815b1c7bb98e1eebae04bc96ad554": { "jp": { - "updatedAt": "2025-12-02T22:57:44.707Z" + "updatedAt": "2025-12-04T20:16:57.166Z", + "postProcessHash": "3b2d84b2994f0e8881586477b49df971af42a2312f2dc7dd43f4aed1823a686b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.706Z" + "updatedAt": "2025-12-04T20:16:57.165Z", + "postProcessHash": 
"55baff9fa41d09a8d92951b2f5740275e0d19de37eb7d2487ea40ef7d3f1c882" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.707Z" + "updatedAt": "2025-12-04T20:16:57.166Z", + "postProcessHash": "819bd8a5bfc405a8039cebbf4352ef51f9377a65c02787a374709db39af602b1" } } }, "8b1e7b5824a25229b63b6cae491572266d76a2f3619bbb37de99f10f9cb281d7": { "b39b1a9501a0d4efe97c7c462447f2f7f762c085e32781115e4e01abed9470bf": { "jp": { - "updatedAt": "2025-12-02T22:57:44.721Z" + "updatedAt": "2025-12-04T20:16:57.169Z", + "postProcessHash": "c3fa1e169e46e4b9e09488bf9ad56b86359a945dfba10718ce098c031a4d3bba" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.721Z" + "updatedAt": "2025-12-04T20:16:57.169Z", + "postProcessHash": "f67b6bf5423d3540261403b47f2a0c198dd2db8a2daba4052a126b1d21ec0f93" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.720Z" + "updatedAt": "2025-12-04T20:16:57.169Z", + "postProcessHash": "9aa180e1301e4a77adf31843f44f6ff7fab1804d67eca7ae5d6fb7a60e1f4799" } } }, "a3a1fbd31e0aaa187d657bd8045fa61bc9f31995880bcb5d5758a3e184f5ecb5": { "28ea6e40b848e91414d2d23698b6689414783f37c844f3f15e49942c2f8d0f73": { "jp": { - "updatedAt": "2025-12-02T22:57:53.412Z" + "updatedAt": "2025-12-04T20:16:57.152Z", + "postProcessHash": "50fccf4cb7e4166698e0f96044ce330a27c525dc568383947bd4f395409d4bb2" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.411Z" + "updatedAt": "2025-12-04T20:16:57.151Z", + "postProcessHash": "393a15f3ce1e8c8b9c4d520c9eca0d069fd7c82418b1690d20bb8eb62c2bdbc4" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.412Z" + "updatedAt": "2025-12-04T20:16:57.151Z", + "postProcessHash": "86599b5deff7e14fa07312a47fc6bd5b2be7a5ac1afdc9297f52afa1bffb73f1" } } }, "adb57f6c330a361767cc8e018fdeac391e70be9310b007ddc867750c55383217": { "6bffe63c913aa6f222b1d3f7660678d89871583dfc5b85a5472e73ccd48f0852": { "jp": { - "updatedAt": "2025-12-02T22:57:12.945Z" + "updatedAt": "2025-12-04T20:16:57.188Z", + "postProcessHash": "901b6b6881f9ff3a48e1dfc492d04fc6a98b99b46dc2d5f1d40ed10f3ebcec79" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:12.938Z" + "updatedAt": "2025-12-04T20:16:57.185Z", + "postProcessHash": "1915bdf6c4283c0c5799311e8a2d790c3fbeca8e22a5d828825f2997e13f13ce" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.941Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "1ba5ec36b27c141d0d9b6b6a51f2fd6ac1cc88d2a2ad8a4cff44ea475f56e424" } } }, "b0f947d3a4638d92601c813f2511beb5008821e82e066594946d2230ae518888": { "e2d7964de87a21a4f56589f9ef750a5f70e553620f06ce8ed541c52c8e2fd182": { "jp": { - "updatedAt": "2025-12-02T22:57:44.707Z" + "updatedAt": "2025-12-04T20:16:57.166Z", + "postProcessHash": "e028264c61e17aafe10d88b807c255caafcbb7d27dfcf5804bf521d513d5f2ad" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.708Z" + "updatedAt": "2025-12-04T20:16:57.167Z", + "postProcessHash": "b6f41d70ed033c489d3e5f2cf51c17ee5a1b0d057600222077ae426956aad6e9" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.710Z" + "updatedAt": "2025-12-04T20:16:57.168Z", + "postProcessHash": "d0d8586c653105f5a9de55f434817d0a00c1fc8aa2a06cbde2544cb65810da09" } } }, "b581e8a0971d1d07fd92c09611201fbc0ec1f2ad10e9a9e9462297b6dbe79f67": { "49ae124d0469e31fa1e3318ed468a02b4e75af99b0ad807441a4e18f29afb644": { "jp": { - "updatedAt": "2025-12-02T22:57:44.706Z" + "updatedAt": "2025-12-04T20:16:57.166Z", + "postProcessHash": "b885b782a21fd53e3fd121f5fed4bdb1cccd227b4aa46814c1977053006e2207" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.708Z" + "updatedAt": "2025-12-04T20:16:57.166Z", + "postProcessHash": "3b9337910f6e934135a223ccda9f7a366f2351e0ea2b79f52f3a96b5405afcc3" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.429Z" + "updatedAt": "2025-12-04T20:16:57.150Z", + "postProcessHash": "d3e4f0a7ae8538b6020cd27f4c69af03c8858ced178dc40a91dd945e80bebbfc" } } }, "c5ec668978bc00da55afaed8bf387ab8e40f7e8cc8a5c3c85b6528469658dbac": { "2760c235bba120190e9792afc2791f4b14241f22634e6dcfd806b0f0c8a2f30f": { "jp": { - "updatedAt": "2025-12-02T22:57:53.411Z" + "updatedAt": "2025-12-04T20:16:57.151Z", + "postProcessHash": 
"c7eaa88409c81ed9f3d7579a15e1dd29085d7b536e90e06f2d50fcb29fb822bc" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.409Z" + "updatedAt": "2025-12-04T20:16:57.150Z", + "postProcessHash": "1859ee75a3a7f21a9392f99b08b2e1dbf3cd7dd137d1bd122bc30eb75d473966" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.410Z" + "updatedAt": "2025-12-04T20:16:57.150Z", + "postProcessHash": "2726def088c7ec8bc0954a9bff36f0fa7b2a5f3e20f943bb84d0bbb5b655d1dd" } } }, "d0878e46ea2d9748ef2ef35fa15820d74801d2e823a8c466520717410dca0e30": { "34f3e7285aa8956a7287c85c05fbbc6f82a3d73d51e58594a18dd7c4e673674b": { "jp": { - "updatedAt": "2025-12-02T22:57:12.945Z" + "updatedAt": "2025-12-04T20:16:57.188Z", + "postProcessHash": "a6480cafe65e8930e94181be030e63e4cb802780c9edd93fccb9fe016f17f546" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.939Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "b12d883ecb54de26c9abb16b819b94f396c7299f1a256324b569b687c8da5e59" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.742Z" + "updatedAt": "2025-12-04T20:16:57.187Z", + "postProcessHash": "6d18e38ca4ab2d081b2d36768daf712d7d9c3010dbee8b8ca6ce2228e6447e3b" } } }, "d84d842f939c18587480808dae2c357d93b19f0503165ffbbb5df5723ed8d18f": { "78c6fc1825dfef395f2920f37ae3b83e7a55e08e381e14e11ade4b0633972ca7": { "jp": { - "updatedAt": "2025-12-02T22:57:44.711Z" + "updatedAt": "2025-12-04T20:16:57.168Z", + "postProcessHash": "8f5b076307237de05c86d294a405839a08bfd00c38d41361063fe1e5a1afeb58" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.429Z" + "updatedAt": "2025-12-04T20:16:57.150Z", + "postProcessHash": "8304f8302bb20922c073062d269dcb0acf4ed50c8d6e57f87821b87e30800531" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.706Z" + "updatedAt": "2025-12-04T20:16:57.165Z", + "postProcessHash": "c3f6a64dcbfef58603a7a2a23e40c5c8cf897992e05615d7d7e7f6a00b384927" } } }, "e16fa51bab7c52534a6634130d4aa9d5f4eaf5a9199be40465cc25c632091ca6": { "9a45c83991713cae83ff2b9ff52e3fac9bc7cf89dc4ce06aee3062459ba62f83": { "jp": { - "updatedAt": 
"2025-12-02T22:57:12.939Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "4171eae06b1f4fa147b28dac4ef2ccc8f890f5e15c7e7ad69818ea19bcb03772" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.934Z" + "updatedAt": "2025-12-04T20:16:57.184Z", + "postProcessHash": "ab858f1fa1e875663cc264595b2a77bcf7c1f7d69173d25b382bedefae1fd423" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.935Z" + "updatedAt": "2025-12-04T20:16:57.184Z", + "postProcessHash": "3edc9571c17b0ed268d1fa925e5646d9d3084259c672a71304844c5ae1ea70dd" } } }, "00385c907824ee916e1d2ab90ec1343952049a30fbb273cd705e54e19e5e54dd": { "a1e228059158c6496d116286e96a0ffb78b193d02679d41dffd889c4ae3f4ae5": { "jp": { - "updatedAt": "2025-12-02T22:57:12.947Z" + "updatedAt": "2025-12-04T20:16:57.188Z", + "postProcessHash": "547d41151a1591f7eb4ceb1bd90a8af3ead21ce0a36d9fcc8fdda6c94538637a" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.947Z" + "updatedAt": "2025-12-04T20:16:57.188Z", + "postProcessHash": "270b3f5b7064698157e218151bf01aa29bc504e4f46446af848a0570211f1b32" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.948Z" + "updatedAt": "2025-12-04T20:16:57.189Z", + "postProcessHash": "1a3e57ca469cabc46f638c0071b6db392764bf98c1f3c089fe4b43a6f1ac63b6" } } }, "2026a346cf904938db3b958bccd4a5998e0f9c3e806206b6a7de6c5a43e41346": { "99e01b88c76b26cea06cf6daf392581a33f358c37c5d4b5081a274912cfb4fdd": { "jp": { - "updatedAt": "2025-12-02T22:57:12.926Z" + "updatedAt": "2025-12-04T20:16:57.183Z", + "postProcessHash": "bd5362849b253e6d73d07590fbdf8d5b130c183e47b83cc12c529186979a870b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.933Z" + "updatedAt": "2025-12-04T20:16:57.184Z", + "postProcessHash": "af84fa29c0c52dc0230331f8d3e00a03250259a02126f3f7640e0625ed0cc228" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.923Z" + "updatedAt": "2025-12-04T20:16:57.182Z", + "postProcessHash": "be7ad39e811b3c0e866bc6c0e8a479863e1742fabfb09317b3b524627e4eadaf" } } }, "2283119a59e486c7e332715c4be76c78e6606cc8fef66284fa0397e91f6e9842": { 
"89e926971a9cb3deeda49f638cbf8679ad56a009190bf99db1a5f7d3b55c106e": { "jp": { - "updatedAt": "2025-12-02T22:57:12.922Z" + "updatedAt": "2025-12-04T20:16:57.181Z", + "postProcessHash": "2305ab9ffa2742865e60129bbd9d2e696a284cd322aafbfbf56759f9ec12c7c7" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.926Z" + "updatedAt": "2025-12-04T20:16:57.183Z", + "postProcessHash": "a276804ad226b3742a7909a7ae0832b1b286a6e75198bc7c05c23f4389923988" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.922Z" + "updatedAt": "2025-12-04T20:16:57.181Z", + "postProcessHash": "368cba54b9f5dc70d3d26f0cfc08794cfccd517a8e07117e45f7b71fbc02fe0e" } } }, "3b5e4827235bde4cab69ea0d512c4769c70579291411c713544bf464dec162c8": { "e5ffb2aae3eda69d46997485801b157c3e85f0837446fbd682ac417320b69197": { "jp": { - "updatedAt": "2025-12-02T22:57:12.940Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "fe30d11faab6f858072513228ee887e3810f2662fe8d46eb026aba30a88cf095" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.932Z" + "updatedAt": "2025-12-04T20:16:57.184Z", + "postProcessHash": "96047ef5ab1a60d859f517abab6f372c705a6e2ee5a52d805ca1df55ea191bd5" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.933Z" + "updatedAt": "2025-12-04T20:16:57.184Z", + "postProcessHash": "cf67e322f6cc9b9d65a5eaf459d9051e7228796a22efecedfd76ee915781256e" } } }, "51a4e1d93b002b635941f3a0b969d77f5e76ffcf3ab01cc6c0302553a48f2dea": { "e3cf07cdc5c67cae3f9a9be2ea541fbdda42c2a33f509a3d16926cfb4c4fa296": { "jp": { - "updatedAt": "2025-12-02T22:57:12.925Z" + "updatedAt": "2025-12-04T20:16:57.183Z", + "postProcessHash": "d26fbf312f2735cfb47bedcfb3f27df874ec29e91010becf5be9510c38d83eff" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.921Z" + "updatedAt": "2025-12-04T20:16:57.181Z", + "postProcessHash": "871a7e2c8459ec641ba28eb30007b2dc2a0f62a19923e14c758776372b4b0969" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.923Z" + "updatedAt": "2025-12-04T20:16:57.182Z", + "postProcessHash": 
"709a2f6be3f64d5056ea3473ac104c9305926bf3aa3db4cd8051509d00b6192c" } } }, "51ffd052b5e18acec3f8c2fc6fc9f2de6d509c5f9b55c4e653df085e2f4cce96": { "e67a2d890c9d442e3c7a7f02a0d5c6afcdb1928ff906f575bbf304c7f7799b2f": { "jp": { - "updatedAt": "2025-12-02T22:57:12.946Z" + "updatedAt": "2025-12-04T20:16:57.188Z", + "postProcessHash": "8f1ecf0d944bf9d07743cb1d8d479ac30bc7e2a791bfd93f8860c8bd40732c0f" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.941Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "d1f8724142e72a23ea4e4715f5154e5596cc6cd575c284a823bc570ab2ab3239" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.946Z" + "updatedAt": "2025-12-04T20:16:57.188Z", + "postProcessHash": "d30a889ce8b532a5ee0383b41da491173bd96eed5488ab67e69f49ba76dc382d" } } }, "58660987b73352ad4963dda3033196dbfd0c791f7ea7184da7b8ed72a70d23c7": { "e6384b2ee9b82af275d9a7823132ca573a701a7955a267deaca2eba7848c0139": { "jp": { - "updatedAt": "2025-12-02T22:57:44.757Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "b61149369ef75ddeff990df999cbfb1e47e84e202ec9c3fd58bf0650043ab8d6" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.944Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "531355f3e5ddc0737d42dbac69698d413b474755828317a5863b60cf43a8d89f" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.943Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "be2314612b0db55d6b578c6cc33929d71c651141f7558410648133ba1522f056" } } }, "5ae00fffd365a54fbda628a19a927576375cc455c591c16a26e7ed16b919a10f": { "2c1fe0f08e90b42f0362e7d55eb555bccf6bc9522b4eee5aa410eecb5a6ff63a": { "jp": { - "updatedAt": "2025-12-02T22:57:44.760Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "2a7bda1a67d09c09d92ed6a982dc07789eb06b176110bbc4e2458efbb7a61829" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.761Z" + "updatedAt": "2025-12-04T20:16:57.202Z", + "postProcessHash": "fd8b047b611e3e0b405cdf7fc6cdb49af8b1027e5d590de7ad138019fa5760de" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.757Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "c879d79c735f9663cf3cd3ad8ce7faa7f5a6891d9d1418ea7055b6265463897b" } } }, "62a28a91cc967d2076cb4a8ae68eb32bb7dc0a91eac1089fc166676f54731dc3": { "4fb613d98fb6ff221944b46d4a102b8b41af0362055b5e31a68dcbedb5e8be6b": { "jp": { - "updatedAt": "2025-12-02T22:57:12.925Z" + "updatedAt": "2025-12-04T20:16:57.182Z", + "postProcessHash": "e707a8ccbdc57b567febde1d27f63f094ff8d99cbe93fc9208f50f21fed5e94c" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.922Z" + "updatedAt": "2025-12-04T20:16:57.181Z", + "postProcessHash": "a5ac70c1db3977c93fcba2079aa3b426a804b1e1f39dbb142e52547b66ea2afe" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.937Z" + "updatedAt": "2025-12-04T20:16:57.185Z", + "postProcessHash": "add865fc6bcf8e0e67633efde7356433b66407f5cdab059cd27c50bcee069acc" } } }, "62cac186a0d5d595a384019a8da0f2587e8ec388e9fa723441881ad21746e53e": { "5315f9a99c66f3565ee182e7d8faf811aa2e4a227524f9f573eb826dc8b5c51e": { "jp": { - "updatedAt": "2025-12-02T22:57:12.932Z" + "updatedAt": "2025-12-04T20:16:57.183Z", + "postProcessHash": "594368233ac15b6bb9d12060c0be79b8fac35cd848d5bc2dac5ee43d3c35aebf" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.942Z" + "updatedAt": "2025-12-04T20:16:57.187Z", + "postProcessHash": "b950804ee19b1df5caec99cb36de7b67b4ab56a418e67bb1d5fb49a37962ac6c" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.926Z" + "updatedAt": "2025-12-04T20:16:57.183Z", + "postProcessHash": "ff32e60cbeaf25870bbd77f110e8edb62c2131e99980c2423701fadc236615d7" } } }, "64c029683442a95f0d9971d2c2a2f011b21167a916369b96ea20390f74a96eb2": { "27ea13a9d6a87686196565d791a629223843e1c311b9bff9edf44c593e511703": { "jp": { - "updatedAt": "2025-12-02T22:57:44.763Z" + "updatedAt": "2025-12-04T20:16:57.202Z", + "postProcessHash": "fba7cd44d2475428e647b400c207e0d4538477cff1e2674691485e487910063d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.762Z" + "updatedAt": "2025-12-04T20:16:57.202Z", + "postProcessHash": 
"eaf81a80ac7da4c844fae6ae87a0d9c868cb16b5bb32924842490d860944ee45" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.756Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "35c5fa22d646a35f725d636c55d1b19309e195d458859ef1c1169b4750cb0fdd" } } }, "812c31122c49b26a28f2af399b63cac7fdb8dbff9b0eccb1a55146b1f53d9141": { "1ebe27a88b5652f04a87609b29cf3e09b5dc4ad9bfe9681936296ff736f2d7ce": { "jp": { - "updatedAt": "2025-12-02T22:57:12.936Z" + "updatedAt": "2025-12-04T20:16:57.185Z", + "postProcessHash": "b637b54a861da4db609bfb932c54717e8c7f3bc22685d6b292dcfbc41d21a13b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.942Z" + "updatedAt": "2025-12-04T20:16:57.187Z", + "postProcessHash": "acd7e5cf88e541a1e5046cb8ed5dce0c8991135f2ad5a1515bef87e3b5536f69" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.940Z" + "updatedAt": "2025-12-04T20:16:57.186Z", + "postProcessHash": "26e2aff89a6003b0a6d1774d83d35ebbabca584088650a4c2327c11b888d1a3d" } } }, "8aa6821981ce9839d00fc14d757392848b9750acc4bf8539c334cf2d5871f908": { "a27ad75b9e2993bcfc4ac7d0eda9c06a190e908e4e85725e849767c67999764d": { "jp": { - "updatedAt": "2025-12-02T22:57:12.941Z" + "updatedAt": "2025-12-04T20:16:57.187Z", + "postProcessHash": "1bb565ed7765549b8d6d69ee766a259e97ee6dfb8f00d9b92d1ae0a97f31cc3b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.944Z" + "updatedAt": "2025-12-04T20:16:57.187Z", + "postProcessHash": "6b180ae2d17f55172543c8dec4e2cfef6e70e2c665c6a1bc78688501cd4169e3" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.935Z" + "updatedAt": "2025-12-04T20:16:57.185Z", + "postProcessHash": "590e5dd84759f4f3f06a1a0edd33450e9059694a44758ab933c69d923af97a68" } } }, "8f6142d5329a13cb865837bf5f90f1676c0ed34132ae0b7413c66ad9fee106c2": { "b5efc55478dd9c26c80dffe9ed741b395f4d2368d8eee6c9c3149cd4fc4eebc1": { "jp": { - "updatedAt": "2025-12-02T22:57:12.943Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "91675012a42a61384dcc9e53117fa61e6a9d9cab0d67dd1204d8d38aeb6c097a" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:12.935Z" + "updatedAt": "2025-12-04T20:16:57.185Z", + "postProcessHash": "79ee2455f96d14367e4b175db89f42ca1bdc12242115b104d31fbd9a6679a3f9" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.945Z" + "updatedAt": "2025-12-04T20:16:57.187Z", + "postProcessHash": "220b572e1caf3bab32382617b48d948990c1a0a3ddb27bbc1f12a496fa1f2c3d" } } }, @@ -1734,390 +2097,480 @@ "afd2f2eebd8416c23bdeb683cdf48c7d32f86769fb59accaa3e0399bedfbc689": { "9b1791199c987e23d27abeedfa5722370720553cfd8a6405ee7112cebcc27c6d": { "jp": { - "updatedAt": "2025-12-02T22:57:12.924Z" + "updatedAt": "2025-12-04T20:16:57.182Z", + "postProcessHash": "16cd2ee10c051c017171dad03e2c1eeb07825f45e798c38df1ec50020eafe9e5" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.922Z" + "updatedAt": "2025-12-04T20:16:57.181Z", + "postProcessHash": "3c4301b438e3efc3a0d341fb9bc174ce83c57eed421693eca9b226fd36aac980" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.932Z" + "updatedAt": "2025-12-04T20:16:57.184Z", + "postProcessHash": "ab525f8b57c01f69bb1c21c11d33fc294e3912e3b6362e447f7545fa31bcdac3" } } }, "b353f551e48bc3b4c88a7db0d857fefd25c028f8d05216430afdb76e3bd832b4": { "6d6603c2d993968e3e2fb68963df1f14bb64c291c769f84522294cc56cd80d73": { "jp": { - "updatedAt": "2025-12-02T22:57:12.924Z" + "updatedAt": "2025-12-04T20:16:57.182Z", + "postProcessHash": "0bb9eec0a41e8870f671b439e9563795a77e4ce5a3906e7b00d5c9fe30e27bfd" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.925Z" + "updatedAt": "2025-12-04T20:16:57.183Z", + "postProcessHash": "6eb66fdefe85821c25b001b7493b87185ea22f7fade828668e4a36c0c1415b14" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.936Z" + "updatedAt": "2025-12-04T20:16:57.185Z", + "postProcessHash": "d7fbf5e47e20ca0de009e9ada5a0b11a364db3196639dd1dc4b0a089ab819092" } } }, "ba6c4ca640fe7b3f714cda5b21aa83f56d6987a93c06b0f52403fcf16442d4a3": { "73a0749a7a37be27b2b679011c93ceeaf5407fff6130ef17dcbbbc612aee0d5f": { "jp": { - "updatedAt": "2025-12-02T22:57:44.762Z" + "updatedAt": "2025-12-04T20:16:57.202Z", 
+ "postProcessHash": "b8a208e0c90196019be73be76f746d64fed2a656a6aab066f7b8955098af39a9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.757Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "c20f8d68f01916e7b2ad2934eec8ab53dc236526537b97055abf849d31d1d075" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.761Z" + "updatedAt": "2025-12-04T20:16:57.202Z", + "postProcessHash": "c55f96584d853b6f6de857946d8284098568d4f1845a703f6444564d86d24562" } } }, "c85686859f3f25046db0082f882182fadaaa53c9674e2b8421280d74f206eb40": { "add68d9d7c2384a1f4236b30131c64724392237b73f94a4430f8fd215046f46f": { "jp": { - "updatedAt": "2025-12-02T22:57:44.758Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "164ff547e422691ad7cadd85b063b08e83f7fd6557d6e2a9390f429fee192ce8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.766Z" + "updatedAt": "2025-12-04T20:16:57.203Z", + "postProcessHash": "509955a867f231ff7dda09eeb249a0f75bfee0cd000a4fdd2af4223d3ab45780" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.764Z" + "updatedAt": "2025-12-04T20:16:57.203Z", + "postProcessHash": "50cca8fe9c0272f6b13161923bceaf40c2d251ee8dce94ed1718ae289961434c" } } }, "e58beba1ecf7893bfe1389d8eb8c6388801ea9f76c74eaadcbaa400a86832dc0": { "80e13888b6bfca7d175470bafcc2e30a1e88dcbbdaa15cac209fa66c4f44bddb": { "jp": { - "updatedAt": "2025-12-02T22:57:44.759Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "667bd03bf10b7329b7e8f207e0ab3e046b0e24859249942dc848ff0036f830c2" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.758Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "376fdac73aa9f3d1f987d062e66c82759fe382172d60ffcaccfdc31d67baf8a7" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.760Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "c2b4064a9b3342c2360e7985a015d4236c6afe38f602a017ff10fd5a61fab048" } } }, "f437d5d62e24e71773573d12295d6070b2013b4f10635e752fc5e0c0c6f3d5b6": { "69df1b4df06653852e7ced5d6197d910291dedd2d1b27599cd5608fd1b4a5214": { 
"jp": { - "updatedAt": "2025-12-02T22:57:12.921Z" + "updatedAt": "2025-12-04T20:16:57.181Z", + "postProcessHash": "872602a98938a05ac42d8a6d09fa654af66cebf04e415f18790f3196a4fe0905" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.924Z" + "updatedAt": "2025-12-04T20:16:57.182Z", + "postProcessHash": "191ea904ec76a8f2b9af4e7d06620277328ae21307fa13414cab7696f241936c" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.933Z" + "updatedAt": "2025-12-04T20:16:57.184Z", + "postProcessHash": "54144fccf84826433452738228f0cb80264bb98d3fe9ec5525dc7149e91b3582" } } }, "03f61172ad909b158589d51b6d4f89a053de0b09127cf415c34413087bd48c4b": { "a5c008c72acddb7fec319268bb5dce0d0fb9a1f10d18c2c90a95d993d9f5a960": { "jp": { - "updatedAt": "2025-12-02T22:57:12.920Z" + "updatedAt": "2025-12-04T20:16:57.179Z", + "postProcessHash": "197fef65215e100ab3525bc3caa639b045abe8c605c24d29d809fcba2a6f6b4d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.767Z" + "updatedAt": "2025-12-04T20:16:57.214Z", + "postProcessHash": "2928ce8fce88801ea95f7703bd1bb4d2cce9bc0f631238e1a247c86c83f2bd20" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.951Z" + "updatedAt": "2025-12-04T20:16:57.194Z", + "postProcessHash": "61c87d3b97ad2d0535c599ff43dd2a29c8a478a5c54417be9e960698b82a64f4" } } }, "0b7dab5f7a039f1859e3a70738566e228a8859b0025e472a76cd8fa2c67c6c28": { "1d8df38a053cb69ce2a27d4691e5cdfd13a6b160e9a02fa3f683e748d317ea48": { "jp": { - "updatedAt": "2025-12-02T22:57:44.763Z" + "updatedAt": "2025-12-04T20:16:57.202Z", + "postProcessHash": "810412dac21458b9d38223c29768fb77fd64c9df38bd44e496dd6f07c9151ace" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.766Z" + "updatedAt": "2025-12-04T20:16:57.203Z", + "postProcessHash": "09e410d9887c6d603f357d7e640dcd17c4664fc7a850f5c6a9c828beed149975" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.754Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "882c7c8301044b3566bdde00ba4a75df6e282b8b4456c19a3170f3a6ff184296" } } }, 
"2864b967eeeaa7eaa2100c52550f0c77a534e954059ecfcc0991f21bc889bda3": { "feaa20d52a8757a137658d5066422bdbf2de0a87efa96000934a084ad78bfddf": { "jp": { - "updatedAt": "2025-12-02T22:57:44.767Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "10ad29a8371de33efc365a81d5cb0311eb18b712ab638bf5fc06c621cc315721" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.950Z" + "updatedAt": "2025-12-04T20:16:57.179Z", + "postProcessHash": "d9148ec82ef070fbf6110378a9e3539e31be97d011d6c5e32ea6df99561d9f8f" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.949Z" + "updatedAt": "2025-12-04T20:16:57.179Z", + "postProcessHash": "016e160cd562ff90356dd92b713351549786b6f529048e5fb0bb38c087bc00c9" } } }, "36663ad730f89d83d4a65b5956ac48db373b0bcfbd0f2bb4062dc5f3bcaf2839": { "8841bb2bfdc1346e286a40346e8503829d958b3bac30b715d775b50f451b49ee": { "jp": { - "updatedAt": "2025-12-02T22:57:44.751Z" + "updatedAt": "2025-12-04T20:16:57.180Z", + "postProcessHash": "e6854ef41d08230a4eb5bc7465ff674139c32cc6c9f3933fb5da4d8f4ba35c11" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.952Z" + "updatedAt": "2025-12-04T20:16:57.180Z", + "postProcessHash": "8e2dae637e725852301f1388b1c655fae3ae43ea4fb1c685ed38a32b821f3a7d" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.952Z" + "updatedAt": "2025-12-04T20:16:57.180Z", + "postProcessHash": "a411f0836853e68b849bf041838f625aad7b34bee14224d91b50196094b405b6" } } }, "374986e8dd5ccd248058ea18a5c0798d535a4a7501a33eff5fd9b80a782b7c15": { "7b0998df0969746e6c19524cb961e7ff6d7e59afe83c51976450a953fc8b3ffa": { "jp": { - "updatedAt": "2025-12-02T22:57:12.920Z" + "updatedAt": "2025-12-04T20:16:57.179Z", + "postProcessHash": "2bd00c50ce39d249f39992c4291f6d53f217188545a904543c35b2e60a6e0f55" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.951Z" + "updatedAt": "2025-12-04T20:16:57.180Z", + "postProcessHash": "bd9c1651d0ac8da4771fe3eef2365a2edb53217a15ae37cfae2a2daf6e101a92" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.950Z" + "updatedAt": "2025-12-04T20:16:57.180Z", + 
"postProcessHash": "e77f797cd9f6716424de500c5262866713ccbf2a174b138a2db0c12b00bcface" } } }, "3cb23211e097156c0f1a78ad405746a39a30a7fca3e113e221a2bbde60fc5c66": { "30bc5b33601dc47abebcade817fd66b12ac5351751c6ed875945668d80c959b2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.756Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "520cc8fdc2e3fdb59b70d3804bbe783b05f415b867c8d8523c44c7440915e429" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.761Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "9928f7c2e4e8c99c5a7b64c293ba271067ffe185b63abf7ae5f7d21986a9be45" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.755Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "12d276665499634579bf51356792ba4fcf77ddb22a68ad38e6dfec28de8a9e8a" } } }, "6ce48a90c46614816b7c3a238012b7692f39fa7b3d52104f4f0f92d895004b22": { "7e344ba2b2f6753012aae6adc6fcc5f046670439fd5badb29bee696648c4a317": { "jp": { - "updatedAt": "2025-12-02T22:57:53.433Z" + "updatedAt": "2025-12-04T20:16:57.215Z", + "postProcessHash": "e634c6503ee9538504c99a4f4af2d8d79d1f412b4e1d5c048f49db70cce1bea1" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.433Z" + "updatedAt": "2025-12-04T20:16:57.215Z", + "postProcessHash": "d0c91f522143c3d0261f63d8abcd009a118355edd68b8b91415c73f2d4adf8f1" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.776Z" + "updatedAt": "2025-12-04T20:16:57.195Z", + "postProcessHash": "6a28d0bbaae743bc0587ae8e1281d1b6f4b7e70dfe12a5e62d01470324a82415" } } }, "707c0fedc72655fa1c912bcb76b320d66a9ab9c8fe5e939a4df2863fdd7f82b8": { "7c543d5ef5d836f674d6873f133f02c4ab70829715b347650c196ee93273deae": { "zh": { - "updatedAt": "2025-12-02T22:57:44.775Z" + "updatedAt": "2025-12-04T20:16:57.195Z", + "postProcessHash": "217101187dac78ae2740489167bafa26f8b04af3d9647ce91a08a7e97d922557" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.770Z" + "updatedAt": "2025-12-04T20:16:57.193Z", + "postProcessHash": "228db8a36247e3e727c6175925e76048ea58481a548876d69170d7a7745d865a" }, "ru": 
{ - "updatedAt": "2025-12-02T22:57:44.770Z" + "updatedAt": "2025-12-04T20:16:57.193Z", + "postProcessHash": "879ca4c5333fbf17f142fb562a5a00b4afceb1d921667c5fee172081db2af522" } } }, "730dcd6bd51a2d8afa76fc973bedd9b4d7162629dcf690b192df4cac1fc39566": { "ed51d6c3026594d0ef90de441bf36dff57ad4a32048a288a0186952eb2f80596": { "jp": { - "updatedAt": "2025-12-02T22:57:44.779Z" + "updatedAt": "2025-12-04T20:16:57.196Z", + "postProcessHash": "5fb6ba7f3883edcd6e80d30e5d88e221524047390f4fcf57d1b4fe45ec3357dc" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.430Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "6f678e393b47fd6972edab7a65e15740c905b117282aaf8243acc9309fd8fa15" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.948Z" + "updatedAt": "2025-12-04T20:16:57.179Z", + "postProcessHash": "aeb8394be71ed1061e83064d537f294b81d7edbf3bf22f408d1b12b885bacd65" } } }, "8c4b511502097e5142007ba6bf89d86ef9d582ca174f395180742175d5bd4f05": { "f3274830262e5f01f74d8474761446b9f8a9c83ae245d4cee233a6cd17284b39": { "jp": { - "updatedAt": "2025-12-02T22:57:44.763Z" + "updatedAt": "2025-12-04T20:16:57.202Z", + "postProcessHash": "7a4697adb34fa9088e45060af9f1e66364dbe2b2afa84e334178bcdeeeb07f98" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.756Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "106481d500fd853dd90be3a88b6bd4fab4c4a7d7c9e08ba754a5b0826b3bf6a2" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.760Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "2e8d08e308f4a39a97618bfa8233fb3719ab2ff41dfa5f2e63ce23377bb342a0" } } }, "8eeb2d38e63485d3f399d528cce00b3fa0310df2d513c8b5aed0077ee217c69c": { "87d6c2b8c54e666cd98b21f88f6b978a41ee92fbde390f5a595aae7d2c59164f": { "jp": { - "updatedAt": "2025-12-02T22:57:53.431Z" + "updatedAt": "2025-12-04T20:16:57.214Z", + "postProcessHash": "9b50a119ae36cbec5ae0bd44d8ed43396a0c663abd7b4c63fd1b0f1bfc618c11" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.432Z" + "updatedAt": "2025-12-04T20:16:57.214Z", + 
"postProcessHash": "638300c10fb7bb0bfc1e23d13db8c79793bc22d06712afd94a11afd240300ae4" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.948Z" + "updatedAt": "2025-12-04T20:16:57.179Z", + "postProcessHash": "aa539caf9c592ffd6ad3d27244ea6598ef1b858b1b5913498a92e8c84dc94f74" } } }, "8efa94c2eaa8cf8e3ca888069c1494fbfe5679752549f9d0a41d641f2aad43da": { "481fbe7fef11ec970a0109b0e44e9a8165cf0e73e56a0466f038d0efcf74f657": { "jp": { - "updatedAt": "2025-12-02T22:57:53.434Z" + "updatedAt": "2025-12-04T20:16:57.215Z", + "postProcessHash": "ce93dfd31636439571655b2e413be87782ce3f0776341f3316c2dec8cfd44fe7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.775Z" + "updatedAt": "2025-12-04T20:16:57.194Z", + "postProcessHash": "7818cc6c258112a23d68a6031b0892b2536564fd60983b98ab7aece821ec9b1f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.434Z" + "updatedAt": "2025-12-04T20:16:57.215Z", + "postProcessHash": "499d2dc3246a0dc7f70d0c679d0f38bf81aff02a572d3dacae7939d3fb5a7dfb" } } }, "94747a3cb7498dd41f7f7aaed2f670f003087b3543cf7752be3b39b62c021927": { "f7bca2db0af5de7e2c67ebc1c65c226c309288e7f073d34318c2747b6d1e9327": { "jp": { - "updatedAt": "2025-12-02T22:57:44.752Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "303f063c889027de70b5be2e04aa27b1d58e67dbeeb65152fc3a4cbc92f0ef6e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.754Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "a0ed8ed6ca3f93d905ebb6e605f938f1b831bfa26344bb00cd76939e641f8e15" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.754Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "9b9052479db603ccd58855fbfa676e70b60c36a04e17aa0a91896a1c86de0341" } } }, "9be36d6e2bdbfee1f50c6de39175a6e538f2d986429211ef53b12ab0e0031ef0": { "1dee3abbec10bfa0b3995067899a721e47f20ee051715db74e0ac726fa434d54": { "jp": { - "updatedAt": "2025-12-02T22:57:12.950Z" + "updatedAt": "2025-12-04T20:16:57.180Z", + "postProcessHash": "ad922c4821c8a7d91ca0aa0bcbe2effc3a5150297c3095fc70c8ced6448fd6a7" }, "ru": 
{ - "updatedAt": "2025-12-02T22:57:44.768Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "55e35c86a84eeb5ec620c5995fadda557662f06240dce5d68f8299663cf207fb" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.769Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "5fe54738c1df7dc17a400875b05a5686202448ea249927a95bcae62aef324b0a" } } }, "ba0db243d349404c81abcb5ac1b3df54c29742957ec4ab33b24830ddab68f7a2": { "1f879e7772ed8e095b07f85578bd401df3a64cd4e5498296092756cccd875121": { "jp": { - "updatedAt": "2025-12-02T22:57:44.758Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "f9afae0926a14c77b3928769b0c3c8c03774da75c824aea4256ecf7608468f64" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.753Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "26705e56e63a25acfc85bf50b4861ef27a61e47fc75c98ce0b81d96d9ffbeafc" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.755Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "9b271e677db8a214cf45ad39fb0b08a2ad10d221eea598f3a8acdddf4e38f21c" } } }, "cb8f8c1219ce7a92277d5329ae659c90b78edb06139fda7cb67e9143f6a4f1a8": { "708faeaebbf5c4dabd6c9a9eb715cafd5178cbb6ceacc376b982a574ba6496b0": { "jp": { - "updatedAt": "2025-12-02T22:57:12.920Z" + "updatedAt": "2025-12-04T20:16:57.178Z", + "postProcessHash": "98d606493baa4f0810cc5013d19c5bfabadf8c2e8e618d3eb9ce4afd2f9a22eb" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.949Z" + "updatedAt": "2025-12-04T20:16:57.179Z", + "postProcessHash": "6a1915d2f4e2a7a3e6fc21fd9a5e4570a9ee769604cdeb01b6266d2537fe608a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.752Z" + "updatedAt": "2025-12-04T20:16:57.181Z", + "postProcessHash": "8a9fd17dc0c4d50254cd9b02dde416aacef804448349c3c573f3af3350fd6f25" } } }, "cf42c21f80f60055d0087c0e795d8976b1d91223e0fe30f342746b23878b6c6d": { "6d3f845905f3f2b2a1be610957281c22628e8585866ee195f1e005cecbd69e88": { "jp": { - "updatedAt": "2025-12-02T22:57:44.764Z" + "updatedAt": "2025-12-04T20:16:57.203Z", + 
"postProcessHash": "f7d25aa2f57de1b7e84f637a05fe53266e2ce7545ed5dc5bd4fb6f70fc140709" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.769Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "ff8c59b8c9260147d422ff8f4a17a1aa8e6d403af10a0afe1dd71699f9f898e8" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.767Z" + "updatedAt": "2025-12-04T20:16:57.203Z", + "postProcessHash": "ce6adb9ac2bc5b92b02c5a9e468e7d6aa70efdbb1443b87d4af6f1ac47e39eb9" } } }, "d0e7cc516637ef8ff263a061c7c16bafdf014cfae7ce60448c7e0fcce8c6dfd7": { "e57a30777e558c8d76cfdd0c7355a7d8d9e150e93787b8eaedcd4f95150f489f": { "jp": { - "updatedAt": "2025-12-02T22:57:53.429Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "88613d2fecfc9fdedfde36b7bd7a80c5db8d5a235e24d0809d74c91eb3087c71" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.949Z" + "updatedAt": "2025-12-04T20:16:57.193Z", + "postProcessHash": "95b3bb841d07fb0aa712851c4a28bcd5045dc9fe7a0388bbeffe4a876ac547fa" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.775Z" + "updatedAt": "2025-12-04T20:16:57.194Z", + "postProcessHash": "a7a1fcb6a4c71625bbbfeac8692166d9be5eac005d1b161d8f7788f133ac7467" } } }, "dc560181da04dee98b254f616102cfdbf1969c4c794080bd3b5dd88e33f63287": { "f7b3da6309249ba57146453a20fb02f1da444cc9f6b9ff15796e49d19986d9d8": { "jp": { - "updatedAt": "2025-12-02T22:57:44.765Z" + "updatedAt": "2025-12-04T20:16:57.203Z", + "postProcessHash": "baf7da290aa03a202779b7470f9cbf74ac69e3a03974e1153a7ebae29b22578a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.764Z" + "updatedAt": "2025-12-04T20:16:57.202Z", + "postProcessHash": "1cb0afdc3e2a428ba2a455f4b03af394775ec059c9dea426c41324cd13f692b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.755Z" + "updatedAt": "2025-12-04T20:16:57.200Z", + "postProcessHash": "22aed73b4ece0d68aa29da67e65725c4f351373dce550bbf7e207a6a727e289c" } } }, "e390b76711ccf2a7eb5d962d037354b40ec5f4bd6b5e88c7a59d4fe98d2af88f": { "959a1807df034b8088bb146f4b89e2d5ea2dea86233fa18c9a28c35bbea95719": { "jp": 
{ - "updatedAt": "2025-12-02T22:57:44.760Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "e187b8d43b8784eb2466aeae72d77458ff16b5960c20e01176f0b47a4cb49606" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.768Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "9f4a120dcd58e31068122d02e2a45d6c8b7b2189e26d5c4821b84bbc9d8cc307" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.768Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "3fa1a834c43512f8f726f7064ce89b46f72612e2fab94e44a048549b8756f9a0" } } }, "f362b87c61313b355b28fda5f77765651cb599066809f44030b3c1010865fa5c": { "498198cf31ab4d64e31b4a2d37da8c4597bed364756e0feb2aad51f2859ac1fb": { "jp": { - "updatedAt": "2025-12-02T22:57:44.759Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "33d5a00e241c526b0a1e1309d496ea54f083f35b002f9d28ab3b85f2327aea58" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.753Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "d0adaab362b9530944d514a397bdac0ca0638ba7032eecbd9dac6d0bdb71fa65" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.753Z" + "updatedAt": "2025-12-04T20:16:57.199Z", + "postProcessHash": "0eb06e1385a90f669877f0239605b43cdefc42579e0eb2c2817c3e19fd6d9baf" } } }, "f5a9bb73dfebbd60d3ebe96194e16c204bbf24a1a4ad7b46bb262a754dac54b2": { "e78c91e1856bb6bb61d73c20e01d2f69ad12b8495c3f0d7fef84e1558681ea40": { "jp": { - "updatedAt": "2025-12-02T22:57:44.768Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "6a2cb13df82f390ac14f43d55257aac639857710f9cfbbbaafcf47e93ef858f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.769Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "e18356622d3214d0578fd97e929297d6e64106138f33632dba8455299977fedd" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.765Z" + "updatedAt": "2025-12-04T20:16:57.203Z", + "postProcessHash": "9bf47aba74c02c9f8ef363b0305be4c926bf034d26f0ad957d84abc679bd2fe8" } } }, 
"1370f12b87482a2e8d057a8b41e9ea94795da80127f778fde4628181bbdcc429": { "f8146d175696fd61b1124db8aa052124a23329de9472ab05df373240407f0ecd": { "jp": { - "updatedAt": "2025-12-02T22:57:53.442Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "399b593d592298dbd81b939fa80b0daddbd00eb08b41d95e3ea619f754b842b5" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.772Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "0fa46e5a4480fb1370598fc0a8c43c26189a7a31bc68c0cac629706a63250b28" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.449Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": "4a885d72e2e9d7e2d05b1ca201f552f1467baeb1fd35f7fe058917317e7a2eb0" } } }, "25e58c45c99cdd21fc20a817b3bc1c4d1448cfd9024cc4ed56ae9462032d790b": { "6bb9f7de8fea38f23bfe3fefc31fa9cf8d67d55bb09bb2f9a1806c8d39795f52": { "jp": { - "updatedAt": "2025-12-02T22:57:44.781Z" + "updatedAt": "2025-12-04T20:16:57.196Z", + "postProcessHash": "ef0c86093b0ee7c49f11ca2ab74c264205bf2d59a0945626343eccc69235594b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.785Z" + "updatedAt": "2025-12-04T20:16:57.198Z", + "postProcessHash": "68a085eaa2820abfce4525eb9476d9ef33c2be8e6b43348f385a32849bd1618a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.786Z" + "updatedAt": "2025-12-04T20:16:57.198Z", + "postProcessHash": "5ffa8bf3e2384cc20367f30a52132d1734bdd6fc0c2f08c6048cc37c2c188d54" } } }, @@ -2135,104 +2588,128 @@ }, "873620fff6c9ec3e37e3a93a410aa979617a1bdd7bcfd454702003458cbb9c7f": { "zh": { - "updatedAt": "2025-12-02T22:57:29.097Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": "91c792009e657549f8108a39cfe3a78a2814555d93b8bc1cad5ce3e2bab6f85a" }, "jp": { - "updatedAt": "2025-12-02T22:57:29.097Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": "80cedd74b31e3cd8753f05174474e32fe79019593c063da26ef9770b4b6e63e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.098Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": 
"65e295fb9e6248269c5537940fe6e779e6f5502f360e3ce8c453848ceb555928" } } }, "376f1f3d79070d024492b0852fcc46275cc6894795ef5c9378fe6a8039d04b64": { "57d1e9d86f14ce94f3b9606be0c45891a1cddf024b0cd28892082e2bebf224ff": { "jp": { - "updatedAt": "2025-12-02T22:57:53.448Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": "fc3a7680cf93457c6d6d572d68d7e8442882ffe7ef2c50a838bbbfdd7a96fc90" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.450Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "9186675f7581f6e4ee67a3e56524218ccc49f1654dbd5415669184a0af1c5686" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.439Z" + "updatedAt": "2025-12-04T20:16:57.207Z", + "postProcessHash": "120b764e495cc4ba8984e25e38bf3cba8edebad60a8d9ea9a4c8da3b8b5724f0" } } }, "3b20b82fd209471b97a1109eecaadcd504d69c6421631143f81852d506036bfa": { "deab720ce649678d8772ed32576b254176937947561eccdb5dd266ddcf5b5d50": { "jp": { - "updatedAt": "2025-12-02T22:57:44.772Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "2548024e02fc54bfc850c38dbc8f2965464ce10069dfe07c44325bb41f7e47c4" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.449Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "277a6b63249f5cbb9c903235b961cc8d5e38c77274e50428081fcd731b4fb737" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.446Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": "5fa27923fea48899a4996dc8d83063e01f748492289534bfdf3c1987849b7ef2" } } }, "5315751710a23b80f9bf1ed7f31831d089dbe93c3c8fb90d20b7744073d0bf57": { "a66560c3d607504cdffd12261e02d0e673e576056f78a84ca9ecdf329603c56d": { "jp": { - "updatedAt": "2025-12-02T22:57:53.441Z" + "updatedAt": "2025-12-04T20:16:57.207Z", + "postProcessHash": "2d91055199ad458ab9998515d84fc64b795d54c34f77ebfee67bc1bd3b605a4a" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.451Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "783e679441cb8e210f1f2b98ba7ee67906603f49c675b96b9e4d192268233926" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:53.451Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "eef5a6d5f3fc62ee6c7758ea304a57b78fe21f83580d10543e3b6beab0a849b6" } } }, "8d92e8b825b034ea42c644cd23567811b46adb33b6d540b842b64c0196ff3b53": { "292f22bc13c3bd83386dc5ae82bec9ed457e6f79b25efab444ce03844d88e825": { "jp": { - "updatedAt": "2025-12-02T22:57:44.774Z" + "updatedAt": "2025-12-04T20:16:57.194Z", + "postProcessHash": "a88c03365c0ae60de8251c932664091084b3e4c0365c5f2db102bec02cc9ce61" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.777Z" + "updatedAt": "2025-12-04T20:16:57.195Z", + "postProcessHash": "6cbc5e3acdfe96ca0c70a2fc41b097e715aad5a3cb1ff2eb4f4be0a3611bdbe2" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.777Z" + "updatedAt": "2025-12-04T20:16:57.195Z", + "postProcessHash": "f002f5709e43c8cef9e73c4a26ec58efee6dfec186d5b7eed1d7c46fba6965c1" } } }, "9845c4be459de6543c79bb52ebef31089a7b6dde5c4bcbf294e6b614cb8b73ef": { "f7ab2f792dc532d79e54d2172ab842ea8bb45d24fbea3c48d921219d21bb9a5d": { "jp": { - "updatedAt": "2025-12-02T22:57:44.775Z" + "updatedAt": "2025-12-04T20:16:57.195Z", + "postProcessHash": "5b3a70b4d7758eefae4657a96cac98cd1e9b43ff1f90c2f879b23e57655b1fc5" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.773Z" + "updatedAt": "2025-12-04T20:16:57.193Z", + "postProcessHash": "e0d7d986a6a30948f3745311ec17a64ab8be68899af22ea0b814094698850d24" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.780Z" + "updatedAt": "2025-12-04T20:16:57.217Z", + "postProcessHash": "214ed830ac5a2a2ac5d1d441457bd9acca05234b597fad83a98222ac59c20f36" } } }, "994f995f28518f9032c149f031eb9e817c8a85f3b0139de3abda3966eec97f40": { "0299673d875da310e70873db6a17323b8be0705c8b4b17c562c9e797b225acf4": { "jp": { - "updatedAt": "2025-12-02T22:57:53.432Z" + "updatedAt": "2025-12-04T20:16:57.215Z", + "postProcessHash": "af1c7c78c249901e9762caf3101b52e3cbb1f6e1639615ec21d5f2c30691ddae" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.430Z" + "updatedAt": "2025-12-04T20:16:57.203Z", + "postProcessHash": 
"69b8f7bf089ed2d5ec42dfe1b9b23d322572e8c644d6fa50a13750a55f8b7cf5" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.431Z" + "updatedAt": "2025-12-04T20:16:57.204Z", + "postProcessHash": "842bb114376151a7c65fbcbd1a3446c4460a0a7f51c536ce8375a958d2c6f457" } } }, "9e812084882765188d8e23b9cfcbf9a3edeb29e9461a1cec110df416342b0289": { "e16e8324972fb51ec759f18c31f84b12438b5b468badc9732e3a35eecb40c277": { "jp": { - "updatedAt": "2025-12-02T22:57:53.439Z" + "updatedAt": "2025-12-04T20:16:57.207Z", + "postProcessHash": "d0535810116d72f9d9268d65aa2defdd65a954cf8208e6fc413c1795832c239c" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.449Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "b956046f8cdba0ead8f1d0f4af703a50123478e2338b4c055a428bb6023d548f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.448Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": "803d155db64feaf954d1b39658fdbe0c8809546e24287aa3eb630206041978bf" } } }, @@ -2250,91 +2727,112 @@ }, "c1ad37c48321ab74d0fa77243447624e6c987f44ca3469a08d251c3e5a869de0": { "jp": { - "updatedAt": "2025-12-02T22:57:44.782Z" + "updatedAt": "2025-12-04T20:16:57.197Z", + "postProcessHash": "a082d7006c68eb44c1d29612d124044c9635217b93001b62be9bc398f6680424" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.783Z" + "updatedAt": "2025-12-04T20:16:57.197Z", + "postProcessHash": "ed19b21fb4f97b147703bdcd255a81d9c70d42cddecc7f54eccda0b68f715912" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.784Z" + "updatedAt": "2025-12-04T20:16:57.217Z", + "postProcessHash": "eef6eb61a40b4aaf0e681df662c5d9d2df9394b24409cb0dd086053c788958bf" } } }, "a9a515c52dba44d2cbab844922d2f769a5af11a34775d83c1bd8d9c97e4bb6f3": { "85a2a4117446131c96b792674a9cf5594566bfe0b7f1098d2210537e80d0fb0d": { "jp": { - "updatedAt": "2025-12-02T22:57:53.446Z" + "updatedAt": "2025-12-04T20:16:57.211Z", + "postProcessHash": "97c5e83da201bdbd88ca53cff0cd24aabc1b119fbaea16458def25f7d8582445" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.446Z" + "updatedAt": 
"2025-12-04T20:16:57.212Z", + "postProcessHash": "53e792551a9b58f2fe60ca7b1f2b6e6df80166963835af1a441aef4358668c66" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.447Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": "6ccd96ce98b3124a3db092f6c167dd689cb282711ef0140eab94d960db5cb592" } } }, "af360983b516284a69f939f103f1882eaf99d33139f9033142ae3561946f32c7": { "33be4cf9c98cef60c81c9a896da5f27cad1a7e71f69e85818494ce4b7ec03b2b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.785Z" + "updatedAt": "2025-12-04T20:16:57.198Z", + "postProcessHash": "868f65e60121162e5042e9e1567f9c3da49d79913abcb57370ec50c4909aa9de" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.785Z" + "updatedAt": "2025-12-04T20:16:57.198Z", + "postProcessHash": "32873aa90d277e851cbac6f46674b81d26e29cce9e772649cc2c810f3bd0e729" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.781Z" + "updatedAt": "2025-12-04T20:16:57.196Z", + "postProcessHash": "0b9db8a4a5f5df0540252e6c605a3c5c55cb5fa342e15743de5c21541d34c82e" } } }, "b204fb8610ce0fe2a5504ac8ae74eb658b2c80f1a1d885dc2b85d71bc34129bb": { "0aee55116dc7c452f61e8eb411e60595d3f877d5ebfa1d1c034f028155bf44bd": { "jp": { - "updatedAt": "2025-12-02T22:57:44.773Z" + "updatedAt": "2025-12-04T20:16:57.194Z", + "postProcessHash": "db3fbf849e09af5b41ad9139a6bb2aba6e4c110b74acfac871eab46f4d472f34" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.430Z" + "updatedAt": "2025-12-04T20:16:57.201Z", + "postProcessHash": "5ae09d5813e635c42f104119750babc531046477d15717a4a72a5dfec65f89b4" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.776Z" + "updatedAt": "2025-12-04T20:16:57.195Z", + "postProcessHash": "647dccac25440fe8f86e174ea4fdb2b4cd409066fdf6dc9eecefe5b7f12a5b12" } } }, "bf09040d678e6760987c57861f7d46d0c83dc84b582441fa87e7ac9756c76f6b": { "ee66bac04fe1df0381e777810c8adb5c9d16229f663ce7ef30a2a0506899ac5c": { "ru": { - "updatedAt": "2025-12-02T22:57:53.446Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": 
"a0a3116863abb7321953681d369eda91e11b116fe6a6f8279931e8f18a39a4a2" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.448Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": "a96a4a118d83763271bf412a3f140df4147dbad391f5b509ed3601694c82692c" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.449Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": "92afdbda10617e06cbb0a51772f671b50d2140aa0c73a78dc3a858510c519ee1" } } }, "c50d8bd0ecc6ee24b7f928b73255956cae71fabfe25096539cdb974c7f167191": { "f1fb2f5d8ab4009a1d0458d1d0604ea822a372927443fb49fae37168711e0dc8": { "jp": { - "updatedAt": "2025-12-02T22:57:53.452Z" + "updatedAt": "2025-12-04T20:16:57.214Z", + "postProcessHash": "c8324bf39509d4de4d79961d53dd226e40fbd57459d26e84bba066e95973c793" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.450Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "26bd9bb3beeb731f770c853811590069620043bd4e78206d08e493f059b789a3" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.450Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "65fc9427f9f052772688a94ebc42e2fe40df78194989707dcbe0f02f14be4960" } } }, "cabd7d221b503f016f6d425976074155c6ab65f9918739e83cc1d703e06ce9c9": { "7ca705d224c1a2bae3bf661451d8d9ee2d0404abce117b56dcde2161981ea1cb": { "jp": { - "updatedAt": "2025-12-02T22:57:44.780Z" + "updatedAt": "2025-12-04T20:16:57.196Z", + "postProcessHash": "ffdaddcf9cf35030db1d344c4c75f4e09df5b84697f9b6881a2a43ce2b28d891" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.778Z" + "updatedAt": "2025-12-04T20:16:57.195Z", + "postProcessHash": "49a63179fb70de44d514df59885d44792071c336f3d8e695ca1fb2fde0117924" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.780Z" + "updatedAt": "2025-12-04T20:16:57.196Z", + "postProcessHash": "4c7fe8ef21478a5bef73957366de1464bbd557e00813bad8c482b6b8902e6e90" } } }, @@ -2352,533 +2850,656 @@ }, "ef2a79c4910a450c4d299109576b26b3e4d3c1f0d7cbf8aec0cb3af68cf84848": { "zh": { - "updatedAt": "2025-12-02T22:57:44.782Z" + "updatedAt": 
"2025-12-04T20:16:57.197Z", + "postProcessHash": "f2c37b79f639dbf853f58e993676039efdd7d33fb24ccb9694b2c75b91a2b5c8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.783Z" + "updatedAt": "2025-12-04T20:16:57.197Z", + "postProcessHash": "98d325377b8a4e0d34a07446edcc9ba4b99bef9588b09074fe3500218efbe058" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.783Z" + "updatedAt": "2025-12-04T20:16:57.197Z", + "postProcessHash": "053326caf4fa83ee851a1b981b56aa428b12363ea35d944cb3b4c035ba51e780" } } }, "d6382580d57e06e3adb4f63249975c1e63e439afb1528089085bb16be9e0bfd5": { "e66f44bf486dac3ec176125edd9a39b1b6741ccec945cdd42e270d60579a2194": { "jp": { - "updatedAt": "2025-12-02T22:57:53.431Z" + "updatedAt": "2025-12-04T20:16:57.214Z", + "postProcessHash": "cb3dfef3c3edf276fa8d6a64edb71039adfa31645e8c8fcde85d8f149afa3274" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.773Z" + "updatedAt": "2025-12-04T20:16:57.194Z", + "postProcessHash": "1dea620a93fb863956febff0c6224cf341fbc90bf22d478eed936b0d1acbf5f7" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.432Z" + "updatedAt": "2025-12-04T20:16:57.214Z", + "postProcessHash": "4a17f65ce520d06d713a5a7f00a80075c15bc970b32d978dd0bab1d8a0ff340b" } } }, "dcbbbc894548f52a28f1dbe2761c66552c70c361ecde98f969015dcee3764a48": { "626e208c3631b5c7c63439845c92c76d534c35cdc0c557b51aac33578683ffb8": { "jp": { - "updatedAt": "2025-12-02T22:57:53.447Z" + "updatedAt": "2025-12-04T20:16:57.212Z", + "postProcessHash": "eaf2bea7278242d8f184d99fed664af4f25cd6c3cf76bb05ffe273b82fc95606" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.435Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "2f173c1478e79d4d92fe856d4157a6515f327fa84b6793597978ec24b3d34aa2" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.439Z" + "updatedAt": "2025-12-04T20:16:57.207Z", + "postProcessHash": "06aeda8693c0eaba941244a7af3251d0527d2cbe3b63bb86b66492dec1de9c73" } } }, "e3d2222cd9a6fac9acbf681cd3446dfd1fc4c2866524e4195206634d0d76acc6": { 
"7dd41862d4380d06fce8d5aee44728bdd5365a42f0ef1ef5d0a91b55cde5c29f": { "jp": { - "updatedAt": "2025-12-02T22:57:44.784Z" + "updatedAt": "2025-12-04T20:16:57.198Z", + "postProcessHash": "0a9445819b71d8003219b862dc13d03825d681dcaf396818325a1152bf6b5fa2" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.784Z" + "updatedAt": "2025-12-04T20:16:57.198Z", + "postProcessHash": "f1befe69ca2ce586078f07672678baa8935d1bf27dcd892dc5917dd8e6e68468" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.781Z" + "updatedAt": "2025-12-04T20:16:57.196Z", + "postProcessHash": "d530cb3a92456e006d9314813d3c4da19bd80f41b59081118c764ee577738ad0" } } }, "02291322e0d8f494b80f9d8e9c482282d8b548c6ae56afa37d022b164b99b054": { "14c2feb63b9f987820db166804e40ef404c44c5a695f647c2463bc6c7919d96e": { "jp": { - "updatedAt": "2025-12-02T22:57:53.465Z" + "updatedAt": "2025-12-04T20:16:57.225Z", + "postProcessHash": "1b9c2045d79decebd7e84621988d394bb18209f300616bdcced8f6a227175240" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.463Z" + "updatedAt": "2025-12-04T20:16:57.225Z", + "postProcessHash": "91d892d9f4045ec334acc06af9b01ea0a21531ac2d7c9ac9fc93d13b4a9b7c71" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.465Z" + "updatedAt": "2025-12-04T20:16:57.225Z", + "postProcessHash": "977ada528c20d1a91a73dd65a516e9e2de2669eeb12610a294c43c9c6155868d" } } }, "13dade465ba8d6e7014eb44c3923f9c834a481123903922ddf6e33bb4ee775db": { "d6e6aa07741897774555a1f0eac0954dd331322344f830c9f304dbdca6fc532c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.471Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": "eae8724bc6492c203f7fe53589161bb84e977bf08a9726a85b849ef4a5d01cf8" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.472Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": "bc740155ccf1d06436def6a858d3e846ed55f577579b9fa8a168a1db830ef03e" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.472Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": 
"03f66c4a1053b288fe6ad458e6ea707f3c2ed7c7c100fa49d948651d70763adc" } } }, "1e6a9268be90fc10ba5ab851817ae61b0167c3ce6990f2a5d9ebdb1ee6eec11d": { "986717639b58978e5f1cc565ca5bcaef17e4babedbaaace23e272cc8c401372c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.471Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": "f0fcb724b94055e56d02ab8f15f4c39fa0836c9f52e9f77300612ce5994a7d47" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.470Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "38a433ab93e32dfe963ce0635a9efcaf2cde7fe057c6be3b49c71f6a2285861e" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.470Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "9e0e7e2a11edd86bd73829ec8abd39d69d080c6890d8cd2aa2a5ee0b1ddf2d25" } } }, "290372a9e8da36b9b0dbc38f3a77bf8307b785738d5ba00a31fddfd12681d63a": { "435164419830229ab742e3ae11858464c9c8878bcf4a2bb3d6166ec4642f545e": { "jp": { - "updatedAt": "2025-12-02T22:57:53.473Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": "b5fb8d1d6ace877d02e1831cf1ac2cb7ddce0ce29ed2f983643bcd639e6bd3fe" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.473Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": "e46630ff15a9f9566d2b490cbc0052d0f6351436a9e701123f09b579ebcb88df" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.470Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "7fcacc055f2cb07364b5d1c2675b812a446527a5459e9768e0c6954797bab0bd" } } }, "2aca9c20ab8bbeb98fd4fbb9f62e8ae777bccbfb6417cbddb786afea95af4499": { "866097183364ceafca0351ea2755c4d597ff856cbd6ea28946d96c0d30d28ff7": { "jp": { - "updatedAt": "2025-12-02T22:57:53.453Z" + "updatedAt": "2025-12-04T20:16:57.215Z", + "postProcessHash": "989e525057f778a0694f0cf12bd1ba67c83e7ee0538b8edd211e26f6c7c244fa" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.453Z" + "updatedAt": "2025-12-04T20:16:57.214Z", + "postProcessHash": "34447010903c8011ec00cf83a3e7568c4143543f655940d76d90eda9b97d75f9" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:53.452Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "33bd305c8d90c6ab8accc14a06792fb037c8bc7fa461eaf3ddb8d641a2263c57" } } }, "381da73f1de48015917a484d7c2e45bb2557d1a326b8ff4560cb55a72d1de6ce": { "58f15d2dfce6c37907b066f99ba2b6b1bad2cefdd56e52bb79e7839fed922624": { "jp": { - "updatedAt": "2025-12-02T22:57:53.470Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "796b9f3aaa34a8f6a731a0d9c198105bdde4e478999393ff3ece9b310724fa72" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.467Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "7a58759a71149c2e9a86b209f1880797fb5de0a6d4b347308f70d185b25b6fa3" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.463Z" + "updatedAt": "2025-12-04T20:16:57.224Z", + "postProcessHash": "aa917a6ed2b6f2f770a6cb59bb30f2f860c4aab75eaa3cb48b5ed1a711072f55" } } }, "40b25bc5f9906b1b6c1e3fb64539dfc6d270a427153142c668cd16a039ebcb00": { "957d995119871468184ae861bc8fb42689e205013b5a1a037710ce22110de58f": { "jp": { - "updatedAt": "2025-12-02T22:57:53.468Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "5c48e115d13af8d4beb2204a59d8677b3bff2be59136fc087b1587585b6674e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.467Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "bbb1bc0d13b0a8b8336feb4d65e32bf17f738d337180d7827ed15717f7cec085" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.465Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "8b4876f5c660482c109726e836c28aff0b2b1afe67dc1e388f60784fb8e76859" } } }, "52853976e012785457c250daee6b0280f9e8e88fcbc6a4a02eaf7315f2758fc9": { "35936f5dd5b5ed9baf260d39b24862296fecf4c8c909f41e2a0999a8db0a3772": { "jp": { - "updatedAt": "2025-12-02T22:57:53.464Z" + "updatedAt": "2025-12-04T20:16:57.225Z", + "postProcessHash": "eea3af9e9f4f0c0ff213ed2168a050baf9d943d7090558c4842c8e8514c0b613" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.464Z" + "updatedAt": "2025-12-04T20:16:57.225Z", + "postProcessHash": 
"3910decc269bd763aee0e58e42339797ce7c71807f729845859bf5b7288cbfec" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.469Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "ef0b0cda94669d91706fd44f7ccab9e41122e147f5dadc5922e6f29f6b4af28d" } } }, "5a2a174332bfb9a0cdf7cfe65d8e91568153937327d15d632b2c09aba2aba728": { "e8ae2af14396db3064dca28b82c864d44d320c9ce456d8e334f9b158902bf4fe": { "jp": { - "updatedAt": "2025-12-02T22:57:53.463Z" + "updatedAt": "2025-12-04T20:16:57.224Z", + "postProcessHash": "62a4741cba839ad90336aab023c016dd099fb8d6361930cd4d4a865e6ee25d23" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.469Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "c4382049f37998315a9a77f41fc76511d28dba2adaf66df0df663b1c24c7cdca" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.467Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "8123f3855a4e9f6708c46e7a31581ca6c76c216976da6c0bb71e5e2eacaa89ca" } } }, "5f3d913c7a8c4ceda7fa835ce07f7441c4f601788cc68210c993d3dda60335e4": { "758768db465ee7223ab470e594b649587b038bfaa85fe340aea1e4aa3c4bd92a": { "jp": { - "updatedAt": "2025-12-02T22:57:53.461Z" + "updatedAt": "2025-12-04T20:16:57.211Z", + "postProcessHash": "91fce01437c5d0399e391bd71bcfbd7122e0280d445bb2734ebbf1b385c13e74" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.461Z" + "updatedAt": "2025-12-04T20:16:57.211Z", + "postProcessHash": "597c2dcff65fb8e265b84d862f258b11624dd3704fd19edc800ea3bfbc1dc2d1" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.459Z" + "updatedAt": "2025-12-04T20:16:57.210Z", + "postProcessHash": "7255b4aa91f1ca505ef55113a8c5720bcb2be262e9038aeab1f553568e7d7167" } } }, "6312de56aa12f4e18450ee81ed026306d225a603f4e118425c63d475b267e36f": { "05a0d0bd2cfc6068766a3aeeefe69b1d78a4b4f120052aeaeddd00b384c6828c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.468Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "5e771ad5fb53a6560d0b44e8a8c721b603c809e7fc96545bcbba8cb79d4d8d02" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:53.464Z" + "updatedAt": "2025-12-04T20:16:57.225Z", + "postProcessHash": "d4afa9908fd5ea80fe1779cfaf9795a67dc432458dc362ec542bd84778e18666" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.466Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "fb83d58432cc0900411b4e136853b178eb2008c58c9f26b7532be07546418b24" } } }, "6439efcca906a994e35faf64afc92947e6ce60d7db71c07200375b94c1ec08a0": { "b590592b2b9abba8d294cbb837fba6f0bf9ec95a8c5f2d979542c7f80d2cae21": { "jp": { - "updatedAt": "2025-12-02T22:57:53.468Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "ac8181a258f48b83c3738d3ab0108d6fecdfad8b707490e025dce49b86ec2280" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.471Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": "e1f199e8ed0c47463892eddaa9bc897420292ba94cba6da3f54e87a7162980d7" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.467Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "1fc67c6caab479bcf9d9eb5e80d1609540a7a3993db8cddd777350d2374a8b4f" } } }, "645c6b21a98f21e01e529f7978413fd1fd62c80916d3b27f0533877e73361460": { "9190dd15a568419dc8f69602836e2291f52c2c494b8a21b5d535f8100ce666fd": { "jp": { - "updatedAt": "2025-12-02T22:57:53.460Z" + "updatedAt": "2025-12-04T20:16:57.210Z", + "postProcessHash": "000ee1b52989989bec3ce9333d1115d8abc7b87dad08f976ed20e4974c58cbbc" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.445Z" + "updatedAt": "2025-12-04T20:16:57.209Z", + "postProcessHash": "ae981a940a84fe8533c9822cd10b0ea42512ae9bbf278401aacd7b1443808a53" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.460Z" + "updatedAt": "2025-12-04T20:16:57.210Z", + "postProcessHash": "9a787fc54f01631465d69f7d5deb10f3e172fb8a35fed8996c2938cb19944870" } } }, "81e55d728a63e9d9620a0aa9a0f3152c86d8f4228a2480791e9cad5a8de39a05": { "0a7dd0ec6b5989e1b77f3754697c20347971441c557b816d890bf2b9648ca561": { "jp": { - "updatedAt": "2025-12-02T22:57:53.458Z" + "updatedAt": "2025-12-04T20:16:57.210Z", + "postProcessHash": 
"8033643e3d7fd14e6bf2f9fde05016f75df55da8da55ae75c80782aabad53775" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.444Z" + "updatedAt": "2025-12-04T20:16:57.209Z", + "postProcessHash": "78a73c41ac58b791b78bb7aa641114e81719f2d845ec2579b7e38a6e75331156" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.460Z" + "updatedAt": "2025-12-04T20:16:57.211Z", + "postProcessHash": "fe4c3778fc8f5117e94f336f4ec8e6a609a3163c6431e5c6e941de7f0ae99c2e" } } }, "99dad4c2046d97de9c9a10225dad41defe9ab46dd46ee1ebf18685fa87671a2e": { "06b367fa8b09d7fd9415ccb9f2fa0fb03df266adda026a80d2f81729bad14302": { "jp": { - "updatedAt": "2025-12-02T22:57:44.770Z" + "updatedAt": "2025-12-04T20:16:57.192Z", + "postProcessHash": "4efaee3c2895f3144147a20511fab2b231bd58fb13be041ecfc61bc285ab2c30" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.441Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "f9f0e8be358dbde188f65c3650d2b3dce4282c677cb97af76c87999fa93331b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.452Z" + "updatedAt": "2025-12-04T20:16:57.214Z", + "postProcessHash": "d3437fee45fa0f3d05a4cb222f48a72b35c9691dd087cb65f097c70414b12115" } } }, "9d3e2980fe828b01727089a5b229444dc083a28f187a3ec09ad16a7eb1eb6d78": { "27aa4e4f10c34b32aa528db752d7176b33e61894bc9750f14367f23ebacba5e8": { "jp": { - "updatedAt": "2025-12-02T22:57:53.451Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "96d17fef877404df6be39caabd0a12bd1f475055d06c0c50736e71422678501f" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.453Z" + "updatedAt": "2025-12-04T20:16:57.215Z", + "postProcessHash": "cb0f4ab7cc351f4eba587e45da704844243c1caae2b584c50424e12051972f40" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.452Z" + "updatedAt": "2025-12-04T20:16:57.213Z", + "postProcessHash": "11d9696b25361972371f2850d9f7807142d205ba818a4a2d9e44184bb1ef6d3c" } } }, "c491de2fc423ab10dbad66f7c1ced660f15c3f92d3220edeb2ccd84ee9575232": { "6fd80c5323889b79422bdbfe9fd8a32fb4bc56870affd9f018e096ec38fde0cd": { "jp": { - "updatedAt": 
"2025-12-02T22:57:53.472Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": "e673ff8e48c6a00069377dc3b18c5873f1424dcef402c99c4d7c06618db7fac8" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.474Z" + "updatedAt": "2025-12-04T20:16:57.229Z", + "postProcessHash": "5fb6b53499cfee7f625e98e404782fba10ec68baa2001239369adb2c805339cf" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.473Z" + "updatedAt": "2025-12-04T20:16:57.228Z", + "postProcessHash": "52bdc1744a33308963c277c44e4387a2e4d630960765a5594be1e3ae49125022" } } }, "cd73972a4d037347d81b6309d5ebdd4973e65b4708a5a1c61e961a7e349f0783": { "9206b8172e5adaad17f8c6eb0cded1360735c838b0a3363c600dce6cc6abbcef": { "jp": { - "updatedAt": "2025-12-02T22:57:53.458Z" + "updatedAt": "2025-12-04T20:16:57.210Z", + "postProcessHash": "231b918692e1d5db71f260e7cb27b5dde0342fee7b2e6b219b6beb1980893d08" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.444Z" + "updatedAt": "2025-12-04T20:16:57.209Z", + "postProcessHash": "dc4abfe8129f668194b56786d71970bcace56c5f6f564f33a71e5747156177fe" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.444Z" + "updatedAt": "2025-12-04T20:16:57.209Z", + "postProcessHash": "0d5afb5c9c1f0d813dc363b3bb3b0a6012df0ed762878d131ba7b202fe978df4" } } }, "cd764deae766f8c6f6cfe6b576e74bb1f602bfacbb3821340a5850510d57a818": { "b6693ed657d428f4853a8fcd97aaa704f7a982e5e86c5fb8e5ce372b12c11e69": { "jp": { - "updatedAt": "2025-12-02T22:57:53.474Z" + "updatedAt": "2025-12-04T20:16:57.229Z", + "postProcessHash": "232cc5b52ffae01c50a8d6fa29b654eb80ec3e2a107592684aec0ac4479ce7ad" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.474Z" + "updatedAt": "2025-12-04T20:16:57.229Z", + "postProcessHash": "c7e74f0c064864ba151efe4ec18afe94834ea6c7b301d4a102c210f43e79441a" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.474Z" + "updatedAt": "2025-12-04T20:16:57.229Z", + "postProcessHash": "6db3b0749adcb6166faf927e0f50730412600d2d81b9c99d07f8592d714b1f6a" } } }, "fd4807eb1e777b66ccc79335d7b822af7ba8bb6dcbbf18e3ae8c53f548f20928": { 
"455e4d7b70315644264125e3a1e3a329d14b945c29bd48454b379b5247f97bdd": { "jp": { - "updatedAt": "2025-12-02T22:57:53.466Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "5d850433c8150c58df8a0bffa285b0fe654ed668bb354194b4c9ad0a5bf2712e" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.466Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "c50dcfbd225d31d4d35dafd7fb80fa49b4cf1096552a0d711f948fb54455b4bc" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.463Z" + "updatedAt": "2025-12-04T20:16:57.225Z", + "postProcessHash": "cc4fc729026fb36966fbee46e5ab858a282cc0cb5e4411b461faa3a81df69aff" } } }, "fdc92b085b658968ee987d63323feb976df8a0ac7995cde6f13318c84abd0c59": { "7843455825f9c1828f408c376329311aba7d3c1e14b345e73ef9ad5b93e5b005": { "jp": { - "updatedAt": "2025-12-02T22:57:53.469Z" + "updatedAt": "2025-12-04T20:16:57.227Z", + "postProcessHash": "bef558f38d37b85dd1ad162a9ba0801ce028318f5893d82f16b4fa4502b41997" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.448Z" + "updatedAt": "2025-12-04T20:16:57.226Z", + "postProcessHash": "0ee197a6f083f473108b471d7cf5b18fdcf8fda76a223924ca93c66a2c4ca177" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.447Z" + "updatedAt": "2025-12-04T20:16:57.225Z", + "postProcessHash": "be759f33e06f4393f58409ec5b7b5c2fdb5e760e37dee12c99083dabbc0793e5" } } }, "07722b6c1b943ed20bf3ff6d22b1b257d5a8695ae4b4553850f4bd4af3c5d2c7": { "2dcd7f352db514064395ba3b8d67b798124850b8ab716d08d01b773649c588b0": { "jp": { - "updatedAt": "2025-12-02T22:57:53.495Z" + "updatedAt": "2025-12-04T20:16:57.223Z", + "postProcessHash": "6424627ee5485a25a7e3e0a085e601f56baa9ee105994a7f910556eddd58f39e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.787Z" + "updatedAt": "2025-12-04T20:16:57.224Z", + "postProcessHash": "b032ea710313baa7a02a0cb3490cb1d59b4d9d13ae66625e913e16ad63c36fae" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.787Z" + "updatedAt": "2025-12-04T20:16:57.224Z", + "postProcessHash": 
"57f7a7d4d7dc6a29af8191afaecd149b3051b6e211e2558aff5e7a0fa4b9e224" } } }, "1777f3ba683ace75433dd34468539a9e9d67ef87d9f67a65737e86954611e774": { "3acf5735b7405bf65c0204cd16078ddc21713f4e46ed2d0238fb8871eb19b84c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.485Z" + "updatedAt": "2025-12-04T20:16:57.221Z", + "postProcessHash": "16ec1d348ae7be163da6a4634f2572410cc5fc5877cd0486ea665aff58bc092b" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.477Z" + "updatedAt": "2025-12-04T20:16:57.218Z", + "postProcessHash": "5658038603c3f67f7631ab654b672bf51bcdc6424ab231e7142bbfd3aee834a2" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.484Z" + "updatedAt": "2025-12-04T20:16:57.220Z", + "postProcessHash": "6bbaa3390f2a5ea479c355ed22bcf13c16b1680bb505bbf469c78876ca307c7d" } } }, "1d262ab176214afd2615461f6d7dcbc00bf893bd453c8cad2f9b625f5b34ed8e": { "2ba14b7281983a683a98e1fb919f7ee7317f7cf3b6fce775f1d12a76ea1e67e6": { "jp": { - "updatedAt": "2025-12-02T22:57:53.481Z" + "updatedAt": "2025-12-04T20:16:57.220Z", + "postProcessHash": "e7f30522af3fd2472599fbae5522503864ddc353b2227dadcb976c85469dbff7" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.488Z" + "updatedAt": "2025-12-04T20:16:57.222Z", + "postProcessHash": "a8366e24176f5e93922ad9b6c9e1cb44bd9d847f832e7d213f122c3b54c001a5" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.477Z" + "updatedAt": "2025-12-04T20:16:57.218Z", + "postProcessHash": "39ffbdf82dae7b9f7b32c501dd75559c55a3618a3e5ce35bce5de62b4e138b95" } } }, "34dd8a3ad912132054af7846a7af1d6c6f27c8de9f83f63c9354d5a326b6a82c": { "8e8980f8eff31a76117d3215f17a1cba9a0ee6234c2cce918781f484742ac397": { "jp": { - "updatedAt": "2025-12-02T22:57:53.484Z" + "updatedAt": "2025-12-04T20:16:57.221Z", + "postProcessHash": "f7c22e3452f4740e0630686505021bad6827b8d8e981986195840a341b567c93" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.486Z" + "updatedAt": "2025-12-04T20:16:57.221Z", + "postProcessHash": "61000294eb8a839ac95cc389da8aa3171092d87ffbbbc76f97f4b9a7372b9f78" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:53.493Z" + "updatedAt": "2025-12-04T20:16:57.223Z", + "postProcessHash": "360e939f63807e786665f6ecc0b26d3c6e709728b30d8f011f4752f0bbffc608" } } }, "3e5df6c1938919084ef3c24cc3aa0a9f834e4dc5475270adb64943fc1f2c818e": { "a27fbee07ebfb6548a8a222874fceb3def1e176c740f36e8bb3fa452c9d32b53": { "jp": { - "updatedAt": "2025-12-02T22:57:53.490Z" + "updatedAt": "2025-12-04T20:16:57.229Z", + "postProcessHash": "afc17f58ccbb5928670ccb7001086276470c8c3ac782832e6326a7adebf956e5" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.492Z" + "updatedAt": "2025-12-04T20:16:57.223Z", + "postProcessHash": "4ae1ba52733afd62e305311f0662b5f9863c76b8182170fe037779d9000d0d42" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.491Z" + "updatedAt": "2025-12-04T20:16:57.222Z", + "postProcessHash": "ad7855d0ae8ba12a5990bdf7bf14ba59e681fbeed460707349f1efb8f3fbbb7d" } } }, "44b3f5422fc4c4f447ece76e0f8066bb34f3affc30e7419ca74089bfa8055925": { "b2e193e55be108c5582fcb93204b7255923583e86eda8e23c2ec5a7fb530f958": { "jp": { - "updatedAt": "2025-12-02T22:57:53.480Z" + "updatedAt": "2025-12-04T20:16:57.219Z", + "postProcessHash": "c7e691a28edb674f2adaa1e365cce55f51cfdaaed040ddcc76ff0b5fd1eb057a" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.486Z" + "updatedAt": "2025-12-04T20:16:57.221Z", + "postProcessHash": "58f74551fe99693f2d2584ec891bb5946be8831cfa5f6fcb08709d6c56112241" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.481Z" + "updatedAt": "2025-12-04T20:16:57.219Z", + "postProcessHash": "10da1772efc4f97a3c93ae6dd1e74b262a69be8803cad9d676c411ca94f56916" } } }, "4e56f5a34b33c4d6df45c30f812200b60d80663863d11d65cf1450dcca806457": { "4705c821297fd380138640ab626f9d4f597a2b1853b0c84c3688cc33f5d4dd5e": { "jp": { - "updatedAt": "2025-12-02T22:57:53.462Z" + "updatedAt": "2025-12-04T20:16:57.224Z", + "postProcessHash": "1c35941a5748d64b79a38bcdd5ee336f45031ee985c27dd81719150ef23a6583" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.459Z" + "updatedAt": "2025-12-04T20:16:57.210Z", + "postProcessHash": 
"7f81800708f741e006a64e3f6eeef19d911b1e725146e96e97a14337b2cefd2d" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.462Z" + "updatedAt": "2025-12-04T20:16:57.211Z", + "postProcessHash": "dda9429f06cd7134d8d759fb295897ec5c37329b3710eb73134d63ace122732c" } } }, "80d3d6543dd83a7957930d9045025b4815c1307c41a15c290f7acf0eae734cda": { "41c8219de2e81a989c9aa442e0f7b45929280d651e3c0f00f28c5e946e5b9487": { "jp": { - "updatedAt": "2025-12-02T22:57:53.483Z" + "updatedAt": "2025-12-04T20:16:57.220Z", + "postProcessHash": "ddf735b955d8d39f8941e68fc16267eeb80f2acfb46f85e5460d2786d2ca3577" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.456Z" + "updatedAt": "2025-12-04T20:16:57.217Z", + "postProcessHash": "2fffbd146cfe0b38a1ccf73814539b3a0fced9d346b461cd1d19484406937020" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.486Z" + "updatedAt": "2025-12-04T20:16:57.221Z", + "postProcessHash": "6773a0b784363c8b4dc2de40f7601a607bcf651a0574dc0634fe83151ea7712d" } } }, "92105bab40be708ce10315bc6e1bb96fe7701142a5cccef12ac85e9bd6c2ad0a": { "f2e5adfccb04fbdb23894f4f9466bac4d65302edaa3ab747b455eca21fec899a": { "jp": { - "updatedAt": "2025-12-02T22:57:53.492Z" + "updatedAt": "2025-12-04T20:16:57.250Z", + "postProcessHash": "8ffb152b361ee8708d500d67c309729ba9586abf98f09ead9be9686df3b7bddc" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.476Z" + "updatedAt": "2025-12-04T20:16:57.218Z", + "postProcessHash": "7ded3088147f57b29350aed9ee402515ac05116c3849018ce2170eb9e190a51c" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.489Z" + "updatedAt": "2025-12-04T20:16:57.250Z", + "postProcessHash": "04b59a155ff0ad872389970258b78e46f8a951d26536e0e102a008aeaba2172c" } } }, "98c24f1533f177815a78f76de8765482cd98558271c182e9ea70175821ff82db": { "59cffa3acd22af2478ea31099f73412223d91eb1301172207a61ac51e8cba72d": { "jp": { - "updatedAt": "2025-12-02T22:57:53.456Z" + "updatedAt": "2025-12-04T20:16:57.217Z", + "postProcessHash": "e2fe867f931a09f40fa8b95c2ccd54bd4aa14a93bd9ed3f85ceeb0a19e77d0f8" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:53.477Z" + "updatedAt": "2025-12-04T20:16:57.218Z", + "postProcessHash": "3db95998a0178d92b1dcc743da63e048a86682ec5a0090f4ec5bdc47dcdbae4d" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.455Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "ac7269cd92033aab6942857f08992b23567f65f5f6d7e42d2d630fd54645929c" } } }, "99393522afef2d07d814a10cdd78d55ffbbf63cbc84caf67a25cbbb6702d7b29": { "df2e38e726ad5701168a107a3233f7e582b27aaddc17196034ab06c247a2cbb1": { "zh": { - "updatedAt": "2025-12-02T22:57:53.490Z" + "updatedAt": "2025-12-04T20:16:57.222Z", + "postProcessHash": "583fce10592418ca22bb016c6a0a88b97a849fd001dcce9b51bc568ff38caa1c" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.479Z" + "updatedAt": "2025-12-04T20:16:57.219Z", + "postProcessHash": "b063f92d009833c9db1f5d43368d42d80f8250096f0bcd9659500b5fa1553545" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.492Z" + "updatedAt": "2025-12-04T20:16:57.223Z", + "postProcessHash": "eda49adff09d99bc79202c10653856e7511b5c535ad5ccc2e27e87d5dc7ecb5c" } } }, "9c36f42318908cee7394ac7bdffe1f0b4dc78d18dafbeff49083e3a25b9a0c0d": { "e03b65e2958329c1310e8961f72be96a59122375e8ea0f5d7d4a488588e62cf4": { "jp": { - "updatedAt": "2025-12-02T22:57:53.487Z" + "updatedAt": "2025-12-04T20:16:57.222Z", + "postProcessHash": "9c96c70490e687d2aa5fcf28f084776204787208f402de3584443af31e149626" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.786Z" + "updatedAt": "2025-12-04T20:16:57.224Z", + "postProcessHash": "453b9e628bea20f52063d6f3edc12b7c8d6db9e8b4a722dc7f42cfc42b52bde8" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.491Z" + "updatedAt": "2025-12-04T20:16:57.223Z", + "postProcessHash": "6a5b767aa81f1561ff5d2550d1ef3f7e14f7be546a96918df580a3edb0a90ba1" } } }, "a017ed6c9e204f49a926704a46e850a434a064d54ab74c5196dcbbbbf095a5f5": { "a2adde35cfc427e42fa09ac65d97536a831e1059c7400f85820e2263a1b87c36": { "jp": { - "updatedAt": "2025-12-02T22:57:53.455Z" + "updatedAt": "2025-12-04T20:16:57.209Z", + "postProcessHash": 
"2ea24afc4c6643c7b04b3bb5df169e3886146c6d8e215bd3357b0910e749a72c" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.479Z" + "updatedAt": "2025-12-04T20:16:57.219Z", + "postProcessHash": "dc74999e4a714b2dbf62512ecc22160e7a0ad7a0cd984b5a130b9bca76fc296d" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.490Z" + "updatedAt": "2025-12-04T20:16:57.222Z", + "postProcessHash": "afb705de65ffafe1724be435528c1d0a5eb478bf7b9c2e0e5701271eabf88ce7" } } }, "a02c9804673e90df5f360bc1d48dc4d9b7a2120b7b838de45da8f0bd5dcc7bfb": { "6dba5895ccf72ae7b5a8b301d42e25be067755be6a3b1a5bcb26acdc5cb58138": { "jp": { - "updatedAt": "2025-12-02T22:57:53.457Z" + "updatedAt": "2025-12-04T20:16:57.209Z", + "postProcessHash": "ec25d832ca1df4de8324d24a82fda16015db33878447126427fbae622340cd40" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.457Z" + "updatedAt": "2025-12-04T20:16:57.209Z", + "postProcessHash": "0345664667b4b741d47e5d8bbd8a961da2fff2cf731ab77ca9d3a6fc39b9268d" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.457Z" + "updatedAt": "2025-12-04T20:16:57.210Z", + "postProcessHash": "5f181b39879f69ca2f57055c45deb36bfb81b89cb58c7e1523e12010f9012d8a" } } }, "ae924afae0c519cbcd8b9677316f11c74c707cb0d60447e3f16db759e6be95d7": { "10c1fb6d0471791e0662f2e4888a314601486dae17ed953b398d3ded8b18d82c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.788Z" + "updatedAt": "2025-12-04T20:16:57.250Z", + "postProcessHash": "31a2bee6969c8c9f5b8a43070e80f46d173c31f565fce71937c4f6c941625b9d" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.494Z" + "updatedAt": "2025-12-04T20:16:57.223Z", + "postProcessHash": "37d196e5119c8d380fc6844594dbd69586c08d20a5f26bc41ca8a4421dedee54" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.493Z" + "updatedAt": "2025-12-04T20:16:57.251Z", + "postProcessHash": "f689c37ad31b4fd10d1ced2d5cff6b59f363be56013d323f7fd7b82da936f16e" } } }, "bdf6a99d4e97b12fb653dbfa5271fb1f3f759447def3b014fa132fc4a51905e8": { "70ae38ea604bbab68a53eb544cbd0f2cdbeea7e09ac7cd839c84eef1978dec29": { "jp": { - "updatedAt": 
"2025-12-02T22:57:53.494Z" + "updatedAt": "2025-12-04T20:16:57.229Z", + "postProcessHash": "8f7d621523b898fb6809b9798efd0e575cd6007e608e357213b4404b43dede81" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.488Z" + "updatedAt": "2025-12-04T20:16:57.222Z", + "postProcessHash": "71c24d13a0467ee125bbabb9471ab9c8a5e94d3dd9ec4f2cf83a2819808bbdc1" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.495Z" + "updatedAt": "2025-12-04T20:16:57.224Z", + "postProcessHash": "2f489f01dbf43aa947d0e7dc1f24c95541dff3beb80a229ec234b069e7d4ef53" } } }, @@ -2893,1097 +3514,1363 @@ "zh": { "updatedAt": "2025-12-02T22:57:53.459Z" } + }, + "c658b35c333a9b84fce26b6d52d08ee2316bee590e65f60012410d5d45a42663": { + "jp": { + "updatedAt": "2025-12-04T20:16:57.249Z", + "postProcessHash": "f96a6a01956b16a4ede2343d360e0052433b512babf20358f98d274c70cce8c5" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:57.250Z", + "postProcessHash": "30e391222a4793b0c194336bb1566115986b340051d955cb376833d76b706fb5" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.250Z", + "postProcessHash": "fd3f4445c0baecea01ff164acd46bd59d67e10c1f990f083ea1f68ac3e916ac7" + } } }, "e0a0f58749dbc99214f8d73d23e3e103bb972d6cb973f80440fb3b9b4b81c305": { "0f27725ca1d515bacca9b8aa1e23bb35c69b895c1d9563452e075aee634e4939": { "jp": { - "updatedAt": "2025-12-02T22:57:53.479Z" + "updatedAt": "2025-12-04T20:16:57.219Z", + "postProcessHash": "7091684c1020bc4f3285af7993469c520f68c1c5bb01014a41fb714610be8530" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.488Z" + "updatedAt": "2025-12-04T20:16:57.222Z", + "postProcessHash": "1dfd050fc86164290fbf1dfb794ff2dfb75be6c0992a5d0077ccd86dce4d3f99" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.485Z" + "updatedAt": "2025-12-04T20:16:57.221Z", + "postProcessHash": "dee208244f32bcc71c0912d9c3e459a917f4154e5fa861089731837fe901ec50" } } }, "e1027f068c086d68bcd19c94e1c760c747883dda4912d769a49634e99a298bf2": { "327dfaab66de7353575183e0fe7d40b596436f7254ab77cbe35383223ad4ff3a": { "jp": { - "updatedAt": 
"2025-12-02T22:57:53.482Z" + "updatedAt": "2025-12-04T20:16:57.220Z", + "postProcessHash": "5a3b24873410fa96553328b72e9eeaad61bd7e71c7fd1178570e0ab3ffd3ab80" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.487Z" + "updatedAt": "2025-12-04T20:16:57.249Z", + "postProcessHash": "9bbc8b35677cdd0313da0538e2210c50dd162896b821bdd59fa1bacf0ddba348" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.476Z" + "updatedAt": "2025-12-04T20:16:57.218Z", + "postProcessHash": "529518f34ca28cad7f5ee7e61eabfed1fd8fff351361ed4a381743f9241f8210" } } }, "ea52d1bf57d6eca260482d6e9db0b3e4ba187ca46f787a3ec41ccbabccdafc29": { "7792c45b9f12363c758a86312cea564fda8789130772fc2a238a348aa77232bb": { "jp": { - "updatedAt": "2025-12-02T22:57:53.443Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "67cb1bf38bf4f68a98a027ec428c9f5cb650b88e09886da54a10b52d8b691caa" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.443Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "511a63c44b7f0d3cd3751db19efc8ff1198754ee03149dd785a74d144f384e4a" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.458Z" + "updatedAt": "2025-12-04T20:16:57.210Z", + "postProcessHash": "c5fdceb61a73106218b9fafae029ad625690e7063b490206cb8da29c53877bdd" } } }, "f2dd481c53ba67e19f970889ce242cd474c9da5ed1571b9d4f5878551ed45889": { "70876690558307749f06728cb6ac14fce7075dc54a6e8cf0695beae2917c50cb": { "jp": { - "updatedAt": "2025-12-02T22:57:53.480Z" + "updatedAt": "2025-12-04T20:16:57.219Z", + "postProcessHash": "45adb49ec2feaffee808af764e78732fd85d0f6b6d4cfc13bbe73903cc7d5f32" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.455Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "0929fa32d4338a72d75cc10aa9ec92ed4d024f858b1038709f7e54cffd860e4c" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.485Z" + "updatedAt": "2025-12-04T20:16:57.221Z", + "postProcessHash": "b705b6bde5ae196e277481cd5a1d093a71367bf37706aded2aeca80823b6ae9b" } } }, "f4deb9d37929966d7a5d1040cf9c1132679840850e80dd91a4e56e059568b817": { 
"e1dc787a6d679d3f64b0e02ce3b61001ea2f9d7a3aab736ee5ae17d2bc4a4c63": { "jp": { - "updatedAt": "2025-12-02T22:57:53.481Z" + "updatedAt": "2025-12-04T20:16:57.220Z", + "postProcessHash": "fc060eecead4677c35c0262dc3e6ebee823fcef982dd5c3b71b12a0cc3ab26d9" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.480Z" + "updatedAt": "2025-12-04T20:16:57.219Z", + "postProcessHash": "29524baa856f194fff0a4eb876abb055c440bb40372470633b15a1d08f23f709" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.477Z" + "updatedAt": "2025-12-04T20:16:57.218Z", + "postProcessHash": "ac9391d176c936dc61a50050b4f540970dee2bcc6df2ec0a9128a1429251db8a" } } }, "046cf4465fa1fb7186678766ac47cbd78704f28064400523e5b59a245d53c970": { "b13281a5fbb00f880845872b5516d8f9e9039198c4bf031942d0ceec737def68": { "jp": { - "updatedAt": "2025-12-02T22:57:44.800Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "d048601ebc083f130a666e195fc2e15bef8b4208d033c7e3ba2f579774dfcce2" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.803Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "355168d1b10fa59e286971b001d0f8c84494736354652c15211efdd4f1aa0672" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.797Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "bcabb8c382ea4d19d3b7323129a2446dfee3a100ce1b1e85272e7e846a9a396c" } } }, "0cdb3f54f81ff077472e66fb0a57247ee5bf3d2a93abeb493538e948840b724c": { "2beff12ea84429b1b15d3cd9ba939104aa74a91c9770800974ecc16582d6d010": { "jp": { - "updatedAt": "2025-12-02T22:57:44.799Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "f6f1e41c2670ae8d50cdea7d15d969a3daac0f0bfa8426b1d685b0c73e220470" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.798Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "196d86037310e9c6ec330f5a320ad193ac920fcbce51dee60b65c98097a77777" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.802Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": 
"93d98f792a702a2d0dd159956e3b86c06f6a772a66caefd147b37d9fd90d95c2" } } }, "1ac7bdd9222c1e2ffa17fc544f1241b28da0cad7f185b619d29ac27e0aa8c077": { "3f8afe531fdd885aba914642b81b85afea758c6f849a7250bfeebc09887cc894": { "jp": { - "updatedAt": "2025-12-02T22:57:53.484Z" + "updatedAt": "2025-12-04T20:16:57.230Z", + "postProcessHash": "93b7801f2b13a5e9fd205722df4d47dc46240039a5f6a00c321dcf1b2a09c0ab" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.489Z" + "updatedAt": "2025-12-04T20:16:57.230Z", + "postProcessHash": "67ba2ce9bc3818dc25aa39804e9bfb92d1d2f8f981a0b27d74bb557d4faab11f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.483Z" + "updatedAt": "2025-12-04T20:16:57.220Z", + "postProcessHash": "c9398f904d21f07a3cf924ea12a56a1f09004c6be82bfbe7a69364be959db5df" } } }, "2a7b92dadf95743e702b18a30f74eb67e67fef5ea4124220e608f258c6950c9e": { "c66b9e2d0f4d5e382ea43aee7020fd1c7ff170725159ddc185d674bc64b0d24b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.794Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "afc108898171185e56cc785a491d1b7fb5963544aff304a7c9234298446d28b7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.795Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "14ba3e5e781aac133077f908c553b87ffebe4ed5fcbd38ffb86aff0533132881" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.795Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "33fd046aa0df0dad145c1f0b7ed950393fe0947d1a092df8f639bf15048029a3" } } }, "2f0873b2704cad58fd2172ec86c842a8262cb2a7c1e6cfbf1e9851fa843f4357": { "d4282945578d91a5ae49277f6ca146ca130e3b3df3c0341a5de3414625c2c903": { "jp": { - "updatedAt": "2025-12-02T22:57:44.806Z" + "updatedAt": "2025-12-04T20:16:57.247Z", + "postProcessHash": "f57539ecf926d81785496f6d8ca44c294b3245e06d7ed8769f8df77b64e40bc1" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.801Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": "ba70038e8c9b42cb918cd17ba65c187adc7f6582d0696ce412b0e41e616a8e83" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.800Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "f162187a1b711095ce7647c7eff0aa4de0d2add53b0aef67e6a2b2ee24ff8d94" } } }, "583a274e308afe89671f40f27b3be757e2c9e98eeb513765c802185f5ec14e29": { "17f1e539b1b6e7759a4aa628833db4667a7e74444abb42880111b4568a28ffe6": { "jp": { - "updatedAt": "2025-12-02T22:57:53.475Z" + "updatedAt": "2025-12-04T20:16:57.217Z", + "postProcessHash": "f1d4084294e021000c02574dfe7b76a8d0d86cf77c794678450c270db6f9d3d0" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.475Z" + "updatedAt": "2025-12-04T20:16:57.217Z", + "postProcessHash": "22f383300e6fc8457e445bccf094a34a8d19b5d70a66032c2fa808b3407ef1b9" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.795Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "73965d355e4b340404b97ff3e107e6b5bfb90ef69ffb72f1d7e51532c54c115e" } } }, "60a5d6b5624fc995f6807d15e66c5a5b6bc86dc212e1745ef8bef3f5dc15c3df": { "c3d809b05c72707e6bb1947b6db7f23f641f83155cd0f83a5e6deedee8d07adc": { "jp": { - "updatedAt": "2025-12-02T22:57:44.803Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "e88f7f99394e8a814600d55fab7b7cb28f91cc3288b88e611d9a79fdfe947ff5" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.799Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "20bdcc381d83b27f0cd36eeeb114197041262306f5582ac43b80c16bb3041d53" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.798Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "1665fd8e8be66e5286e7e3730b43cd1e965b44b65b2abb2b8df647143a1df4c1" } } }, "65c3d5357d49f1617e6e959d8e29071102eaf8d0d9e4d1c2fb7cad70b6173a35": { "4cc1991c7b87b22d25ccb176e3b79b021cdde65ce0a2f2e4414efe519cc65f89": { "jp": { - "updatedAt": "2025-12-02T22:57:53.482Z" + "updatedAt": "2025-12-04T20:16:57.220Z", + "postProcessHash": "33adf94e8cc69d92d9719ff335e01947080998abec1114506be8adc1821c0e24" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.478Z" + "updatedAt": "2025-12-04T20:16:57.218Z", + "postProcessHash": 
"75618e441b5cba42230dc889bb71d42822df9ce1c732c6588fc63bfd987f4a84" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.456Z" + "updatedAt": "2025-12-04T20:16:57.217Z", + "postProcessHash": "bae812e33f6ea4027c2253022a50d998bd59bebb0bc5c9fe5509e14ed331c864" } } }, "6e5e66ee5bbbba58fcfeffbe0603dfd55d38dd278fbff14d70aa5595ee971bd7": { "c4a33214adceb28be9e704611bd58cf7f2b17ce705ec29ba0ffd552d82a3d73f": { "jp": { - "updatedAt": "2025-12-02T22:57:44.797Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "7934894812ab8dd37af9fda996912f128d15b5ef4974832153a9e5734e8a723e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.796Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "aa6c58538c24ad5230c56e2539ca3b47a4c02753be3c3df8bcb0252b301c7388" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.794Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "ae5911f0723bcd9c07a8be53f50176547b8064aec1e21158a03b40fd1918cf7d" } } }, "823e98e23e42535697ba133dc5c2d8b57c41a87124d772a59a7bbf545ac0dd84": { "d6ac975393106fe00f3edd51a065ab64a5e99a9aad622632a08705ff68ad4b36": { "ru": { - "updatedAt": "2025-12-02T22:57:44.808Z" + "updatedAt": "2025-12-04T20:16:57.248Z", + "postProcessHash": "677687d65299252d2326a45ac7ee190bac323ce7c21940c3507cd5643e7b88de" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.809Z" + "updatedAt": "2025-12-04T20:16:57.248Z", + "postProcessHash": "956439ca0f5a1b664ca5a25e09dd1c7d8c9f10494fbc1c38a83fef1c6f827e3e" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.811Z" + "updatedAt": "2025-12-04T20:16:57.248Z", + "postProcessHash": "2e2f471bc233a5126e1c297b055bca3439de25a3282808f31900448b1b566a7e" } } }, "907c6e7bab21d938402f694e7407939915297c82eafd1730100c830df4918778": { "c3a2fac6bf16acdaf76f1ef65dc3317e37696c35df4e8526e1bb887fa5cfdeb2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.797Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "aa78ebda4b74f413aea7558aa44924094f6130ccf074966e72a9fbef35639e67" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:44.796Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "d686717d86bd8df1da7b8e8a51015db018b7f39af96a32de178ffd0fd48b91aa" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.799Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "444956a8bc2aaf6d366dd649c4aae26166db62a77e0fd05ddb1be3e80d6828ce" } } }, "9840f3c84ffc8a8f881de9eca8454a7e8de6c5bd5c30b6a27784816805453183": { "491cb45d3cfae94c2b0cdeaaaf82b4ad9d2198ed681060717d4f79378fc92714": { "jp": { - "updatedAt": "2025-12-02T22:57:44.803Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "94db1353c16cb1af7e481e37d53606c66deadf6ce21d885f493ecf19416cd601" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.804Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "e8d8735c23e9f38886189fe5e7406e5127e9ec027759c969ab66864c7dc124b1" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.794Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "7b3a5a9e929fcafc336f7ae5f7a409b5cb2917b782d57c99fada20da41d666e2" } } }, "acee1d54d44425817e527bc2a3215f623d6ebd68804cdb7f18541efb76fb830f": { "53b8019634b145bda892aa14cca4d68066dd9ed1223e68c1450a60c3d6af3368": { "jp": { - "updatedAt": "2025-12-02T22:57:44.795Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "72972c6ecf003db1d4784ae9ef4696cc86677e0ea8dc40467b07246f6bdf6d39" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.806Z" + "updatedAt": "2025-12-04T20:16:57.247Z", + "postProcessHash": "a0a20793419c07b3555fbef9c0561eb1132fb3868f1f894452027d54b65f1491" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.802Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": "85ac1bb864fd45919f48fdf26181cc62e85e7aaec4d55a3dbcecde7bd0aeb101" } } }, "b66cad86246e7e672bea77de5487ab3238a0cbd0d829ebb54fd0e68f3cbcee09": { "9cf089c5df430ee74bddf608da84394fafc963e1bd03cd0e94fe2c6c179ecce7": { "jp": { - "updatedAt": "2025-12-02T22:57:44.796Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": 
"19d2a099f9a5950aa401e46fbd7e7e967ebce0c3b42817bfe21ea033cd251490" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.798Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "36328d3e8561914a8d70fc1b9b899f3a15866373c51ea3ad15ee8c9519f0cd1a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.804Z" + "updatedAt": "2025-12-04T20:16:57.238Z", + "postProcessHash": "1a4de4bea2534121914e1c65cd6f6f2902f347742d4fa521459c060565945cc4" } } }, "bc72b7a9222edd97db763cb5ebbf3a678bd9d11ef3bc3a2c28fd6797dd845434": { "ab1bcc3128e7fca61edfa8cb48cc7969450a097b52da47b30b191820f3c2d949": { "jp": { - "updatedAt": "2025-12-02T22:57:44.808Z" + "updatedAt": "2025-12-04T20:16:57.247Z", + "postProcessHash": "fd5c48a4fb1e8f99f54eee38c2dadf9a04b318d0c7d32a82475d96768fe293ab" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.807Z" + "updatedAt": "2025-12-04T20:16:57.247Z", + "postProcessHash": "4f3cd4c563071cdbeb11b6ef1ad5aa419abc9b80cabd41d282db0f132964dedf" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.791Z" + "updatedAt": "2025-12-04T20:16:57.247Z", + "postProcessHash": "92c098cc673075b9d834057671192b12ec3a8c29fffe60cb46b37d1e98e168d9" } } }, "cbf8d771d3e60af84970fcb0a9a3b216e9fa9d6604d8b59222876988a0f9a23c": { "05073dfddb68903600de4505e9ef4203c4b4f5979a1ad1001059a7e6a6c36293": { "jp": { - "updatedAt": "2025-12-02T22:57:44.805Z" + "updatedAt": "2025-12-04T20:16:57.247Z", + "postProcessHash": "ffe5981ebc4444742f75e8412d4025ab7b6f6853d036f1f165a538af6def4921" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.794Z" + "updatedAt": "2025-12-04T20:16:57.232Z", + "postProcessHash": "27d157dc9febd4e16ba59231ddfd3fb2c108f6d1e9a808cefb7455ae687f2cb5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.800Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "33fa33056cc431f63461f682fe3fdeac57034d57df18236716afd96e19f2c6e1" } } }, "d259b209c3435b62d81021240a05134c0eea6d95e4ac914c175f7122e5bcdbb9": { "2336e34b998efec4cc83f0643bbd8fc97a4fb0afa00c3343a22139925e777a12": { "jp": { - "updatedAt": 
"2025-12-02T22:57:53.475Z" + "updatedAt": "2025-12-04T20:16:57.217Z", + "postProcessHash": "4065bd0bbdfc75bdfb96c6dec7f78c1e4e79feb9f1a6fa8d43890e6e35effd23" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.811Z" + "updatedAt": "2025-12-04T20:16:57.249Z", + "postProcessHash": "1fe3a00133f5cbf685126b236afff76b470c83319856c44dabd2b6437731a737" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.810Z" + "updatedAt": "2025-12-04T20:16:57.248Z", + "postProcessHash": "abbefbeeadc704e0b94af64d2dd86fc69890fd2410c3b48d9960bc50c88519c8" } } }, "e01a6937e1ad5b11af43515d1a1e66db9e6168b75b5528ca1b7b9d7f3c195868": { "2c6fc2afd47aebe8d703b5147ab0245326aebcd6663db747fdeae29badcd7caa": { "jp": { - "updatedAt": "2025-12-02T22:57:44.810Z" + "updatedAt": "2025-12-04T20:16:57.248Z", + "postProcessHash": "fd6e046a02f99277a22ac1cc7ba3954c8ce3ca711c98c848aa32b8c07ddf65a2" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.810Z" + "updatedAt": "2025-12-04T20:16:57.248Z", + "postProcessHash": "4af45f28d313ac11ecb81fa239202733771cbba4bb4d394387174630084eab47" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.808Z" + "updatedAt": "2025-12-04T20:16:57.248Z", + "postProcessHash": "c0516279736a75deb1a07f71db6ebb1318c4c1b261cb537d25551fd2e1cc09f8" } } }, "eac642db564baa4ce1f75ca03dc5a23f44db2e588ad4390c7c3cb746e81f695a": { "4bcedeede08560e01db76c1f8f3c871bd8e8aebd281853aeef86bbc3841fd68e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.799Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "1b2f8d66e7ae16772e969f1037ff20fb84172e15604299d73707346fc0a48560" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.801Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": "0c9ab96dae93b6505e17cd7c22f6750b075c72d405547b841789eaee04edb086" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.796Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "99f9557e87f09bd2946feefefc7f5bad26452348453cfb0b9cc2af9f7d495f36" } } }, "f5ec5f1c0bd0776f9a2a7bff868a443e7cbb090161004e7da40277d9999a5e0f": { 
"1d3bbb34461ec824f8f745ff89fbbe7930bf3ca75ffcf25386fa8e097845e074": { "jp": { - "updatedAt": "2025-12-02T22:57:44.793Z" + "updatedAt": "2025-12-04T20:16:57.232Z", + "postProcessHash": "276a47a8cea0773d7ab1b7faf852327042c190cd29494d2b5f11815c2abb2eca" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.801Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "4af16cef9597591b53ec296dc5ff9c0c72d9356aa144fbf32f1501d21e27e890" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.802Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": "8a01efac34375649fb0f6a70803cc5e4fead16bce8971298a9aa4015c15564ac" } } }, "faf7c1208ac6cebd805f74b867ef0352238bb675f4e78c25744706e43a0bbf35": { "067bee4f308eb8fb0ee42187bb88647c1df461930299cda269dae6be1e92e2b2": { "jp": { - "updatedAt": "2025-12-02T22:57:53.478Z" + "updatedAt": "2025-12-04T20:16:57.218Z", + "postProcessHash": "f938803cbe858f13e1e3c804babe5a470022286a8ba78fb3df063111308f3ff1" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.479Z" + "updatedAt": "2025-12-04T20:16:57.219Z", + "postProcessHash": "523f310d175f1ab9fa687562729f8d30eb2e3cb0a282bcf4ada6c0f97c6ac6e0" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.482Z" + "updatedAt": "2025-12-04T20:16:57.220Z", + "postProcessHash": "3c06dee87ac93a95d760285d044f458ab0f18382e3a7720ae8013912c7d07198" } } }, "010f8d66bb60666d0d358628411eeb2b1df5cd86161855532ace6b686750bb2f": { "0feb62388a935eebc64bf1d2d0c742a3b9d17f4ae18ff4e0ed9f4fe6e68ce776": { "jp": { - "updatedAt": "2025-12-02T22:57:44.791Z" + "updatedAt": "2025-12-04T20:16:57.232Z", + "postProcessHash": "d50e1f7ca87935d7223cb7ea930e78e3653488d8d740d443efcb180688dfa89c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.789Z" + "updatedAt": "2025-12-04T20:16:57.231Z", + "postProcessHash": "c43f725d9f7e869b97d6a43557c9a2cc6de8a62347f1cf93669a0e6a1ff06ce1" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.790Z" + "updatedAt": "2025-12-04T20:16:57.232Z", + "postProcessHash": 
"00048b446415a086b97e1d6dd8b898b89fbec371f044d79b5b67b4aa0b2d2616" } } }, "050352a11ca817f5bab4911101cd95e7ae7dc5d8954cd551028380c728411a57": { "6cc2916b976989ba2663dd50f541fbe9751c56f179ac300fc3381ca3479e452b": { "jp": { - "updatedAt": "2025-12-02T22:57:12.959Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "c88638e893d5ad1bc29c4d49f4de3de01f1a63223782c910ebf58fbf54af178c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.805Z" + "updatedAt": "2025-12-04T20:16:57.238Z", + "postProcessHash": "44dececa7198dc3d0911bca5daec67ae7b2590570f6d71e38529f73172fbac31" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.803Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "bdee9f435b9ec3bef606a664dbf0f5e8a3a858394fb4348d0c807606a04c8271" } } }, "09a42960aa106a95a5cbe49be7b12f9120aefe3ef067ddb1c1b026342239f3be": { "eb1dc019fb90478f30509956caa9e4f342a6e2b031332733edb6a6b927bc71e8": { "jp": { - "updatedAt": "2025-12-02T22:57:44.820Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "5e9c0943fd0fbd8c76d30a8bc627ac2c230a612d30c370759f6231aeae30d4ec" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.819Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": "fadfce066fd576a2db9608e65c1dabb71cec5cfd7ed9792107e42b51073c16b6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.820Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "0a8c2b6c9429fee97a046b8833986f5a2022e41da004f3d892cbf07b0805d1e5" } } }, "12be1e0f0e04bb9eee1f814b983cb24150e4b9b4f2e86f8c6cf33f7dd28edf16": { "25966e125c5b0d0e09bfbe0bb6c4eced38f8afae86050666be245c00bb7f240c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.793Z" + "updatedAt": "2025-12-04T20:16:57.231Z", + "postProcessHash": "d4f07af17b92e59af2540d7be1ef3b690266bf147dd26548c2249dfea421fee9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.793Z" + "updatedAt": "2025-12-04T20:16:57.230Z", + "postProcessHash": "2402447a6648dfc155775d180bfa13a2d7815d7128e10343566ed1224184cb1c" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.812Z" + "updatedAt": "2025-12-04T20:16:57.231Z", + "postProcessHash": "e880bc07ae90ecdb6ca8de4cd21fb76de5261feaef2fc32daad0cc3fcd27033c" } } }, "130f0cbb1d6c82f8ae457bc5d3dfde8dafaeebcec17cebf6c1ec40eb99cd1392": { "4b5db766a70f9027101f584180002e5dd6f63ed99aa3d036eafd61435ddb4812": { "jp": { - "updatedAt": "2025-12-02T22:57:12.973Z" + "updatedAt": "2025-12-04T20:16:57.260Z", + "postProcessHash": "ed3b539371fdd4a64001a5062440ea039a5dff748eb1e0abd4dc065b71dd67aa" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.970Z" + "updatedAt": "2025-12-04T20:16:57.259Z", + "postProcessHash": "1596457814a5ef0ea5aae0856d5915254d42eb2411a596d0bbd279c1f98e73cc" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.792Z" + "updatedAt": "2025-12-04T20:16:57.230Z", + "postProcessHash": "901054cbf8e0cb89290be6a18d2ea782c3f86718dd507f99d5de23a7ba41c69b" } } }, "30c2729724c6bee568ae40c9d3da452323fc101c8c757841c99647d3bf63f057": { "4eb3058a8a2fa3f5f9687fb24c64b384670b5101c5582da6a348ce515411116b": { "jp": { - "updatedAt": "2025-12-02T22:57:12.975Z" + "updatedAt": "2025-12-04T20:16:57.264Z", + "postProcessHash": "13b9c4f9faccdd980af2a4f1a67105ac6afa9127ed8b12301c472a9618495dbf" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.975Z" + "updatedAt": "2025-12-04T20:16:57.263Z", + "postProcessHash": "99a3b4d44b328f0c47671b75a7e756ab98e8779e71c0ee14023089677dab295a" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.975Z" + "updatedAt": "2025-12-04T20:16:57.262Z", + "postProcessHash": "eee4034255d872ab4b5002e4d502d14a43f882b4a0c39aa9c5152d94ba603887" } } }, "377591bbd1fd99159d49074a1492a22294d47fb29e338af9ccb456a44f4a181c": { "79d09c5dbf435fb10ca29153a98d5b635aee732c2aa61594fcc2f065989ce327": { "jp": { - "updatedAt": "2025-12-02T22:57:12.970Z" + "updatedAt": "2025-12-04T20:16:57.259Z", + "postProcessHash": "b226493ff8d6f51f9e0d63f6e0dc625a9c446190401b56b83d6565987693e975" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.965Z" + "updatedAt": "2025-12-04T20:16:57.255Z", + "postProcessHash": 
"bb260852dcec4e537fa1808e3b89d61dbfbc1435aea44bd9e05081437a4611ce" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.791Z" + "updatedAt": "2025-12-04T20:16:57.229Z", + "postProcessHash": "c1e4dd82c4214a1037e81e2702ecd87af9e6cf33c836b02873ccddaf88f78ff3" } } }, "40a262fc5e1c5d47aaac955e5d56710c58b184834fced0236175665ec187d93f": { "d9751428d997f059562f26e9bd7ac68c276f0bbf0c513551408f0513601e3d16": { "jp": { - "updatedAt": "2025-12-02T22:57:12.964Z" + "updatedAt": "2025-12-04T20:16:57.255Z", + "postProcessHash": "335674181cc56907dfbbd944fa2b0bbd9bf3e6944669c333fb7f96195b45c96a" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.968Z" + "updatedAt": "2025-12-04T20:16:57.258Z", + "postProcessHash": "a77cc700167153235d1ea2b6cc99feb1ae7daf924ae602aa1fdaa581b7089186" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.963Z" + "updatedAt": "2025-12-04T20:16:57.255Z", + "postProcessHash": "1ad24121fb591ef287a7c856a6d30ae02d05bfd9242ae37980ce61770f7fd1b1" } } }, "46dbee6938843b18fe050245cf7379885770dc6f9f8ed8013ccf5222d1c306d9": { "1c26addde8215daf58446cd375e5e150c2d5ceeefaa8b2acfdb9c9c8afb9953d": { "jp": { - "updatedAt": "2025-12-02T22:57:44.818Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": "323f08c92f947e72571a009a9233f56518863ba11c6cd5d9dd898b9cd23be5e9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.821Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "86592071e80f8b766ae09c676ab51c3265fb0919fbdf379581fce9e2591edf3d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.821Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "ac2e40f9260fc567245862c2594cbfe54d95f16f442b558d58f28d9a69378e03" } } }, "4c1ad3942b4184430a7d30de866388373d48c1a27718ee8712e351668b5b2c7b": { "7f0ff3de1f2f3ef36f7c5bcbadc179455a3ae55c4a9e388b8148b18a4dfe6b7b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.805Z" + "updatedAt": "2025-12-04T20:16:57.246Z", + "postProcessHash": "568d913b347d3ded050107be12417cd4f0962e2e542e644ae87dee951209f03c" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:44.807Z" + "updatedAt": "2025-12-04T20:16:57.247Z", + "postProcessHash": "fe07f4fc961f04f82e80f5add48b5fafa9d01fc3ed87117ad8c247a9ac9a8f15" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.807Z" + "updatedAt": "2025-12-04T20:16:57.247Z", + "postProcessHash": "742103106ac76367d32835e0eca4b38af12823dcf7906ec04c897fd65b026d2f" } } }, "8d0001685270931055d17a8eb50155f983dcec73c892d71e3bffe9004c1cacd4": { "c26606f99e8098f4ed8f1e29ccce89dec0e9cca96fa2536b375d31a3b9fb8036": { "jp": { - "updatedAt": "2025-12-02T22:57:12.969Z" + "updatedAt": "2025-12-04T20:16:57.258Z", + "postProcessHash": "b44273e11f67943672ce36a3b04b3c51e3b9f0a1dc88cd8979c79d7c55cf3ee6" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.821Z" + "updatedAt": "2025-12-04T20:16:57.238Z", + "postProcessHash": "aef07e8400202fd1faae12221f0b80f927ed5d397e006cbeb42f9f59bf18d5e6" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.970Z" + "updatedAt": "2025-12-04T20:16:57.258Z", + "postProcessHash": "54d10413eb6dd938c65b2855428b63c5b51fbe331f4f3ded2a1f4bd73a5d2fcb" } } }, "ac35f8f55935d4ecd0e3d7d6c02b398d04b18c830570d65f6551b9b4ff45bb74": { "09c8a0f7de8fedbc086a20b8603b6ad2439fbb800e29c34ecc840908cfa41148": { "jp": { - "updatedAt": "2025-12-02T22:57:44.816Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "90f2c4922a7f86a0fd024dbe4bb3dbb6482f25b7f08f94137e4b69dae26e131d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.818Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "53079436557cce2b97757a30e5615dabf3a0aed3dff0ef70f5e4229ad32f8c8f" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.822Z" + "updatedAt": "2025-12-04T20:16:57.246Z", + "postProcessHash": "4f0559149c4057d0830afc58da4ee78644deb51f97a1afe4423ee45c04971872" } } }, "b949b99783c59002d6d1611f53562639a71143cfb90e027a848ef13b70877e4d": { "65ed1ef87fa32188d6b83d9345586ca7be9701ab437946eec188e8d638e56014": { "jp": { - "updatedAt": "2025-12-02T22:57:12.973Z" + "updatedAt": "2025-12-04T20:16:57.261Z", + "postProcessHash": 
"a7341a4650c58ecd83f1069f4174e31d1f0c5580c7e082fa99433ae4aaeb4214" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.974Z" + "updatedAt": "2025-12-04T20:16:57.262Z", + "postProcessHash": "f7bfc9e343fea89d15c10f57ef4dc22a8e795aa1c05a7486a4e5ccce29556765" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.971Z" + "updatedAt": "2025-12-04T20:16:57.259Z", + "postProcessHash": "9a8671217a31fc1e826c9a84d693264321610b6e84e532a28d60821d296b2c27" } } }, "cba0abc4ab65e9d030139163304a053ef5b1fe651a26215e77c9e551fe3b8191": { "62328876676efd5312772f4062f7342ab3fbcced0fec39177d7de554d93c9005": { "jp": { - "updatedAt": "2025-12-02T22:57:44.815Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "f46a0be96af319e5c6ba45db98bb06c87c070f23f00ca363341afe0d287ea871" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.816Z" + "updatedAt": "2025-12-04T20:16:57.234Z", + "postProcessHash": "65d62d010947a43a854c0a51814366547f7d9955f8fc0cb1366f39b894f6fc6d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.817Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "e89140cc0ad6b29daf0caa2fa47312aa32e8115460c78513f1b9727d4203bbff" } } }, "cbf50a3e7f149ed504ecfb70077f07ab9e2fed847c539a9e27d5aa88c015e5f3": { "2db80f4884390b5613f02ed18bdd9194f0f0ca4c9123aaf5b1a68e1c52e263f2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.811Z" + "updatedAt": "2025-12-04T20:16:57.249Z", + "postProcessHash": "b309fe63dd499dbc2b5e53a02528e10c06f80b04acacdf0b277c2298f9f02dec" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.812Z" + "updatedAt": "2025-12-04T20:16:57.249Z", + "postProcessHash": "9e5386ba1b2472082d953cd9366a0820a30c6a63d5de227e78f69c86a4b47a8f" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.812Z" + "updatedAt": "2025-12-04T20:16:57.249Z", + "postProcessHash": "c7df439546742fb72650a2a46343be4fcc234a9bebcfeb5ba54cd24b3c09531b" } } }, "cc4204c3e95911221d74a7265dd2e67515e9b01a1b9394863f065398c955594d": { "9538d72bcd29de25ee9a900cfa42747f8ab0f5767963a08a3028ab7f3b189a13": { "jp": { - "updatedAt": 
"2025-12-02T22:57:12.966Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "5f1c665b37c3e50ff775ee713881a4117d9997c9150f8bea4196acb9709161bb" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.972Z" + "updatedAt": "2025-12-04T20:16:57.260Z", + "postProcessHash": "019f914cd7fd0eeb6c4723831071171db725465b955476778c500bf2f10f2cee" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.972Z" + "updatedAt": "2025-12-04T20:16:57.260Z", + "postProcessHash": "81ea8535aa2c732a871f8a1549b18890cb3be1f722161837c490853d63f4e2c0" } } }, "e15247f6690c752be9eb052c65d6188bf83aa3aa12e45c3969ebd294c52787ad": { "e8049a4edea61ad5f86939e7938566c1c4db909e94409eedf5fec35ac6d46e8c": { "jp": { - "updatedAt": "2025-12-02T22:57:12.971Z" + "updatedAt": "2025-12-04T20:16:57.260Z", + "postProcessHash": "5167abcd9a2eb17ab9b0f2f641ce3e99221e0a07d56739dfc1bcd52679ff82fe" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.792Z" + "updatedAt": "2025-12-04T20:16:57.230Z", + "postProcessHash": "3224d1eb56a5303623abd5d8d51e83d6921d050ee3c24bb21b21400fb1198e68" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.972Z" + "updatedAt": "2025-12-04T20:16:57.260Z", + "postProcessHash": "fd3d743b9545f23e5e052a68a8e3c0b6384298c3a2cffa4a5daefd131c2dd9d9" } } }, "e979381df042f92438f9f485ef68a9901f7ebe9aae3e09ec14dd65b67c2d842d": { "67bbc03e619fab0b6f99efec8b0f2fb38df1395be3d50b3ed225f0da4b3f4452": { "jp": { - "updatedAt": "2025-12-02T22:57:12.968Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "494cf58aa1b97ff95ae9b33bd6ce4d92e4d2ca882adf8d3cad0f0bc68695d627" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.966Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "c1b8bc968a1beb176529c0932c7f6a227cb64cf48edb59c5938e5135d037c7ca" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.822Z" + "updatedAt": "2025-12-04T20:16:57.246Z", + "postProcessHash": "0bb52dc208d0e4ad82b5c43943a23c54bf900d614e4f0a1b54711a1e52da03cd" } } }, "edbc39ef9c56e581bb84c8896ac7b1374e3d19f83b554328b6e8d3d38fe01125": { 
"1f975f6dea1c15645a72a9eac157d5d94cb767124fa4ad2e367bc8233d6b225f": { "jp": { - "updatedAt": "2025-12-02T22:57:12.961Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "f0c1f8731711410b0dedaaebe4d0e0410e2a7d59c08045410d7e9c55ef2c362d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.792Z" + "updatedAt": "2025-12-04T20:16:57.229Z", + "postProcessHash": "eb2de7b75beb7c2cc68c4395815aa53c8b0a5fb7a2e862439b36d9414cf6a713" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.967Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "34fe39d1074c8d7834ec69eb9782e23ab725e0878166cff190a18ab2d602e593" } } }, "fbe0f20b7a71a4be3f802731a84f0eda5afbf565f744180f030a4474dd0b950a": { "acb4ae581b304b32468cac562d7016a47f6ce4fe713075ab12bd276f5d04a0cc": { "jp": { - "updatedAt": "2025-12-02T22:57:12.963Z" + "updatedAt": "2025-12-04T20:16:57.255Z", + "postProcessHash": "fa1537fb5d23d3b0217839d785c2663cd21a95c33c65ed888df46738ba6e5d8d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.820Z" + "updatedAt": "2025-12-04T20:16:57.237Z", + "postProcessHash": "3d928a3c66b7d878755fce4ce44d10ab4629339407faa2524f7e60ff884247b4" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.817Z" + "updatedAt": "2025-12-04T20:16:57.235Z", + "postProcessHash": "530c753422a5c814ffca67d47fca97502b949106eb2dd75bbbd63be8737cfe79" } } }, "fee41c1b851550b4068c1cdd9e5a18a829a2d27697fe22a9678a8c0d0e87836f": { "5d6d7dab6e54977464b77d2be0fe3967209334b0d1e2cf141000a53098cdb64e": { "jp": { - "updatedAt": "2025-12-02T22:57:12.967Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "3f06fa43f8aebcadd6b8eea081651bdf6b5d4ee71ab3d70b72e08933c87d7bf8" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.974Z" + "updatedAt": "2025-12-04T20:16:57.261Z", + "postProcessHash": "ef722adcbff7349ac8c1cade0aa739f7ca0ad853d3014f7b36c10a932b489db5" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.969Z" + "updatedAt": "2025-12-04T20:16:57.258Z", + "postProcessHash": 
"673053edd746ceb7a82487294606d047bc48675f3a5c70456da16632c696c62e" } } }, "00f878a9534e344ca38d2f13a2d0b58a40257c9f7c696adfbc337ee5148c5894": { "d7ae2149e8a1eca5c76f2e499f9ddf19c90a2c840a153acd2e820b96f79c4e3d": { "jp": { - "updatedAt": "2025-12-02T22:57:12.998Z" + "updatedAt": "2025-12-04T20:16:57.264Z", + "postProcessHash": "df40770c860afeaae645be9cdeda2147700d277ea96056e7ba11f71036cc0b11" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.999Z" + "updatedAt": "2025-12-04T20:16:57.264Z", + "postProcessHash": "87ac07fe1abb689e3f5b0353d74e0c1ef063fbd05922fe724542aceb96d83845" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.998Z" + "updatedAt": "2025-12-04T20:16:57.264Z", + "postProcessHash": "08d0f6dea2bde99c1b7660082a73cb0134c55f3b1d972aa88a6cb3204f2de193" } } }, "262ef21ffee6eb3348b7484a2cb16afdc22c4c02ce86edaa851cad0979d13067": { "5e4f687928ed10c1ab9ee1e070abf78ab7adc2bce9345de9158ca65c8a403093": { "zh": { - "updatedAt": "2025-12-02T22:57:12.993Z" + "updatedAt": "2025-12-04T20:16:57.262Z", + "postProcessHash": "d5ac1f3a2d7dbc1af61903b641a3fe9128808603cbee3fa8a93c5edf5aa0da23" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.992Z" + "updatedAt": "2025-12-04T20:16:57.261Z", + "postProcessHash": "237a256cd5ce4b0b7d9495d12b2d7ad32b89d71432cd4b5c5b4d9d587b4e95d9" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.993Z" + "updatedAt": "2025-12-04T20:16:57.261Z", + "postProcessHash": "ee0150b4d0c1ec84384f69ff4eff6a9c78f634f8a11b57430f953d356de6a028" } } }, "42014f03b2e5e17b4e9d8b4cd80cfebbf2e3bca570177e4a0f46c977d992d00b": { "1713044e3cccefd79213a1fea6cb08cc00fcb5a3cdf023fa1b265af8ff27f097": { "jp": { - "updatedAt": "2025-12-02T22:57:12.987Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "860ffc7cef6e14995e9d34d344354cbb41eb050b047bc49ac229263ae4c74a2d" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.986Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "99a46ea682373599f8cea5b2681a91c891467248eb7ffae4aa0407640ddb40ab" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:12.990Z" + "updatedAt": "2025-12-04T20:16:57.259Z", + "postProcessHash": "633d85a864c7ece89638e85962f5f3162e2e6a0103261aa6d6549153c4e1f1bd" } } }, "4c05567fa33cc5dc9787df23810bac6451ac3a0fea9f57dbfe51135476f2af9a": { "539aa35729dd5eb9f1172afd319421d880ea5ac2efe1aac243083236a1389aa5": { "jp": { - "updatedAt": "2025-12-02T22:57:12.997Z" + "updatedAt": "2025-12-04T20:16:57.263Z", + "postProcessHash": "0bbe07c3c7de97a38d1596a38cc6cbd7f986b9dd5d454f13f7683dcbde628f89" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.996Z" + "updatedAt": "2025-12-04T20:16:57.263Z", + "postProcessHash": "e97d9316a112e473b3a3062fe3146b0517dab09cc40d705233d2a3218783ce30" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.995Z" + "updatedAt": "2025-12-04T20:16:57.263Z", + "postProcessHash": "d8aaff67b5b895c07da4251c71c08a6be15abf0eb870315bb8800896439e1774" } } }, "4ec20679bc9c3514801ed7e21c4718d82ab75932df1a07eb0572f662a5730d98": { "86d2c497abf25c94fa112b01bc6df68914ef1bdec7669aac57b740da912b33d9": { "jp": { - "updatedAt": "2025-12-02T22:57:12.983Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "ce1327955038b91ccac97eecac51643a6711d6be23ba6a68f595a0b5019cc121" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.814Z" + "updatedAt": "2025-12-04T20:16:57.232Z", + "postProcessHash": "8d8a6d055d9c476b2a373ba0019500e56f90e11c7ba4c1e799fe8d94238d3078" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.983Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "f788a8c8038a4f8a85e8ca4676dd899a73f9aaa2edd08d98f869ed80e9ba2964" } } }, "5a79d1e559ea1ad9f3ddadfdb2a43b047724a8158973368a06de949a009e4a82": { "f10bce44ecc97a7f7fbb9e4dd3135a3443539faf27799c8357608d1f78f0ea0d": { "jp": { - "updatedAt": "2025-12-02T22:57:12.988Z" + "updatedAt": "2025-12-04T20:16:57.257Z", + "postProcessHash": "d6d9a1cca7ba3b8a4871bae1af9511d6911b9e328ec9b31b42bf3a0420233da7" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.984Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": 
"614309a3f126d35921127863c8b35082290f78be3c229cedbb1f5e8a39d309c0" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.985Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "4e8fb172161fef82ec53f20d1f60843639451c46db2fdece12f9c9ff3d431ea0" } } }, "5ea715da4571fccc329fc033348aeecf183417b55c28bbdac60956aa1debf704": { "2a8b05277ff4a9cbe8def769d30fe9965fd38e380148a45171afc696a707de97": { "jp": { - "updatedAt": "2025-12-02T22:57:12.987Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "31f2183901a97ce7ee8de6ba71af576afe2f8f7ff35b214bd874181c76aed3db" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.983Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "f3edb56226e3441c0d545e063c73c412257daf41916cac0e5e3aa479c2ef9ce8" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.989Z" + "updatedAt": "2025-12-04T20:16:57.258Z", + "postProcessHash": "8fa117406f9827c64376e61898a01c68df2b92027462590b7e0b97085429ecc1" } } }, "6577565180fdc2dd6b58e525087f761a4a641e5fcccec17b8d198f112e8867a2": { "457a7fd8ab504d03ed723c9475bd87417db7fa6b8d538f336eab293e7c2db492": { "jp": { - "updatedAt": "2025-12-02T22:57:12.985Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "a5b6a76266ac3f54703cafb4ea761e30db6c257186aa2cd8a67dda4ddb6b3384" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.984Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "556aa2964b881428f3a6e9121aefcf9fa122756b90612c8903172c8f35741194" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.980Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "22d078c82140e2aa58e9b0ac8b3511f3e7cf1b81884feb9cbe62ec00575e18c0" } } }, "65f86c7c3a06da5be6ca7c02d2ebc67707b92772d464e19a9f17a4ed1f5068e0": { "816a9dda53486f2f740142aa953a0c567c672d1d673898a9ad9493dd248c9c0b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.815Z" + "updatedAt": "2025-12-04T20:16:57.251Z", + "postProcessHash": "7e11ed0c5dcb0c6e76dc391775ef3e9c74d1a5e48a5004f371dda96f913e9623" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:44.817Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "62edd7029884c10ee1282b11c818559ead4e77f96745f0fd1259fec0c4fd795e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.816Z" + "updatedAt": "2025-12-04T20:16:57.251Z", + "postProcessHash": "0b8436350b4cb1bc60cf44ffc071f14e2b4bee8eebf50f09a600c52cbd6b8171" } } }, "69e3ba4ff50b5b7c3475f46f967bf675b4e5a81f02e3546d810018e6a3fe12c7": { "d64fa7ded50ab81c30dff31ff460cf6ba0811be8f95127b0bbec04487a124039": { "jp": { - "updatedAt": "2025-12-02T22:57:12.986Z" + "updatedAt": "2025-12-04T20:16:57.255Z", + "postProcessHash": "c25931352a601b17751ee0e0b869c2cecd731aabde88b7281bd74ffccb06fd3a" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.981Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "19690bd7c6617c2fcebf8298314f3925da5f66d4acdd09e01a0e8c2d6a10fa22" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.990Z" + "updatedAt": "2025-12-04T20:16:57.259Z", + "postProcessHash": "8f42b6b78c9b882d30aad93dae1ee8dbb24deaea4bf8632faf4f88718b3fc8a5" } } }, "741985413cbcc54cd08f4f04379dfece525dc97edf44e2f8df78b544f7dd91e9": { "2bd4eecf6148d08318f581143d8ed2830a034f2bd9d72c70252b27c1cf3654bc": { "jp": { - "updatedAt": "2025-12-02T22:57:12.961Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "1c7d7fa3357a33cf274acf245f314c2d7287f602fc8ef13c33d17a28cd61aebe" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.819Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": "90a4eab0ae92aa4fab9a2cc0d94e53763e7eab70104038936e5f63fa6fc2c742" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.962Z" + "updatedAt": "2025-12-04T20:16:57.255Z", + "postProcessHash": "8595f00982e0133c1b7fdf88a7899a84ee3db48a306a467da9e7cb1cbe70b27a" } } }, "8679a4ec12ab69d4d68a5bb2c5cea4b7f0881bbdd39e33ed0dbce1f7a96a02b2": { "6dafd0d4cd13c07a59291f74e30693ff78bc11afb76dbd58ffb368da7e83a065": { "jp": { - "updatedAt": "2025-12-02T22:57:12.978Z" + "updatedAt": "2025-12-04T20:16:57.251Z", + "postProcessHash": 
"e2588bc8df242d24a9e9123fbaa25ef5448dada1857a06307a26e43aaf575be8" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.983Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "4a215f44d9f5d4b22c0f15a94a5227c3439468eb0991139874aabe8b822f708b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.814Z" + "updatedAt": "2025-12-04T20:16:57.233Z", + "postProcessHash": "ed9bf78f4ff71ccb8b0ffc750837338e74ddc2e0616c57d9cf2063f8be003e4f" } } }, "8a737109d61aff4ff62c3cea1b972f0a7863c8fef9c1f3658e42f4cb31df1392": { "132aab96d1afacf12308b65ac1af9345cb2b097664b24dcf5c773ca74a90c659": { "jp": { - "updatedAt": "2025-12-02T22:57:12.987Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "3f7a168e74a7627fc7daab807134475e80e96c53ff165d65bed7d12a4331d8fc" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.982Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "381fe7370fec56a1bb9ab42191478dd19ba64518aebc902c30129732651162f5" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.984Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "383953701791a4c96b05846f7afca38e4f4a482c11a35ee1f48aacf559ddcb2f" } } }, "8b22e50ae696d72046531764b190a2ea3daa28284aebf2f2f2721e1db7b9a752": { "a3ec1a8f31c388fb6d343bd343994dbc83607b4c1aa74c136db042c2472a32d0": { "jp": { - "updatedAt": "2025-12-02T22:57:12.982Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "61de05c7fe1074510583fce78b558195758a907e68445598af306170f280f244" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.815Z" + "updatedAt": "2025-12-04T20:16:57.251Z", + "postProcessHash": "c5d24e5cce7d1a076e86ebe9526a32b1b39dde402a5f629f466e319f4b3a9d23" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.981Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "3480b21d7e3185bf61195477ac8c7352998fc1ce5e667847ae3339e2441571a0" } } }, "8cf48a0bc486c9b8e497ecc604e48b940db90a9be71802444fc6568bc64fd85a": { "2204d84ab0794f79cb34e93c0671da7bbce325d19d8d5bbb80030251d39917ee": { "jp": { - "updatedAt": 
"2025-12-02T22:57:12.992Z" + "updatedAt": "2025-12-04T20:16:57.261Z", + "postProcessHash": "967848d1bc240410f03fb19997c166bba46a963075853b34290e1299feeeafa8" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.993Z" + "updatedAt": "2025-12-04T20:16:57.261Z", + "postProcessHash": "2e9691deafa16504f2a9bde91237094d4e60a6367b7560c35cd998fcb5a14df3" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.994Z" + "updatedAt": "2025-12-04T20:16:57.262Z", + "postProcessHash": "509bcbd6a29f27bf6380c4f72b68d339cefb869799273cbc9610bcb2f838c3b1" } } }, "8f767913276b5f3417959156454a31c70c834a5c7093a2081510ef83903f4795": { "bce52080edbc2ef28e9154b8f007ec28a5e436114ad9041d55ab9bd299d603f2": { "jp": { - "updatedAt": "2025-12-02T22:57:12.991Z" + "updatedAt": "2025-12-04T20:16:57.260Z", + "postProcessHash": "787ed5d133591e18248e5182186fe828cc27cac4965580f752d1cbace5e13b76" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.988Z" + "updatedAt": "2025-12-04T20:16:57.258Z", + "postProcessHash": "f8d45cb76eb6d35f1246cb455f3dab0210640cd811c22f0e3ecd8b21071fac6c" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.991Z" + "updatedAt": "2025-12-04T20:16:57.259Z", + "postProcessHash": "1f70d8cd03dcd5c0f1a184581b78c7be1d4565d0820271ce5ab75982361ca6ae" } } }, "961e4fd08064e39aa2526ab854951754ce9cab815f42e5e159992babeeaa5b0f": { "cf7f511889edff19a30680bf294dfbeedaefa3ea56faf9de40db511b5d58efdd": { "jp": { - "updatedAt": "2025-12-02T22:57:12.997Z" + "updatedAt": "2025-12-04T20:16:57.264Z", + "postProcessHash": "eb14db603ab301dd02b41ed1725a597e778dc7fdb9fca79c4952eddc92cdebdf" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.996Z" + "updatedAt": "2025-12-04T20:16:57.263Z", + "postProcessHash": "3bff82a8abc3056195ed787256b4711852970cf4149236930a8a1a3b4e4fbfbf" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.997Z" + "updatedAt": "2025-12-04T20:16:57.263Z", + "postProcessHash": "c0dbf2c8ae7a18c26631da5e9e69e70711c8116f2517e3043865035dc00f9f5c" } } }, "c237b65e74a71bfcdfb33228aa085209a607cb0227f57e434c617a7ced16d975": { 
"cab8ecccbc0fcc08ad057ca251274b94773a36f8f2f5c0968c4007592472503d": { "jp": { - "updatedAt": "2025-12-02T22:57:12.995Z" + "updatedAt": "2025-12-04T20:16:57.262Z", + "postProcessHash": "17fa12384afdb76ac251b9f9f1cee64b6d74f75643bca20c95d0a1fc0f6cac70" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.994Z" + "updatedAt": "2025-12-04T20:16:57.262Z", + "postProcessHash": "3593d01d6bb41384406832a83eab47afa3e5da9d1c6e6444bbe36c4ca83d91b3" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.992Z" + "updatedAt": "2025-12-04T20:16:57.260Z", + "postProcessHash": "285b3f9f94fad9b553a0e41825e6579bb26373d911b46eb92b7c244ff0c67bda" } } }, "c3bbfaf5ba432f3383f9213e2c564cedcf64baf52ca43663bcd031fc79f65fad": { "46c4379cf36fa439d614c84a7b1f2a6e319d2f3a5e352e7f3079aa72e1634e3c": { "jp": { - "updatedAt": "2025-12-02T22:57:12.979Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "3ee2490201e7ee7efcdf4899b5b94420d7ce7cd1ec057a192d5f662bb0708694" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.979Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "1c4d27caf301ee4cba8c032a7875259f280fb9c7fb439a99b05ccb924bb89a0f" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.980Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "9e0569b4f60f6f14fa85751dab508908f84ae3a0bc6c637fd33a761ac410a39f" } } }, "ca7eb037869880c9ebb1a34c0000cdbfc8fdc9225de1f230ad67b8fceeb858de": { "fb2d804909b58e74a6d190031cfb86ce2cfa560d0444d3bb3d0a0af94da23268": { "jp": { - "updatedAt": "2025-12-02T22:57:12.980Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "7adef56a9c57f6616e24ecc290ec66d9f28864107431a78bd3ea2b6bb9ea3f5a" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.981Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": "dabab5f1aed2bba71dea04c8cc1cc80dfdb1699c28c6f5c07b7e0233740a008c" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.979Z" + "updatedAt": "2025-12-04T20:16:57.252Z", + "postProcessHash": 
"8d2d028a04d16d36af130415b78c5c5d2ebe46b83066a3a72d02de41d65081d0" } } }, "d6a2aef23a40b1f742ecc4bbf44e21b915daaca32e6106a813cece2855459b4a": { "c2bbc1291a1d9794a9a8424dacda644c27086e3d431d4f0bb25b88182d583c5f": { "jp": { - "updatedAt": "2025-12-02T22:57:12.962Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "68f6672d84d00f6148cf616a2890807ddef5a59fabad96fbf06e03f7b0ad782f" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.960Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "5ff3bc6b75c810def93dab90fd441b4bccb7312ca8117493dd21a8593d76d297" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.818Z" + "updatedAt": "2025-12-04T20:16:57.236Z", + "postProcessHash": "d4f5decd8f3967064084da71c0ce542f01685c58980adfd8012ee7d4358241de" } } }, "ddcf8dfb6b1a4d5a1ed98c2017cdd7ae1fe774db2009725b2bf3d5ca4a50b322": { "4f4dfdc7521283f8c0348d0878aa061e186e3e3aad4e92d55841f1902f00e3d3": { "jp": { - "updatedAt": "2025-12-02T22:57:12.967Z" + "updatedAt": "2025-12-04T20:16:57.256Z", + "postProcessHash": "91b5d02f7e9dbdf53ebf4d708a1a26990acc1ad859b03d431c28e3f579d24ef4" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.965Z" + "updatedAt": "2025-12-04T20:16:57.255Z", + "postProcessHash": "078d802f99a6ed6241a5793679ae6c4284d860921b892052bb9415b782b43810" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.964Z" + "updatedAt": "2025-12-04T20:16:57.255Z", + "postProcessHash": "3dd670a2f49e0f9fbcf97709325a2e1031f46f2b86f6282bfe7b24498e98230c" } } }, "059de09a546e5fd7f343688a18f5ae23fe63e31ccd72bd1d8e0ef1ccff248e9e": { "e0133670b30030462807054fabd8948f4d58d68bda6f5fc806435ba96fdc2531": { "jp": { - "updatedAt": "2025-12-02T22:57:44.839Z" + "updatedAt": "2025-12-04T20:16:57.274Z", + "postProcessHash": "0fbd94cc312492ee425010f3ed9c2c7d77fcf6a10401b34e4d16fdbb0ff4ef01" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.839Z" + "updatedAt": "2025-12-04T20:16:57.274Z", + "postProcessHash": "5e57577ed1946c3a638dec31e504c9c8ac89a0941df460750e15c46a8d4721d6" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.836Z" + "updatedAt": "2025-12-04T20:16:57.272Z", + "postProcessHash": "2093e1e4aa6180daf296333a4470d3e198f3ac03bdb479eea24245eed63fd9c6" } } }, "0e59ff691e81e6bb5df727b7bb1a30005ab315602d293b41cb391ed4b5409e8e": { "ab3c2315a32f46dcd77506c38fcb11173ad15a3ad7597e20a3af0f8b3c8e1c02": { "jp": { - "updatedAt": "2025-12-02T22:57:44.829Z" + "updatedAt": "2025-12-04T20:16:57.269Z", + "postProcessHash": "f39c6e7c8d34a1390b11e5d68c442de0b8b1b2089c19362b727f7941d4f21a32" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.825Z" + "updatedAt": "2025-12-04T20:16:57.268Z", + "postProcessHash": "d8b40c770fc344df04c79d376257959210b3033d37ecce0c32fd62f93034309d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.827Z" + "updatedAt": "2025-12-04T20:16:57.268Z", + "postProcessHash": "f172f6267e6d06e65e3a58d1519f2f960746d8a5b752d3fc9d843960050d636b" } } }, "1be2e6251cf6bfefceeb9a1d2a2cdfcbca4f3dc24d4303c2a666b520ce7dbc5e": { "79ae2db2ede93c3db9f3aa10741077dfe47e966f67fbb578af090bc05ef54683": { "jp": { - "updatedAt": "2025-12-02T22:57:44.838Z" + "updatedAt": "2025-12-04T20:16:57.273Z", + "postProcessHash": "2b32dcc52e6a1846728161a3e60c3090f9a51c0f1d9446fe0abea2358547b235" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.842Z" + "updatedAt": "2025-12-04T20:16:57.275Z", + "postProcessHash": "fe44b860fd0b2b54b523f751c72a34e6003b713d8d0039111077c667ba26f9b5" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.976Z" + "updatedAt": "2025-12-04T20:16:57.251Z", + "postProcessHash": "9a1a5296f0907306e40953ae2fe5e4d19d06607b1ffaa1ffa216606d81c403c6" } } }, "240885d8a55bf641313a779462f9d5afe9dc23030aa7263fae65179e8d79b9cf": { "0f3c6f532be1ff66173a6f491090bc401c5f5ad396a065d669cf8be23b790fbd": { "jp": { - "updatedAt": "2025-12-02T22:57:44.836Z" + "updatedAt": "2025-12-04T20:16:57.272Z", + "postProcessHash": "e446cffee4246edd6798b4c6d223fa61ceef5af5df95defc73a801f82f52b633" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.840Z" + "updatedAt": "2025-12-04T20:16:57.274Z", + "postProcessHash": 
"72ea8b48152c271b676be5c6da6eb3c1b28ad640ffdb423237beb8f61499e2ab" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.835Z" + "updatedAt": "2025-12-04T20:16:57.272Z", + "postProcessHash": "f6b1f228ca410c974ef0b29fc8d4a5c1878e4260aa61af060de96a09c061829b" } } }, "327d9de85dcf4e9908ef639e81f0b4c26211261cdc7427d31c00d57a68f9ea57": { "defbbc0826e47d88fbafb696aa0613a205a13036670b5b16d9f7262852215ad4": { "jp": { - "updatedAt": "2025-12-02T22:57:12.977Z" + "updatedAt": "2025-12-04T20:16:57.267Z", + "postProcessHash": "c35e838171d6f8927304173cf11acbb921e9d6fe2f451e4bd9539ff840cc6dc9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.830Z" + "updatedAt": "2025-12-04T20:16:57.270Z", + "postProcessHash": "f7cf6ff7bd9c0508a73382cb2f5d508f2d9245464d6fa68e4f208e14a5b8c26b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.830Z" + "updatedAt": "2025-12-04T20:16:57.269Z", + "postProcessHash": "8249e2a0ac5d4177647b1f0e4612ece5dfcfea3681fca1db5d8cf5455066ce18" } } }, "34fc130494be0d69639ef51384f698c85712275c82f72ea0884fc912c61fdf98": { "92c9764efaeac8ae2150358dd44c1bb27f41eb7fecfcbaeaa5223b274ca6abf2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.829Z" + "updatedAt": "2025-12-04T20:16:57.269Z", + "postProcessHash": "b6426b1f6b4d7e1731282e09a35c0d786e2ba4ebfc0ed95c48c3fddbbedc52d8" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.977Z" + "updatedAt": "2025-12-04T20:16:57.251Z", + "postProcessHash": "addb883ee2a47a66e9aae4ade848f7d69c138cb1e4224ec11abdd678b3c99211" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.828Z" + "updatedAt": "2025-12-04T20:16:57.269Z", + "postProcessHash": "26a4cc9b0310ff3d3728b584968165b870dfa236383b90ff4a9c334cc2aa3d02" } } }, "3d292af39191f27d31948c49e58c34422323914d2d835dd3b8be63d271aafaeb": { "6c24a188e7d85e8dc525f5000fb2f41b08e17a821ce60ddfa9341db9802fcdb2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.844Z" + "updatedAt": "2025-12-04T20:16:57.276Z", + "postProcessHash": "7ba94ff82df3fd481e47e74ead3c76c5b21456f9b96dc44e07f0dadd9f6b12a8" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:44.841Z" + "updatedAt": "2025-12-04T20:16:57.275Z", + "postProcessHash": "07ef180689940db1efb0b9721afa2688e89c5f3c99c21c79ed95b6e52954eb0d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.842Z" + "updatedAt": "2025-12-04T20:16:57.275Z", + "postProcessHash": "de9de58d102888d28d2f04f8a0221d1b9db3d576517ce310aa595a017161b8d6" } } }, "4b025a8d2616c548c48183c20f38fd63b3419b7d2754a037d1a3a71de57c5a3b": { "ff303dcd7cec8ced40bda437d563bc42f245742fe9f5d04eda4a24a951b0a458": { "jp": { - "updatedAt": "2025-12-02T22:57:44.838Z" + "updatedAt": "2025-12-04T20:16:57.273Z", + "postProcessHash": "60a5a813f3e2cdef5729ea46b7065b976faaf1ba451cc2f76460a130cb21b80b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.836Z" + "updatedAt": "2025-12-04T20:16:57.273Z", + "postProcessHash": "afd39e5a2051777e8d94216f23f57929dea339dfb5e610afa75d389b0fcdfbe6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.837Z" + "updatedAt": "2025-12-04T20:16:57.273Z", + "postProcessHash": "35f67d1be6c8f34be4e5024002d8820c52d34036f60a08d59e89c8c5b200d3e3" } } }, "4be2dfff7ee7eb8ba7e00bea4338d4b73e59739bd67763683573c2c8a87c9e3d": { "37c83798ddd19c1e72b3674657a3635ca49e5d5bf74e74f2fa7bab5c89d58316": { "jp": { - "updatedAt": "2025-12-02T22:57:53.517Z" + "updatedAt": "2025-12-04T20:16:57.283Z", + "postProcessHash": "80e06d059e5a825bddd8ff633ffc01a75aeb4fbe540892e2c4709524204d2441" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.518Z" + "updatedAt": "2025-12-04T20:16:57.283Z", + "postProcessHash": "457212cb126bf31d9a7e694300aeba62b29fd2d10a0b9a53a8c390c0b324c5b5" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.518Z" + "updatedAt": "2025-12-04T20:16:57.283Z", + "postProcessHash": "4e67bb3786189cf1091fe9fa60691376e297ce68bd3c6a5eaaf8482dabc22659" } } }, "508c2be06359376eba3e09eb266a71fd1a64aba5ea5c127642c386bdcf720d00": { "32a1e97aa76cb271770dca75fd904e715623cf504f26d889bcb51a382ae083e8": { "jp": { - "updatedAt": "2025-12-02T22:57:44.842Z" + "updatedAt": "2025-12-04T20:16:57.275Z", + "postProcessHash": 
"a870d833e74e0f129af622db38767046af780c048455d60e7a7db1ee78bbfb0b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.838Z" + "updatedAt": "2025-12-04T20:16:57.273Z", + "postProcessHash": "de9bc4c3c41808a0ca598e689736d24aec0eec7ecd0be16c7baacbfbf8807edc" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.840Z" + "updatedAt": "2025-12-04T20:16:57.274Z", + "postProcessHash": "2f4638e32f0897552dfed7797b01d3bd9eff67a0d2beb3c6dcdc5c640b1754f8" } } }, "6547aef5926a6b2487f43dbec05e0957fe924c3749b2e7aeeb9c8724921310c6": { "d72d4d5d1769fb68537cb2b0120c647b9e45e7282fdf4303b4b3b3ba33eb151f": { "jp": { - "updatedAt": "2025-12-02T22:57:12.976Z" + "updatedAt": "2025-12-04T20:16:57.251Z", + "postProcessHash": "06c0e21a89d105292d9779c5d5198cf75c097502b1bbbf76868255fd0b1821eb" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.837Z" + "updatedAt": "2025-12-04T20:16:57.273Z", + "postProcessHash": "c7735914915c26fe7fde985367727fc418e33b5d7355723213757ad9aa249300" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.844Z" + "updatedAt": "2025-12-04T20:16:57.276Z", + "postProcessHash": "c428a425a82691e88852192b639382b6f69e6ba8eb0638bcf78764bfc8e1a902" } } }, "742de82015fab9560f32bc67cc0f07a9ca9e1ed3e7aeb11eb4303fa8a580185f": { "e8e388627f1d46545b74abb196d0b01e87cea3cc02063cec9c7cf6835a4f7d7b": { "jp": { - "updatedAt": "2025-12-02T22:57:12.978Z" + "updatedAt": "2025-12-04T20:16:57.267Z", + "postProcessHash": "340d61541e72a53f9289c87a2813288a6dafe8460e3577d74b53328bc6f4c850" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.825Z" + "updatedAt": "2025-12-04T20:16:57.268Z", + "postProcessHash": "b4fc8a886062a73c95553fa76149f8457042ea6a864c0a39cf156d6e87325e4a" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.978Z" + "updatedAt": "2025-12-04T20:16:57.267Z", + "postProcessHash": "06f9a98ca9d574acfe00c99c9c2ebaa8171aa830a3bda2e4f4bb487d9fcc697c" } } }, "77a9c51767cd665f3dd2df3d7ddefaa1effd2f1271cde0211ccbb68de9869a6c": { "1c1de24396b6e6f16f0f9b41c9ee154414738e50d2c294ceeedb57d2b780396f": { "jp": { - "updatedAt": 
"2025-12-02T22:57:44.845Z" + "updatedAt": "2025-12-04T20:16:57.276Z", + "postProcessHash": "f389febda1e33d88219d0c8ba5e84e83296e19921003b47e43c8b19a2be77f08" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.844Z" + "updatedAt": "2025-12-04T20:16:57.276Z", + "postProcessHash": "8f2b884f51b22232ccd3a1ea29169bfef31aa0ed3444da9ced558d4627cffc73" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.989Z" + "updatedAt": "2025-12-04T20:16:57.276Z", + "postProcessHash": "0f83f72abc4a035386f84aae8680706600d9e2c577847610cf0c9a9af8316348" } } }, "90aeecc84affbe1a94ebd79e7e3236a66f9c627e327fbaeb50f05aa43d716a7a": { "a7b61a1bd22ae77b9b4f8fe2bc248f5fb8a900c9c853a0f4b28e2114edba6edb": { "jp": { - "updatedAt": "2025-12-02T22:57:44.843Z" + "updatedAt": "2025-12-04T20:16:57.276Z", + "postProcessHash": "fa6ef185860d86f9e7fc90c6ba73e18521c3761d6bdd8c653db6c4642ab19342" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.841Z" + "updatedAt": "2025-12-04T20:16:57.274Z", + "postProcessHash": "69a42f71f08b83e17475d1355991a0ad552b105140a0f5ff6869eea202b77dea" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.845Z" + "updatedAt": "2025-12-04T20:16:57.277Z", + "postProcessHash": "57dcb313a572f055a1e7a7c2e0ee7c27e9687c38fe743a348e879ed9f0837b94" } } }, "9815f463df07221f4071a1a1bca594afe93b27adf83236c69b1a77b1ebe508a0": { "007c21ba67676302542c1fff75925930501f8226edd684ec93ea8a9d480c18c1": { "jp": { - "updatedAt": "2025-12-02T22:57:44.846Z" + "updatedAt": "2025-12-04T20:16:57.277Z", + "postProcessHash": "c8672af7f37b95b4f8dd97c0e0977c7b38180eb1b0f70838227ae77bc73ba99b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.846Z" + "updatedAt": "2025-12-04T20:16:57.277Z", + "postProcessHash": "c5c7f9898cad8fbfbfdcbe642ef47f5d0657c2283464fb81a43c1b910f9eaeee" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.847Z" + "updatedAt": "2025-12-04T20:16:57.277Z", + "postProcessHash": "442f53bf6416e654c589e99c3cac07e12e88fbabbe6b09159e685ba578d2582d" } } }, @@ -4001,520 +4888,640 @@ }, 
"471cf465239242ec9f9d784205ced7fc1640f6da4c8228d46163e7757979aa8a": { "ru": { - "updatedAt": "2025-12-02T22:57:44.828Z" + "updatedAt": "2025-12-04T20:16:57.269Z", + "postProcessHash": "aac03e7c8b683a70452d02041b909911380e4f5e31c90880e22c76a3ca347d4e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.827Z" + "updatedAt": "2025-12-04T20:16:57.268Z", + "postProcessHash": "73e241a031ff4974f5050a7dd51c18832110554a9fcde411512fbd31be8e9f90" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.826Z" + "updatedAt": "2025-12-04T20:16:57.268Z", + "postProcessHash": "2f72a35e2ff8f8d4cf477a8bb155aa3c2d26e05cbe70a55c9fc7554440032e5c" } } }, "af79bbae5029e0964764673ad906f12ea5d0cbd9f6358c69ef5ef5e1e2abf9c8": { "2ac53c6a243d501aa141cc7a46939a9b6d8d89958a13b73f7e3def4acf386114": { "jp": { - "updatedAt": "2025-12-02T22:57:44.843Z" + "updatedAt": "2025-12-04T20:16:57.275Z", + "postProcessHash": "d5d32fb2e9d73e0610beda9187d540c75d9b748cd530fd9d70865755dc54eafe" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.845Z" + "updatedAt": "2025-12-04T20:16:57.276Z", + "postProcessHash": "a1e2e2e5101fad0e049165f91a12221da6b863c86c816dc94570be8f47ae9366" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.843Z" + "updatedAt": "2025-12-04T20:16:57.275Z", + "postProcessHash": "63994e3a304f549a9ed93f964ae6b7ca924c8d0c92f551cce7ddbe01d48cf9e7" } } }, "c26d90fc85acd6879286c1468a93acb164acd86eea2a927516015902a9a832be": { "7cecd0f5d3861eb201c695566fbb8efba35f90080e6ff53cfb99227a455a7433": { "jp": { - "updatedAt": "2025-12-02T22:57:44.837Z" + "updatedAt": "2025-12-04T20:16:57.273Z", + "postProcessHash": "e0adf9887529ba419e856dcbcc8eb6ad6d3df49d9a855d6fbb207bf23de0b168" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.839Z" + "updatedAt": "2025-12-04T20:16:57.274Z", + "postProcessHash": "99e46e5fe0861758a4d3dd40541c2552312940650e5f487eda2a13b0c11bcd3f" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.840Z" + "updatedAt": "2025-12-04T20:16:57.274Z", + "postProcessHash": 
"5dbfef12f8ff14cc2e9df89828a3ddf0b1a82675edc3f08b81447ae715ee3845" } } }, "c8e894dbaf5047cc3cabc950a4a8ff475057f2bc769f6e66960185717ec18b52": { "53f949f10b8d348067c1f595ef08a9cee2ae03679b3e38fbfe1a67bd2cf12eef": { "jp": { - "updatedAt": "2025-12-02T22:57:12.991Z" + "updatedAt": "2025-12-04T20:16:57.276Z", + "postProcessHash": "69239bf07804c5566d1f13e076a4033a7b8ae5f11ff9cc242a94e1301a7264d3" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.986Z" + "updatedAt": "2025-12-04T20:16:57.275Z", + "postProcessHash": "627010be4c4e17a0f55706ce9e9c831365aaf89ba5567e80c3bf20b5e8991ec9" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.989Z" + "updatedAt": "2025-12-04T20:16:57.258Z", + "postProcessHash": "e163dcc9e7ba43a1e3354a882c7f0cfe268965b70346a299f209cff89774df6b" } } }, "d6b97ab54d7597109de2eeed733aaedaf2f8744ebeed7ec1031b8460e9c545c2": { "60328591af08fa91508ef8597f7a9b54e083806e1906b2740d4ec5802abe7ecd": { "ru": { - "updatedAt": "2025-12-02T22:57:53.521Z" + "updatedAt": "2025-12-04T20:16:57.285Z", + "postProcessHash": "58ed6d4b2fe0f9eb55c232029e6a49a6c23d04543a13ac9d54f91c1175e59086" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.521Z" + "updatedAt": "2025-12-04T20:16:57.285Z", + "postProcessHash": "df1a38a3bd599ff26d7acb6ef4c9651eec66f69bffcafbe8b6dbb1724fcfdbcb" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.522Z" + "updatedAt": "2025-12-04T20:16:57.285Z", + "postProcessHash": "4b8ada8aeee40d487d325b3c7b309908f162aa5ac09d18c22b0dfbefb9b8f63c" } } }, "dc33a2eb5786282387491dfbb49c8ff622ea41f11b3278436c7f82ab857f0228": { "6d34c7aa55a8fa5def4e3f2bff389c666852c48291ebab26dbe11069e1977d67": { "jp": { - "updatedAt": "2025-12-02T22:57:12.988Z" + "updatedAt": "2025-12-04T20:16:57.257Z", + "postProcessHash": "81155067880a75c3a21cd90f228cc9e8274dbf04631922f87ce9ee62220d2f80" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.982Z" + "updatedAt": "2025-12-04T20:16:57.253Z", + "postProcessHash": "cc6cc272208fa7d4e48a5739f0c24cb49276ba0f4aea94320ad57d0b99e9300a" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:12.985Z" + "updatedAt": "2025-12-04T20:16:57.254Z", + "postProcessHash": "3f2e9b48e55fc63e3099ec176dcddb9287c29130128d89447c493cc52349c208" } } }, "0b6d9c8bcd38a3dcf622f85a9b9f97289107d754955596db63086c5a1f0de013": { "62bc03adcac1853c2ff1d41eab5ec55613571c9634311e2e305ff20b78db334b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.031Z" + "updatedAt": "2025-12-04T20:16:57.672Z", + "postProcessHash": "d63768dc574036d586b509666102bc4745b550eb4a0916ad09aa46bc6ea918ef" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.030Z" + "updatedAt": "2025-12-04T20:16:57.671Z", + "postProcessHash": "a4b63a05e9dbb382444b46e66794b45bfa7f18aff144a2987da110c0da901655" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.064Z" + "updatedAt": "2025-12-04T20:16:57.672Z", + "postProcessHash": "f6e11e5dc1b3d69c4608f269b827bc958a2df181020fa06708fb118b686b85f9" } } }, "13e624cf649963b0430c85b33066c42e9a463e53696049fdef557841854d666d": { "81c2903aa8b7c3295234e5c1b7fdf2be7dbc55fdc9edac19c3d4675fd1215205": { "jp": { - "updatedAt": "2025-12-02T22:57:45.066Z" + "updatedAt": "2025-12-04T20:16:57.673Z", + "postProcessHash": "ccb05d4cc992719509f6d42cef212e5d5593b15ad0e40768ff151d3d044452c0" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.067Z" + "updatedAt": "2025-12-04T20:16:57.673Z", + "postProcessHash": "6d82e0b4e9bf4bfb513cc54c67a06ca222ec37d019eee3a1a759264705ee37d5" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.072Z" + "updatedAt": "2025-12-04T20:16:57.710Z", + "postProcessHash": "4c039f012a14136b8a242ceeef17b68fc0b1246a1a6fa38f5a4d8f3bb7f88a80" } } }, "2ed1c4bf7fd0d1e9a3aa0e5f13e3c86bcaa77e12c79c9d2fd35be9b8cb485fdb": { "042d7dbf05f1c54ecb628a3aec1b03eb4e2b6e21cb8aa57b9ada88ffcae4f8df": { "jp": { - "updatedAt": "2025-12-02T22:57:28.669Z" + "updatedAt": "2025-12-04T20:16:57.381Z", + "postProcessHash": "56261aebde53698c90f498d37ab3c7667f606de2377fdef5e7ecba384c4654b3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.672Z" + "updatedAt": "2025-12-04T20:16:57.383Z", + "postProcessHash": 
"4595e89695bb69359f51c3f0d5d2e38b191b0638208c28cefc01ac68079db161" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.671Z" + "updatedAt": "2025-12-04T20:16:57.382Z", + "postProcessHash": "3d03b37953de6db506edafad47eaa287a296c877f037ba1f99c4c8eb4c589438" } } }, "3d2059239ad6af1a2ddfd59349dac15c70518ae11885267fd488f16281699791": { "bb8598cd736f9055ff9d8ee57cfbaf381f8b9b7dd5b8bedf4b973dba8c441a2a": { "jp": { - "updatedAt": "2025-12-02T22:57:28.673Z" + "updatedAt": "2025-12-04T20:16:57.714Z", + "postProcessHash": "90f309ceb199a2f2ddc2ec44f1f2bc226425453040c9639c353d640224ccf740" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.079Z" + "updatedAt": "2025-12-04T20:16:57.714Z", + "postProcessHash": "63a9796e38eeaf27b7ba737d34a9446507bcb45127a2b0e45da40215331b1dd9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.078Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "d711fe36633b65f9b271de1bf928ecc7be26588d8ad9006a06322c2a40c29106" } } }, "3ea83a8ef84ec6bbe25f2090619db1abe347ff2b73bca590d6c93d68a42e4e64": { "d03f731b06fef8fcaf928f6e3faf509894d47eaf5b4921a111e9884783dfaf7d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.067Z" + "updatedAt": "2025-12-04T20:16:57.709Z", + "postProcessHash": "29bfdeb6f8b8d9626b56ed0dd1002a69382f9832c11210a5a353cbc4f00ae182" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.071Z" + "updatedAt": "2025-12-04T20:16:57.710Z", + "postProcessHash": "04e7d21c0b19dd7c87eb122f09988abae82651e90a37a63c348255976a065c46" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.070Z" + "updatedAt": "2025-12-04T20:16:57.709Z", + "postProcessHash": "c687841c890738f20980479ed2c471361080d6e22b9e8ed68e6181c069232e60" } } }, "4ac2aa31459a0a92af805200fec9ac7d528d83083a8813c71176539ce30a55d5": { "47965995534ac0fbc4b623464960445019f4dbe230323078f5ba06347fc0188f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.077Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "34f0590751305c02af97c01a30afae21dc10aaac698deb67985a2b92441cd34d" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.079Z" + "updatedAt": "2025-12-04T20:16:57.714Z", + "postProcessHash": "dac8fdd892da3f718c90b217cf5abd4061b152a240d487baabea529320ee8a61" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.074Z" + "updatedAt": "2025-12-04T20:16:57.712Z", + "postProcessHash": "7d9619ac94c55d448b77dcbc34c6e821b7e3a67a6de1fd17c669e5664198738a" } } }, "4ada93142f1fa23e960fcf0c89e6d17aa2696229485742f034de4ee6593c2071": { "2f19a7e891dd293775fe6638aa903e735c6029210bbf3a17860c69e6f1f6bb6b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.030Z" + "updatedAt": "2025-12-04T20:16:57.754Z", + "postProcessHash": "132bb84090bb36450086ca1f8703d230d2e964a71abe9b6c7b57faeba8a29ec3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.029Z" + "updatedAt": "2025-12-04T20:16:57.671Z", + "postProcessHash": "2624aae6459465a88507479c09cd51ddad295c05d799969fcea3e58990721b3e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.030Z" + "updatedAt": "2025-12-04T20:16:57.755Z", + "postProcessHash": "da0ff42e3c9ff6c18ccf7424379c5e6d5956cefee625e8397562add05eb3c657" } } }, "5e4520555f04067ffa7eb5af85e61960bb9ef0b5e53db65b7b0471c0eb67e3ca": { "7bb096151a00169df14ef9af359bf6d8949aae217704606f9a6b10a44d8ed7c0": { "jp": { - "updatedAt": "2025-12-02T22:57:45.066Z" + "updatedAt": "2025-12-04T20:16:57.673Z", + "postProcessHash": "464eb882a62ab3b54c71dfa1ea9d4f4a4c458a796d299720ee5933a2a7f2b5c4" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.068Z" + "updatedAt": "2025-12-04T20:16:57.709Z", + "postProcessHash": "4c3ec37b945f383ca2a46d93b78ea2d04d15164262222bae00a0f045ff94970b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.069Z" + "updatedAt": "2025-12-04T20:16:57.709Z", + "postProcessHash": "d375efc434797b3c906f84e8307ff848d8cb9d6fdab32280e89f5070b20029c6" } } }, "736bf0149d53b024ca3bd9e7977f0bc63d265b1f25ebfb6dfdefeb025d67a838": { "dea965238a83d73269b02031548818dad6e76024fdd545d4ebfad71b6ea7f2f6": { "jp": { - "updatedAt": "2025-12-02T22:57:45.067Z" + "updatedAt": "2025-12-04T20:16:57.709Z", + "postProcessHash": 
"2da733158d1d9653ee99e70a11076f3d3066f7170b50f297bf84260adef16fc1" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.069Z" + "updatedAt": "2025-12-04T20:16:57.709Z", + "postProcessHash": "4224c6191b8670ede654cc0530f4a6848a95cc399af16795d2dd0b585c87cc61" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.073Z" + "updatedAt": "2025-12-04T20:16:57.710Z", + "postProcessHash": "f40b1b8336f00d8a5fabb74209d38fc06de3ac1780ea286af5409ccf1f2be251" } } }, "78374142cbe93e8f6c7c78c21fae25fb7304d36492b6bf841f120cb0b757622b": { "8c65e21fe9e7b63afe26dee2f144ad334fde661179f2df54cde98ef19f746770": { "jp": { - "updatedAt": "2025-12-02T22:57:53.521Z" + "updatedAt": "2025-12-04T20:16:57.285Z", + "postProcessHash": "33bbf26be7963ff5ddd789b9725c63d08099ae3b7ef66d9ee77d5e19d7bf3974" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.521Z" + "updatedAt": "2025-12-04T20:16:57.284Z", + "postProcessHash": "60fecb25094c9d568e17efad847ccc1d5e2132c5a78ce0e9646d77d4eaf9d0cf" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.519Z" + "updatedAt": "2025-12-04T20:16:57.284Z", + "postProcessHash": "49af3737faf0124ef76846a69709213ec5ecf0f816f7b54c9ec7e57e4d3f6e58" } } }, "7d77ec1ad6a5f022e0b46f5c3c3ce2c3fea37ff042d1b5dc03023407e067e3da": { "a014826091cc7de6ffe26de700b6870df49479656119a1c4582ab3ba9f32f66c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.667Z" + "updatedAt": "2025-12-04T20:16:57.380Z", + "postProcessHash": "28d30e9ffd86c777116434fd09c1629c7fe6760bd1be9c1feb114446b3645f02" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.667Z" + "updatedAt": "2025-12-04T20:16:57.380Z", + "postProcessHash": "9bfaf77c9c05cb01587424baa181998b9b298c0ead28103145c47ec050f3954c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.668Z" + "updatedAt": "2025-12-04T20:16:57.381Z", + "postProcessHash": "78881d3fe7b7645d68b001022dbf922aa6d7d61ab02601b61a52ddf5c63d43b9" } } }, "8c7d4f3fdba3bb4edd06686b726948493ddc13a3c70be44e45a5101013e47060": { "e1a3f32eec379181f97de3483a7955652a71670ed2c2d3ea34c65b17fdc5961d": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.670Z" + "updatedAt": "2025-12-04T20:16:57.382Z", + "postProcessHash": "3ede0328568d5c7549d9135b096e63504ae324d43f655b78e433d31f9f02ec38" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.669Z" + "updatedAt": "2025-12-04T20:16:57.381Z", + "postProcessHash": "b410556e30ea38efe6a7546c4b1b0d4bf3539c25600d7c451e3588b5cb8713f2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.672Z" + "updatedAt": "2025-12-04T20:16:57.382Z", + "postProcessHash": "21e2fbcb8ff9f7faf8f73f4ebd03dd476bd7e3f02bc8fa5563fff168733f75ff" } } }, "98ee65248863652e985d674cf1372dd020bd6094b7f3998ae6f4a646d94892b6": { "1bd995b679039ca6bce9ee0b09736ef8f967620b8b89d51a62c70a4d312caa42": { "jp": { - "updatedAt": "2025-12-02T22:57:28.689Z" + "updatedAt": "2025-12-04T20:16:57.740Z", + "postProcessHash": "952c2578817d6143f9333e2d974cc637be4542c4479f8542630326a702a0cd58" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.679Z" + "updatedAt": "2025-12-04T20:16:57.733Z", + "postProcessHash": "8f418d3e7abc10c5f5ed18dca3bf288cae45918be3ec843feeedee51f5254281" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.008Z" + "updatedAt": "2025-12-04T20:16:57.714Z", + "postProcessHash": "cba25ce3dfab609ad1e0eb64a60da67e05f180463c53a7a69022d248f0819139" } } }, "995a2e3a8b7d9f74a6263555c02ac239faad9cd474831a38bb8fbe02a8eb4930": { "9cf1d6f4f93a189585be6125df675ba7e1d73f8db3dbffd354c683519bf24dc5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.073Z" + "updatedAt": "2025-12-04T20:16:57.710Z", + "postProcessHash": "2b9861eae4ddc4340b80806d6c313fd9fc21e594f73c149f8f697ebda363543d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.074Z" + "updatedAt": "2025-12-04T20:16:57.711Z", + "postProcessHash": "10280c4a4174b1aa7fb6417542338c1795c88b4bf74441c2d00f22c3e4b9707d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.071Z" + "updatedAt": "2025-12-04T20:16:57.710Z", + "postProcessHash": "89c96dde19e437b03567189ec01de2a24f8f45c24f263f4307b45009b5ccdfd3" } } }, "a0768d64d8480213582b5d2b019ac82a6fe9572e3115c707079ccd2a6665834f": { 
"f53e89f4c4f5f43c018862a8bcb2458cf38a59a2eed7d3a2bac21d2ed57cd772": { "jp": { - "updatedAt": "2025-12-02T22:57:45.075Z" + "updatedAt": "2025-12-04T20:16:57.712Z", + "postProcessHash": "718d1ae7160166c76cea94595a70f98ea6ca4c34ba4f6918de0671db7752b826" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.075Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "1f4ec45a75555ae0d49dce761ec601476ca56330cc4d4118aa04bc6fc38ed4e9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.076Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "02158904b5aed39ebe7a322467d08919b7b9c279b9657356373f602741797bea" } } }, "b5acaeeec7ee7e0b3d8c363ae84792dfc90953fe82cb345bd9a76003f6857008": { "becf724869353de9ac0fbdf72d34274bf02c4477ca8efc26bf383f25cab477b9": { "jp": { - "updatedAt": "2025-12-02T22:57:28.673Z" + "updatedAt": "2025-12-04T20:16:57.383Z", + "postProcessHash": "66cf8ae473c72b5984076baa34bc6de21e5ca231d47e40668893fb146f19f105" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.669Z" + "updatedAt": "2025-12-04T20:16:57.381Z", + "postProcessHash": "28574569d4d4ae029a38b95ca1874b842ac81591cadc343c9990bca75b2b219b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.668Z" + "updatedAt": "2025-12-04T20:16:57.381Z", + "postProcessHash": "4cba74ae40a9e36a1e876b5426115438c9620d304ae7bcce9d8988314ecc999e" } } }, "b6cd16941758ca4a1cd018e80e59496c19b7711675f9eec3946a989810da8301": { "def5f58d34f9e59ee3bc906fda67f3a9ea90982c852224c86d9d02f3eb4daa81": { "jp": { - "updatedAt": "2025-12-02T22:57:45.070Z" + "updatedAt": "2025-12-04T20:16:57.709Z", + "postProcessHash": "76654743b8c2031b48266de69042939cb5da8a35a74f43220695da25e1912391" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.065Z" + "updatedAt": "2025-12-04T20:16:57.672Z", + "postProcessHash": "b90e392ed93a10dea2f13b745b19f17b23364bf0e35842e80b20dfdb88357e76" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.072Z" + "updatedAt": "2025-12-04T20:16:57.710Z", + "postProcessHash": 
"6daac5156a28b9705ac73531c827550da5ec3bc90c976987d85ce85fab784897" } } }, "c5c9fb1e01e8fd89820000126b65de13c1b1aa7723a21af8dd6a22b0c6ce61ab": { "f0bcc513afa858c10cd2907f4b418305889e8287702cf9cdb050972831c885a7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.068Z" + "updatedAt": "2025-12-04T20:16:57.709Z", + "postProcessHash": "56c494f24c8b34e87403cc08eda3b9dd5d88aedbb868c529c8e17ec0ca2596b9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.066Z" + "updatedAt": "2025-12-04T20:16:57.673Z", + "postProcessHash": "cf0c2d23b569ebedba23d6efdc413951cd38e3e0bdeac0783cccd71e378c1ac7" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.072Z" + "updatedAt": "2025-12-04T20:16:57.710Z", + "postProcessHash": "690f32c52d5a4afd0dd5ed556ede05032d8381f53c384057896f17294b2ab8fd" } } }, "ced886ccae611b5ba4d14770da1f424b55ef56a32ab309f10b5ba3de061a0cbe": { "4c6f8e2e7974ca1e44a92dea680f0fe4823cb3dbd478d406583065fef1965c83": { "jp": { - "updatedAt": "2025-12-02T22:57:28.670Z" + "updatedAt": "2025-12-04T20:16:57.382Z", + "postProcessHash": "60567fb4e88224ce31b26131b5c3b0a55331a1b70eaecd7416925ce95e2a7164" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.671Z" + "updatedAt": "2025-12-04T20:16:57.382Z", + "postProcessHash": "415cce49b09b29fccff8f53b3105d49e87898984c7c95c410f110caf8c562401" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.671Z" + "updatedAt": "2025-12-04T20:16:57.382Z", + "postProcessHash": "31434d9f0cdbc11e43199356a6a425ce513faafeea1e29a61c5f0c716be36538" } } }, "f3dfcb7d93e8daf7246f1d7b55aef72c87092941f169ec834a03a5094039d22f": { "30c8a47e6bcddf07ce86164218209c750f1bf6a65eaa190202477bb3b35f8686": { "jp": { - "updatedAt": "2025-12-02T22:57:53.517Z" + "updatedAt": "2025-12-04T20:16:57.283Z", + "postProcessHash": "75ebe3318240c961e11223e2c2b60c794a9c54492e8945538b9a69ec841a06de" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.517Z" + "updatedAt": "2025-12-04T20:16:57.283Z", + "postProcessHash": "cd55a933196f62d521be4253f50f7b4dbdc020fe26175c2ecacbd57a47246c40" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:53.518Z" + "updatedAt": "2025-12-04T20:16:57.283Z", + "postProcessHash": "31e5ab17c42dcc91a4c269282fe5a0973ab5ea969e773ab571cf961640e9342d" } } }, "f84c373cff7dbac798db4f00e0218085b87659f099e72d499856efa42972f195": { "4b9492d3cf50402946edb0019de92a07ebf67ee41426a0a31d7cd82149581a9e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.065Z" + "updatedAt": "2025-12-04T20:16:57.755Z", + "postProcessHash": "8b230ca6e317fdf487921d90a7f84e7bd3b2811aed8b8f83b7879b27044e1532" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.065Z" + "updatedAt": "2025-12-04T20:16:57.755Z", + "postProcessHash": "f594c8ea938be6d309592e6c7235d773b9cb6e16bd76ed1bc9b424061c358471" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.064Z" + "updatedAt": "2025-12-04T20:16:57.755Z", + "postProcessHash": "ceb098422d10e1279b85584eab707498f575ce38dba744d87620f14fc4fd2b44" } } }, "018a46e784f4216bc572797ae4cfd925900c11b01082ddf5a2c9b5ed08891d85": { "0d31eaa79270bc25ade146c9f275b342537708966bfbae7622a921d0c569a2ee": { "jp": { - "updatedAt": "2025-12-02T22:57:28.680Z" + "updatedAt": "2025-12-04T20:16:57.733Z", + "postProcessHash": "c2a397e5603c9423289529856ac763ee13508dd183c4b255420becb66b0eae6a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.675Z" + "updatedAt": "2025-12-04T20:16:57.716Z", + "postProcessHash": "269f4ff65e0e1648dd13e5a5986723bff0c357374b9733aa1cbd5aab742e68b5" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.688Z" + "updatedAt": "2025-12-04T20:16:57.740Z", + "postProcessHash": "bc6429377c2e8829c7d2c7e1d5645435be56a68550e5f58e1a40ef097de7b28e" } } }, "171b148b39ffa6bfa68154f7f794bc9828036c905ec6ea0ed3ab52ea0ab68098": { "9b71315bfc1a5504ea574514ec21f8d0b8c75e646482a4fa10456513e23ec3be": { "jp": { - "updatedAt": "2025-12-02T22:57:28.721Z" + "updatedAt": "2025-12-04T20:16:57.731Z", + "postProcessHash": "4e59e66c763f3d06c7ea50e08f94012f9aaa063d17198ae60d15499a5b07ae17" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.734Z" + "updatedAt": "2025-12-04T20:16:57.740Z", + "postProcessHash": 
"c6c5fd1d5d904b0f23762de97c50cd298a07d123f9c4b7d0a4fa2d6e0227cf88" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.723Z" + "updatedAt": "2025-12-04T20:16:57.732Z", + "postProcessHash": "cfaef766331426d2260975d7dadd74beb0486acc4dca437face48cf974de6eeb" } } }, "24ff6950696e941c133754804fa0f7502ed10e3f273f7828f34d0ec98cc69169": { "9ffff4baa30bb8aedc5b7c4bed60c32432037227f50854a8cf0a554ca74b6742": { "jp": { - "updatedAt": "2025-12-02T22:57:28.718Z" + "updatedAt": "2025-12-04T20:16:57.730Z", + "postProcessHash": "f9156cd7d2cf3d4383bfe4314afe034e17dcc1ccf116dd50a7286d4b9825ff8a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.713Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "c13bf08f6631f982d63fa7aee0e8ff0013c4cc7ccd01e2b0ae2a50337f7ee99c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.712Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "fce984b5b91f401ec3f5f8fee69e1f7991461d5e41bf875ee0f855c3b981dd61" } } }, "2de6c7cb85bc8ce6037011a7cb84ceda700e54852ad5f8048c1b021e9505cfe2": { "cffde22dd20a99321b2469fa4c5f889ab0623f7597c7318cb5c82cc569be15bf": { "zh": { - "updatedAt": "2025-12-02T22:57:28.715Z" + "updatedAt": "2025-12-04T20:16:57.728Z", + "postProcessHash": "ebc0ae10fe844f57518d1641ffd20e384e6699e1c2adaa16fec9a7ddbb26f947" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.717Z" + "updatedAt": "2025-12-04T20:16:57.729Z", + "postProcessHash": "d55948e87e9ae186de04c866a6d93cb7ed4060b1feb8542762e672e5521f3caa" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.718Z" + "updatedAt": "2025-12-04T20:16:57.729Z", + "postProcessHash": "b01a14b85ce48cb59ddc61c78956184b3f7816e2951e0e6600f18403087d5a69" } } }, "34539b13bc46ae1ff86399ed0b4beced83470a47c23ade3688d97729e239c69b": { "1227956927c2e159479174df3466808d9bd9a1f2cdd1dba3233e8d80391d27c2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.718Z" + "updatedAt": "2025-12-04T20:16:57.730Z", + "postProcessHash": "4acabeccf21450b694e514783504da857b9ac4aa4d52aa8a2aba696a46e9c72c" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.717Z" + "updatedAt": "2025-12-04T20:16:57.729Z", + "postProcessHash": "77b0c4b0ccd715ea1bc59290f69aeeff11f08da9617d42acc4c6191b8d5280bb" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.713Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "ef26676c7850bbde87bbfbb5e15ae5cb366a196963f67f8aebc6dd700c436b75" } } }, "397adfde7a860a0707618fd95e8f1f4db83c3ecc2e6141f24b64af0162bec70a": { "fa85899ec41f9998773c9e4dcae84709a75245ca0e0e75850cdc76516b7fd66b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.736Z" + "updatedAt": "2025-12-04T20:16:57.741Z", + "postProcessHash": "03469566aedf2758ac82634219206e64323421de66ac761066d2c77733ec12fd" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.739Z" + "updatedAt": "2025-12-04T20:16:57.744Z", + "postProcessHash": "2718f96cfd9cdaa98a238ea6385c70acd0d4ae0fa9fe012375da30dbb9426647" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.728Z" + "updatedAt": "2025-12-04T20:16:57.738Z", + "postProcessHash": "fcf83b9c2e4e1b52b0c3a713a8425c0c29cfa532267739cbc12bb7f9edc1bcad" } } }, "439776d4466dd70e3c5608271d0bffbce0782915faaf2fea75fff7b8e7835bee": { "eb302a76d12c1319056a47c6302ef68febf3a0648e4ce4f94b2b9cfe7bec8c8e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.722Z" + "updatedAt": "2025-12-04T20:16:57.732Z", + "postProcessHash": "53e6eb523453312137fbebfddc6c9ce4d1b162744d0937fe108bb31e342ceaef" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.721Z" + "updatedAt": "2025-12-04T20:16:57.731Z", + "postProcessHash": "c7dbc0b5293505002645a5939689f1b8f547dc98a59b35d7f1254a57be88a66c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.722Z" + "updatedAt": "2025-12-04T20:16:57.732Z", + "postProcessHash": "2eb6c36a382fef34bdbe99f9c2e1f67127ad1c9f02c338c154f05f37e4e35422" } } }, "5efbb4c7ed17158323437048f6f32b54a1665e8008c3b499bc27160f7cbf02df": { "06c63df1edaffeb10cb0def08a755d71c765dda9e99144cb3ca1eda2e783c187": { "jp": { - "updatedAt": "2025-12-02T22:57:28.676Z" + "updatedAt": "2025-12-04T20:16:57.731Z", + "postProcessHash": 
"46399582b55922d21fc46c5ce10455ddfa0cd1d288ff082e2a56fbfb6e8e7bc0" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.676Z" + "updatedAt": "2025-12-04T20:16:57.731Z", + "postProcessHash": "67b7c49cc91c4ece2061e15254556ace6a33748e64f629b90c52a7a18a9354d0" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.677Z" + "updatedAt": "2025-12-04T20:16:57.732Z", + "postProcessHash": "0605b745ad2d4f5330f0141d05fb37dd36644282724d42bfb4ff284d4c653cb9" } } }, "61b82c455342cbc02f4a1d8651204017609b443fce1a7cb57a4831730d7fc050": { "1d27a882dcff09d3f22870a4f6707da298747c547d36d3db2d61ebb22253f91e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.726Z" + "updatedAt": "2025-12-04T20:16:57.737Z", + "postProcessHash": "1caf3d357e377e68ee2b7484c6cf04c67c0dc197f63c72c2b061e2fba309f05a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.745Z" + "updatedAt": "2025-12-04T20:16:57.754Z", + "postProcessHash": "6c704a1e287e96b7febc179264f1c33c55ffc9b0e5d138c95223c9efd93c4d49" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.724Z" + "updatedAt": "2025-12-04T20:16:57.734Z", + "postProcessHash": "ff93a1b7b178edae5ea8cb18e9357c1a3c3a507f238a54ba084d4608093bd03d" } } }, "65514e61688950cbfdfadc40744ab73dd695de039206e57d83d48b00a2982161": { "c8edcf2ff1eff165beb006860951dfee61d76b4197857f2fbc085e60726d3e38": { "jp": { - "updatedAt": "2025-12-02T22:57:28.742Z" + "updatedAt": "2025-12-04T20:16:57.748Z", + "postProcessHash": "6b1f599cd11f6cf72e928578da9f1a92f900adee557b37fa4ab93d4680d9522c" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.733Z" + "updatedAt": "2025-12-04T20:16:57.740Z", + "postProcessHash": "eedeaffd11a002f6757a3b70ccced0fbbedd1a55e84302f608615ebae6d3907b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.740Z" + "updatedAt": "2025-12-04T20:16:57.744Z", + "postProcessHash": "2b3ba7a5437b6cc88ff330026a483142b7821b88af7aea50cff6b8d9611caf4c" } } }, "745a92a844b6497c0310ad16eb03df90d655cde8d7482e58f32d1af9a9c6e68c": { "ed4640fd150472b99b01119068e79ab5dce8af8145d98d8e1f847e482439180c": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.674Z" + "updatedAt": "2025-12-04T20:16:57.714Z", + "postProcessHash": "7c9001407975102ced5c1345f3bf64e8ad4c3eeeb1708e714f7029b23b2dfcfa" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.076Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "cd12faa8e5058beb0098f3e5a8fac428ceeff324a2a4e474b83a7a0cad2c94a0" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.076Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "bac63525c759ab6dc482bc680de30a0f5331e2df2ebc6a2a8cf41ccf152b532e" } } }, "7ca5e1494be65fba41fe95ee7a3461cd0844038fb74f44098aa4e3f88607c562": { "ac68f255dfedba5a9d7fc4021983a5c3dfb83430f46eefe29bc3204cdf2720ec": { "jp": { - "updatedAt": "2025-12-02T22:57:28.707Z" + "updatedAt": "2025-12-04T20:16:57.712Z", + "postProcessHash": "525f89d9b72f31c2e2449c1fcefeb50f42c9fa2a939b31601516f8a61431cf3b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.710Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "ec5d427dbeb4afda48d454aaa5a456bb7ceac286527227d9e49847c54882a0cc" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.712Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "5cf7a054f8284a87e5465e5e839b97d2ba3516788dc21c2b6ddac3d6e6b0aede" } } }, "8bd7dd424981003d655b71b52a223cd72ca57102e28da0b8baca6e8ed3256122": { "8c69f1a1f0d38fc584fc63dfbf0111f2d94d9ce8ad28c47314863119988ad693": { "jp": { - "updatedAt": "2025-12-02T22:57:45.063Z" + "updatedAt": "2025-12-04T20:16:57.708Z", + "postProcessHash": "8fc071f831870c447b6203ed57bf80545f512603df190f406adc9c590efce4cb" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.688Z" + "updatedAt": "2025-12-04T20:16:57.736Z", + "postProcessHash": "546915db3fdd023e342cf7eccd8537022a2fd3ee6198545e55b97170fec610f0" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.693Z" + "updatedAt": "2025-12-04T20:16:57.747Z", + "postProcessHash": "e897cb37d1a6f7ea6978ddc3355666bc02d184b4eaf24a51611497e981926695" } } }, @@ -4543,169 +5550,208 @@ }, 
"32f79342fda1521b32c3cbd5194d1c9682d16a53ade8cb05571f8a298e7705d3": { "zh": { - "updatedAt": "2025-12-02T22:57:28.698Z" + "updatedAt": "2025-12-04T20:16:57.712Z", + "postProcessHash": "43165b0e2305143d9d03f96a1bb7aecd68bde56b2442a506345193fb42398568" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.703Z" + "updatedAt": "2025-12-04T20:16:57.758Z", + "postProcessHash": "2c5df05e4fb1a57ccc9da9cb753b4bf655a0b850eefa7399e94aa65ad10bc0ee" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.706Z" + "updatedAt": "2025-12-04T20:16:57.756Z", + "postProcessHash": "d66a4f670d1a8b2b0b80904427db2c20eae3ba718b11dd09f2a6219975e589fe" } } }, "9b90a0dfa5536d6059d87dc8f5e817097c8b7bb53db517bff51a83c3e4c282ee": { "3e080983011ca5e98fc432fd4170067d4807f3aaa1e1114b8ec36d58af28fa38": { "jp": { - "updatedAt": "2025-12-02T22:57:28.709Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "7a2d10b335f63f31da6436e0466a182ff1f46d5d9a10152f358f31a78c990b59" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.710Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "954476987ebe6486ae39e25a4411783b3726e7b0185ae8529a7bfcb76b036efd" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.709Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "0f1c58beb60a440c3ed1de4fe1ee386ad4391dd0cdb2b7bbd6dffd0a6535052b" } } }, "9fe1ae047d397e67707e74c6e97afdec367a2fb4cf27a1ade86e3e2bebd7c4a1": { "9bf44240bd8b0398201f8cc05ed363d4bfa70d08d267473203007c092efe5287": { "jp": { - "updatedAt": "2025-12-02T22:57:45.060Z" + "updatedAt": "2025-12-04T20:16:57.704Z", + "postProcessHash": "d6e3d358e68f9920cfb99ae8d7afb6feaa9e15602ab9297349c53e7750673e6c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.058Z" + "updatedAt": "2025-12-04T20:16:57.699Z", + "postProcessHash": "b2ff77590897e0aea5d563943b9f75af20a4c11e6ab8f67aaefb6628c4a29507" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.717Z" + "updatedAt": "2025-12-04T20:16:57.729Z", + "postProcessHash": 
"6807687624100356fc61f4e68174487eaa755d701791f8f35cae36212a08c629" } } }, "b12e51a32d37bb2fb10917322e67f8c70cee8f595c143cd1b629cbf918f6b7b1": { "5014ad055f5a140206335e375c472557e174c690fe089774a9aa8c6d57d28567": { "jp": { - "updatedAt": "2025-12-02T22:57:45.029Z" + "updatedAt": "2025-12-04T20:16:57.706Z", + "postProcessHash": "579839d5cd4438a2180c05dbf9f1af6398402e150dee85b91892a796f60245d0" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.679Z" + "updatedAt": "2025-12-04T20:16:57.733Z", + "postProcessHash": "214d0a0e8fbec74074dc7e0a10b2fc88b41f8b4addf44a1ddb1126064707ba02" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.690Z" + "updatedAt": "2025-12-04T20:16:57.742Z", + "postProcessHash": "8e21fda480ccde5dbcc93c1044a819b0147d798e8e60097d9b1f51da75766d10" } } }, "bb0a1d7136d43d7b6bb4fa9a54f344ca0e81896a5eaf9cc6ef57b8c3aa682779": { "399cd03c18db8759846f978c253d288ef4caab87adb1838ee5aed970412744bb": { "jp": { - "updatedAt": "2025-12-02T22:57:28.709Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "1169f4ff5cdcb958e87e03dcc679ed282348f080a1e996b389c1eb55594d031f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.712Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "91b3864ebfb901b02158f7f6bfb0dce51fca72935a0a5356ff418eaba28612b9" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.715Z" + "updatedAt": "2025-12-04T20:16:57.728Z", + "postProcessHash": "82fdcc1ddd7ab988467dc457dd3c7f76bea931c2dc375cb86e55eb1b92606ffc" } } }, "c9bb01545754a986ab5cb4d637d8745f995e8c5243183cf90e72563584cc924f": { "efe17e7594347ac3238decf2b1daf336a87a883f6d30bf4a916bc5ae75b80dc6": { "jp": { - "updatedAt": "2025-12-02T22:57:45.077Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "942b77d5cdcbb48a1962cef607bb00aad19bab18c13fad40a65e49052a7a6425" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.074Z" + "updatedAt": "2025-12-04T20:16:57.712Z", + "postProcessHash": "a365d9d0c4ef42abd2226a87b858f2f2226373eb037530a6a152e0390e489587" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.079Z" + "updatedAt": "2025-12-04T20:16:57.714Z", + "postProcessHash": "6c1452b831fc4f08167519de9ee7a195a4bcca7cab15d3befbac87bca0ad87e1" } } }, "e814a9ccad02d86ef7c915fb695045529731c882788157b39795b3d624875c39": { "e078c263c4a0f84949c189cd1b90be6b54b0117004a43d0171ca1e7dbbab8fa6": { "jp": { - "updatedAt": "2025-12-02T22:57:28.673Z" + "updatedAt": "2025-12-04T20:16:57.714Z", + "postProcessHash": "f1463a2521bcc9d0cf8811cc498e91a33ce3236444436bf0af76585cce09b93b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.674Z" + "updatedAt": "2025-12-04T20:16:57.714Z", + "postProcessHash": "b6daa627bf73308af87e6a5a14c9a4061ef04187da792b2ac1efd8bc2b1d6077" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.078Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "1a3c532363f7cde26299d5ebaf455caa84ca603dded3e9e08e4179f766989811" } } }, "f4614a808acf58ed3907dbc80a1d159bc107dde839623cbee9705514996e1fc7": { "ad253066ead1dba2ae292160fbbd6c6d76963231fdc98e27296a51ffab627b05": { "jp": { - "updatedAt": "2025-12-02T22:57:45.077Z" + "updatedAt": "2025-12-04T20:16:57.713Z", + "postProcessHash": "a53d4fbcc3b815037ae072a6ed4e5561f75416003951ab8e784817a6b31d3d57" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.074Z" + "updatedAt": "2025-12-04T20:16:57.712Z", + "postProcessHash": "f3686ac5c4c89bb95381d8fe75f283f0b666ca9f5ae03f35419e74c248e6e81c" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.075Z" + "updatedAt": "2025-12-04T20:16:57.712Z", + "postProcessHash": "48adb87b68132b026afda0d2b4932b98a40461a913d16e32e3afd2124a3121a9" } } }, "fb63ffa66c1f033c420fc44f068aac3712f16d06effcb9b76446564d05a24f47": { "1f15e6976c3b57e0c74fc54faa9674d3ad5afb9a87efa0a5368573923ad33611": { "jp": { - "updatedAt": "2025-12-02T22:57:28.714Z" + "updatedAt": "2025-12-04T20:16:57.728Z", + "postProcessHash": "4971fe48c27713c0e4f17ebb2c37911d17d6d7dccf3286db8ebef3d06c992bed" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.711Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": 
"c44367768829bb039d676193b3daabd89874f2c7ffdad0c3cad90460ba5e34bc" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.714Z" + "updatedAt": "2025-12-04T20:16:57.728Z", + "postProcessHash": "92cae3fab6f8fc7680c0eca36bf3bbbc658ba8bd8824fb71f57eaf182a2c3ad9" } } }, "168f630aa6a5caf3759f0040f89a49b660bf997cba3f9bb07f93ceae7eaaf55a": { "3b9ccf775a7eb6ed27d87bbe61d94bd4d43913c00f26010a4b8706daf4a6a956": { "jp": { - "updatedAt": "2025-12-02T22:57:28.708Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "f55625885db810ff25d8e67c86f59a168f03c086bceffd2ff346efe3c274f50a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.063Z" + "updatedAt": "2025-12-04T20:16:57.708Z", + "postProcessHash": "24b14bd0a0e6cf075514013fcf46631c8c10e98e9063df19da933510002e0e37" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.711Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "986278938d56d16b8aa1932a58cb4e70921ef0d657a0e2f6ff5e0fb2ec7214e5" } } }, "23e27d61b6423512c41b26e6c3b22921e93b5b301057fe1f453a2c0d3c1d15fa": { "7a7f792ff342a20689f60f0f265171128a171dee6f6e5a078ebb83a2cdf6ed03": { "jp": { - "updatedAt": "2025-12-02T22:57:28.752Z" + "updatedAt": "2025-12-04T20:16:57.725Z", + "postProcessHash": "e84c38d402c6c425faa56ead5613ddb9a256cce9064a8bc9fededc004337a1c5" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.752Z" + "updatedAt": "2025-12-04T20:16:57.725Z", + "postProcessHash": "fdb17d682f1962bd7e0384b6efa6254036a7528a6083797281e28c6c09e993a2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.751Z" + "updatedAt": "2025-12-04T20:16:57.773Z", + "postProcessHash": "3f58bfe16f4d0c62aa900ae6ed08413adc4cfb6578317affdd6e3cdbd08e11b3" } } }, "296596880e307a8996c1b3d2f22b414f048332caf4d2083980ef5b77a8a5fdba": { "8891345d058983824a4006d332ff1e3d458871da85894bef04abd4b4a563fce5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.091Z" + "updatedAt": "2025-12-04T20:16:57.764Z", + "postProcessHash": "4d7d1537e85fd65498526329315f0a0ad52722a6017a0f1cd3ab8c687de48ff2" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.090Z" + "updatedAt": "2025-12-04T20:16:57.764Z", + "postProcessHash": "eccb713fef59ca6d42473fea02a0edbb15930dc08d935cda2925286242fd9b67" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.091Z" + "updatedAt": "2025-12-04T20:16:57.764Z", + "postProcessHash": "33ed2606ee8c2f54ce72e71a3ba5890d79e79b45fc1254790063d9b8e2423ddc" } } }, "3147fdc69c8941ecf865e47d9d8db4472067857ced28a4db9f1732ab44a9e898": { "89c5c15673bafb792cce9d30449c0c07581ad4fc443060edb182f1287d36112c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.719Z" + "updatedAt": "2025-12-04T20:16:57.730Z", + "postProcessHash": "53055f95a0e726b2aa61d1f11bd9d6db9204068e483890c1fd56f6b98207986c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.064Z" + "updatedAt": "2025-12-04T20:16:57.708Z", + "postProcessHash": "1bff8b81e2cfe0013dddfba5728d6bc99cfabcbfae37048b7b9cd791acb3bf8f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.713Z" + "updatedAt": "2025-12-04T20:16:57.728Z", + "postProcessHash": "fa39ea06d2762387a6b6b9d6a196b2e286ac3c0a30c8d2369733808e7378b717" } } }, @@ -4723,52 +5769,64 @@ }, "5db8938ef552c8ae84a16d6794b1f42e0311f9de424256966572b9563b1ef3cc": { "zh": { - "updatedAt": "2025-12-02T22:57:28.693Z" + "updatedAt": "2025-12-04T20:16:57.772Z", + "postProcessHash": "637dc4d08d7301012a9378bff8632e8ae44e2da2042c9ded22f59ee94d175eed" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.693Z" + "updatedAt": "2025-12-04T20:16:57.773Z", + "postProcessHash": "00f8bbec065c2b1a93e29dc9bb900b48cbd89b43b788fe644b01c8229c7129e3" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.694Z" + "updatedAt": "2025-12-04T20:16:57.773Z", + "postProcessHash": "483478a9518b2ded58f2a8cce0fd779cd9da40fc3ddecd8e02ab999e47495072" } } }, "4088be7256afa16e0829b465fbe851d2600d3bbb21c2610210c4075f713ee668": { "5263f7887931f9fbf63f2e9b15b7ccdd2c7157a7fd13cb67ba7bb5a4724f5c9f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.087Z" + "updatedAt": "2025-12-04T20:16:57.763Z", + "postProcessHash": 
"efd6669ce81cf16cb972400b3312d74bd68921265d6fa38eea04516f95cc059f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.085Z" + "updatedAt": "2025-12-04T20:16:57.762Z", + "postProcessHash": "d3efb7c9f4b81afced913e57bec6c93a3291d3ef6eeb30e1785ed170d83c57cb" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.086Z" + "updatedAt": "2025-12-04T20:16:57.763Z", + "postProcessHash": "b0b2f3b1468df01a7b6593f89aa2db47b669a1f4b00c44980e8c918002dd4b97" } } }, "434c8c6575a1c96da70aa7b25b8f2386d3813854c5fc71c4731982bf93c5b551": { "33868413cbf230f1914b6622c0fa2f639a7ea45c3142a4368aa173e8a03fc411": { "jp": { - "updatedAt": "2025-12-02T22:57:45.086Z" + "updatedAt": "2025-12-04T20:16:57.762Z", + "postProcessHash": "9f18250dac4a95b0b4598ef83c15f0caee43068a8ff02a8c9ee3cc3da5b714d9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.089Z" + "updatedAt": "2025-12-04T20:16:57.763Z", + "postProcessHash": "e848c56ac1efbd4ca32666091d39b46b7f4860c4a4cf048e4457f83b661d0fba" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.090Z" + "updatedAt": "2025-12-04T20:16:57.764Z", + "postProcessHash": "43f66fc674f5d1a1b01087b3dbedc92153888572310d40f8c15d8ea23f64b22e" } } }, "5199e54b28e8b206af31f52654ebdf21657caebae6cfe9e8a655ac120217243a": { "cce5c749f00809c0ebd64bf0b902ba923e07ffe3f6cf94b3e416613a539be455": { "jp": { - "updatedAt": "2025-12-02T22:57:28.750Z" + "updatedAt": "2025-12-04T20:16:57.772Z", + "postProcessHash": "ed9974b325e5df91bf2c35de43340f71e8a546760f61b53ee7cf74c6c21f6e15" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.751Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "ca7568fe00bffa738ba75cba865ae4979dd1180efac0ecdc08f05966aa7ea45b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.751Z" + "updatedAt": "2025-12-04T20:16:57.773Z", + "postProcessHash": "fd5d3a9436d49a764675f59e48355eda5d74581979286de1d3f75d62efa33789" } } }, @@ -4786,130 +5844,160 @@ }, "1eb91aaf2bc2576cd6efb9874122c0f7605ac4248fbdacbded71fbd2fa6cf73b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.093Z" + "updatedAt": 
"2025-12-04T20:16:57.773Z", + "postProcessHash": "11d8ad4dc4b44cc92dd20b330ab18a43adeb99078d3abc69ff74b4f15b98ed89" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.096Z" + "updatedAt": "2025-12-04T20:16:57.767Z", + "postProcessHash": "8123803c9414d62fcbb7ebec2a77b6c6275bba96ae5453ec43706ff21ecce293" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.097Z" + "updatedAt": "2025-12-04T20:16:57.767Z", + "postProcessHash": "7995fbdae8db58a5e8234ada2ad833403e3639be53eee52fb8fc3b73f3744d62" } } }, "7e14895b92e515a566c49df3e172fa8ef0794a3911f227fc4a71c6dba5f490d7": { "99b76fc928beec586c17a5cc43f58eacac997ef5729cc011bbfca37d37c70a79": { "jp": { - "updatedAt": "2025-12-02T22:57:28.708Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "63da98a9eeb5851d127538ef59ed7542b1b0cca28b2b3d5de4dfe4274fe2c7dd" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.707Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "67c8a0a8c9ec592c1ef47d654d418bf00842779e6f2e49d11dfe4fbed22938fe" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.708Z" + "updatedAt": "2025-12-04T20:16:57.759Z", + "postProcessHash": "9ccd5aa3156902f635306922a059f18e804327d40a0ee88e0a66ed6fe82b3b7a" } } }, "818f1c114e04624a9ce346e723231683afc9efb77f488e698cfae3f76123798c": { "7802fce1dd531f1d9274394e1014f26f608015405f1fca427d28159a91303ceb": { "jp": { - "updatedAt": "2025-12-02T22:57:45.082Z" + "updatedAt": "2025-12-04T20:16:57.760Z", + "postProcessHash": "314e76c3e76e5cacfa2c47795ead57344aab199343600fbbfed5c98ae997faeb" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.086Z" + "updatedAt": "2025-12-04T20:16:57.762Z", + "postProcessHash": "a46aea925c8aa3c2954ea5771b744811d5adff38c432f1f5c8a4c88ad528a3c8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.745Z" + "updatedAt": "2025-12-04T20:16:57.723Z", + "postProcessHash": "a38c04f6b4ebde36ced2e3255381ce7e02bafa6affe24a29aa8688f9e7eddec8" } } }, "89be4ef20c9e5fe95e7e9565ff5aa903eef3eacf9ef5bbff1fa371c4ce7dca62": { 
"a6c4756c4f81974e9497aa328cf4f067d2e218a364817e6b3353285d9d897dbf": { "ru": { - "updatedAt": "2025-12-02T22:57:45.084Z" + "updatedAt": "2025-12-04T20:16:57.761Z", + "postProcessHash": "830d8110363df93dd710c28c0cf9af48e051141c818e171f42741f104c20e21a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.088Z" + "updatedAt": "2025-12-04T20:16:57.763Z", + "postProcessHash": "72ef369911e700b098d6ddea992e3115676ca2988a8ecd028bf93fe364a4e82b" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.083Z" + "updatedAt": "2025-12-04T20:16:57.760Z", + "postProcessHash": "4a1c82afe39eb440e3df274d6c3952d494de5ea09541b3ccaf686ea2581753d2" } } }, "92e7d4855f47bd7172f2143c5bf24c013dcd99fd681ef3d53e75a588365ef40f": { "4aba2abdc8ba16a13f0e130fc8a1c260887158a147901de0d5f87498741d53f4": { "jp": { - "updatedAt": "2025-12-02T22:57:45.087Z" + "updatedAt": "2025-12-04T20:16:57.763Z", + "postProcessHash": "176a4e84c63da6afbf21f97fc63746b71d600d981b7de07b422d63c053307750" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.089Z" + "updatedAt": "2025-12-04T20:16:57.763Z", + "postProcessHash": "eccf21c1d86f089fe6c6fa7f19907aa7fd8f6973b249141c515952045af8d10a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.088Z" + "updatedAt": "2025-12-04T20:16:57.763Z", + "postProcessHash": "c448e145128557ff7413d26f1d4ad758da772985638d3dee80f50ffcc5c7793c" } } }, "b82ca3ae71e7ca0bff5a8a1a958e415791b51606240790fabac0a24e99e5a8e5": { "4ed62ba9027cfba50a02993f949860b2fbf583b0d2272c93d49202621bd1c2b9": { "jp": { - "updatedAt": "2025-12-02T22:57:45.089Z" + "updatedAt": "2025-12-04T20:16:57.764Z", + "postProcessHash": "7494cf2868ca0bd6fe8e5a2bbf39695b30da17a41f04289b24642e959df5a2d4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.747Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "f7911af939a3555784c78a6e4740cf749d90963ebdda1713c10dcea34fe6201d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.090Z" + "updatedAt": "2025-12-04T20:16:57.764Z", + "postProcessHash": 
"664f7d7880628c9a67c211d3e4e7d579786c9e2be498e43cdb130e439ab1bbeb" } } }, "bfa678e1e376ec015ac9221d88b1803ce7811869f141b22241c78abacbd547fe": { "8a6e9f00b55f3b0576f01c6ef20c5163ebaa186d9ca2ba3a241ee00d1040de72": { "jp": { - "updatedAt": "2025-12-02T22:57:28.720Z" + "updatedAt": "2025-12-04T20:16:57.730Z", + "postProcessHash": "0e7389ac59c195cd404acf24ef5a8a4b2752ce520dd526056e782e71b293b34b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.720Z" + "updatedAt": "2025-12-04T20:16:57.730Z", + "postProcessHash": "32365a072051e80c4812d403f0e04b9b32d6ccc22e17db81a2e3482d232b7639" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.722Z" + "updatedAt": "2025-12-04T20:16:57.731Z", + "postProcessHash": "b6f9739211048690f28ae0326c65dcc8d2501061ee68b8ead85c2e6d1cda8ac5" } } }, "c0113692c1b839bd1d0b553b8a746fd8e901fea18a0365874a7d57b5c47410d1": { "fba4fb769bf604e65c2e862ea128a3429d4692c33e0b8ca43bea57e16c6781c6": { "jp": { - "updatedAt": "2025-12-02T22:57:28.695Z" + "updatedAt": "2025-12-04T20:16:57.754Z", + "postProcessHash": "0df0401fa181f6afcb825f74f1c26dcbd74f83b3dc02c48442c81695a41a575f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.696Z" + "updatedAt": "2025-12-04T20:16:57.754Z", + "postProcessHash": "f57432b62ce4f74269795b4abfad56deccc8bbdf639be2e59dcb756cedb3fcc9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.071Z" + "updatedAt": "2025-12-04T20:16:57.710Z", + "postProcessHash": "296792f900c37a72bdb30cebb9b088b5151c013476ed49537b54273a784604ce" } } }, "c2fb7179016e62efedb581c777d5b3e618da9208a31a2d9a164ea0308a1143c8": { "795fa89dca9c3b26ee3aeaa8be7c8410b0abd1d329f364f1777a29c3bf6ae7de": { "jp": { - "updatedAt": "2025-12-02T22:57:28.721Z" + "updatedAt": "2025-12-04T20:16:57.731Z", + "postProcessHash": "58e1bc9663390143be137836371fc8e0c112aef769b2f0221574a0c3e31f138a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.719Z" + "updatedAt": "2025-12-04T20:16:57.730Z", + "postProcessHash": "34596e88c96336374ceec16557a905a7f42d56d61dca8b29c4821c31ac081429" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.719Z" + "updatedAt": "2025-12-04T20:16:57.730Z", + "postProcessHash": "530c202bad522f80d735bd60e3f9cbd73bb1fb1a5354bd53af94c7ecb304b1fb" } } }, "d853a1e0bc487c020a87d920028e6165d0cb3cc395e7fffd09047dee78720588": { "adec2ea632fca207a13f7608230126d9fa9e97108c03848018e30859a7144104": { "jp": { - "updatedAt": "2025-12-02T22:57:28.711Z" + "updatedAt": "2025-12-04T20:16:57.727Z", + "postProcessHash": "2776f6b854f6dd8092ff41a8d1c9a9f4fd65a9a1b9bd7b29abfc8ed8c232a8df" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.710Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "b1f3b2d8a196bb3690e13d3843097b3a9483874c47a73e00583efc5009679bb9" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.714Z" + "updatedAt": "2025-12-04T20:16:57.728Z", + "postProcessHash": "bfac1f60c2f0563b91b7e8fca47f55d047ea174b46652a1491b33cbf80162b37" } } }, @@ -4927,26 +6015,32 @@ }, "2e9fd2d3c490bc28c36a5d0ec21cb93e844dfdd34f2eb187c7f84f44c2e7cfbe": { "ru": { - "updatedAt": "2025-12-02T22:57:28.753Z" + "updatedAt": "2025-12-04T20:16:57.725Z", + "postProcessHash": "9ed534ba4eb3bf9ac332f25994d4c539afe66d30e2df432bcc554f84aa735e11" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.080Z" + "updatedAt": "2025-12-04T20:16:57.725Z", + "postProcessHash": "258b0979315293ad7f19d9c708a86d326969f505de98a96ebef6b2213ff67633" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.080Z" + "updatedAt": "2025-12-04T20:16:57.725Z", + "postProcessHash": "2be4eb38cb42cbb50c742ac3df2abc4ee2a8e97c9056b06e1e8c4aab88d0ef2a" } } }, "ed8b9299efe36240a44450f98875fb20ad838d0c4a97a4d6b56749d87a6c69aa": { "64421077253a2367928552f8ecfca1500ab1a3aa6470e26d805f6aae81b107b2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.746Z" + "updatedAt": "2025-12-04T20:16:57.723Z", + "postProcessHash": "430adc72e4482e0a77929fea628937f249f6ab5585a6e328198f3a61731ef996" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.081Z" + "updatedAt": "2025-12-04T20:16:57.760Z", + "postProcessHash": 
"1853e4481c6d5b8c840c9da1bb0e5b242f034824d6b6cc8ea574031d04cf7b3d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.748Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "7ae40617119c7a2a069089889358cd6b14074fb9c482141fd5cf7a31e29434b6" } } }, @@ -4964,52 +6058,64 @@ }, "66279b676a09e82d63bf92aedd7dd90d5d48d13d70786aa3d162976e96a2bf21": { "ru": { - "updatedAt": "2025-12-02T22:57:28.694Z" + "updatedAt": "2025-12-04T20:16:57.723Z", + "postProcessHash": "a716ae47586ae04fee13d615fa31ccc85bea240cb6bd7a5eba8faffd1e9797e4" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.695Z" + "updatedAt": "2025-12-04T20:16:57.723Z", + "postProcessHash": "ae967363a40d0d0bae739efd4e74a401c21947e1d75813899f3ca2765335b68f" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.695Z" + "updatedAt": "2025-12-04T20:16:57.723Z", + "postProcessHash": "7ad81b640660878b8583d183c4f46dbbd7c0afc910aecc49e24550cab4ad76b1" } } }, "072403da5aaa82666ec2ac55aba2660e421b6711c6cb70c42b4eb6038b58554a": { "aa38bbbb12b1ed94ca667358f90437e09046357f71a6d1e0f8a508d57a4b5568": { "jp": { - "updatedAt": "2025-12-02T22:57:45.103Z" + "updatedAt": "2025-12-04T20:16:57.762Z", + "postProcessHash": "6efc374a7a7ee2b4f19d1ea4c26f944732777c8e76c062fd13185bf2c4cd2e5d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.105Z" + "updatedAt": "2025-12-04T20:16:57.762Z", + "postProcessHash": "88c3068751766bce370033fbe160668b8f0cdbc7f8547c840b31261e3961b3ce" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.084Z" + "updatedAt": "2025-12-04T20:16:57.760Z", + "postProcessHash": "08d8b574fbbea1f629a024e0c36f6826462859b57a4f74bf6492a7a234ac3bd6" } } }, "242c81539a7d39347e31852ff01c14ca7481b428f62ec2a9a8ef8923e319fd70": { "ff718abf7b9337cb72f9728d2ee59f8366fc732135cec35be718b34d911ff036": { "jp": { - "updatedAt": "2025-12-02T22:57:28.780Z" + "updatedAt": "2025-12-04T20:16:57.795Z", + "postProcessHash": "f5c4fefb3a92417306cf2838611df0be166b2fd73ce3f64891868c22694a4af9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.145Z" + "updatedAt": 
"2025-12-04T20:16:57.774Z", + "postProcessHash": "575d484e36e64b500016548ec18db0a2b6aaf082ed913f64ea5a917b2f5eae1b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.782Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "455e257c1e8dd6bb0e6f9c94fc33c55a966384a641367936988c1473d768c54a" } } }, "2ca1f06020b55585ef361cf2b43c9aa9e23ed32e9d0a80f58141cb6b357e2508": { "e8f70f164f2c79a05e20f2ea7598ea71abec4dd9a196fd990cb3b9f5f5250252": { "jp": { - "updatedAt": "2025-12-02T22:57:45.105Z" + "updatedAt": "2025-12-04T20:16:57.762Z", + "postProcessHash": "9c046930bcdb3eb58b3c68abe4ebe3f87f515eb645ca504c504b1ccf98563141" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.102Z" + "updatedAt": "2025-12-04T20:16:57.761Z", + "postProcessHash": "cc1ff01dbd89f5dc8a0bbe3f96c37b52e1d23486fdbdcaaf925c242159f855d2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.102Z" + "updatedAt": "2025-12-04T20:16:57.761Z", + "postProcessHash": "5dfb84272f507920b5200a07873956c67182107c9c67adff79c1025ae991e091" } } }, @@ -5027,117 +6133,144 @@ }, "6e3e04cc7119c0602d04810abb60bd15340766476b6dd90c89c802891040b74f": { "ru": { - "updatedAt": "2025-12-02T22:57:45.096Z" + "updatedAt": "2025-12-04T20:16:57.766Z", + "postProcessHash": "6f582c607f5e3e6c3b253b0a49c3c865bf577f2ebfab4fddf3df621e1cdf9c30" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.096Z" + "updatedAt": "2025-12-04T20:16:57.766Z", + "postProcessHash": "d08b5c0cbb0d64af9ba6ace71d113321dc9f9f935ae6d33f21b121a65c52cd49" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.094Z" + "updatedAt": "2025-12-04T20:16:57.765Z", + "postProcessHash": "e76b19cd589d7eaea35fab6ebb0e931a2b858d9bd5c1b1ab20ddc8fae160752a" } } }, "516b68aad0c74f76c3c12fe30d1f7258569a0b66643da4924fd24d791f072074": { "55acd998caff6e952b47ceb372ae02d24533c50e2c2a2d341e32d84c2b4a01b1": { "jp": { - "updatedAt": "2025-12-02T22:57:28.777Z" + "updatedAt": "2025-12-04T20:16:57.794Z", + "postProcessHash": "fa1e89ee79f093c28e5710c61c24c3fe986a8e6163e282558d99e7a49cda4770" }, "ru": { - 
"updatedAt": "2025-12-02T22:57:28.784Z" + "updatedAt": "2025-12-04T20:16:57.797Z", + "postProcessHash": "13b12ff37d0b28766eecf1e934c1bc3d1faeab6131a64272dd3791ab6b40f10c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.773Z" + "updatedAt": "2025-12-04T20:16:57.792Z", + "postProcessHash": "ad2fa5cf65dd28932c96d23d588d084588e3a7615bad4d73088e7e524acdfe86" } } }, "52d9303d908cc54e70744ee1b1e2e09e4cf8cb226c9925cebd341f9cac387001": { "71eaa12db00dcad81d12c60186209b5ab377404a70d4a18ee7d26b6ece5ff741": { "jp": { - "updatedAt": "2025-12-02T22:57:28.788Z" + "updatedAt": "2025-12-04T20:16:57.798Z", + "postProcessHash": "46bf7aa9cc03453e2ea4e249b150bd73d4492d803ae3feec3da81f7c17ddff33" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.781Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "4aa644cd56806028a1ce4b55234106b470d76f7fefb2264d4df53b209bb2b919" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.787Z" + "updatedAt": "2025-12-04T20:16:57.798Z", + "postProcessHash": "85fa7d7b4e4bf105d56081b2287c12ec43dfeb8660cb59e6b674391eae101369" } } }, "576d725e93d552fa1d974e38954e0acf96bd1d7bdb7ce394aea881a846161589": { "5d83a7ec0232591623da4893b116014b1e37aa25bdbbedda273544d85805f34d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.092Z" + "updatedAt": "2025-12-04T20:16:57.772Z", + "postProcessHash": "f6c255a09fad65d52434d6a0d67748812d83c4c0278e6f74fde23d82b9dd9875" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.092Z" + "updatedAt": "2025-12-04T20:16:57.765Z", + "postProcessHash": "f6e8197e65e1ae7b3271d9b92b860e2e7f3cb2134283cb00aeeccbbabb39e87b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.092Z" + "updatedAt": "2025-12-04T20:16:57.765Z", + "postProcessHash": "9ec426ad1ba9e635f5c52ee3eac06dc87f747cb492de1e42e04f959fbe3b3eef" } } }, "59eee6beba7ef7f4b2b1ab657b188c2ad938982b20b45febf1c21c4c7b23d916": { "379215258832c5b1b0beefd2f0012d327e4907cdb0e2564650bdb42214e2e265": { "jp": { - "updatedAt": "2025-12-02T22:57:28.776Z" + "updatedAt": "2025-12-04T20:16:57.794Z", + 
"postProcessHash": "4629db550ef0332b1f95eb14ed6df1587316cf43fe6447308ed2ae3a6fa285bb" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.786Z" + "updatedAt": "2025-12-04T20:16:57.797Z", + "postProcessHash": "b8259b37a4380cb569be22929d39880fb53372f4ea6c3bf7496757672285e557" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.784Z" + "updatedAt": "2025-12-04T20:16:57.797Z", + "postProcessHash": "e059281585a2d761113c8af4d6f0cadf7a8784203fe8b6b2dd6dc080d1a4f435" } } }, "671c3c038f46cc2a350b67ff548f3064f3440f0912e1cada9cdbe60cb9c2971b": { "35a6b4b0da582ffce53ec6d62ecfa840b3fd54894bd3063441a0fb637cfcebb0": { "jp": { - "updatedAt": "2025-12-02T22:57:45.080Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "9c7684a372ef9d36f9de20b17407405e2b6d6d8059d3010feb6ada3ebcf17d23" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.083Z" + "updatedAt": "2025-12-04T20:16:57.760Z", + "postProcessHash": "f19aae74ee22a58fb368b4eb8450470ba4a0338914c79f7a4175de54eaeeca5b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.082Z" + "updatedAt": "2025-12-04T20:16:57.760Z", + "postProcessHash": "8a6974dd1361db06762535627d14c703b89035b5276dc6b11b84a6a2dd6a889c" } } }, "6baf2f9dc91a3aafdaf603aa070b6d303e0ca43f60c45975bd126c795f51bf6c": { "21159c4739b98c5874cd3f6e95850d863ba6be6c3a8978b327a9bef2d0bbda5b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.775Z" + "updatedAt": "2025-12-04T20:16:57.793Z", + "postProcessHash": "5e210530a3890b2360d0f33798f6c0e6ddaf389848bc0f76f0faaae6bf21f81c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.145Z" + "updatedAt": "2025-12-04T20:16:57.774Z", + "postProcessHash": "144489e400516735f7f643e48c2b1df82f1943958c364d596aa7449beac78aaa" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.777Z" + "updatedAt": "2025-12-04T20:16:57.794Z", + "postProcessHash": "f62345fa5033815ece4f56f0820886f5222c3b62911fa1cfdc14e3d6c83618be" } } }, "85b69398b5611cad0ed97b26cf9ee7ab54989a0ec7615bc3aaabc2e0ae3c33ba": { "3069fe2c05efa1690a8fd9f6e9519528b8d09fe75d6fe914e613400f223a3e0c": { "jp": 
{ - "updatedAt": "2025-12-02T22:57:28.781Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "de16ab6e2f0b3e205e3794008af5450b551ae9eb8df24ba5d67b5c86240bfe93" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.772Z" + "updatedAt": "2025-12-04T20:16:57.792Z", + "postProcessHash": "f8b5e93b88fe40262a1abaf7a1e1f738787ecea40c64acb4a34ef76496888848" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.782Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "4bf86d36e9bf3cb46e4b3c142145a051773d06e12cba5067b14a3e469adb224d" } } }, "8d4d7b2200cef950ad1bc09f8c982ee5def76cb7de03b7265ce2ab6d4a48fc07": { "782ddff0f1b9ecab869f6fba2c946f9fc98a65b12620a1eeeb09e7adfbdef623": { "jp": { - "updatedAt": "2025-12-02T22:57:45.081Z" + "updatedAt": "2025-12-04T20:16:57.759Z", + "postProcessHash": "b6e3b6d51ba16da15146614051dec9022ce0aaa430f48016aab45a2da14e069f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.085Z" + "updatedAt": "2025-12-04T20:16:57.761Z", + "postProcessHash": "b006a5b8a36a0d1775cc8cd000e999e51b020936d56b2e9f850f9f120dccb83d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.085Z" + "updatedAt": "2025-12-04T20:16:57.761Z", + "postProcessHash": "bd16f7acc549d16bb785d62410c3511f58688dcf153f415b9c7880567ebf4319" } } }, @@ -5155,481 +6288,592 @@ }, "f82d62c4dc19a5c31661c04d7a069bfa0d236fd3870382dd08d9cdbb13e02b93": { "ru": { - "updatedAt": "2025-12-02T22:57:45.095Z" + "updatedAt": "2025-12-04T20:16:57.766Z", + "postProcessHash": "c24328a3a91fd3e5e4c5957e6eb54b12034a8184fd182c3c6a956a4b2dc6f943" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.094Z" + "updatedAt": "2025-12-04T20:16:57.766Z", + "postProcessHash": "c0a127138127dce3f86f43658af8a4b4d9412e84f256c8312042cb1579876287" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.093Z" + "updatedAt": "2025-12-04T20:16:57.765Z", + "postProcessHash": "a45c9d32c2f7c96027583f2d4828a6f41b06edf7b235dd46d4e71b8053e6a994" } } }, "b326d89975e77fc4fe5e09c43f7d7dd72353ad2de4c76604cfa709d60f39cee1": { 
"41f6f44d6560ff4b7b4a8919ea06169035e1ab5f00669a7875013466734ef23e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.774Z" + "updatedAt": "2025-12-04T20:16:57.793Z", + "postProcessHash": "8609ce1e482a7a9e2c5f261130770a4dc0ee885bbde49b76fca7f5212e9366f4" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.148Z" + "updatedAt": "2025-12-04T20:16:57.777Z", + "postProcessHash": "bb56e86935b03bcebe54cce84ca66a87745cb917234e1425b54d1d2e7f04b700" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.771Z" + "updatedAt": "2025-12-04T20:16:57.792Z", + "postProcessHash": "350dbebe3bcf8c4267e10721bc589a52846ca3ec1323f9c3b74ca6a32338988c" } } }, "c0388925c5cbd757f80d526185840b27148d3d9f44442adba2d651d360e9f8f2": { "fe663d93e8ac7ca2bac8f4753fad3eb0d0150631ba2d2c4e3a85eb5fdd27dcf5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.104Z" + "updatedAt": "2025-12-04T20:16:57.762Z", + "postProcessHash": "99c7dc120b57856939c44dab24531054d323df80cfa6002e597d30524596bf15" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.104Z" + "updatedAt": "2025-12-04T20:16:57.762Z", + "postProcessHash": "a7f0c44743066b576425ef08cef1079baa68333ec5c053cbba667d6b34d04ade" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.102Z" + "updatedAt": "2025-12-04T20:16:57.760Z", + "postProcessHash": "ed3d60f9d1678751b9d26b507197a9dfa360709b59aa0e942779d0d6514cc18c" } } }, "c8f7fa8f88301edf51171572623222cac00927836c2b38e0b936dc6808969163": { "0bdde8ad92c2b56e1260138b52e278dda8cd06b984643902593d0d0cd7fb1ef3": { "jp": { - "updatedAt": "2025-12-02T22:57:28.749Z" + "updatedAt": "2025-12-04T20:16:57.789Z", + "postProcessHash": "0f61441a74fc98c8042e6363eddd6587dee1ece680c4d9d6e53fa779a0e3c9d0" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.103Z" + "updatedAt": "2025-12-04T20:16:57.761Z", + "postProcessHash": "13cf207240e17c6d07e0d85f9129cba11c0320fead216950dfb405721b8eb40b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.103Z" + "updatedAt": "2025-12-04T20:16:57.761Z", + "postProcessHash": 
"80c833d8b95443723d2990d6fc87edff760c1040b9fe4d43c9de42d60b02384c" } } }, "cafe8a479283375a185399d18cc4d2444fa4afed320fccd79e4b21ccc00756f3": { "9b037a637113b68681c5e24a1691633df3e7e4ab645c3430fdfbded768ba8392": { "jp": { - "updatedAt": "2025-12-02T22:57:28.786Z" + "updatedAt": "2025-12-04T20:16:57.797Z", + "postProcessHash": "d5c91c0c8e927fe7d30d1e0c4588fbf57e0b53161b30a89598e2def5c932860e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.775Z" + "updatedAt": "2025-12-04T20:16:57.793Z", + "postProcessHash": "b2853e06b9718024224ba399502cd1e1c2c0576e3859cee411f765dd8e3a54a5" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.146Z" + "updatedAt": "2025-12-04T20:16:57.774Z", + "postProcessHash": "a2e95c01e72f0c0a68efc6549f89df195751b18efbb28da0302e7a01508ef3f9" } } }, "d66c9f0b5bf68d56ccdb239450a2def7e044ee7dbb38a6df724002e0578ee93a": { "b17e684424dd7e3c1032466ae89d5b4b0753b2b11488a3c5480069b467bdfcd1": { "jp": { - "updatedAt": "2025-12-02T22:57:45.083Z" + "updatedAt": "2025-12-04T20:16:57.760Z", + "postProcessHash": "be92473bee0493da18c3c62bccad4fa00f84235089c933c2ae2be778ba7e1225" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.081Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "a451fa05ecb55892e5c9ca94e49886c53d626c99c62d7b0da119bba06a3c5b85" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.084Z" + "updatedAt": "2025-12-04T20:16:57.761Z", + "postProcessHash": "94b2196e63141e50f76f735236063bf0387783a8f55fa282a2bc00194ef40406" } } }, "dfb826f61e2d3065b29aed9473793d5e9482ca0064907298ee886dcc849a2f30": { "095ffff652d364d8d2d207b5c2495c8f89b149222bdc9348bc26c7785dc49095": { "jp": { - "updatedAt": "2025-12-02T22:57:28.780Z" + "updatedAt": "2025-12-04T20:16:57.795Z", + "postProcessHash": "808a18c391b1fc8086a00a797eb0077442969ec840b597e35ccd6ac021cc3467" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.785Z" + "updatedAt": "2025-12-04T20:16:57.797Z", + "postProcessHash": "b2d4b92064264c41b06011776bcda44ede3ef34654b11b0125e701adbd34ea03" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.779Z" + "updatedAt": "2025-12-04T20:16:57.795Z", + "postProcessHash": "6086916e3335c10e607ed6652c0f38cfa7b02967990de9e59b89f93083132cf9" } } }, "f7ee1a75569ad87e689d0ebfbbc64afa80e4517625c27676aefe4a63952637ab": { "62283411a070bd19b48c75ef32990fea3d01df15e6ce74c1ef8474a50f977cdc": { "jp": { - "updatedAt": "2025-12-02T22:57:28.788Z" + "updatedAt": "2025-12-04T20:16:57.798Z", + "postProcessHash": "d3c3273ae9aa730beb390168d1853e6e7b823f8536294b88733be9541b300b53" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.788Z" + "updatedAt": "2025-12-04T20:16:57.798Z", + "postProcessHash": "7b62d589122531f49409966ea78932a183cbba68a17b4fa7ef9c2c3548f8fa84" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.788Z" + "updatedAt": "2025-12-04T20:16:57.798Z", + "postProcessHash": "64230584138634617c4ec8237f10e07703550d3b3135edb0dcecc4fc13a63834" } } }, "fbf74a86f665ee9aea4f220700429c38da217030a00f7a905ec8171cb63a5f49": { "379c9b448d13ae5617010e62fc925030e206c603b76eb2ab7ab83dddade8d46a": { "jp": { - "updatedAt": "2025-12-02T22:57:28.779Z" + "updatedAt": "2025-12-04T20:16:57.795Z", + "postProcessHash": "12798d4740bc2ee11f08161d50d416fea78511ad94e5c2d44891793083780398" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.770Z" + "updatedAt": "2025-12-04T20:16:57.777Z", + "postProcessHash": "6c0432058f1ae8bc36419626b2f02e47347a5292dd4a751efa69db28eddd5820" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.771Z" + "updatedAt": "2025-12-04T20:16:57.777Z", + "postProcessHash": "e3955804863d02d0f6cfaa240645b703f1e3a6922cf09ea661aa202f00516a67" } } }, "1204bfc3bd6e857b87b1b5a9dd1156c45498c5d9e64e68cdce6f8dfe4987ecfd": { "373f45a715a82081f8e2a3779cc63f874936a6ff999e1d2ee5daf6d9f720ace1": { "jp": { - "updatedAt": "2025-12-02T22:57:45.164Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "27ec89edfbb0d60a895de4e165a9625439b2328cafda49ef9858437bbac1b247" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.167Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": 
"0a5819182019f2909c49203a9a679fda272098694a500048064279ef34799636" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.163Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "069f0549d309db113c832004774cfcdf3f0cb52b84c7d598c6f04492c2874ed9" } } }, "24ceb06f47cf82806e35ac32dfe18ca24087b06cffbe5021739286a56c793b1d": { "4ace68b0458a094405f4c0fd1dc60a5ef026a1a8639846623e86fdff84ae8507": { "zh": { - "updatedAt": "2025-12-02T22:57:45.166Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "cf4eadf4274504ddb027b5ce919b33665d3b9ca66efc1063e490ca763943b921" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.162Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "0d5868ddb14d7ff9e73bcff7f5b0e7ae52c65069ec00100ee64a8d54dd5bca5b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.158Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "ff42fa37e91791f69485b088cd2a2348a8cbfd5310c3d47e31b5b00f61d0c59f" } } }, "28e0a4a4c7b2f5abc28938acf55ed73d8388d95376bfa8dd13fdecd6bd439e52": { "7b5571b023d676e2979970ede929e965221ec27898362e89cfb8519c41cf3898": { "jp": { - "updatedAt": "2025-12-02T22:57:28.787Z" + "updatedAt": "2025-12-04T20:16:57.798Z", + "postProcessHash": "94662b4cc992b10485c204ffcdfe1bde2ec58ebf9eca1784d5603f6396293c78" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.786Z" + "updatedAt": "2025-12-04T20:16:57.797Z", + "postProcessHash": "4470d5d63f9bd72f6536ba7d492bade661899d10fd7115b0e7aaf74504f8bc79" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.785Z" + "updatedAt": "2025-12-04T20:16:57.797Z", + "postProcessHash": "887b2c0434d1ad9b8c826e773e6f12732a836ae3591b6f0cf3a27a0c6bc2b577" } } }, "4a932aa16f4947c7ef17e42150e4a316f1ffcde90dd8415e4c6bf929ba835846": { "49a5dd5634212d8130c73ae1cd817b3917e322d14b3c96754d53df3d228cd836": { "jp": { - "updatedAt": "2025-12-02T22:57:28.778Z" + "updatedAt": "2025-12-04T20:16:57.794Z", + "postProcessHash": "c57d4fd3de2ce83762d28b055b4397b88592068f685dde8be30d1ec295e51405" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.778Z" + "updatedAt": "2025-12-04T20:16:57.795Z", + "postProcessHash": "06d7c8a543d5b7cd0770ab4bf5cfd4b374d059842a9044110f4f0c81c77739e2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.783Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "4eeff5e9fe2b7dfc26077d64812cda6635ca0b06dd0c4f0ec16a33e8b4808807" } } }, "4ca74029aba5db691ad0ec824ca43ed7d92a4d5b9aa1573bc7116ad308b92cde": { "f97238d94d5bdc95a6129e0715198e8a6b955a58fbaa7da4e12e9dfa1348f135": { "jp": { - "updatedAt": "2025-12-02T22:57:45.161Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "6db15ccf93c170a68d2d7fb97325c9f02e6d8c38256bf24ea1e1618eddd27bb2" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.161Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "fb9d524d8f302d5744f6924e6f70909ecf01999cb9307b95490e63a271bf1849" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.163Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "a9c99ce98b568f56538bcfbc01de595e4720590293a8ea630e12ba0f5c21592c" } } }, "4dec7d00a7f493694d866154c843363d42ed6db4abc5dfbd010fdd90bfcaf67d": { "97c6b3e272815f6b0861c69df01e35d4daeb9dd3a1b81af896dc36740a178f9c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.784Z" + "updatedAt": "2025-12-04T20:16:57.797Z", + "postProcessHash": "0f953e37be295890b936167c16a6f61ab270d65246fd01c8f69c516acf4c6d10" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.769Z" + "updatedAt": "2025-12-04T20:16:57.777Z", + "postProcessHash": "f44cba54c25e585af4a2b0fd2cc07ed4b8606c8299d4a02ac0e4105c1d2414c6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.774Z" + "updatedAt": "2025-12-04T20:16:57.793Z", + "postProcessHash": "2ffc4a18b03d3a7fa614ec7ecab6ec4ba411a7e668df48f9856c8249653f24e0" } } }, "51e35897aeb6c746cdd097c39d7d3d876e62dfc0623f6a3c97974b88226b3a00": { "07eab7fc4983c7ac1da23e4f9c0e0aaefbcbbf2c5cf96b5e1af6a93d9eab9a6e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.164Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": 
"67f9a9ac8336a3232c299cff604e0b2065cff81bb1a7954eac3232bd1f7a0134" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.169Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "40cff747897729697852d9835a5f6a78eb60e1fc50f9072e3b866ae10bfc0097" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.166Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "0e1a4150dd08f690a0e20b9703fca1ee6c6352964858fbb6258c3d5ad9efecb5" } } }, "6faa2072fc3d3a3770d528540726e0fbdb421fa84e62c668a817741883d26440": { "579c8415475bba272d86e61362d88b8f1304de7a7411591652572d7da45590c2": { "jp": { - "updatedAt": "2025-12-02T22:57:45.174Z" + "updatedAt": "2025-12-04T20:16:57.814Z", + "postProcessHash": "dc7a3dabadd92a63a0721665bad5222b4b32a77af72fd6098a361e52519b05b9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.173Z" + "updatedAt": "2025-12-04T20:16:57.814Z", + "postProcessHash": "feedaa4bb1579aa8d22550566c4f5ae896f40fd7894444e136a1752d8f888f90" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.768Z" + "updatedAt": "2025-12-04T20:16:57.790Z", + "postProcessHash": "e0e7a270febbfaf21159b73368294667deb532c86c16b000923a5c6e90fcbb19" } } }, "765183c2f979cd15300174a6cbeab761c53e4a2b979f9c1c628c55c69015ae5b": { "aaedfcb72829b8339998ff9e62eb6e54a69755858854804557b9efc3496e73f9": { "jp": { - "updatedAt": "2025-12-02T22:57:45.169Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "f8dedb56834ea2b98f3b8fd0c76103240a76053b01fc1f47443b4936679bfc37" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.160Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "996a2c3e9c26a1ae8527ff38d66665c0fdfd6943f31983e2781bda9518914f99" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.169Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "c33622690e8cd37e14eabe01b30fb09445ca812293f1cfa1046c8df754012e6e" } } }, "9bd2367031f4ad3ccaa40d2eab23421bb90a176c87631c89d0565908c1c8129d": { "a3d661f00c76cbebde5bfa666feb5af47a4620862c09e2ad2d7ea88d84d8c98d": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.773Z" + "updatedAt": "2025-12-04T20:16:57.793Z", + "postProcessHash": "14b01515ba3282ae4daf0953b2a020ceb4ee7b2ee01112b956a19b66ad292744" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.777Z" + "updatedAt": "2025-12-04T20:16:57.794Z", + "postProcessHash": "9ec4e2dd8c0f4624cc3c804e8db8842d6b74cf7935fbae167fea6ef1524fe394" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.772Z" + "updatedAt": "2025-12-04T20:16:57.792Z", + "postProcessHash": "1e23f767f676ff5f4481e2738dd3e81eb5d56e2a62d9d5df1da27042639c7994" } } }, "a61623fa5c7f672b85c730754446bc1a65a53fbfc1fa7eb64e0779690ac3049a": { "e82d7f23954deebeb66e19daaed4363f0e28569d3a42d1de12ffdce2ad3976fb": { "jp": { - "updatedAt": "2025-12-02T22:57:45.172Z" + "updatedAt": "2025-12-04T20:16:57.814Z", + "postProcessHash": "d554d6aa39310155aec0aec70df863857016b5bcc8ac58c2b099ebb0cccf7525" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.170Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "ccad1ec6d1f3bb4238a1426b34c1b780295ba8faf1543d5719747cb98bb0b223" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.166Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "1e3934ef3bda61bc7e47c9e48d3f06d8b7a348383fc21856a20aa3fc03fe0d70" } } }, "b0c4a6145c3f1c781d51adb03f8e4996331d1159cb14cba9c81b851b728253ee": { "d161896a6a88f3dc7f188f95f5ef37b65e50579afa43c7f21b1656e07c5010a7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.168Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "61326e5ebb9299ed045fb3fc171c1f992e7163405029b7248bfc060dfe03133f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.170Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "185624aed3489bc61c9491585b4bd1d500bd71cf89d5482d76217f69b1581bd8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.172Z" + "updatedAt": "2025-12-04T20:16:57.814Z", + "postProcessHash": "5e808829a64a5bcb551d24b513cfaf49db170e10bc8f163be9e44a20714de97e" } } }, "b6071010708dd9f91932364b3060488201176aeb008d6ba6dceaee25a82a0a2d": { 
"2007a45c3bc14f5333a4866ed3de37e1c4ce663c0e2b1fd31fbf2030fed127e0": { "jp": { - "updatedAt": "2025-12-02T22:57:45.170Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "8e735a33e9cdd78760897cdefbb03fa17d2710cc9616d3d1176ae3a0f4bc20e2" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.168Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "8e6be21e098b427436efaeeae4e4d54974cc94c20cf0d730b32e7c9f0a8b1486" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.166Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "0ceb18478272458fd1047439c98e2942f67ec98a45e48c5b8fd64f1ca461e134" } } }, "bf4425dd6cb86116b19753768a5420c28985c1fcb442ecd1b5e1d37e6ca2f98f": { "e1eae6052323b0cc1ddca82febd2af06bef603d4809bc06fe09b3e2b0880ed2e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.782Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "bfcb15514a3ff17f895f6545ca7c317e52d0cd2fbe60c47ba5cbe3f1524eae57" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.781Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "44a0d94b92cb2ed3e8d309f03ee4c73d4e4dbad12f1b0ad8805ee7df534cb38e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.773Z" + "updatedAt": "2025-12-04T20:16:57.793Z", + "postProcessHash": "86838d77a3e717ad7566ddba5260ff9102bd38bd43ccfc4a751ddb01ee81739a" } } }, "cdab7bf0d8c24f10d2d5dc375305c22f728e0f36fa1e29fdd04c99781fbc6cd5": { "083150d2c3def0d0736d5dbb6a695b7ea5c691ce94fcb5f5e84487727895f4ff": { "jp": { - "updatedAt": "2025-12-02T22:57:28.770Z" + "updatedAt": "2025-12-04T20:16:57.777Z", + "postProcessHash": "ceae7246c2187ed1be1f7ceb6706e90deca51df4b079b0cbd0f92ea96e06f63b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.779Z" + "updatedAt": "2025-12-04T20:16:57.795Z", + "postProcessHash": "5a19d6e38de7414aafc8f826fa62a9616a328df87c1bdc1d15698b3adb504020" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.781Z" + "updatedAt": "2025-12-04T20:16:57.795Z", + "postProcessHash": 
"25f6a3b1f2d104c61dddf49b4aef6a8a9f13f1343a563c0750648082d2c930f5" } } }, "e93967fcdbac2bba7b89b4164ea987452cd09d1070238a59a238036fd94e8618": { "94a465a749cb716926a6ad2a66382c7591719aa2f9d792d5910f48efdc1e20e5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.155Z" + "updatedAt": "2025-12-04T20:16:57.790Z", + "postProcessHash": "a415ecb0d90f5d45912eea53a70d10089614f790c5198ba3b83afabf543db6c1" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.776Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "93f603fb205943e6ff8322e06981bc8d513c052bee3d63a77615d7f6ee853f41" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.772Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "03b027335a5c298b34474a73c7979fb6e050c9939dbf17b293062c4fb25f4982" } } }, "f0e219e3fb45c878fc0c3bc00fdeef1c5dd9c6ab75d1a093efffa9a0a6f002d6": { "f70bbeacf6050f44afacc1a4872c5eb1d3c4e9df491f0c452fdbd869057adb57": { "jp": { - "updatedAt": "2025-12-02T22:57:28.783Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "b49c00bb658d6d8a5ceb45c70a0968288448ffda4b6c9852e3c5fa01182ec5a7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.776Z" + "updatedAt": "2025-12-04T20:16:57.794Z", + "postProcessHash": "1c47320ac4f8b4bad86e679725757cc62d0cb4224c96f4073ea3ca3ea40f1044" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.776Z" + "updatedAt": "2025-12-04T20:16:57.794Z", + "postProcessHash": "193f5623396d261e09af713cb45cc7cf2ac6d58d7b1502844f09a484119639ff" } } }, "f39b12efbc001a35d87891fb78f7cc37fe27f3e15abe1f7329d97a2afc1e55dc": { "abf20812398c31c2895cbc7f3902a957857e45b0abdb831d7765f7268fac0928": { "jp": { - "updatedAt": "2025-12-02T22:57:45.162Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "eab9c6910ce0be64bfc7df81ad4b5e64c16e1bb9790c4ec0a02508e38a630e68" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.165Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "24aac98e1dd31fdb358f6bdb97f40ebad5864d418099f31a5d82318727a49cfd" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.160Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "75fbdd7dc724a4be4f1dda91f3e1200a31e9daf9bfd2422d4a0b2b60f5e3b76f" } } }, "f44395a43048118c7fe3d4525c225cb5397a7fe3c98ed8d8b8fcfa08e86d5620": { "9d5c04c8e9de527ab629ee91b9ebf0d572f7863c4f88f5651c671a5fff9df8fe": { "jp": { - "updatedAt": "2025-12-02T22:57:45.168Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "783d7c7b38688607623f7bb7e245d85b1e30199f891f937bf2c4542941ce0a07" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.165Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "8af3b533ee89648e3e85b141b02e9fd3374f206db0d862b6a9decc31d01a7bb3" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.156Z" + "updatedAt": "2025-12-04T20:16:57.791Z", + "postProcessHash": "566eae6386254cf5527bcc35904b216cf2cbaa4b852fb5b5715278925a1679ff" } } }, "f646fb33e6fccf32e79c0ff130a3e33907e8822e1555b98aa42e7679988ce2ef": { "9c48604413e046bab5cde9bba416d6f9bcc6a7ded493b091e329a27c18ad8b0a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.159Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "9f064dff2af6148c5e475eccfbabc4fb0649293553c9ea7f7af79b8f846799bc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.165Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "9363482dcabaf3e2d5ab55237f5834c87c3b866577927091090ace393ad3e2cc" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.160Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "fa07674eb334a11efbce76dea7f4efadda21b5493369a220c4feaeac6526c757" } } }, "fb8e6138536700f07eca78b5f157d45b6036f77f52782711c91ba183897b4c9a": { "85d1f9adecaf2dd9004cd1e79d1ecdd61c68f65285973b86e6e2ba31e2eadf2f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.778Z" + "updatedAt": "2025-12-04T20:16:57.794Z", + "postProcessHash": "3d49f7c879bba344b9910bd797d0d8abd81da97bf1f33950cec906b11543c4b5" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.780Z" + "updatedAt": "2025-12-04T20:16:57.795Z", + "postProcessHash": 
"0dfbf9c1c588ff4886c27ce6455b099b6660567d13f570dba78d6c7b885d5d50" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.783Z" + "updatedAt": "2025-12-04T20:16:57.796Z", + "postProcessHash": "1184aba183ab0a693b4bd8f72ded7b727a9316a2c85cf2be1f3a25e0436ca582" } } }, "fd9477b10ed7d16ef2358b8d1e49ae2377cc94b7a2aa1d03cbf8e6ee55954611": { "36f5cb32c3341f1b52d0987870b8e971b48d9b4ccb72422d895a8e8de42aa565": { "jp": { - "updatedAt": "2025-12-02T22:57:45.144Z" + "updatedAt": "2025-12-04T20:16:57.789Z", + "postProcessHash": "fe58283bf606f6a70bdf020c22dab04955e5d73a8277923d3926a20b60f3f91d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.771Z" + "updatedAt": "2025-12-04T20:16:57.791Z", + "postProcessHash": "e408dc3f6f959f05b321cf04e158e29bb05da7d1e968a67a4d5e8bd2872dcb1f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.145Z" + "updatedAt": "2025-12-04T20:16:57.774Z", + "postProcessHash": "d7b30c48e6ed6a6aead9a4dac7a1d910419e72f7d950b2a65b35d8820f9ac430" } } }, "0a48452290eff051d5083217a35dc08044c6070e028523f7cac12162494649d9": { "007d16df56ba8d29945ba611a8ebd52c915dfd07a9959101cb56729201855baa": { "jp": { - "updatedAt": "2025-12-02T22:57:45.174Z" + "updatedAt": "2025-12-04T20:16:57.815Z", + "postProcessHash": "335c820f4c34b70622e1abced18586cc5609eca03e4b761a02a3445ab9cccf35" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.768Z" + "updatedAt": "2025-12-04T20:16:57.790Z", + "postProcessHash": "684353425b1ed54452dbee3826f0df5f4567f73d29d27e1700f642b077fe310e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.169Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "f34fb0dc1133201699fcaa00bac781d11f523d1f6aeb53e85d359b10ab34ee59" } } }, "1166fa0e4a8285a06447a2c810faea5954a70f41dac027f9312ad41f58d7980c": { "b55b582f39fbb6b1a9a83d90ec6685c4218c3e70536c2a445ad09c9e3380e0e1": { "jp": { - "updatedAt": "2025-12-02T22:57:13.077Z" + "updatedAt": "2025-12-04T20:16:57.824Z", + "postProcessHash": "6d3bbd48328025b028a39a48687db13fd68b90529e3a3e85e0d7b4b7fdd7c625" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.070Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "a91468dbc48c7e39e651932d3db72e71886a92ede0e542e9763ac9cdbb3590db" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.155Z" + "updatedAt": "2025-12-04T20:16:57.829Z", + "postProcessHash": "b103884a4bd4a1c4dcd6b6099c9012d409f4c287658f57e8f7c82d40e06e3f19" } } }, "1cc5dc60c755c1b33090726793f332fef7bb57174bac81be1fd840360abec0a9": { "0b2d9a2f1a1de345b24bb2aed0e200731bba362c09de9a98ae9041f3e9312321": { "jp": { - "updatedAt": "2025-12-02T22:57:45.159Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "24001167b5ca7506c2657a010e12d3bc391d2a28a56f660bc099abdd8be07a8c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.157Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "32d833d4303e4e7ff711aa7104b65a863598974c37286ce7f1a5615cd8125e15" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.158Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "c5864d2478a54d3b8a96b757c78432726745ddb13afb0cad3922331b51c83b6a" } } }, "1fa73f7fb3f17cb73adf9d2fd3672fb7b1bcea959cdfa4cc1cebebf9783e8493": { "68781891b0d87b8b7fc619dd4fa0e041668116f49851eeb31c8f510173e044b5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.162Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "78b42a032a73bc3bc7e94ea927c1c116e336b52a4e1a2fdf9e7f41581db6fb0b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.163Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "87d0f93e8bc24cd227c518ec9d92ed04bd75e894d3eb89c45b4caf812b75c755" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.161Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "56ef85dd5f0d272c52d44ad5c7ad42d19c0458a9e4e5fb8bf1057789fcbf569c" } } }, "277327bc5d1f24036dfcf5127459029b84745c17df9cdbee699b92b7fa8c244a": { "edea05c97af2e9b00969299f942cd800726b3f980c4ecc738e093ae93dac3c2f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.081Z" + "updatedAt": "2025-12-04T20:16:57.825Z", + "postProcessHash": 
"711711c7d902a9008aff116db69619dc1176eadc3115f848d7efff90412e46cb" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.075Z" + "updatedAt": "2025-12-04T20:16:57.823Z", + "postProcessHash": "60399b1e8ab19f098014b005bd6ebf0f90d701723aca157cacf744eeaf54b1d3" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.082Z" + "updatedAt": "2025-12-04T20:16:57.826Z", + "postProcessHash": "537299c200b7179ded1307f1d1a582577ff7a14b03729c061dfed9b7698d4e3a" } } }, "2fa7a8042be873e4594c45fc4aa944580ac9957f07dba893bd079f9bd6831739": { "d53dbb06ce9443dcb0eff1d6d827440cd3f32c6995b1495a014f731eb03474e6": { "jp": { - "updatedAt": "2025-12-02T22:57:45.188Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "ee7fd108b4701e522a247784add8081bcdeae6f363b76a5ce969920076aa921c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.153Z" + "updatedAt": "2025-12-04T20:16:57.804Z", + "postProcessHash": "c7d845789d6e7d12875cba5a2d9d131c1d16c05331a54b4d29c51efcf2ad9214" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.071Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "19c2cf64b483619b4bda64d0922db871ae04c9ca69549f4812b2e17e1f465c89" } } }, @@ -5647,1469 +6891,1808 @@ }, "43bb5d1fec0dd25484222ab1ef9501d17f60d2e4855ac7772a74068bf02aada5": { "zh": { - "updatedAt": "2025-12-02T22:57:28.767Z" + "updatedAt": "2025-12-04T20:16:57.789Z", + "postProcessHash": "8d42fee832d7ed55f6f025ee8df35d84f398a338da3de39dc863826c1460224f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.767Z" + "updatedAt": "2025-12-04T20:16:57.789Z", + "postProcessHash": "93d86db03956f54342aeb553bab5923a6cc70e52247814abebbb2e6824ac96b4" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.769Z" + "updatedAt": "2025-12-04T20:16:57.790Z", + "postProcessHash": "d3344e54c37a79b9d1ab3113c9fe8cc618b3a45d5f521914f809158316f29692" } } }, "3cbdf684e4132d36432757c5b2479a68267eb108858510d7f118f4f80e1fe430": { "02a6cbb43f399b26f891350cfb238c12040d0543f4f79b9119f782c965160d27": { "jp": { - "updatedAt": "2025-12-02T22:57:45.175Z" + "updatedAt": 
"2025-12-04T20:16:57.815Z", + "postProcessHash": "76c4fc54d3d893c37b66c7f37483b442258488b5de800232c9276ab8b1a4e474" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.175Z" + "updatedAt": "2025-12-04T20:16:57.815Z", + "postProcessHash": "30d92aa07115634a7526165c576976d43183b1cce4d4deb4fc2f8bd522e08a24" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.172Z" + "updatedAt": "2025-12-04T20:16:57.814Z", + "postProcessHash": "448ef3899ab73ac73e503b1bcbfac18a42b7369fa7d1d2f6bccfa9a4dfd47c97" } } }, "4efac6c6f465c7a572c85feacf0f06b43a4e0c32c6c920019621529593011d4a": { "90716f5cd329825964992e1323d48a1be73c0b4afe6438deb2f5faa6947cb686": { "jp": { - "updatedAt": "2025-12-02T22:57:45.181Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "a530575675adf563e3e0cce3571720caa7650a7035bbff0a155d8d09c7c6ff85" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.178Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "f45817debdf6cf37b18522a175833a519d18655dec56ff8ffce226e4699e3b87" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.180Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "08cb609350f19c036029cef6cb794869fe77b00e04e195132af816ef05de1d08" } } }, "593efc50139609f8ecd70340a6cf0d42b4647499a51d8026ed014bda5df9c3be": { "d22863b43cc42cb50748f21dbf3ca52aa023402a9fd5fe4d478b8ad89b656234": { "jp": { - "updatedAt": "2025-12-02T22:57:45.186Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "2ff863a106d738786b0e55e5128a630db8bed34a7afafc78f7b6a73308455c09" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.185Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "e072ac9e851d7454bb8d1ef2a3f39c1ea52f9b4151f56d7488e438d6a53d9cee" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.187Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "1ad074691fc77a6a085e358797dad7efed1b0ddb505c2831791c0b2af3c64826" } } }, "64b5024b5182bfc45a505634c61271260ae40641e132a126b98fdb77fb6a7c95": { 
"4407c0820a47caebe5b1dfe9eff3d5de80d013db89f0925feb173cff9741369f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.081Z" + "updatedAt": "2025-12-04T20:16:57.826Z", + "postProcessHash": "c3c2562ea2943f5a93b44f176a81ab3dbea3c23743680b1e15bb00d7f453a9c0" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.083Z" + "updatedAt": "2025-12-04T20:16:57.829Z", + "postProcessHash": "bd0737410ec88352ca0bf155d8df05bb59cd63c87aa398e2845f677a61c97017" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.082Z" + "updatedAt": "2025-12-04T20:16:57.828Z", + "postProcessHash": "f3f3ecbd6680f78b0c452609257bd6126e4610fa7a6720bf90c3723e0fbb248b" } } }, "803a744763b2e971d43427be40f1173beef9290f8152a79e7047ef5f514f42d2": { "bc19380cbc2e01ee6357dbd1150e6424d9856ad286e39cddde352bb68470ab78": { "jp": { - "updatedAt": "2025-12-02T22:57:45.164Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "1849d0d085b7e5dee098aaaa5b952389e5688e44283574f41a11433fb9c7cde2" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.167Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "0c3ee1016d28d625247ceff477fe66ebeb4d51fd7c719b3ea166add1053cb1cf" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.159Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "89507f123626d2863e77d16dab2aadf8f4c61781e98f570d20c888ccee5e1904" } } }, "81b00d2254d3e49a8edabeaf9d9461d8fb19914c8abfef93d05c71270dbf3786": { "96a507a0b8ed5c5846b4d8f6ffced106a8f7d73ccb668fa851fed8b3be3dbee2": { "jp": { - "updatedAt": "2025-12-02T22:57:45.178Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "ea8a9e32c03b1e6147c121e09d92b3cfa27c68aaaa997722e8f1978e907e0fba" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.181Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "0bd054c3b017005ea784d5584dd9d73be430ec8bc79df219820132415ef8fd77" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.157Z" + "updatedAt": "2025-12-04T20:16:57.805Z", + "postProcessHash": 
"e0369bbe18f431323222995b88a2e73f31fac3250a69d6dc7c8aa1e98a5c768f" } } }, "81cc4a22f5345ef537b81bda612b5e1b5de5d2fb5b7d6563d33ccac4c53d47c0": { "2264f2a7ed8ccbf74a72e2d8d69d0a56cc35d7bce6b065e30464315bdeee546d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.154Z" + "updatedAt": "2025-12-04T20:16:57.829Z", + "postProcessHash": "37fcca6fa4e5d0d7fa77ae3ce4e8b19c51f45a940f1280e5133f2185a8b8c7a4" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.183Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "415e14807d345956b756c7f9a0d503d5a2513b34ce88fc309805971ff1de9443" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.154Z" + "updatedAt": "2025-12-04T20:16:57.804Z", + "postProcessHash": "2181ca4c700be3b0793536fd24ac63c9197757e335acf8473574b31de5e47ed6" } } }, "9229ae8ebb059ce61a183f4129a3e0da801e0d4717a314a296909aa6035f7d9e": { "fea4e84293c545f2207f795fa4b98c049df1c2de4dd7351a04e3cfb8dc162c2a": { "jp": { - "updatedAt": "2025-12-02T22:57:13.076Z" + "updatedAt": "2025-12-04T20:16:57.823Z", + "postProcessHash": "db721945dd076c72ee725409aa33836bd3da5a529be828e140ed43cf1f3a8c52" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.079Z" + "updatedAt": "2025-12-04T20:16:57.825Z", + "postProcessHash": "caf684af1e3f4cb28b6118b87ce0ccfc7e7636ff7368873f7d09d3eca6c5fe0f" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.081Z" + "updatedAt": "2025-12-04T20:16:57.825Z", + "postProcessHash": "6dd0fa286083cabba9a7302c8fad068caa9d128665e6595fadcb31484fc4e079" } } }, "a9c6646ed9b12fe5c1578c74e0f8408353fc82448e8041b1c1d96f9c46e78dea": { "9cf8633b74ca4ae563d8b6514b6ee95e035b912752b8937b25e1ea6d00d6332e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.186Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "5e2d05795c43aff4941456417837b8a04a697fece8314b0d952444cd7d79e680" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.185Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "217ec8ec61df156ec4679f1fefc782cb160913de8e5c3f0580055273fc6b5388" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:13.079Z" + "updatedAt": "2025-12-04T20:16:57.825Z", + "postProcessHash": "4fc35b8d35ead9e83ae2eae75edec8bf13d335c957aa10b142422412699c81de" } } }, "b464890125efe481177f12e2f3c00a28cae107b65627ec59bb17ef93cf157e35": { "4a59992606ccfde9022f21ac63edbdf9bc3e1e8100eaeef04c372952f8c27195": { "jp": { - "updatedAt": "2025-12-02T22:57:45.156Z" + "updatedAt": "2025-12-04T20:16:57.829Z", + "postProcessHash": "3f888647e63d4b0fa04c22fd4beb8fa943f074c8c6e0c8a97010075bfc04dd8b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.157Z" + "updatedAt": "2025-12-04T20:16:57.829Z", + "postProcessHash": "97733c73027b8a36ee9bb9a623d88bb3ba214b0f9bc0a3a4e548c73eeb860de8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.156Z" + "updatedAt": "2025-12-04T20:16:57.829Z", + "postProcessHash": "3c87d8117db775972f739762492df949e862f44170ba4f49a130af0fe8d6046b" } } }, "b676683ed68be73eb9635273495e9731122ee184bb63d7293df2bdf22ebad7d0": { "81117b826442551d1cf5856c822f3d1c75ce597cd1faec68ca4ca0233ff5b395": { "jp": { - "updatedAt": "2025-12-02T22:57:13.071Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "9fcdb907f3cb3af44d1ae3a2105dea85b5c296c22017281987c13e4598f4e55d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.188Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "396dccdfcc6752b3f1c18d3dc9a25cc3169f80720bbf27f7190b41b89cacbeb0" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.185Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "4170df1337056f5883650514a04b43892495b76e81f27f201f48b677d8b7a72e" } } }, "ca8c63318081185dadfc8f0c999b2cbe8002743aa40d511bc0efe186e20e334d": { "d058a230016b4adc22efb36e3b3ae2fb018e4b84cf33b6862fd4f520d9e7d3c1": { "jp": { - "updatedAt": "2025-12-02T22:57:45.164Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "ae699f8adf60390f7ffc7c0c2b8a7737f6f0d6fe1304795799cfabbba14c88f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.768Z" + "updatedAt": "2025-12-04T20:16:57.790Z", + "postProcessHash": 
"86bf82798d29c81f584988536503e3142d872ab02397d201bf70d72945eaa1ed" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.171Z" + "updatedAt": "2025-12-04T20:16:57.814Z", + "postProcessHash": "fc769f280c04ec7e7a734902562930c304838c6a2dc8dce44c136c731442674e" } } }, "eb036cf7d16bf188b666a24b079c499f0e91022203931f813a7708c77c75046a": { "d269d0ef9030cc0accc4626f57a4a0fc9fa917b10cf282d13fa57388c6603e4e": { "ru": { - "updatedAt": "2025-12-02T22:57:45.155Z" + "updatedAt": "2025-12-04T20:16:57.804Z", + "postProcessHash": "248827366b8ffa6cd7485b2faa44cbbc155d86ed59314b1d1aa67b0da1bedeb8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.180Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "a959e3aa0c75d0d268a540bbcd529a36e36c1e3cc1f0a203580e3f0558749d91" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.183Z" + "updatedAt": "2025-12-04T20:16:57.809Z", + "postProcessHash": "f8ceec422a170e15ba66107a18f0f032cf5f5099e129b8769bcbba2a08609c1b" } } }, "f63b4898f4bc0778b3cf11bb85aa91a674b602d4005b001e2977ffa70c3be27a": { "dd2ba17bbdc49a7afba06862b9e2f43e39bf834aefeb4fadb52775d8db69d988": { "jp": { - "updatedAt": "2025-12-02T22:57:45.171Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "901f6e9b827e2d23a38a538220a9c0b19a1d6ae3f0f813c5152f10613842fb56" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.171Z" + "updatedAt": "2025-12-04T20:16:57.814Z", + "postProcessHash": "7dddd03d3660b3dbbd7c08f2f5a9a142ca47d66b145212136652fb09fbbabb9a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.173Z" + "updatedAt": "2025-12-04T20:16:57.814Z", + "postProcessHash": "2f50eb01b965bd8aae23c9c0a585b631dc98d81caee6b43758a3b6b9e65eadc4" } } }, "0850d83ea9ff1a00088e3d64a210fcd073b48986df51234fb69582c6b7fb76d6": { "9a43156c05a1578fda8031ad1f1e9faf8e97b4816647d44bffd71e1f15c3647d": { "jp": { - "updatedAt": "2025-12-02T22:57:13.116Z" + "updatedAt": "2025-12-04T20:16:57.827Z", + "postProcessHash": "7d69ebf90c196c4c0ae51d7f5d8f129fb8600fd01592c70c6eb4a7e95e0c44b3" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.117Z" + "updatedAt": "2025-12-04T20:16:57.828Z", + "postProcessHash": "3358257da327d34c84a48179192a067be14f8e6dd2085fa730600404bb825e14" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.111Z" + "updatedAt": "2025-12-04T20:16:57.824Z", + "postProcessHash": "fc6b5f2a5fb9cc8e4ec25e7802c5c6ed5cc10eddf30ce7edfbbdca375d87dca1" } } }, "1bb238eff17ee95c127a21dd293881a980bb8f3b0aff1bdd7ecd004fafe3764b": { "d005d0fdfdc2a2469851a9a7d27374e5fcf68c97518463c6aec7498e165ace83": { "jp": { - "updatedAt": "2025-12-02T22:57:13.111Z" + "updatedAt": "2025-12-04T20:16:57.824Z", + "postProcessHash": "40ddb76e47e82d9610b0b1bd5dad9aa8ee5f9e032fc01b25ea84f368fa4c3b2a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.176Z" + "updatedAt": "2025-12-04T20:16:57.804Z", + "postProcessHash": "0f84d35dea04bacd65be6c5675c8f2220109b80e1a02d2f37a60aa18179a94d7" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.176Z" + "updatedAt": "2025-12-04T20:16:57.804Z", + "postProcessHash": "5adc0206edc029f7788bac1296f29205936d1f1a133e1b8294f0cef7bd9b32a3" } } }, "23d2246026762ae3494ced9af104cea91f1482d8c7dae1149b7bfa3441618283": { "0e016f2ab261e197b48540cb3d3091ab6d3af62d1c883dcd3281cb2e578a1bfa": { "jp": { - "updatedAt": "2025-12-02T22:57:13.109Z" + "updatedAt": "2025-12-04T20:16:57.822Z", + "postProcessHash": "de013d4f8ca12b69dd2dea0c22148d2cf20ed2d30faa67df1533a74e3e9af571" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.112Z" + "updatedAt": "2025-12-04T20:16:57.824Z", + "postProcessHash": "633e784e81fd73025bbebf829798f4e39177b4f98845f099761a0443c55b52e1" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.110Z" + "updatedAt": "2025-12-04T20:16:57.823Z", + "postProcessHash": "6dd74d93c5b6e48dd54b3a2e8482061d7ea5f0517523311d04de6b46c9aeeed7" } } }, "29f7d7e079a392736f8e8414574847d7fc12094c29074c197529b77eafd97a46": { "ee468e104feb8b3c7b0aa6d6f466b62ccd0c40d76c88efce2ee623e95b1737ef": { "jp": { - "updatedAt": "2025-12-02T22:57:13.074Z" + "updatedAt": "2025-12-04T20:16:57.822Z", + "postProcessHash": 
"ba39f1e1196b566b55aa060578667b58ff01ccbef8dc2865f88c196930a87768" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.188Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "37b86aa7ef9ae71573bb9f29bbaa1286b4a487fda93a53d189c05b49ce7657d2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.189Z" + "updatedAt": "2025-12-04T20:16:57.812Z", + "postProcessHash": "5949d8f4c68ded9b2c74cd7de9a57e1231514b97df3e6f371ff6f3b41c72512b" } } }, "3096aa4bb7832bb2f54010d3c5b6a248f9ebf6a366fb879f82c0eab244f815ae": { "fa532e7e71ef2e3585f03d9f864f4c524338db82a3098d4d46e1abc74f06c4fa": { "jp": { - "updatedAt": "2025-12-02T22:57:13.080Z" + "updatedAt": "2025-12-04T20:16:57.825Z", + "postProcessHash": "95e996fa7d7ad229042d2dbfc6aac7a53e026a506b3836bebca96892118eed86" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.078Z" + "updatedAt": "2025-12-04T20:16:57.824Z", + "postProcessHash": "cd353730494bac53336fb437c98d8b698c53f483c7e59ed8775dbd1e069e46f2" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.074Z" + "updatedAt": "2025-12-04T20:16:57.822Z", + "postProcessHash": "bd63efbd88155f701ff4607fcef206ac92f29656a73cffd03a8a51989d27b564" } } }, "3f380b9290fe7c7d901aac0cb526ca3d522c42c21bc64b85c2e00fbdc953e794": { "e0c1c8cc04e2a4ba817680c61c7923693919ed48ab52a53f3ddf5094909767fb": { "jp": { - "updatedAt": "2025-12-02T22:57:13.076Z" + "updatedAt": "2025-12-04T20:16:57.823Z", + "postProcessHash": "034b9ed035e57206a0f3f5f02fb9cc7c412f759f8abf6c14c3aee969080fed2e" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.073Z" + "updatedAt": "2025-12-04T20:16:57.821Z", + "postProcessHash": "d770e5fce3e42407482ac763427254bfc7e534356f42200e6b566bbbf6dd1698" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.075Z" + "updatedAt": "2025-12-04T20:16:57.823Z", + "postProcessHash": "b9f75d94f1c7feb3a942e10c2e9e7cd102a0529dba3d524dc150265f896260ea" } } }, "492356529ca75008f683673b06635e91f3cb2d7f1097826262a7957c6cd78136": { "ea6eed1ae135ae1362375bc54a6abf4d9bda82f9cd56e95b97e329d6dfceb889": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.182Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "47f46c611e32146b063b051a7e99ae05c53ac308dd834473ac9a101d9c628e99" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.181Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "d1c53671e7bc184e3a2d90ef304de2f0dbb12dbb3485c2114925d51b9e4b33ba" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.187Z" + "updatedAt": "2025-12-04T20:16:57.810Z", + "postProcessHash": "b22cf8cede992225f6f5aee94367aaa34e88d0b045f80653340b25306eb74442" } } }, "576c74bc00a8723ea19c093ffe6b3a472b9236e8f3bfcb0b95955083f9cadb86": { "351824c23a3d30665651f9a8eb9f4b521f17129ca1d202c38cbde960046a5d97": { "jp": { - "updatedAt": "2025-12-02T22:57:13.099Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "5c2f77e55745f08f6d991accb5736498ec4fb0717c231d6e86f28fb173350395" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.102Z" + "updatedAt": "2025-12-04T20:16:57.820Z", + "postProcessHash": "faf16c6b6203c24ae4989f9f8b5cf1b59902ef35939886f246c468adcd9c4c66" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.102Z" + "updatedAt": "2025-12-04T20:16:57.820Z", + "postProcessHash": "483c5516febe66c27aad2d3a3d0129fbcea8edb0d7eb74d9c903496ff4ee64c5" } } }, "57e03de44d901875fb5eb2401640aba105efc70cc184f0f23ff04489b548b151": { "3f8e85fe2d0ca94113aa748a9047c9553cec059c087362ec30bf90a68567a495": { "jp": { - "updatedAt": "2025-12-02T22:57:13.083Z" + "updatedAt": "2025-12-04T20:16:57.805Z", + "postProcessHash": "84e0ea61d4fd004367dd7fd023867512e437a20e748a921a0dbba0f4886d67a1" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.097Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "c5c28b58ef6bd96ade4670216e61d3055aac54f1b1e3e582e9e29f2dbe6942e5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.090Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "2b2c84889ebacfe1fc673351faf9b5913206af8992033fde6fed2c6de698723d" } } }, "82d75c46385806468ea3f9bb89ec325a34f8717e9925511cf3f746c6793c4178": { 
"56b23f6722a4743f7d9412ba74c3c4701d0fd1018ab3474c5dceb16bef9ca1c1": { "jp": { - "updatedAt": "2025-12-02T22:57:13.101Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "0c722f38b166dfee4767f5b6d80b515ef1add5b4d5e5c7f45ba128b2f398f74a" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.091Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "6cf7ae547770625189ff4c711ee46f5e292917f66b7c1db1f9a16efe23e3fd5e" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.100Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "1db2c34d5b2c9dc94aef1fb9cd059c1ae1cc613891c8fea69eff0e57e8918315" } } }, "835fedb5cc4f200a51753e009ebccb9c5c2703128ecfce3dc53168f68570dd22": { "24e239e6ee1d39ee0ec39c0ebaf4dff703bef48fabe9d4ad32d9fcb51008866a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.177Z" + "updatedAt": "2025-12-04T20:16:57.805Z", + "postProcessHash": "29d7d4ae6d00c0ab35168f729bbc94db2248085ad8ecc172dc169c906d7aea5a" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.099Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "675813b7bcb03e4db418ba19c673fa4efcb2f2f475538809eddc09237bc5c01b" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.100Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "e054de8a63560fb65c2e7fcb7053e3c9a676ecc2dec75219f4e60c91dc0f8b0d" } } }, "a218ff0160f1afb6fd940e4797a2159d55a8dbac410f179f5727b567999eaebf": { "aad6f9838da5dc15d37d5f9d16b53754eb0d3ff68a7cf73064f05eaa3669c05b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.184Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "a9ea1c82cb9bccf14ed8b21c0ba0d5c8f90c24fd814aa3666f6051b1ede44784" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.072Z" + "updatedAt": "2025-12-04T20:16:57.821Z", + "postProcessHash": "71e8564b7a04093161ba33e1dc2eeb34832a84e319f4cdbd834a3a0792fa492a" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.073Z" + "updatedAt": "2025-12-04T20:16:57.821Z", + "postProcessHash": 
"df677fb973776083da15b2bae773829585c8e9544e866e7583404b83d5048e5e" } } }, "a47af53023e5932aef2db5b77a0ef7cd04c45474a2fe93ea211914667b44e5ec": { "4ff7d90419a50527c3757c649b6725b0da711648246268bc520c1dae8ad9ef97": { "jp": { - "updatedAt": "2025-12-02T22:57:13.110Z" + "updatedAt": "2025-12-04T20:16:57.823Z", + "postProcessHash": "75d1109fc9cfa1b73f1363abfb7a5802697dc1433c36e6d6202c04e483ef0975" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.108Z" + "updatedAt": "2025-12-04T20:16:57.822Z", + "postProcessHash": "517c9ac5758f86ebfedff4e688965240148ba4a33a098cd77723485229b62ac1" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.103Z" + "updatedAt": "2025-12-04T20:16:57.820Z", + "postProcessHash": "068fc1a3aec4ef27f5c475879dba4a938df2c3ddd29d92c8816a9956db8c964c" } } }, "ab35a5ab8729c47c7175e9c6cc67e42aba43c58b1e1f2c291dcda4c3977b06bd": { "02d5a608d6ee630f001b827a8fa1c5cad477766220949ac58c83c9ea965c69c2": { "jp": { - "updatedAt": "2025-12-02T22:57:13.109Z" + "updatedAt": "2025-12-04T20:16:57.822Z", + "postProcessHash": "0e81313e1c0f3f626afbd2fa279669dc19d3780f6504738d5755bc21350b0931" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.112Z" + "updatedAt": "2025-12-04T20:16:57.825Z", + "postProcessHash": "f1a89204176eccc01737ec656ec0e00ec0df9c43d0d68e34799da8da89a4d15b" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.107Z" + "updatedAt": "2025-12-04T20:16:57.821Z", + "postProcessHash": "dfcc2755db6c33b506df5df7aac7794ac31e06de0b7bb93ce05673d7b7ce9161" } } }, "cd604eef1633b62d027e3e7d70856d9553f233ca6e0180381c2120985643a86d": { "e37d6318a1605b8e2ec28a6a7b49ca74444391f022f98dec4ac9cf1024c821ed": { "jp": { - "updatedAt": "2025-12-02T22:57:13.077Z" + "updatedAt": "2025-12-04T20:16:57.824Z", + "postProcessHash": "62faaff4c984727f67c4ab6c83661b4ef8e9def27a79b627f522012e2da228ce" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.080Z" + "updatedAt": "2025-12-04T20:16:57.825Z", + "postProcessHash": "9929b05497285247412e109396e4d54da0986b005a4f88dde4716550dd61579a" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:13.075Z" + "updatedAt": "2025-12-04T20:16:57.823Z", + "postProcessHash": "4f50e8db171c009360ebf4bb380b2c1b8a5d659a005793dfb08e17ee17a56aa9" } } }, "cfd6efb64f516235ee2ecb43e9da90a4a4f49b69cd47dbfe06c9e1586fb606bd": { "dc206b93eb4f37283d194fc3cd04163bee67e631f232560183ec516accced4b0": { "jp": { - "updatedAt": "2025-12-02T22:57:13.119Z" + "updatedAt": "2025-12-04T20:16:57.828Z", + "postProcessHash": "0e9024ce0229e303bf791bf1ea8a090edf82d926f01815cbac846000236178bc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.177Z" + "updatedAt": "2025-12-04T20:16:57.805Z", + "postProcessHash": "5f8e52765be279706e3ff3d3b7ee2d9cf59a7b604218491346cefc11d1adc36c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.117Z" + "updatedAt": "2025-12-04T20:16:57.827Z", + "postProcessHash": "25623d0e671c3c24282497703fd7e6ddcc5e27601b4d20662a26e958d8bea571" } } }, "daf8b3e4dde89158cbc831962f60de0ec14cecabcbd44a418f78eb071c12b0c4": { "436bd3437c6e83fc88999652218e47ef4afe3bd262aa9052fd9fbf8900aa176f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.187Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "f89763be70e530d88318512773aaa3a63e8cfd3bbd776e7638f3a88694f5a774" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.183Z" + "updatedAt": "2025-12-04T20:16:57.808Z", + "postProcessHash": "effa2ac764ea8fd6d51144743b787e4f72c4f6ba9be2abfa7a5d81f8469aa70d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.182Z" + "updatedAt": "2025-12-04T20:16:57.807Z", + "postProcessHash": "75440acf4ce7ec49bc4e4d50b86b7c27c932f7c15e7dfc59fef4e6738421ca14" } } }, "df45da7290d6edcd7b6391995f5058013634a6732cc0faaa6bd01d42b9804678": { "b184369e5f189b858945955301721885510add73fe070525f5c066569add5a01": { "jp": { - "updatedAt": "2025-12-02T22:57:13.103Z" + "updatedAt": "2025-12-04T20:16:57.820Z", + "postProcessHash": "4fc25542d773c3e8e6a87b485d1e5e66b11fc14704efb6b9df440880381894bc" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.105Z" + "updatedAt": "2025-12-04T20:16:57.820Z", + "postProcessHash": 
"da65249b659065a39862d8194ed89186500eb5355fcc5b3270132db04da72ddb" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.184Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "ce87a037fcd3cbf50049df567b5c60d071905f8ee1edd1ec1ff96635b48f24c4" } } }, "e303e41ebcb2d5160248ecceb8943f82399ebc3323390c33a1d6a724c28354fd": { "28a231f853bc9e6425c97ca1c14dcd50898db661a90b51a9e9ef2aaf5c7c2f43": { "jp": { - "updatedAt": "2025-12-02T22:57:13.074Z" + "updatedAt": "2025-12-04T20:16:57.822Z", + "postProcessHash": "7dcfa70c80e70d78a9f25052fac77bcb74b4173796a6db3e37b34d7ec0735f2c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.072Z" + "updatedAt": "2025-12-04T20:16:57.813Z", + "postProcessHash": "fd093a380e67a6a7a163d3cbaec15bb33f3ed3db815b21d009d31a5b3281b98e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.187Z" + "updatedAt": "2025-12-04T20:16:57.811Z", + "postProcessHash": "a4a7f94bfc88648400f0cad49bf80d6a58e589301666f5e73a836da0db30f80c" } } }, "f1754d0c92d25ed65027ccc750febdcca2e7101c72a0eece6697b959d9971621": { "d2cbc57bddda71b0ca36a00fdc52702ffaecf753190fb6095d4a92fca38701f1": { "jp": { - "updatedAt": "2025-12-02T22:57:45.176Z" + "updatedAt": "2025-12-04T20:16:57.804Z", + "postProcessHash": "8fe51ce28fd7b63c809f0c85fe65f2c9a918f7e5218c8c113cfd32c29f7b7797" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.112Z" + "updatedAt": "2025-12-04T20:16:57.825Z", + "postProcessHash": "cdc116f2781ce49a5beb20c7096091a53044bc9df24df147f3efed636357025e" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.119Z" + "updatedAt": "2025-12-04T20:16:57.828Z", + "postProcessHash": "3e1d0c8a4d52dcc2fa28c7fba4adbcf2d1fa2e18d8ff985a42661d35a2ea26c3" } } }, "ff2e4c3baefa9017265684effd06b1ae64d9d7d79efa83110c92a11de95d2c62": { "7e68dd457179debb6b3b8c9690002e92f3cfcc5539913ccfbd1d0632617d6548": { "jp": { - "updatedAt": "2025-12-02T22:57:45.179Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "670638f0db36abcc26e40752d17cd0fc06808867b51d7c21268141107c12ba75" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.179Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "7b1ab48913e7836765f80eb6197b420128724839e375281c593a06ec4e41adbe" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.179Z" + "updatedAt": "2025-12-04T20:16:57.806Z", + "postProcessHash": "98fdc5cc9cf4cd68d69372c9e94b2e1bb9f7450a4d7b170ab1e2ce56400caf18" } } }, "10b704f16a650f1802b52736d2c823bd454d8b3dabb76ac91bdcc408b62420cb": { "2d4e7acb59df283f228e25658e527a973db16f341efce41e1ce84944cffa1fae": { "jp": { - "updatedAt": "2025-12-02T22:57:13.135Z" + "updatedAt": "2025-12-04T20:16:57.838Z", + "postProcessHash": "ec5a388a9a1019eebe54bcfa4362a37332e3118d38e5e531aad97124e7b363c2" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.141Z" + "updatedAt": "2025-12-04T20:16:57.840Z", + "postProcessHash": "e357ad302cc375a5de742a47c67c9a544efba2a5cb5ef04024094598cabe6cd1" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.137Z" + "updatedAt": "2025-12-04T20:16:57.838Z", + "postProcessHash": "b2ebf45cdc5c0448b913c2d846fe223ef444bf10eb3d63a8ea326e1e4f9917c4" } } }, "1e8eecebd2a4e411fc3037074c79ba054debc70b7a76bf53100577ec14359aee": { "5e448cd743d25dd9d490161805e048c3c2f4696c9f46b52a466a1bba220a5eae": { "jp": { - "updatedAt": "2025-12-02T22:57:13.137Z" + "updatedAt": "2025-12-04T20:16:57.839Z", + "postProcessHash": "067c9037555a589aae2754e4d4d2945c352e03ce2eecc76d470b4cf05ac2a174" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.128Z" + "updatedAt": "2025-12-04T20:16:57.836Z", + "postProcessHash": "c4ec7224a06aa1c3a774c04b82dd773327b92cf3266dfbabf213510ef92edc9e" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.127Z" + "updatedAt": "2025-12-04T20:16:57.835Z", + "postProcessHash": "2f03f5e9859788c62eff0d8e7cc79cd12d1b5904eff0df739a65aa785b7dd1d5" } } }, "3e8e050e4d3fc2dc532df4dd8556aae0bea35f5ab73c2aade8efe957930a412a": { "e8f4b7568afc6590d5203c133ee8873acbea759acf50b34794af4e2cd6b43ad1": { "jp": { - "updatedAt": "2025-12-02T22:57:13.131Z" + "updatedAt": "2025-12-04T20:16:57.837Z", + "postProcessHash": 
"26fcdf44abcb4ff5dca61fbc132863181fb5f33b8572c7f112bbabe38ab93590" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.133Z" + "updatedAt": "2025-12-04T20:16:57.837Z", + "postProcessHash": "08b2f1347140478e1a3c2c8cf97de527be30d258e273c19b210fdfb517d932ac" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.129Z" + "updatedAt": "2025-12-04T20:16:57.836Z", + "postProcessHash": "b51fac697a79212d4e245ae4680e4347503944d65a611c3e01a9d45ea323b09f" } } }, "47bec243b816f1aff8d7f27262d59edcdc28cb3ec78a655071e9178290bb0578": { "880617a38544a545b4906c62f9009009c13a7ff3ccc2a60fe2e475bb26b6f55c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.098Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "b8734adeb424423881ca71a7522caec8fc46f242cb824c950b259474b686f36c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.088Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "ff8f8b3cc5a0a4f5e8fcd2ebceaab1481082f6868bd2d2cf699989a65221eb98" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.087Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "5facfb97e8ad27ef14877a9bdb2ae2422a9cd04ec48af2b5a60aeb2b7e0cdd55" } } }, "48ff5e21581a18794244e74d86a13a93c0401d4d23c46f267ead336c36e91cce": { "42db135883af584da69bdb891c2f149df97603eb1cabc3853355aeccb9eef199": { "zh": { - "updatedAt": "2025-12-02T22:57:13.141Z" + "updatedAt": "2025-12-04T20:16:57.840Z", + "postProcessHash": "e4f5bf58628c842da49578fb49824877dcad388e8b9e3da0ac3f365ea73c5933" }, "jp": { - "updatedAt": "2025-12-02T22:57:13.139Z" + "updatedAt": "2025-12-04T20:16:57.839Z", + "postProcessHash": "2ae090a569bd03ad7ccbc3f9ba3d06e6bbe9c035529a2743caebdbb375954606" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.142Z" + "updatedAt": "2025-12-04T20:16:57.840Z", + "postProcessHash": "949cdd4353ae80c9bf1c3538594afe6f48c2f268ba969c369f1b7e61b5356ba7" } } }, "4b0ee48c4cbb9020c49cc55e309e9d7f69e89a9ed3a55e9c47bc013ae4ef6d56": { "2ed3bcd79fd5d4e72d74ac905059dc5e77bee95124595bde24fabd5f207ff65d": { "jp": { - "updatedAt": 
"2025-12-02T22:57:13.131Z" + "updatedAt": "2025-12-04T20:16:57.836Z", + "postProcessHash": "2a726d7c5191145fd8dfb2770956b646271368f9f3863b1ac31a030ed9222c43" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.135Z" + "updatedAt": "2025-12-04T20:16:57.838Z", + "postProcessHash": "d15b845bdc5c9b6926f7d8d538bc67f315048a8c2c6b294bb97f9a2f60449ff7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.125Z" + "updatedAt": "2025-12-04T20:16:57.834Z", + "postProcessHash": "26e9e5adcb4e725dcf04c98f07e8a1974512d7eb462e2f98546dcc5cbee063d2" } } }, "58026ac4a257b8fe268cb77f7d9da3eab8cee812a5e1d5152bab8b3250885ea9": { "75ab9ab8699432a23f95f427a4d59951ffca9690508f2d181e017be2846fba14": { "jp": { - "updatedAt": "2025-12-02T22:57:13.129Z" + "updatedAt": "2025-12-04T20:16:57.836Z", + "postProcessHash": "a40fbf0ca91f87e2703d439f2cfe716b76265b771d6ff4c96d65fda3890349eb" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.126Z" + "updatedAt": "2025-12-04T20:16:57.835Z", + "postProcessHash": "3b7701fd6912750b27d3a3227ee0167937c1b453014a0fe5966521e86de235fa" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.130Z" + "updatedAt": "2025-12-04T20:16:57.836Z", + "postProcessHash": "0148b882b720db288b5e8f56c98a469e4918505cab8f844f690e4708ba019919" } } }, "5879b7ee9c3de048a317f9526d9961edba8038230a7d5244320ca051c3377577": { "0a90ec2dd8b2f3498aaafcb347dfa3cda2f9f6da12d64b79f5d5404b53325b70": { "jp": { - "updatedAt": "2025-12-02T22:57:13.118Z" + "updatedAt": "2025-12-04T20:16:57.828Z", + "postProcessHash": "fc0cca9610a48e22430985f36838ba2a2be0ec4fa6e3d01beaa783fc98f625e2" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.113Z" + "updatedAt": "2025-12-04T20:16:57.826Z", + "postProcessHash": "adf11be6a82f43988148ce7fba598460e6dd07972bad2fa5216d33d5418fce78" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.114Z" + "updatedAt": "2025-12-04T20:16:57.826Z", + "postProcessHash": "327490e0e16a9e73e955ecb7ee16d45f23f4e79895bb9c2c16aefbcf261c12dd" } } }, "6381d5f736372e0e12418c5b0941665dfa5912b8121475ef968a4b5174f7afda": { 
"ca830a516bc4a6a4064bd19e68294d34a903114ae0c72112077306844ab37161": { "jp": { - "updatedAt": "2025-12-02T22:57:13.088Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "d6bcafe292aace49e709b9d6e06fbeddabdae5b658cbff05351edda634d94b31" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.087Z" + "updatedAt": "2025-12-04T20:16:57.805Z", + "postProcessHash": "63c59dfe378147d0e7e475aaba4fce312ab0c8f2c0762bf6f80bc5d0556d129c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.086Z" + "updatedAt": "2025-12-04T20:16:57.805Z", + "postProcessHash": "2aa6e46d71d25e8e0b3c4838c5d454bbc854031d89d307a3956155a1af049a95" } } }, "6391f6957c5f75a61373810ad0f0c8f36e0c6ab5b4e5a0f3c373ec2ec25c7f10": { "70a6df8beb04de853a1e2f9d42065e9eafda493219744deb6b08634115f9a498": { "jp": { - "updatedAt": "2025-12-02T22:57:13.126Z" + "updatedAt": "2025-12-04T20:16:57.835Z", + "postProcessHash": "f93083cfe7d983e4958818138035052e14445b13a21a105193c9ce76133f6946" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.134Z" + "updatedAt": "2025-12-04T20:16:57.837Z", + "postProcessHash": "95b875741bc695b9cc0c7e9c45a4e61f6dd8eb626630fba5c597ad5e5a86c062" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.129Z" + "updatedAt": "2025-12-04T20:16:57.836Z", + "postProcessHash": "2d4cacc8f6d3fe525f0028583c5c45c596362142e1d919868fd5a97db2cf3e93" } } }, "65aa83e28c6b450bc0daadd14828a7677fb27a998ea9f59faacc7187462718e2": { "3c0cab0fe63f1d762905d3d204e44dff7666b23009b55e1447c9939e7032e82c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.118Z" + "updatedAt": "2025-12-04T20:16:57.828Z", + "postProcessHash": "a72aaaacb836268ddaf282e21d0947d1131493dd98efc0f6f548d86517733b18" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.115Z" + "updatedAt": "2025-12-04T20:16:57.827Z", + "postProcessHash": "d39b02e59a6961396019b601f4605be7b9ffd562206db0628317c9219d0b6fa8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.178Z" + "updatedAt": "2025-12-04T20:16:57.805Z", + "postProcessHash": 
"635f3d5f0352a5088770352f2fb1eeb2f51d9a5d4d0796752b6c7430673d3658" } } }, "70c04f43190f497d5a2f8677cdc4ca3f609afb464cf98a219e9b600b7d989cf6": { "59c021fe8605f9f4ff5a62d7b51c4f5a7a05acc380d02368ad906c909dd5fa17": { "jp": { - "updatedAt": "2025-12-02T22:57:13.107Z" + "updatedAt": "2025-12-04T20:16:57.821Z", + "postProcessHash": "19bda45cea88ea318f6a214db431ab42c076830fe125ec15f6993508e5f7c856" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.108Z" + "updatedAt": "2025-12-04T20:16:57.822Z", + "postProcessHash": "cdcf95bf8179c11d24c6ab57f00d681901ee2010fa6d73548b9e22c45ae656d3" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.109Z" + "updatedAt": "2025-12-04T20:16:57.822Z", + "postProcessHash": "f896dd90f255977f242c6f50f21867c2b782b2f12be0cde2bbc7504d1ffecec2" } } }, "7ce6270ebd8e598a5ae3c7e63c99571a865d9289e493233222d36600b8ce255b": { "56a7fab051640f56124193c10c43bab0f0b30eb6b3b43860f813e4335dc69d61": { "jp": { - "updatedAt": "2025-12-02T22:57:13.116Z" + "updatedAt": "2025-12-04T20:16:57.827Z", + "postProcessHash": "b1214e4ed0daa0331b4992abd122ab53f4dffca0be63a120bee17e6b9b4b780d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.114Z" + "updatedAt": "2025-12-04T20:16:57.826Z", + "postProcessHash": "44e0bd8228616bc81a461e2592cc02182a8c4967fcb941ef3b1c6d056990f5aa" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.104Z" + "updatedAt": "2025-12-04T20:16:57.820Z", + "postProcessHash": "72a8a886424ff10f3bd54b7b2cc37f433818448b40ae212c19dc85e97a870bbc" } } }, "9c1e3cb41e28946be991ff74f6b0fea3622f21ccd94c4e6553aa990de1a4f6b3": { "8fec74d1546ec055cc9bbebd756641fa7e4a28ffd600d29eaf8d88dcf521d25a": { "jp": { - "updatedAt": "2025-12-02T22:57:13.124Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "4092e6083bc9a38392e5526f5899d286d3825436aa951396b1b837286e147af0" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.100Z" + "updatedAt": "2025-12-04T20:16:57.838Z", + "postProcessHash": "9a43e4279d456a9f474335be7b6be73c830efa0a7950104cc53ca16e2b1d0b9e" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:13.086Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "85212af18b894e6d1ed5de84b9f992aa9243e19668d8eb029cabc206ee8e8bfb" } } }, "a82c339e6ec19dbf4bf88470d923d48a3cc71cf65c7bae8180edcebcbdffedf7": { "82e1205914218a950a532221e194e1c9da469a4477d36097b83d2a9c2fab0a25": { "jp": { - "updatedAt": "2025-12-02T22:57:13.089Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "2caa3c15e8e361a2bf4ef0ded4ddf1a1907e4f489d0be35848ab749d83ada28e" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.089Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "3baeb332d51470db6349cabf483f54a909081cf0be7c0b4902d3bd3f2337908f" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.096Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "32fe8184cbb4c3c0f151d03a771285778215b54c64be643ffb9f3e691a094086" } } }, "ab7133a925d3667ab21eedcaa7493b04d2a7453fa0b3dd6c1545ec18333f6c93": { "3cd87edf3b014d3bf39e15bb926affe5a7484f6efe0143fd80de32aa3bf31d8a": { "jp": { - "updatedAt": "2025-12-02T22:57:13.124Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "693ffd2b556c6592712d04045aaeff85126a4a6801a8eb0e61ede5c041d4cb2a" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.126Z" + "updatedAt": "2025-12-04T20:16:57.834Z", + "postProcessHash": "b40a4f317adb1b2f90e5954fce0afdf284d8d368241d4ae7cdf4dc38aac12fb6" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.125Z" + "updatedAt": "2025-12-04T20:16:57.818Z", + "postProcessHash": "c04102c60ea4c4729cca3f1eecb0a8e340ec8699ecab28f3d6b8eb8be0e0a04f" } } }, "abe38b651cd9f44a9de790429c92f0c07d5d279e5dae34af1329f362738d3a6a": { "0700f00685f173628dfa175ef2fa960a245c5094b60de40155456bae0cf0bece": { "jp": { - "updatedAt": "2025-12-02T22:57:13.139Z" + "updatedAt": "2025-12-04T20:16:57.839Z", + "postProcessHash": "0b1c17f95bbfe2fc00f19e3d6a88cda33ed6166a6a8ff707cf6ed09902f8a2f2" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.136Z" + "updatedAt": "2025-12-04T20:16:57.838Z", + "postProcessHash": 
"18b6fdbc507a326e594eea7c54d3b8c6d209fda9ba841699c465ac1f3d96cccf" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.130Z" + "updatedAt": "2025-12-04T20:16:57.836Z", + "postProcessHash": "440d8a4a07590b06106c27b61eab79600d31c82795448b584452b4099de8ed50" } } }, "b0af6145fc6e254fe1ec86adc9d2e005a2c78ca57a92cfbbcb204f22d2b0b206": { "ae6b07939de76cbcba1cb55d37c6d5d3944edcd60cd443a0ae6aad40a42ce5ae": { "jp": { - "updatedAt": "2025-12-02T22:57:13.086Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "04c3e9ffc178e33ceee00252604edd871a30da1e7c972c86659e60f1a3f4fa7c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.101Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "af2ed936e67732758da49f156f53eee218a24f90349f8102923fe0b992e56788" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.101Z" + "updatedAt": "2025-12-04T20:16:57.819Z", + "postProcessHash": "37cd7772aaef74a9505377ecb3857f3868e49bd090167703d69bda624ab68866" } } }, "ced28404e4ce6c34312f58e0fa21dc44dc32726f8881c1adb6ed189087c1b289": { "946529a7ef15a484b25d74b9a9f179b04a186b82780a2ea1059020ee8785a2e4": { "jp": { - "updatedAt": "2025-12-02T22:57:13.106Z" + "updatedAt": "2025-12-04T20:16:57.821Z", + "postProcessHash": "908fffad4e6a30e48d2c3ca0814549cbfec99d6e478d2a9011a891d0286277b8" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.105Z" + "updatedAt": "2025-12-04T20:16:57.820Z", + "postProcessHash": "2afec09a3065d848d506e29de76656f02acbb3e3db1d88712a9c1e6e795dccaa" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.106Z" + "updatedAt": "2025-12-04T20:16:57.821Z", + "postProcessHash": "b2076b1f65034c86c05c85790e2096f9eab1754783a69b11727bc1765286ae3f" } } }, "dd5f0d309844443578b1e477b78c685d87f106d689eab41fab33f12709affeef": { "d85b73cbceb154602514bc5dd5ccb07827a65d84bacf59d65c5ddc95c14947c5": { "jp": { - "updatedAt": "2025-12-02T22:57:13.145Z" + "updatedAt": "2025-12-04T20:16:57.842Z", + "postProcessHash": "6cdc869bd01efb1e22a34cf1fe454486875574c6a5039c0a12ab6eec29112fc1" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.143Z" + "updatedAt": "2025-12-04T20:16:57.841Z", + "postProcessHash": "b5d6cbbef3f4a762cbda9f0e244fd2790a2cae46ae927cba7f02bd29ee70a9a7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.145Z" + "updatedAt": "2025-12-04T20:16:57.842Z", + "postProcessHash": "ba5e103e708d614f7daec7ae50494fbd1ec55d7ee2f87cb0a81a61722fb8c4de" } } }, "e03641b78328c61b637195e74814fe2a13a4f8b55b01fc7b32ac725dd77f1098": { "d7e329d38854c95abf0c4ec667157d6c9e812a6ee76245d01dba66336ccd0ee2": { "jp": { - "updatedAt": "2025-12-02T22:57:13.136Z" + "updatedAt": "2025-12-04T20:16:57.838Z", + "postProcessHash": "8db18d88a5218de2f298fae5c7bd37b7d0f1e340c9d67d14068e0ed22eb24e5c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.124Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "e9f407b390581178141036ed7eaade9d9b5a4890f9a9889a9afe91696f7f570d" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.128Z" + "updatedAt": "2025-12-04T20:16:57.835Z", + "postProcessHash": "54394a69a77b3ddc3f45bbc279725575f72e2a8c094013e4d835f12fae23f3c4" } } }, "e1f66fca49c6ff453d4e8a35fdefe650bc1596acc41c176c3c186db3c6b32dcf": { "a953eb312c126bbe30b57606749cd07b7c2b0214177b48b7f6c98c70a8a245ab": { "jp": { - "updatedAt": "2025-12-02T22:57:13.138Z" + "updatedAt": "2025-12-04T20:16:57.839Z", + "postProcessHash": "d1291f80b57d183dee13751337046e3b67b98db7f91ebe3936192c971024b50c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.133Z" + "updatedAt": "2025-12-04T20:16:57.837Z", + "postProcessHash": "7c23bde16b3be1b47398a0f22a36c4f9faee797260051b8f7bda022de73f5c56" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.142Z" + "updatedAt": "2025-12-04T20:16:57.840Z", + "postProcessHash": "243056d8d550192c5f744a2a1443c15c74ecc6db02b6a823e1bf13b97f4eccac" } } }, "028aa3b50c80d12c1dff7886165e9713acd5da0e4c292ec8d74a396e6acb2825": { "1ba8e423cea5af1505e244428a4e315c1ec5b32bcf1289058189844c5da6dc2c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.135Z" + "updatedAt": "2025-12-04T20:16:57.838Z", + "postProcessHash": 
"1dc546cf019aa54cafef2f026aacaba4e919710066d77e70553828a4cb4b6599" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.136Z" + "updatedAt": "2025-12-04T20:16:57.838Z", + "postProcessHash": "52bfc9ffc4cc464436115db8d899d596e78b2225b56503d730b1c60ebeed4d16" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.133Z" + "updatedAt": "2025-12-04T20:16:57.837Z", + "postProcessHash": "0f1238f6dc917b0b31d83c0c5161200b664d3f21c661f6eb5013940aac3abb97" } } }, "0ae49380ec7f5d307e31d7b631f7f0bf275d679b03f17eb67c5359b37b5242f5": { "f8739620d7524e796b898c8c185a92bf25c2ecbf9cc3893754ede05bce45736b": { "jp": { - "updatedAt": "2025-12-02T22:57:13.123Z" + "updatedAt": "2025-12-04T20:16:57.843Z", + "postProcessHash": "8ebf4365b33ce841e5b1f9ca49a4dda5927bb8f7c63e6f1c1773e3487a8f33ee" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.085Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "525ef6ab46fe1e22493861f8be1a9b879731dfab81b51019d716baf63aeca1e9" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.084Z" + "updatedAt": "2025-12-04T20:16:57.816Z", + "postProcessHash": "b0fb2a22a9181d90154b1f93dcc49226a2da664f92cde24cad39544d8cb83330" } } }, "15fced5932ede7e35f56539b143eb9b8d0d01a97412450e147ef43084abe420c": { "ec90df838c140604af32f15594fffcd4af40335ecac6a833f13e0158156b0cbc": { "jp": { - "updatedAt": "2025-12-02T22:57:13.141Z" + "updatedAt": "2025-12-04T20:16:57.840Z", + "postProcessHash": "797aa66fa55398cf84a49805d5c84894fd0f93e29214b8e9cc46fa79cfb29d12" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.131Z" + "updatedAt": "2025-12-04T20:16:57.836Z", + "postProcessHash": "66ded0a2c4647ff3b715e79fd87df9f658827b2b9aaf7da37675a486fead569c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.132Z" + "updatedAt": "2025-12-04T20:16:57.837Z", + "postProcessHash": "82f962d4df3eecd726b8d00ee3924b1a666aaf27f6cb5a2138e48dea4f5f50b4" } } }, "16db9b76d16ef49e77f68158117027a4829a5968943ae93a509257b7c447f23b": { "04685109a89dab0b5bb34aa000e61426caa176d6790eefce0141144402762ae5": { "jp": { - "updatedAt": 
"2025-12-02T22:57:13.175Z" + "updatedAt": "2025-12-04T20:16:57.839Z", + "postProcessHash": "3aa18332b4a56316ffafc563dcfdb3c345a240171708860e261c2a79788d4761" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.173Z" + "updatedAt": "2025-12-04T20:16:57.835Z", + "postProcessHash": "a9866a0e023d0994af53532dd9d69b6f24aac826ef76aa0e6c4373fa115f0c7d" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.175Z" + "updatedAt": "2025-12-04T20:16:57.840Z", + "postProcessHash": "25e891ff56541fcac7400581db3bc58813cbb2600f517cd24b0ccd102589734f" } } }, "23eb3656e923d758ff491460d9d1bbec7009131392de09276848be0db41fd269": { "3625b1be463613c8fb56424fd4d91f2d85ae950ebd8adce02c7683e4fd11be26": { "jp": { - "updatedAt": "2025-12-02T22:57:13.177Z" + "updatedAt": "2025-12-04T20:16:57.842Z", + "postProcessHash": "9a9204413e431de603ca51b5d454cbac824143dcc61f7ef1b9630a45127cfc44" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.178Z" + "updatedAt": "2025-12-04T20:16:57.842Z", + "postProcessHash": "46c4b6b233b61d9e359449770bd38bce8932e6d461a5122d892f61ae8af29751" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.177Z" + "updatedAt": "2025-12-04T20:16:57.841Z", + "postProcessHash": "46842ab88b260ac933fb10cae9c98c723d1e91e6b557d5d18e17ba6f8d24a9f1" } } }, "2f2ef25f504a5d8ae76cc6b6b38d72e25aa06fb601145bf8c4555defd3b22c9c": { "3045e21be62572632384525c8e68ac94c74ae489c9d3787b9b86c295740ce2e0": { "jp": { - "updatedAt": "2025-12-02T22:57:13.158Z" + "updatedAt": "2025-12-04T20:16:57.832Z", + "postProcessHash": "2a41f968fde03f86b608add131e0a268e01cf72e24feb92a075445cc584bbef4" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.149Z" + "updatedAt": "2025-12-04T20:16:57.856Z", + "postProcessHash": "f3778954fa3dd24ba49a88880689a4877ceb35c67c20af208ce66bad79dfb266" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.150Z" + "updatedAt": "2025-12-04T20:16:57.830Z", + "postProcessHash": "a1220828e984a7844acc83f320b8edbabf7528a5eef95a89acaa487cf2aac575" } } }, "30adceead0e8f58341843c20ba7a1cfc58638b613d0457a74d610123f740dbae": { 
"e6bcf77b5129d316d4e7eeba39c108e94d974c9844395d380a2ef4f6b5f57283": { "jp": { - "updatedAt": "2025-12-02T22:57:13.176Z" + "updatedAt": "2025-12-04T20:16:57.841Z", + "postProcessHash": "6e85204fde0f00f0749a40dc846cacb681fc8609b8ec314623d47b8feaa0581e" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.177Z" + "updatedAt": "2025-12-04T20:16:57.841Z", + "postProcessHash": "02e5291007f6b55b99e18f619f7a8218ca50fbdcf98e60b88eced5e6bb543488" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.176Z" + "updatedAt": "2025-12-04T20:16:57.841Z", + "postProcessHash": "adc71062acad530bb66571cb5e117135da00f9d0ad0b62c8373835be3a1abf24" } } }, "32d271131b76c30bee10004cc36afd1cc48e48b098944d731a875840a3e1520b": { "483a6ba5cfe7e35e8bd7361dfddd53f126ccf034f9f7e6b101dfc108419b0192": { "jp": { - "updatedAt": "2025-12-02T22:57:13.155Z" + "updatedAt": "2025-12-04T20:16:57.857Z", + "postProcessHash": "833a9c51d11609a18d659e163cb8582e7f4eef0ee53d3aaa558906da3174e4cd" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.147Z" + "updatedAt": "2025-12-04T20:16:57.856Z", + "postProcessHash": "7cc743ca97871391aa370332524b3244ce1bbcd767689f566c76b15fa979923f" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.153Z" + "updatedAt": "2025-12-04T20:16:57.830Z", + "postProcessHash": "719fc23da0294be92fd199c239a16acbb21c3b0254928f58ad6d8dfabbd3525a" } } }, "384bbc8a5c6f8f4fd3947610412c719d2877f712b2afbd35874807dc5bf37b5d": { "56a53674a355d521b64bc7d05698ba4051acdbeaca6a3c46a2fda8b450c719e9": { "jp": { - "updatedAt": "2025-12-02T22:57:13.084Z" + "updatedAt": "2025-12-04T20:16:57.816Z", + "postProcessHash": "6ab23281dd6bf07489b397eb2efc86141ffab20de0d7aab6b40c9f387bf669f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.085Z" + "updatedAt": "2025-12-04T20:16:57.856Z", + "postProcessHash": "e72433aa6643fb7640b533e1b45179d129b3bcb371ce68dd11c9c07183f323c9" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.085Z" + "updatedAt": "2025-12-04T20:16:57.834Z", + "postProcessHash": 
"c1c510663369b56818fd7c6f4dd46321e8fbdf804476964d30e1ae10a6ab7505" } } }, "50e45c22e7e591fcbe4d61812d7d7a9d9626a7f94961516e9f2b08e27d3c36ca": { "4159f227f4e6ff08833e89755d03d3cec73f09d3e9171623e581edcd063d2833": { "jp": { - "updatedAt": "2025-12-02T22:57:13.083Z" + "updatedAt": "2025-12-04T20:16:57.815Z", + "postProcessHash": "daeea312ef47ea56cf97d61a50f6675edb0a5e03d42a588be832bf722f519bd2" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.140Z" + "updatedAt": "2025-12-04T20:16:57.840Z", + "postProcessHash": "747cb3fa4a8d9767a17f5127e46bdc89bc3c72607a21c44380ae1c5c62b73c30" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.143Z" + "updatedAt": "2025-12-04T20:16:57.841Z", + "postProcessHash": "00c681ad796b7676775ed4653a1eb5c093fedeeae9b99345992a6026a2771a6c" } } }, "8b151a1a26b18205c264eb291e0e0442ddc0a8d5f8b81948e11a1cdd09758259": { "10f61a5bfa1bfc18d47b09dfd27319b441a25e084aea415d11bbbcb64e2a6c0c": { "ru": { - "updatedAt": "2025-12-02T22:57:13.151Z" + "updatedAt": "2025-12-04T20:16:57.830Z", + "postProcessHash": "ae095380b3cce693b87a385fd99416d4cd0156cdd413a0a10656861e01010960" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.121Z" + "updatedAt": "2025-12-04T20:16:57.815Z", + "postProcessHash": "af24d4d203068acd33f91fccc37eff9c618e2effb8e8e9604536f11fd3e4e9e6" }, "jp": { - "updatedAt": "2025-12-02T22:57:13.152Z" + "updatedAt": "2025-12-04T20:16:57.830Z", + "postProcessHash": "f1ab57bc7f20318d39d216da4fafac6d5235ab4ced6057f7dfa42c510274e14d" } } }, "b2f66c32f59c426c83078d6b24b7186f54172727a996adce08872051de770134": { "0c794fe311b38eedc683c36f0c611835c85822c536fff3e7f51e45a39493a848": { "jp": { - "updatedAt": "2025-12-02T22:57:13.179Z" + "updatedAt": "2025-12-04T20:16:57.843Z", + "postProcessHash": "839a4b3205cf958d315e163b29fbc6b1d8ea621c8431a9c22d437c3f49c15fc3" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.180Z" + "updatedAt": "2025-12-04T20:16:57.855Z", + "postProcessHash": "0b1de2d6f41539ea7f85e66dfc11e27a1b74db6afb5a71186f330a2929841d1f" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:13.178Z" + "updatedAt": "2025-12-04T20:16:57.842Z", + "postProcessHash": "179527139848e8f24e4ad556ee4a29c07f6acb5cd72100acd114b70776f63173" } } }, "b3581e0b617b1029663a70e779bab6aabd1b97807b23afe26b42a5bb82a2618a": { "38f348198e164923854caf2d5fb911a3b03dff8e5f682f59a476694465af9bd5": { "jp": { - "updatedAt": "2025-12-02T22:57:13.159Z" + "updatedAt": "2025-12-04T20:16:57.832Z", + "postProcessHash": "445b75979d28df84d2972a9a6d57713f9cd3372c1c21ad38d2e47e97d1d0a203" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.156Z" + "updatedAt": "2025-12-04T20:16:57.858Z", + "postProcessHash": "e12372c5d9a197996f7a9e1ddb3a1b9d321aee82f0df2b8c1049979eac5e50aa" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.158Z" + "updatedAt": "2025-12-04T20:16:57.832Z", + "postProcessHash": "925b0c50f902a4ec0b3d1d58fa46300b814a56236934ec1ac966302528c50369" } } }, "b54c21849674b2f133d9a7587a54bf895f7b1a8384d344c53348c14c442b2644": { "ddce74d3907de04d0a9af32787564ecd6b5cba8d6c36159e1e227746999b1540": { "jp": { - "updatedAt": "2025-12-02T22:57:13.127Z" + "updatedAt": "2025-12-04T20:16:57.835Z", + "postProcessHash": "9567c10778d97bd9efb4e46c3079c9b5dad2b8a030e30a935b89af10e1adb674" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.139Z" + "updatedAt": "2025-12-04T20:16:57.839Z", + "postProcessHash": "d4643ba1e588471108496653806f1ceac51f5b4fd32ae156109dba7cba03064d" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.137Z" + "updatedAt": "2025-12-04T20:16:57.839Z", + "postProcessHash": "992fc9bd0cdc364a811b860e1815bbdc5474e705a528781093b9f09d9a21b1ca" } } }, "bf6da61b91d435b98dbe4fcfd84c30e4661211a55093b7bd5294d05df5d9018f": { "8df18a3ed0cebffed7ef2a16c2c1feed24d08b38743943e1639bf2e1e83ad9cd": { "jp": { - "updatedAt": "2025-12-02T22:57:13.146Z" + "updatedAt": "2025-12-04T20:16:57.843Z", + "postProcessHash": "9ea7f8d6c8c20c5a2dc95983972ad75693dd120c6081fbf8344ae5bf6ed8546f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.145Z" + "updatedAt": "2025-12-04T20:16:57.843Z", + "postProcessHash": 
"e7917f798ec4fb2b17b5ee34cf9e21e4d882ed1bbd7112e4a7aeb164a669c10c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.144Z" + "updatedAt": "2025-12-04T20:16:57.842Z", + "postProcessHash": "86945358d0fe7af7475872c0c29161df46893b85a51de2b04629f61ceb089c8f" } } }, "c600219b9f55bdfcea82201926bfe9e4cabf53497d2110e11a6a97d3a6de16d1": { "879e570e6a755b5436d4b4e3e5ee02f6ef2f2b1b56d5e30a0d8ad6d11079deec": { "jp": { - "updatedAt": "2025-12-02T22:57:13.156Z" + "updatedAt": "2025-12-04T20:16:57.831Z", + "postProcessHash": "388831467293f4147c75c40d7d4a767f3a6de316e243b18b5df7bdb7a7e7ae31" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.155Z" + "updatedAt": "2025-12-04T20:16:57.831Z", + "postProcessHash": "074f636982c45d05441864e0bb8f9d8b6a5bcfb7379d7682c3ce6ad365d33573" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.146Z" + "updatedAt": "2025-12-04T20:16:57.816Z", + "postProcessHash": "08deb063d39e55969d8d1286b9d836e70d0b404f6f73ee3a29b9f877d4bdf960" } } }, "d20c2004eff27206aa611fa47101376ca27b19c79a7c22fef935d90c8c7ee0b7": { "31528a8c4089ac02ac4c5cae45bfcf8375faba7dbb39d635e3082a39955f5a65": { "jp": { - "updatedAt": "2025-12-02T22:57:13.143Z" + "updatedAt": "2025-12-04T20:16:57.841Z", + "postProcessHash": "092bac32ef2276bcf182aa04e275cf9c5cd96ed39e09d6571aa6f9162ae9f7a6" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.144Z" + "updatedAt": "2025-12-04T20:16:57.841Z", + "postProcessHash": "023231102310f58f0752ab79be2181161af384799434cd9f19680e5f8f23c4d0" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.140Z" + "updatedAt": "2025-12-04T20:16:57.840Z", + "postProcessHash": "1ddbc1e222434e19c2ecd79f69b8e9577477722bbbacd697a95bfd2bbb6aa80d" } } }, "d42c8393402232b95f473bddaaa33ac9663e18e070bfb5225b9240cded76bd36": { "469a531fc6c1dbbcdaf79cbc24df46624ad5a44a7c52da48e4665690d6de2002": { "jp": { - "updatedAt": "2025-12-02T22:57:13.122Z" + "updatedAt": "2025-12-04T20:16:57.859Z", + "postProcessHash": "b2658d9a508ab0181e3c6c459a20bd226d31a12ef40cb4179ca2c86c924d9309" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.157Z" + "updatedAt": "2025-12-04T20:16:57.832Z", + "postProcessHash": "69cd30c847e81d943f0b473c6140ae7af3642ecc1a61d226bc8deabdc394f7d8" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.148Z" + "updatedAt": "2025-12-04T20:16:57.816Z", + "postProcessHash": "9575ad76e15f8fbc570eaa9fd1fa2f63ff2e9bc3aceb1fc19aa2f48fe301ebe0" } } }, "d55ab4d59e8e430728299d153babb7440fdf1524f75ae30ac017602a393f72f2": { "e946a51dbbf49a6bb72dfb7320ddc89e75e9bca19562498770b9375217a83d34": { "jp": { - "updatedAt": "2025-12-02T22:57:13.121Z" + "updatedAt": "2025-12-04T20:16:57.815Z", + "postProcessHash": "4aed084dae37b96108b1b1a9171357a95478753bedf4bc07fd4752574d075c86" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.151Z" + "updatedAt": "2025-12-04T20:16:57.830Z", + "postProcessHash": "73049f2c4787e37b1c51efcdec91d134b3c91e93bcd1b9d5b6a3ad2ae67013c2" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.149Z" + "updatedAt": "2025-12-04T20:16:57.817Z", + "postProcessHash": "7cbf89718feb0b6ea4f33c063d78763a00be2a7a265a7d336e10fe2be1f6848c" } } }, "e9e6900149061b39fd6dd6fa53d0c99f28ffac38d503ec961dd94dce5ebac808": { "aef65ce3391d03e363f980b73f3fa71276203fc5f77a1d75edec615250031f8e": { "jp": { - "updatedAt": "2025-12-02T22:57:13.138Z" + "updatedAt": "2025-12-04T20:16:57.839Z", + "postProcessHash": "19ea0796e5e1e4d20077d406d584c2d709993077f56543db5e282e487e265be4" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.132Z" + "updatedAt": "2025-12-04T20:16:57.837Z", + "postProcessHash": "5c75bb3a5ac1617d96cc105c4206a114b94dff8f04edb8e0c14e83224927c730" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.134Z" + "updatedAt": "2025-12-04T20:16:57.837Z", + "postProcessHash": "e479ef0dec8be72e1c7774e8d8d3980c8d190c02130509bd864449fc4359d88e" } } }, "f5e923aaae110b8d3ec030f52c1731f515c0ed1b9a0e41490e863bb6395bd23b": { "c81f4b30001e6233066eddc0f7a5c166b4369eee24cb505fee91004bc16f3b48": { "jp": { - "updatedAt": "2025-12-02T22:57:13.180Z" + "updatedAt": "2025-12-04T20:16:57.855Z", + "postProcessHash": 
"b7767fac7e969ae0e2462d0b7c449b035c8dda931a535c4a41194e8bda95a788" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.180Z" + "updatedAt": "2025-12-04T20:16:57.855Z", + "postProcessHash": "130bed6241843acbd316ee9c3cc264afe7afe10fbe5f9a2bc290894f19203f1e" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.179Z" + "updatedAt": "2025-12-04T20:16:57.843Z", + "postProcessHash": "fc904a42d5f92b851e7c7a7c64cb4730fde3d0cf4d1d282e8795643d52eea3aa" } } }, "1d0e04973f4a7a2726ce086465182e22cfc8de26b7036f67bf3246dcdcab5c87": { "31f058ab67c32c0251f087188700872a277440d4f0ff0bd41cdc2a390207f441": { "jp": { - "updatedAt": "2025-12-02T22:57:28.834Z" + "updatedAt": "2025-12-04T20:16:57.864Z", + "postProcessHash": "feeaa4eacc76e2e243446248328bccede723fa7e6088148ec4506da3659ecdf1" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.837Z" + "updatedAt": "2025-12-04T20:16:57.877Z", + "postProcessHash": "d7d82650cd7173611493e2613d5c2400462664b0664da08d137ca50ba5918bd4" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.836Z" + "updatedAt": "2025-12-04T20:16:57.876Z", + "postProcessHash": "1d307483c16ce70b8509ecb945e1ce32102014b64337aacdb4f823a4cee3344d" } } }, "1d411ae967753b5d27acfdc77c2f68fa873d228cea6cf769ee2c85f10b38628f": { "8c9d1bbb63ac91b1a18b930594b6d354536b4a42a4cefa28e167390053f64f41": { "jp": { - "updatedAt": "2025-12-02T22:57:45.292Z" + "updatedAt": "2025-12-04T20:16:57.986Z", + "postProcessHash": "cbe84bc7bc634a325affa06db273569bed5c8ecea5d9534afab3963ccdf58099" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.294Z" + "updatedAt": "2025-12-04T20:16:57.987Z", + "postProcessHash": "53bd37ad85ab38fac7c4b0a742137aad5ff2afa8e6e8dcf721cd66cf7deb0a97" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.289Z" + "updatedAt": "2025-12-04T20:16:57.984Z", + "postProcessHash": "282509301533f527c92fb3f52809a58101782730d3fc53a620fb819d5e70eac2" } } }, "32a2dfa24b35817a5fedbfc4895185da11ba73834f024a8c145cb60b3ee324a3": { "8f13f0e888bb91b30f7b56131bf3728f2950f55c2375b05eab6a6c9cabcab037": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.932Z" + "updatedAt": "2025-12-04T20:16:57.965Z", + "postProcessHash": "dddfae5b863f652a3db02b52444375066317b925dffed51e3a730fd909d3b624" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.295Z" + "updatedAt": "2025-12-04T20:16:57.988Z", + "postProcessHash": "65db24e8f1f902c5b4fc82af9c0021c8e90c3ae535de4cac9b10dba8fd3b9d97" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.295Z" + "updatedAt": "2025-12-04T20:16:57.987Z", + "postProcessHash": "3f53090f027053cd48fa57cabd00239d1611fd5792d44f409f7a48a870c3bd06" } } }, "34fe9aa819ffc70ef68be0505c66c5cb60f94370bfce6edd29d0ef846b1eb245": { "7ef9c6e569280d6e03a986898ccf237a939f4581319206934f40b7e910987b98": { "jp": { - "updatedAt": "2025-12-02T22:57:45.297Z" + "updatedAt": "2025-12-04T20:16:57.988Z", + "postProcessHash": "facd2130b91a0e927658674dfe90551cb72b7de9af6c09c67eea8baa4f9bd913" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.297Z" + "updatedAt": "2025-12-04T20:16:57.988Z", + "postProcessHash": "b7e47e3bdcd010f851494bc9d1f82bb009a6f743c320b0c3083e349a8b62e6b9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.298Z" + "updatedAt": "2025-12-04T20:16:57.989Z", + "postProcessHash": "974b35258c12208b262261f21223705447c42f5593de0ad8644d0b55532ba829" } } }, "5a1049606d2ddeb908a3f87e08c53c766115a2d5315cd4e891c852fa240471ed": { "4340b6e9c5ca9bb508ff61e1f7de601fd3ee092842be32670cf541dd9fe5b76c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.291Z" + "updatedAt": "2025-12-04T20:16:57.985Z", + "postProcessHash": "9b4fcd9777a63c27dbf02da2f7b40edad1c4f81f4628c5d08ab35fd8617d39d1" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.293Z" + "updatedAt": "2025-12-04T20:16:57.987Z", + "postProcessHash": "ada6b69ab8b50646671a2bc5c903a7de89e64f496cdf5db6138275e98e1a209f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.291Z" + "updatedAt": "2025-12-04T20:16:57.985Z", + "postProcessHash": "65b7063043f8dd596ce71b2e1f76f6bf94cdda6c46334462c0780323245c4fd0" } } }, "6c930d7e263cee0da201aeb82b5afa15d7a0492edd3f17b70d744502c7da16c8": { 
"2c78d1148a39342c324f60ab8fd48891049dd3af4b2e04e98d60136cac22dac8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.835Z" + "updatedAt": "2025-12-04T20:16:57.876Z", + "postProcessHash": "df3b6a46d61232202de613cd76fb25c414f87feff5949e1652dcf29cb36c5885" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.835Z" + "updatedAt": "2025-12-04T20:16:57.864Z", + "postProcessHash": "bc574dba311329752d5704578502238ab936a72460c5ec72dde612934745181b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.835Z" + "updatedAt": "2025-12-04T20:16:57.864Z", + "postProcessHash": "b6ddd50e1fd8808aec2cd503c37af600139a7a90b0ba8ec31b8e39f0f1d8b1b0" } } }, "7997000584a74b3a4893e2d952e3e74901f5c48d13d2477040f08510ce7fb94a": { "f3a543f784ce343388875d80bf6932364452e41d5c499c0fcdb6193cbc18d2ac": { "jp": { - "updatedAt": "2025-12-02T22:57:13.153Z" + "updatedAt": "2025-12-04T20:16:57.858Z", + "postProcessHash": "2345340a4803b1b3b510ea7dd2bc13e07950e29dc1739f8a2ebf7d10b10fe135" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.120Z" + "updatedAt": "2025-12-04T20:16:57.857Z", + "postProcessHash": "d4ee6c97f9fae4dad53ae5dd3cf01df5bbe11269d2d09c070758ab5f03692e54" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.159Z" + "updatedAt": "2025-12-04T20:16:57.855Z", + "postProcessHash": "2d3cc3051651652b7e3b8970c47a93204e4bbf2f821dabdcbfbca4c291ed1f51" } } }, "7aeb5a3c848c3ac6401e3621b9731a411c3ffe53b1ec386f511089c819780c4c": { "1f0a4b693ba5e0ec268fafbbe5f0a583b29cfd716f04abb61d43c5813b6ad612": { "jp": { - "updatedAt": "2025-12-02T22:57:45.290Z" + "updatedAt": "2025-12-04T20:16:57.985Z", + "postProcessHash": "65e6bc5cc8fb35caca398553725c133b41f659e859ecd2a73a0721666110d09a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.286Z" + "updatedAt": "2025-12-04T20:16:57.983Z", + "postProcessHash": "3dfe1418ec9ca42909feaa349b051d9dc6827fcc4ac4f64823ffe39814675884" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.288Z" + "updatedAt": "2025-12-04T20:16:57.984Z", + "postProcessHash": 
"ac8834bd2928cb375e7af315d432ab7dd6b3c24a69cfb4c3af55cc287e3be37c" } } }, "7af81b34b1f80a6579a084fc3f8d1ecb9f0315e228a3b01eca34abc4e963fda6": { "c20825094b802738f9e5eb45bd5ac1dadaadc926f348ad24d8c06cc4e5157994": { "jp": { - "updatedAt": "2025-12-02T22:57:45.286Z" + "updatedAt": "2025-12-04T20:16:57.983Z", + "postProcessHash": "fa4f6d64eb02fdca7ae84416c143210081874cbad432a19c0e6e096f2a6bec50" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.285Z" + "updatedAt": "2025-12-04T20:16:57.983Z", + "postProcessHash": "fe1a5a5fc5b2caa8c79017d0ef1a26047d3f9d05a1f6a916d8530be67d35fde2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.285Z" + "updatedAt": "2025-12-04T20:16:57.983Z", + "postProcessHash": "b8a7c94e68c716af693abb992be85f6926f865c19a6a9418052670d00c61384d" } } }, "83eab82a7ad67622f732d278303fd5a55d015c462467d35a81a97662bdec853e": { "2d649e303741fd66ea1aa56354d590ebd300f6ec9c2b2ef22c28c636be7a29cc": { "jp": { - "updatedAt": "2025-12-02T22:57:13.158Z" + "updatedAt": "2025-12-04T20:16:57.832Z", + "postProcessHash": "56e20a651c79062b49a50e9d06331d2622820b5433b5bbb6e7c7d62c90db3f5b" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.122Z" + "updatedAt": "2025-12-04T20:16:57.816Z", + "postProcessHash": "4b423573553eed62fabb7e9a34dfb87d800ca3a7181e247f18373e321fae6935" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.152Z" + "updatedAt": "2025-12-04T20:16:57.856Z", + "postProcessHash": "8df3afcd3831e70809b0e2bc47a1f9ab56a63409536927051a0328a684c6120d" } } }, "8aef57a5d0702946541ef4bc66a35386c47ef94c0fbc0f60abf1cf7cff964601": { "1de18ab03988e32b892f506405ca6a01d5a611302a852d3f5e7de174a37be78b": { "jp": { - "updatedAt": "2025-12-02T22:57:13.156Z" + "updatedAt": "2025-12-04T20:16:57.831Z", + "postProcessHash": "8a5b5fb037cee88794114c430668c80425a62de1efafd335474a9f0e69b1c58d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.120Z" + "updatedAt": "2025-12-04T20:16:57.857Z", + "postProcessHash": "e420ed3cb39b9fabd135a8816f518a9a768da2f173edc047a78f21914bf61ec5" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:13.160Z" + "updatedAt": "2025-12-04T20:16:57.858Z", + "postProcessHash": "82545cd051c079ad3d6d731e0553d734e28906c0f03bca8d730e3c2c8c8f7eca" } } }, "a2ec760009faa1e1eff2c135a3d4deb7afa6a079dda0c6d9f99db627647062d5": { "4f03a97491bdbb54d341d453335aff270c60976e7c3ad96cb719e9003ee5ad0c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.294Z" + "updatedAt": "2025-12-04T20:16:57.987Z", + "postProcessHash": "c3fe8dc15da469b3b690316ee0f3dac05e111e967b6be92392e1a8cce1891e35" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.286Z" + "updatedAt": "2025-12-04T20:16:57.983Z", + "postProcessHash": "2b9be65425f6fb6a6894a8cb9c986437b826a97e0c630ba76f60138fd89d22d3" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.289Z" + "updatedAt": "2025-12-04T20:16:57.984Z", + "postProcessHash": "c4f53d9adabe2759254b7751848ed5fe13bfb181dcf69b90c360a700063dbcfb" } } }, "a81ad531cd4308314f95a3bc7ee7518076cb8b225330a76bdebb309de6c07d84": { "eb1a10c317b4f12f9023e3b4899a6403eac245683d867b105338963ab1df00ca": { "jp": { - "updatedAt": "2025-12-02T22:57:28.837Z" + "updatedAt": "2025-12-04T20:16:57.877Z", + "postProcessHash": "fe4cf16635e1597f0c20aa0526a3fd5833c4ca197e6a60ad21898e04cdc12328" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.836Z" + "updatedAt": "2025-12-04T20:16:57.876Z", + "postProcessHash": "cb2fb376e0e4badce6893f4c2b8abda0bdbbda55a4b5b4f53719b919fb29f861" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.836Z" + "updatedAt": "2025-12-04T20:16:57.876Z", + "postProcessHash": "32c0daeb591f374f52690f80c6485d2aac9da4d908b1cdb118fe5df773609f91" } } }, "a8b3a4c7be16228ce7b50cb870cc58cfe39f8c34bd28a3aca5822b90b0f42830": { "f2435d45557de24d303d66a742aeff55e64e2f4b580432c1d1d9f8eaeb1f5d17": { "jp": { - "updatedAt": "2025-12-02T22:57:13.174Z" + "updatedAt": "2025-12-04T20:16:57.835Z", + "postProcessHash": "0d6d7e0d668ce04f563064673db30f6c32df98005397e269ea32ac4b3d07fe34" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.175Z" + "updatedAt": "2025-12-04T20:16:57.835Z", + "postProcessHash": 
"9b28f7b97438e6854a99f6d9e59fb91a86520c705a1aa9ccdfb1d99819e8a6b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.173Z" + "updatedAt": "2025-12-04T20:16:57.834Z", + "postProcessHash": "8378dcb5c2fd0fa57802506a14885033d1179e8f0e8a69aee4cfdf25f9a23eab" } } }, "b2dcbd4e41cb07eefcbc269f5df931324f8744a9483f6b145243bbc5673c42c1": { "5890daa9787c7983a0d917f5622f02d272e85c52daeee1444ef64b42ce8108d7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.837Z" + "updatedAt": "2025-12-04T20:16:57.877Z", + "postProcessHash": "8bf966f95f83df3353b4f4504b7d076db0d140564487961648b907622b240848" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.835Z" + "updatedAt": "2025-12-04T20:16:57.876Z", + "postProcessHash": "8d5ac4031ed12646e123652cf9180e6a5d09296d67e19869321aa68f3bd92c64" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.834Z" + "updatedAt": "2025-12-04T20:16:57.864Z", + "postProcessHash": "c95bda606288a251008f1de6b5cf48b7b6aa8992082a10e2dbb00a93f44c948d" } } }, "db411e0514092e58a10e4b885faa2126f95d2bd39dace283d1e44cbc9831e3dd": { "527580835a672b74a709bacb51a246aba1c88246216cdba2db279817225f4044": { "jp": { - "updatedAt": "2025-12-02T22:57:28.833Z" + "updatedAt": "2025-12-04T20:16:57.863Z", + "postProcessHash": "2abbecba252e94f0c55744fe25304ed6807282ffd3903da19aa1378f16f39830" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.838Z" + "updatedAt": "2025-12-04T20:16:57.877Z", + "postProcessHash": "478c4fe843d5de646fe3274986973e4b11df6da8b848453d6795199962c079f3" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.834Z" + "updatedAt": "2025-12-04T20:16:57.863Z", + "postProcessHash": "467f052dd9a64090155c457ccd46c20d7c67eee3bcdf9fce08fba4249b798462" } } }, "dc3682d31d860920c0027dc94b51e1f197c5a38ca754f403922910b9b8ba3903": { "668b968f7ffa7b6faf894697548c553b64afd08c5b62258b0eb445aab83c7d88": { "jp": { - "updatedAt": "2025-12-02T22:57:45.292Z" + "updatedAt": "2025-12-04T20:16:57.986Z", + "postProcessHash": "223f4d2ee1fe7b0d11e985284e637caab9f6023cfde4f5b6e789b896f9240b22" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.296Z" + "updatedAt": "2025-12-04T20:16:57.988Z", + "postProcessHash": "bf1f6d785fbdb04697affef21b8a1c90ac8f11cfdd3add261371ac0e2115a0bb" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.291Z" + "updatedAt": "2025-12-04T20:16:57.985Z", + "postProcessHash": "70418adf7b770a1d094cf90038c37f450b94d3df392eede922505fa9e4462c38" } } }, "e72fb86764359e026d92c8940ee6175f5febdbd710006033850bb2ad8aa43023": { "10e1df69f27be8e1de4c2159ec11f7a83395eb9a20a7b729e0fbe4c2bc8bb473": { "jp": { - "updatedAt": "2025-12-02T22:57:45.287Z" + "updatedAt": "2025-12-04T20:16:57.984Z", + "postProcessHash": "97cd79eaade532dc187f6cabcebe985d6464a405fa314376e5d4b1972609b9c5" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.287Z" + "updatedAt": "2025-12-04T20:16:57.984Z", + "postProcessHash": "33985d10f9132cd808f9b80ec91d3818bd61fb7b3f0d26ae536fe0c8294e3d2b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.292Z" + "updatedAt": "2025-12-04T20:16:57.986Z", + "postProcessHash": "9978cf30035ec8d2edfeb3e43b85314dc351f4b455fdfda21f4787ad677f90ea" } } }, "ea7e5e311ec73e96e57ec3343b5c4d7cd8d2c758deae9104dffeb15243a22097": { "a6b1a10073ba1bedb61ae0ed5088f394cf79fd30feddaa919ee25e9e0f4c991c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.296Z" + "updatedAt": "2025-12-04T20:16:57.988Z", + "postProcessHash": "917b95c03d87c84f7df491087db2acde0257f59b563b978b825ca81000d3b4e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.295Z" + "updatedAt": "2025-12-04T20:16:57.987Z", + "postProcessHash": "615fee4d31ee39e962f41b92d084178755d9b7964a1df16f5c56f98c2179e99b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.297Z" + "updatedAt": "2025-12-04T20:16:57.988Z", + "postProcessHash": "f7ff32107cb05e5339930f87c93d73cf7c840b8c44323ad7ed8b9f933c8fb898" } } }, "f46404d0d2b932ed24233530122a903e98fd0ad2e866b50bb50ad16e35006e6f": { "ce6bd20ee80f6f7df45c614920f103f5eb64699dca884aa2e9a55c8adbfcc913": { "jp": { - "updatedAt": "2025-12-02T22:57:45.293Z" + "updatedAt": "2025-12-04T20:16:57.986Z", + "postProcessHash": 
"afd7c3e617ccd4545d3f8ddfe29a733f1d93de18fb8e45726795a3bf8f60eda3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.290Z" + "updatedAt": "2025-12-04T20:16:57.985Z", + "postProcessHash": "1545c0cbf11f952e0dd4722849716292ae8319e22aa74d93e015451584fb12df" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.289Z" + "updatedAt": "2025-12-04T20:16:57.985Z", + "postProcessHash": "a05915be53793d8e461fc19905133e0e3540bbfe005d6cf0db1091a558b05a22" } } }, "f6103a7698b24fef604602086936cf148c11df516f6f84bf99b48971614c717b": { "2934cd253b5a2e39a317ce455fc2c1d9f94f60e9c0af926ce756c8e2261a0354": { "jp": { - "updatedAt": "2025-12-02T22:57:45.293Z" + "updatedAt": "2025-12-04T20:16:57.987Z", + "postProcessHash": "b488148dd618dde2959c80d89e468a7e8ef0b2b525d5f02e72336b6907d0f4f3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.287Z" + "updatedAt": "2025-12-04T20:16:57.984Z", + "postProcessHash": "92f7da0b2dca0564d02c0836aeb4ceffe3240c3ccd2a7dadd3984428b74df8e1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.288Z" + "updatedAt": "2025-12-04T20:16:57.984Z", + "postProcessHash": "6938d08752491a6eaca3e91914c910f81b1a4a12fb37bfaeb387c87d6be254ef" } } }, "05f8d1acdb9d8a92c6735e4d5dcf8080fa8ee6512cc13dbf3b840c999a094c71": { "97638cef9fdf5d6328f466c856175463ac017bac4780f1d817b5d4729a88aa08": { "jp": { - "updatedAt": "2025-12-02T22:57:28.955Z" + "updatedAt": "2025-12-04T20:16:58.005Z", + "postProcessHash": "26e3e972a78081ead6f4659b8d06349d7c613bf59e4c01daf27fb87cc7af4303" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.951Z" + "updatedAt": "2025-12-04T20:16:58.003Z", + "postProcessHash": "d39ae869b3d015a5467ae902c50734d1d590d0ee81dacb2beb15caecf68b822e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.956Z" + "updatedAt": "2025-12-04T20:16:58.005Z", + "postProcessHash": "8cd6536cf8791ee0a9479e59fadb1d0762214d2ee2832b8b40b7612c3568cd4b" } } }, "0c936deece1cfa87a5970fb553569967ce05687698de65a98ef0315477967bbd": { "a922d6b0d8e112391f7d053fc7058eb1d5659b44c4a9dfa835485d17fbead31d": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.949Z" + "updatedAt": "2025-12-04T20:16:58.002Z", + "postProcessHash": "db6b1f767b4c8f08c08ec0a6aaedc59f6a2f5b77a0d2d85190c30fc30f7b6aaa" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.281Z" + "updatedAt": "2025-12-04T20:16:57.981Z", + "postProcessHash": "4535db3daa76fb09cf88d0348eae6e0a50ad3a4ce0be94e8938fff9ab1319343" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.281Z" + "updatedAt": "2025-12-04T20:16:57.982Z", + "postProcessHash": "cfb4a5a3a53b2077a26313cbc472e7d949648ad27f0e0ee4b26d0612df691c50" } } }, "1582ff8ea3fdbeb1dad986160d1b0999795a555f6d89e98dd145b6f49dfb08eb": { "5e343ab5ab03d0e1fa46bf003992f1eb136b9a12bfad77828128edf71d3afe32": { "jp": { - "updatedAt": "2025-12-02T22:57:28.948Z" + "updatedAt": "2025-12-04T20:16:58.002Z", + "postProcessHash": "2771d0d0f9101bffac6122e94593a50ca733a6f00d64a69623445d6a17bed774" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.280Z" + "updatedAt": "2025-12-04T20:16:57.981Z", + "postProcessHash": "e494919c5bc805844ca85f11b025b851ddaf7dbac10999cc5346bcb837742d97" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.948Z" + "updatedAt": "2025-12-04T20:16:58.002Z", + "postProcessHash": "7305029317105944b6cdc328e4906951d26ad1968e8e341fa57511a297af8618" } } }, "179dbf5bb80545989b2913aca22d0861999dba14106d2380864014877de3c93b": { "114ef0735c99933d93e4c6a570fccf1ca3ef45aed471b8a4eccb902e87cb5043": { "jp": { - "updatedAt": "2025-12-02T22:57:45.283Z" + "updatedAt": "2025-12-04T20:16:57.967Z", + "postProcessHash": "b775a45ea5c6ffb9f9719c5d31c4db3d4f7cb6ee3aaaab4b9ab4e557def73152" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.934Z" + "updatedAt": "2025-12-04T20:16:57.966Z", + "postProcessHash": "d8656e4bc881c4730403917bcb3d27c610d3368c0472ec411d55015251093aaa" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.935Z" + "updatedAt": "2025-12-04T20:16:57.966Z", + "postProcessHash": "c902ab48fed3fe0735d29ab5fcd4448350902e99c7802dc07102c10903d0d931" } } }, "1dccccf586631074a6cd966272c09df3578cce225321b7df5ebc807acd0dcdfb": { 
"b435aec19ff6ecbb9d88c6d1f945636177e245c9c227442437f370098f0f3e09": { "jp": { - "updatedAt": "2025-12-02T22:57:28.968Z" + "updatedAt": "2025-12-04T20:16:58.014Z", + "postProcessHash": "6b63f1aabb032ad9be9836b988524a01647691de338dc49223effeb343351c33" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.967Z" + "updatedAt": "2025-12-04T20:16:58.014Z", + "postProcessHash": "c461526f8cf773e6470cdc1782d9a3f835f6b54eb60dde92f635ec62bbedbcf7" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.967Z" + "updatedAt": "2025-12-04T20:16:58.014Z", + "postProcessHash": "51659463f52ba348ba121690feec75bd4fea36df55618cbcf2d5d6163f49972b" } } }, "2a3e385a0edab430e986558c8176d5e5093f020848f61371fce764ff9195f165": { "b8228ee3face15f90f6ed1245de3feab742bd22410c8360b5dcc4e855e71c22d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.280Z" + "updatedAt": "2025-12-04T20:16:57.981Z", + "postProcessHash": "3374e2765c882e0e39fe977c11d8e11f032a3072e621225a41066735805825c9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.280Z" + "updatedAt": "2025-12-04T20:16:57.981Z", + "postProcessHash": "fb12e2a5e007b99b3f906c71161063caaa02f15bf5676c9b853a0e0dd13308a6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.947Z" + "updatedAt": "2025-12-04T20:16:57.982Z", + "postProcessHash": "57eb6c73b922607b3d8d0b9690a490bf60b5e0ca54e784c23af8b353892f74a3" } } }, "2bb9b38a8d5dfd619ee7e2a01589dd2c06c59b11f82f178133c39690b45125c5": { "21f979e19600cd98d3791382f305b11aed31990ab9b8c6cfdaf57719effc558d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.958Z" + "updatedAt": "2025-12-04T20:16:58.006Z", + "postProcessHash": "ef078538c51a702d5550eb855d166eeddcc09263997231e80948148fea680d96" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.956Z" + "updatedAt": "2025-12-04T20:16:58.005Z", + "postProcessHash": "142cab45305b3b01125aaa04af605bef47491fb0646e926c8d702f65aa7054ce" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.951Z" + "updatedAt": "2025-12-04T20:16:58.003Z", + "postProcessHash": 
"e9ace345d4e8998ecb133ff71beadc90a1e2ecf9292adfa64a8a9f8b8c1563b5" } } }, "32b3dc73599ca183244dc71ff36bc88e62757e5face12c31b14ce042f684120c": { "1bb063448241263bf2f6dc2f55489a21d5cd06be00886e0e9e91d6bceacc47ba": { "jp": { - "updatedAt": "2025-12-02T22:57:28.962Z" + "updatedAt": "2025-12-04T20:16:58.009Z", + "postProcessHash": "f0014bb393000681e5c8e6fb57a625adea55feea761bc4e0252af7366b932d2a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.963Z" + "updatedAt": "2025-12-04T20:16:58.011Z", + "postProcessHash": "32004629208d8b1dad139bb2e8bde206ce29f890098bccab1d7c881c1f05c5df" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.962Z" + "updatedAt": "2025-12-04T20:16:58.009Z", + "postProcessHash": "2fd8e3081543c7e357a26e4cf674db809e08d88564276815590e26f85cda60f4" } } }, "51c48794a66e183ba70935eac117d954a1401f40572a0afc11169b24fcd14820": { "dc661924dc7cd06d16b7ed5abfda37c2ece415c277427ada79d811eff748ebda": { "jp": { - "updatedAt": "2025-12-02T22:57:45.285Z" + "updatedAt": "2025-12-04T20:16:58.006Z", + "postProcessHash": "5bd575dbd46a74dc46f0bd93bc3a3c606a4b2dd7a63ba6ad7e07885d5a785ddb" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.284Z" + "updatedAt": "2025-12-04T20:16:58.006Z", + "postProcessHash": "4688dbbc74f999795d8653c432af66f9a25ce50405f6ef45646abed6a46efce0" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.934Z" + "updatedAt": "2025-12-04T20:16:58.003Z", + "postProcessHash": "87ae6363af78007687ae3df9b8d60371ba81c73d9cc4828ef96dcccfc9b77b4b" } } }, "5565bc89634d0648d7fb44f41fcd9352657cc2b36d57392f0a6561a32e66eb28": { "d223905451f4d931e0e856ce3fd5f35c1c3c25396ff43780337894e768a7242b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.953Z" + "updatedAt": "2025-12-04T20:16:58.004Z", + "postProcessHash": "094285d509b91224c301d261e58d7e13905216cd8f7ec47c4b0fcea25248a2cd" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.958Z" + "updatedAt": "2025-12-04T20:16:58.007Z", + "postProcessHash": "065490542a330288bc8ffe104aca229c8f42ef27b9f9f0332e0c9df9278fc2dd" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.952Z" + "updatedAt": "2025-12-04T20:16:58.004Z", + "postProcessHash": "7f54799b6d7d376176089de03a3c4f3853acb47af1ddbb363a45555c61d4a004" } } }, "705e7aed31578540442c080a6cafebaeba2bf1ddb38ec739dd014aec5b25502b": { "29a6c789509cb2e9a587186b93902ad76eec1850c4f01f91eb5c2a4c186d557d": { "zh": { - "updatedAt": "2025-12-02T22:57:28.965Z" + "updatedAt": "2025-12-04T20:16:58.013Z", + "postProcessHash": "aa9ea865ef9a74fd0403c13740154b4683b116cf2be151eb36e4d903df8e43cd" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.964Z" + "updatedAt": "2025-12-04T20:16:58.011Z", + "postProcessHash": "9e4d90e728d86e76cfe85c8b9159ada8015ead90139d4040e891b4a4f00850e1" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.963Z" + "updatedAt": "2025-12-04T20:16:58.010Z", + "postProcessHash": "06352ad240cef2dac5575aae8c070331995023cab2e2e76b9c0db73094ae5254" } } }, "7e47d90d43125cfd56ce110d9bfe1a08ac0c8cecbad7095afeda215f8ebaff80": { "6aa7a3b849b9da4b7d84bb26a3754ab6d9c56ee35825fa788436cb306b81fc00": { "jp": { - "updatedAt": "2025-12-02T22:57:28.933Z" + "updatedAt": "2025-12-04T20:16:57.966Z", + "postProcessHash": "276ae24f4a56d47bc98f046e20e9ce58b10cbd61323b489525a6259f2eeccff7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.933Z" + "updatedAt": "2025-12-04T20:16:57.966Z", + "postProcessHash": "dd0b30ae33a70e3bed52c4a38888fb4fdbbdde2cfbc6dfcc615470b9284682f2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.282Z" + "updatedAt": "2025-12-04T20:16:57.967Z", + "postProcessHash": "fb57e9d16f29d8757712f94f02177d55d6871581a697f6cf0aa504d6d8209895" } } }, "9368e9ef7da2d3545fdcad02056a63f297099ae569a58d6445ec4175f477bcf7": { "5294da061b84e38e7a5c72fa3738434b348d3c948072b63438f6f8e9041f8d45": { "jp": { - "updatedAt": "2025-12-02T22:57:28.956Z" + "updatedAt": "2025-12-04T20:16:58.005Z", + "postProcessHash": "51e661202495f554aa919aba26211105d194905f68e90b11569644e22a4d19f0" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.947Z" + "updatedAt": "2025-12-04T20:16:57.982Z", + "postProcessHash": 
"d853a11d46583014dca16d756e17c7e4ca723c5ebf68502445d29c29678ad99b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.950Z" + "updatedAt": "2025-12-04T20:16:58.002Z", + "postProcessHash": "bed67a22fd139b99c1bc9312af0a27bf3841ed0b09101b9e00e0e6432dd83be9" } } }, @@ -7127,26 +8710,32 @@ }, "038b14ac5c1893d1111af35826b4c74e0b753cba46c799f2102d96ef3edb9d42": { "zh": { - "updatedAt": "2025-12-02T22:57:28.960Z" + "updatedAt": "2025-12-04T20:16:58.007Z", + "postProcessHash": "d7bd266f0df65d8674339f8d999d4d5c7d62364af8509415a6cba24cc8776c15" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.960Z" + "updatedAt": "2025-12-04T20:16:58.008Z", + "postProcessHash": "866371d53214a3859d540be44d80d55991bba5acdf15cace8764f17fef97c3c3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.961Z" + "updatedAt": "2025-12-04T20:16:58.008Z", + "postProcessHash": "53bda068727be17d4e27d4fbe727177bac89f0c96f1c85b9e518aa909cecb733" } } }, "9d935527c3051f00d3c44516b5c5b43d9ec31ba4d1ca19553b784a772963e4d6": { "b415e1612fa4c875f71cf858dcdd92606355f03dd3c13b5aef37f79f279ada0c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.957Z" + "updatedAt": "2025-12-04T20:16:58.006Z", + "postProcessHash": "42a4a26fc99ea8a27e5a0c04bd9b1dd73e9b7b1d68de1b0ecd2bc1534065b81e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.953Z" + "updatedAt": "2025-12-04T20:16:58.004Z", + "postProcessHash": "45954e77d6a8d6e5b550d0db55078d401ac371ede7f3b310bf86ba43c8b0a0f6" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.282Z" + "updatedAt": "2025-12-04T20:16:57.982Z", + "postProcessHash": "2217404a70c958d1a2b659c7cbce30fa1bf9fe8da253fd1c97bbbfe9cccdf1ce" } } }, @@ -7172,44 +8761,67 @@ "jp": { "updatedAt": "2025-12-02T22:57:28.955Z" } + }, + "2c79c5ac05027301cbc60889f7375dcc926475227edfeb0d74573ebc44b2c97e": { + "jp": { + "updatedAt": "2025-12-04T20:16:58.018Z", + "postProcessHash": "0dd486fdaab0bae80ef7dda49071a976bb89a7d13a8323bb6eb62c7c7b6db578" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.018Z", + "postProcessHash": 
"fa4dccb81980138c1d51b888bc40599401076a542ac405708f93293c83854bcf" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.019Z", + "postProcessHash": "949651bd01c278d316cf0c42797291b6d6af942d2be2a74e926267ac514cfdeb" + } } }, "ca84649ef742e7064e2d857290ef9d942fcc1d6b9bdfff1813fcdfdbefec62ff": { "555cc07d313afdfd168b7ad11d02f0ab80d39cc85a07b294b44c7401c7ce9620": { "jp": { - "updatedAt": "2025-12-02T22:57:28.957Z" + "updatedAt": "2025-12-04T20:16:58.006Z", + "postProcessHash": "f6f9d026e6d3cba4da9db144cda2c47697657f34aefb4cb27bd1a5089ef63422" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.950Z" + "updatedAt": "2025-12-04T20:16:58.002Z", + "postProcessHash": "a0d3077a4a06f1004a85bed166daa08cbf0ab1d8ef450a6d0ca368f4cf4f42a6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.951Z" + "updatedAt": "2025-12-04T20:16:58.003Z", + "postProcessHash": "0433ff9b7a94da1dad3e37b640f7251c6ec28460138a1dd24bd4abfbcdda470e" } } }, "d8908fc8af7a3068c0cc48f8107adaf5bf331be7388208aa9a40ca7f00432b7f": { "561bda26e259939457123ba760b1c473d1ffa5cabb632bd41b00a30024d8ae4e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.283Z" + "updatedAt": "2025-12-04T20:16:57.967Z", + "postProcessHash": "95b0663931762a9269dcc6da7987c6be7f93641fa017b4fe59cb58650646f9b4" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.283Z" + "updatedAt": "2025-12-04T20:16:57.967Z", + "postProcessHash": "6ab99e15bfcea845f58aa53d49c547162a3e23c974e5f0fee77ff950aebac2f1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.284Z" + "updatedAt": "2025-12-04T20:16:57.983Z", + "postProcessHash": "db8b6f0c31b0ccef480600166a72f81d63e8133372bc5e285446a03211771f0c" } } }, "dbd0d5161d0bd3efeb5fcda68e773df51262f2852a70440882d847c3e8ed79ff": { "558ea55eedb29b8236de463bdebed17358b2ffd17236ba1c7d0c9758543b7b74": { "jp": { - "updatedAt": "2025-12-02T22:57:28.965Z" + "updatedAt": "2025-12-04T20:16:58.014Z", + "postProcessHash": "041a3fe5d42ed1971b184f9c6d8e4ad58ebaf273f3b9cc0c1a673ec611b3fa75" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.963Z" + 
"updatedAt": "2025-12-04T20:16:58.010Z", + "postProcessHash": "4e4a3fc358bbe45cf9e85a51a2a4a3848907e116d4cb90509fa939d75851e942" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.965Z" + "updatedAt": "2025-12-04T20:16:58.013Z", + "postProcessHash": "77a7e038a01ede1de403ddc0e10fd2bc4c23fce83950cbcfae3e5384628eaf64" } } }, @@ -7224,304 +8836,387 @@ "zh": { "updatedAt": "2025-12-02T22:57:28.954Z" } + }, + "f10cefde84597353beb5f91328c7072255506fa883614bf555236c792d319e1e": { + "ru": { + "updatedAt": "2025-12-04T20:16:58.018Z", + "postProcessHash": "303fbf0f0a43e5f21f2bb5ef04d39b44d0837b339e6cf580aa0a3304845a409f" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.018Z", + "postProcessHash": "b3e747c5eb22c812d55fe56f2745c0fa854c562886a7bfc7cf60e857caaaba78" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.019Z", + "postProcessHash": "8ddec96eab7e6b801a0494b2381ca7fed5c24e30ca8e8d035f5f19182a6dc1ce" + } } }, "ee20bc66651b66977783ce3a17b9d4f38b09b4a0774e0791bb9fb26a7f930500": { "e7338142de8dacc4a6fc04e51a78c9dd1fb3bbef6534057d60f8de1db6ed3aab": { "jp": { - "updatedAt": "2025-12-02T22:57:28.954Z" + "updatedAt": "2025-12-04T20:16:58.004Z", + "postProcessHash": "24f55b7fa897436d26b0d4c23bd171f0acc7eb0237fb3e5c833738cbf8237978" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.949Z" + "updatedAt": "2025-12-04T20:16:58.002Z", + "postProcessHash": "5e083320599db445535dc51e52e9eca91b6f9ac9fb8163af07974107e03e85bb" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.953Z" + "updatedAt": "2025-12-04T20:16:58.004Z", + "postProcessHash": "2e1b95c98660ffec7a618d347b6bbbfeb3406555dd8284448db2aff2ab14f5aa" } } }, "fe7e045fa5f538d00f569d58a48e0a9285abe27807a38b3ce253116b4cc22e74": { "c2d3019dfd5c9e95d0bc93db0189ffd3ae5bb907d47f6a727f23a3e435164059": { "jp": { - "updatedAt": "2025-12-02T22:57:28.950Z" + "updatedAt": "2025-12-04T20:16:58.003Z", + "postProcessHash": "303d7408ae66c05303e03e841fcf9147d3b170c105226e935691ba7c92c987b4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.948Z" + "updatedAt": 
"2025-12-04T20:16:57.982Z", + "postProcessHash": "f129321ff3dfdcddc84938d99d222c947e5acfe0d644a1923cf87633aa01b227" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.955Z" + "updatedAt": "2025-12-04T20:16:58.005Z", + "postProcessHash": "5f49016f51b5f89f3572d90c10763527009071b34a16cd9eb8a794e0ad69054a" } } }, "26480489190477329712e0e890231f9ee67f7bae2ec93f1adc5e49bd8705dd0b": { "ca234a63cfee1038a0b6bb5b7e10d7ef8307e9e5239cd0706669420fd2cb62a3": { "jp": { - "updatedAt": "2025-12-02T22:57:53.554Z" + "updatedAt": "2025-12-04T20:16:58.056Z", + "postProcessHash": "e6d282f875e94ab589b488c6d9adbfa8eff4ebdbd730e42af554bdd618ba597e" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.553Z" + "updatedAt": "2025-12-04T20:16:58.056Z", + "postProcessHash": "ecbe616c6925da52f4d4e58bd74b86565ab7c0158bb9ffb1f2fde088c6468b9d" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.555Z" + "updatedAt": "2025-12-04T20:16:58.057Z", + "postProcessHash": "65c0d5f42e46b4e390d3db0c1813d95a69fe9eb1f16fd64087f9da6f13d13532" } } }, "356c6ff78cff0c4de1af14bfafe2c9bd10139292cd3f3c3553d242bfb277d994": { "cf5d9fa224a574f45a3c02cbc85a2617672d37fcaddc77e5adcfc9fa74e326b1": { "jp": { - "updatedAt": "2025-12-02T22:57:28.973Z" + "updatedAt": "2025-12-04T20:16:58.010Z", + "postProcessHash": "611faa1df9e1fb1ec1d106d78f4a897913863f2ba72f7723f3a3af4f618bb7be" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.973Z" + "updatedAt": "2025-12-04T20:16:58.010Z", + "postProcessHash": "8e4f2af9312db5abcc6f81a377fde2ac62ba429ae3fd6997eeae5c3f58167c01" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.546Z" + "updatedAt": "2025-12-04T20:16:58.034Z", + "postProcessHash": "6791f52739c6dc49bd884ea2edea88a7a3593c792c942b6cf44611956d521d26" } } }, "372be1b1091279b14a64c301dd32f570d8ae7c28ebc2b0e65c8d600412c8a6b2": { "24a1775ccfe9d94dbe6ee2e71f12bbcddd22da3de1dd49f2d8ce8e542b33728c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.967Z" + "updatedAt": "2025-12-04T20:16:58.057Z", + "postProcessHash": 
"7a1301430852eb3b7c863ce7b60dd3cd28e3194281259133474d579135d3540b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.966Z" + "updatedAt": "2025-12-04T20:16:58.051Z", + "postProcessHash": "e31cfbd29d15a484dc495768a28cc2d9973899054688b9c265fe939446bb84d8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.966Z" + "updatedAt": "2025-12-04T20:16:58.014Z", + "postProcessHash": "a35eff6c7e1526fe57b57005da42682caf28d855ef43a2805901aa88f49a04c5" } } }, "3b4bb74db846ca0f012ad71dfdb33334fa8118040393487ad35fea48bd2470ea": { "3120f1e4d4f08a6ba69af7daa70ffa13d27c3a4aef713d36140278c033dcf2bc": { "jp": { - "updatedAt": "2025-12-02T22:57:28.961Z" + "updatedAt": "2025-12-04T20:16:58.009Z", + "postProcessHash": "392a889b54d67c206484c964c2347448235c34e9cbbc8030e3def3d7e6364a64" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.972Z" + "updatedAt": "2025-12-04T20:16:58.009Z", + "postProcessHash": "f0db541eb99656b04fcbdcf4167fe7911875830a5bb211599808c9c911ece9fa" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.975Z" + "updatedAt": "2025-12-04T20:16:58.012Z", + "postProcessHash": "062448fd1e59aab63e28462dfbcfc797642a01b6c947e16f61eaa19b37bb3c26" } } }, "42ae3a5b453abe44edf7cc0c8fb18a3559a3043e9828ca9eecf69cbab0362ecd": { "fb18df11b1efd0c29cdbcd9a0fef8f8e09542882ba6ccb09e3e42d9f3b8aa419": { "jp": { - "updatedAt": "2025-12-02T22:57:53.544Z" + "updatedAt": "2025-12-04T20:16:58.033Z", + "postProcessHash": "5d9fd1049eddc2acecd17f65489e1d6b2a125db11b776719cbf49f00f1c56319" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.971Z" + "updatedAt": "2025-12-04T20:16:58.009Z", + "postProcessHash": "5fdf10c3e8c17abfd0db0b32f668e8fe635dc069f3a810d39dea6a5a1603d824" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.546Z" + "updatedAt": "2025-12-04T20:16:58.035Z", + "postProcessHash": "a0eaf22894da0ed8dbf36bcc40020565cea86446810dbb2d7ab5aa4f4749b0ff" } } }, "501db638650e5304a9dba8ff4612de47b5da82aaad0a722bd89c11c68a35eb5d": { "f925e25aa54c252061995e84db9939551b2e2035ef3360d06582d778617a054f": { "jp": { - "updatedAt": 
"2025-12-02T22:57:53.543Z" + "updatedAt": "2025-12-04T20:16:58.033Z", + "postProcessHash": "ccd97b016fd5721469cb80cfa87c7e7131d7c0be2013edde9af24b7c5725450e" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.551Z" + "updatedAt": "2025-12-04T20:16:58.054Z", + "postProcessHash": "b2556c7ca0e62da6eae1686b5c9a94c474c3b6bb707c98eacf338bd3e6c42913" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.541Z" + "updatedAt": "2025-12-04T20:16:58.013Z", + "postProcessHash": "36d223a21a3d4b5bdad2ce3b70170eba8202403133a5a89161a8bd0eb61743c3" } } }, "5391d9361d8de859f55fc623438785f034d27921eaf51522b1cfec0b8ae6d057": { "4c5301e6bd068db1c39c7442930c97eb64fc020a710f75519ea91e088c153887": { "jp": { - "updatedAt": "2025-12-02T22:57:28.972Z" + "updatedAt": "2025-12-04T20:16:58.010Z", + "postProcessHash": "f349a84543a13162deef6673cabb7122fbe3a8ab9bd1e5e3c8161b45a7a169d2" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.973Z" + "updatedAt": "2025-12-04T20:16:58.011Z", + "postProcessHash": "94b414332ad05171b9f7a8309c65d41566e22dc0ae6237bab32a5eada4278724" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.974Z" + "updatedAt": "2025-12-04T20:16:58.011Z", + "postProcessHash": "da350b58c6059a8e7b05be5601790e3b005f36dd5509dd19130dbb6238dccd20" } } }, "64565318cadde7f90ba96c3e29513ba020adf44fe66a9bf3e5482d23d0dd47dc": { "63452898bc1a5638b696f345c28ff8083c41b2223f3638a2c64f25800a2a5647": { "jp": { - "updatedAt": "2025-12-02T22:57:53.554Z" + "updatedAt": "2025-12-04T20:16:58.056Z", + "postProcessHash": "bf2f07315ebe4fc7f7aa571a8d5cd79113859d405929e03f37cde65953233025" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.555Z" + "updatedAt": "2025-12-04T20:16:58.057Z", + "postProcessHash": "d8dc8d6769808c35f78a447f1c75b251fd3b014a3b15267516829021d94ce642" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.551Z" + "updatedAt": "2025-12-04T20:16:58.055Z", + "postProcessHash": "95a9bc73670e0a54c8dd2a0649b7ce10b5a1c4e42e683aa9704970085e2a4d5e" } } }, "68ba9608dff675f309e6f07ee6d6f770a417b027a738a79f138c8d70e2106dbc": { 
"9dc2946bda2aea97fa9b18c311317369a59c2adf656d6ce6d76316a813616fc1": { "jp": { - "updatedAt": "2025-12-02T22:57:53.550Z" + "updatedAt": "2025-12-04T20:16:58.053Z", + "postProcessHash": "6e70dc15e4834422507ad779df552f2b8f4f42e71f5d5ca2d250464d4c8b9be2" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.552Z" + "updatedAt": "2025-12-04T20:16:58.055Z", + "postProcessHash": "7b6301f7f66b6e632910eb13d293c942472870175cc88829b97503e379bcd89e" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.543Z" + "updatedAt": "2025-12-04T20:16:58.033Z", + "postProcessHash": "7e75682487e04acffec06083abdf418c2de5cb35dc025d8a66f6368b9325d275" } } }, "78fe6d3b89afce471181d779a6a8b475696095ab4ef58d29771279afa02b2997": { "79d3b0b826a742e9b7895789e7402d878b568cd9e4df76a133dc77a70f03c8c0": { "jp": { - "updatedAt": "2025-12-02T22:57:28.964Z" + "updatedAt": "2025-12-04T20:16:58.012Z", + "postProcessHash": "52512bbfc6c3d667caa3d8ac9ac4e1f2f20b23c48fd1013379f90b49559b3314" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.962Z" + "updatedAt": "2025-12-04T20:16:58.010Z", + "postProcessHash": "6b8ac7049c0a3a464f93462ef827262ba10315c4381b21cfb8484ec2be106b4a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.965Z" + "updatedAt": "2025-12-04T20:16:58.013Z", + "postProcessHash": "e4c04c4ae0d9a7aef3487d7bdb5d0808cffeab4d81f4e0d86be6cba06a2c04d4" } } }, "81915656e6d382d86e051a8fa78d36209f8322f00df9d519bd2aba85055926e2": { "4bc52b2d49860b621c0c2e9203206add44f60ae74179555c48eff9366de95cc3": { "jp": { - "updatedAt": "2025-12-02T22:57:53.548Z" + "updatedAt": "2025-12-04T20:16:58.050Z", + "postProcessHash": "0d233d438aacc7cc28a36252f1f5d5ab263e5c98847520628bd72ba53cb74545" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.545Z" + "updatedAt": "2025-12-04T20:16:58.034Z", + "postProcessHash": "095c427e9c2370d725096f2064729d0833b68fd22b228d3922d6119908d24d5f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.547Z" + "updatedAt": "2025-12-04T20:16:58.035Z", + "postProcessHash": 
"4d0b53a69ca4140264678f61b06434dae2242ea1b929a7e58eeb9f0b29cf3e87" } } }, "89db72da570e81ebcb6f667a907e2f846f64923d46a9947f6788299488af58fc": { "bc1f7fd0c55c3e925412c0e368a4ffa88b8fe5c39a7aa535303e0d54e76f2b9c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.541Z" + "updatedAt": "2025-12-04T20:16:58.013Z", + "postProcessHash": "217a2fdaa0cd7f03a3f794f3060edb6a4b063bb9a3ab0ae4fb448c60d543eb2d" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.547Z" + "updatedAt": "2025-12-04T20:16:58.050Z", + "postProcessHash": "2f251a1991f67748e571564400c0e2df2a3f0a12ff43788f45239781c4cdc0c0" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.548Z" + "updatedAt": "2025-12-04T20:16:58.051Z", + "postProcessHash": "09361b3e4e8aed669cfa82f19dc18a55fadd0814ea76c812bec905b494de6526" } } }, "938d56b6044b6cebcfe8b337190fa6dea927660551790620ca8c19fb31cd39ba": { "2aefd9ad0393f63b7e1ec0b002323afaa8b544c1011e8f3c91b77ac1f84ef487": { "jp": { - "updatedAt": "2025-12-02T22:57:53.549Z" + "updatedAt": "2025-12-04T20:16:58.052Z", + "postProcessHash": "43f9b3711b2bf807282390642f8251eb93fcbe12ffe1aa7560ee8545ae1f3694" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.548Z" + "updatedAt": "2025-12-04T20:16:58.051Z", + "postProcessHash": "425bdcb2ded91f5f8c718484aabcd61a25c9443c9b6a092d9b04e7dd38ae48dc" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.550Z" + "updatedAt": "2025-12-04T20:16:58.053Z", + "postProcessHash": "d40950119d7da8ed4e7fabda33c7af01669d24cb2ffb11877fe02163132ca310" } } }, "96e31c277d43b145242840ad838f44b908ce963c352dad86b59211265e87b591": { "482a21b0c27c50eedb13f76a309205d6a1f064bddbb03002a77af2aa8fd7cc3c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.551Z" + "updatedAt": "2025-12-04T20:16:58.054Z", + "postProcessHash": "d29d62ad9ddc810a8f267b21cc96da7323a30edd001a1ac22dcaea395fefb373" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.554Z" + "updatedAt": "2025-12-04T20:16:58.056Z", + "postProcessHash": "0573aef7f6ac645d7f954f191ca585f88b8689f5f02d84b8b4a527d6bdc5b101" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:53.555Z" + "updatedAt": "2025-12-04T20:16:58.057Z", + "postProcessHash": "fe4556576c17964f430e29a76297d629d2b888815d6faf6085f87c213ea5ba46" } } }, "99ff98bca369584f25c59d8f96acd6c1788719989416cfe1d5d478919758fd86": { "139a2b803dd22a097a0fb93f4bf76cd3187b48224be1271d561ce8d6d3b0bdfd": { "jp": { - "updatedAt": "2025-12-02T22:57:53.544Z" + "updatedAt": "2025-12-04T20:16:58.034Z", + "postProcessHash": "7dfe4d70c8a0f03e7d651345d0fffed5b4c7805080d268ffd5abd3559ede648d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.976Z" + "updatedAt": "2025-12-04T20:16:58.012Z", + "postProcessHash": "f407d1d97951c39ab285c56e7e962e44cdc7c0df9ad85300c246ebe1ec63a976" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.545Z" + "updatedAt": "2025-12-04T20:16:58.034Z", + "postProcessHash": "5aa191b2f54d41d3ae5e23abd58970d15e384699dfb8b23575de25fbcd61a59d" } } }, "b2aff55ca5954a6970b9f52ac23fc39fc004e51a346a6cd693caccb1417c6519": { "1010abd84c38f96762ed3b8cb461a3bb4e5e229304c1c500e26dc7c6e9d01318": { "jp": { - "updatedAt": "2025-12-02T22:57:53.542Z" + "updatedAt": "2025-12-04T20:16:58.032Z", + "postProcessHash": "dd44eb6d65d41f0373ec960965bfd70885369e22e8bc49a3d0661048f572df4a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.974Z" + "updatedAt": "2025-12-04T20:16:58.011Z", + "postProcessHash": "6edc0dfcad28528249e76082e3562adf0bc82695adc5edb38a6b74cd03ef77dc" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.966Z" + "updatedAt": "2025-12-04T20:16:58.051Z", + "postProcessHash": "96ed5b8196875d4ce91a1800378a69bf1d7abd9158d5252b38194cc53da19b3a" } } }, "b8c212ea80c9bdcc2ba8434c82489b4cd25a84157ab8881924465e669bf2bf1d": { "aad4076142416380448496fbac36524304c81991e5c00dade2ad95e55a087c94": { "ru": { - "updatedAt": "2025-12-02T22:57:28.946Z" + "updatedAt": "2025-12-04T20:16:58.000Z", + "postProcessHash": "bc6834f5aa92fdb288442fc58cdf176cc6d5d69b037aff11066dd4b9a430d86f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.550Z" + "updatedAt": "2025-12-04T20:16:58.053Z", + "postProcessHash": 
"65027e49845b45fcc27c51126c215336ab6d67feafef81cd9b7672cd85f14e62" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.947Z" + "updatedAt": "2025-12-04T20:16:58.000Z", + "postProcessHash": "317c673020dd7bed2c0208c9742b7e476541042db1647a814b966c89f1bed22a" } } }, "cb227df00b6e64305168553956c1928afd33de9cb76c9d330e9c9eca9290c33e": { "268a8df1fdc77541fc0a6bc99e66097367ea72724a49b591b16c19e00e6685fe": { "jp": { - "updatedAt": "2025-12-02T22:57:28.974Z" + "updatedAt": "2025-12-04T20:16:58.012Z", + "postProcessHash": "5fd5632a3af9f533b0a80972c2e178250387cb1e6184a29ebbdd5ec3f7900400" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.542Z" + "updatedAt": "2025-12-04T20:16:58.033Z", + "postProcessHash": "835d98e3e67c4df3403d1f14ac5ce1f5d0c0fe80da1066bc0ccbaa7e108305cb" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.549Z" + "updatedAt": "2025-12-04T20:16:58.052Z", + "postProcessHash": "817ad2f03e86c2846ec822d7d9a5a08f2b4c92014c14f51d32ede5d1a539f1df" } } }, "d1c3b4df71214a3e88455cadb9dda32802eabf8a18de9dd12b4636f3a20001bb": { "407735ce33f5163b7e6c2875f0e2414993e84109f0556ba297b7f1762f038a8a": { "jp": { - "updatedAt": "2025-12-02T22:57:53.544Z" + "updatedAt": "2025-12-04T20:16:58.034Z", + "postProcessHash": "44b94b45a2de383181d2850d486c5fd0df08ed6f12cdd6ffaf4ac2371d0ba0ae" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.542Z" + "updatedAt": "2025-12-04T20:16:58.033Z", + "postProcessHash": "f58600a1b3a2d7e9dae86ff17c77b548ed153bf1eee2d81eec281fe3a8a3be5d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.971Z" + "updatedAt": "2025-12-04T20:16:58.009Z", + "postProcessHash": "6287e8fbc67df184f9b080a55f4d5ec39e123101e6f60104c29144000ccc331c" } } }, "e9a7a6821acf2148d5fdf59dfb02c842dbeccfe3db8ed78b13af93341b542d82": { "45af94df7fb72c57f3c3954a12bae535b5025b01d4824ae9e4f23b2ab156e1ec": { "jp": { - "updatedAt": "2025-12-02T22:57:53.553Z" + "updatedAt": "2025-12-04T20:16:58.056Z", + "postProcessHash": "4bf659d31108e0f7180ca092629ffd37cfe1b37825658072e6bef3bc5355f23c" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:53.545Z" + "updatedAt": "2025-12-04T20:16:58.034Z", + "postProcessHash": "f4892efbae250db837952d841c773209aaee248a50ea481875d5ef4d9cf6e164" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.546Z" + "updatedAt": "2025-12-04T20:16:58.035Z", + "postProcessHash": "eb9bf28fb06316762621927ae788d4bce165b73114a5affa5cf85f982216944c" } } }, "fee5d5e407a8306e3abcff87b3f147641c908588b209b7c9e107759067db235d": { "35cee660251b87c86ad32e1c0bdaaefadc8dc8d26b278a55c87e87e3de226353": { "jp": { - "updatedAt": "2025-12-02T22:57:53.543Z" + "updatedAt": "2025-12-04T20:16:58.033Z", + "postProcessHash": "7ffef1e5a86761867016a4a2d07b80a2284c84bac16d3a340cff0b282eb09fa8" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.549Z" + "updatedAt": "2025-12-04T20:16:58.052Z", + "postProcessHash": "b893abe75f50ca351b76981e0bf44d2cf8bf43f821f6bdb179f81f0f06143407" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.552Z" + "updatedAt": "2025-12-04T20:16:58.055Z", + "postProcessHash": "2e825f43adb831e6c2e7f805757ef48bcd517816c241643cecfc3da30aa42acb" } } }, @@ -7539,13 +9234,16 @@ }, "bcd3f856bafd91c47cfba8e758107bcc7ae1c1743a3aff704f72af97e7bb2e48": { "ru": { - "updatedAt": "2025-12-02T22:57:53.557Z" + "updatedAt": "2025-12-04T20:16:58.019Z", + "postProcessHash": "5326f02131a261a59b4be06497a2cc9486dee6a62d1e59d2081d917968c970e0" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.569Z" + "updatedAt": "2025-12-04T20:16:58.027Z", + "postProcessHash": "373827bfa899fdbee301b169aaa8b56e503314bcad919fb6faaf41c236994383" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.572Z" + "updatedAt": "2025-12-04T20:16:58.028Z", + "postProcessHash": "a9b5fdafd0e24b3df17a7cf2298812e306c021afd928867652524c7376ec464b" } } }, @@ -7563,26 +9261,32 @@ }, "ebdd9a3e7575ebcc17e2bf7eba747cab13904bd21b12eb03a1f34cdf5f1e7784": { "ru": { - "updatedAt": "2025-12-02T22:57:28.970Z" + "updatedAt": "2025-12-04T20:16:58.001Z", + "postProcessHash": "8f12bae3110cf85cf865b11be9aaae8fe993faf487f56a912180bfb7ea26c117" }, "zh": { - 
"updatedAt": "2025-12-02T22:57:53.562Z" + "updatedAt": "2025-12-04T20:16:58.022Z", + "postProcessHash": "562fac459d8693f50adf119c4874a477c72ae0a0e2eda84513f3314f290d21c6" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.565Z" + "updatedAt": "2025-12-04T20:16:58.024Z", + "postProcessHash": "7d9ac1985d45f13ce6094b2cef90897fa98e7bba6a7f86ee1278c9fb4e47a099" } } }, "23af5cac91f252ffe2e42d1e7b5a0bcabe7dc844aed8ebeffba1570964d40b4d": { "897a5b0e6ee3fe28e1f105bc25b952d48f233f747b27270188a83040b9b40f90": { "jp": { - "updatedAt": "2025-12-02T22:57:53.578Z" + "updatedAt": "2025-12-04T20:16:58.032Z", + "postProcessHash": "3db37b9c686576eca5af630c85a7e0eda82eaa77bd87a78738e78929a6cc696b" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.576Z" + "updatedAt": "2025-12-04T20:16:58.031Z", + "postProcessHash": "e7c660a4237bc197dd181b3deed2ee6d4092bd8bae804c9fa1268e7732ebfa70" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.578Z" + "updatedAt": "2025-12-04T20:16:58.032Z", + "postProcessHash": "954f1d36313e751a758fa9ac3d67a1bdeae5ea3964a91ddd31d69e36ca2ede77" } } }, @@ -7600,26 +9304,32 @@ }, "b38543567533024d38925fac14dd5420a9b499f385ca94cc0b9b9c04d820f470": { "zh": { - "updatedAt": "2025-12-02T22:57:28.969Z" + "updatedAt": "2025-12-04T20:16:58.000Z", + "postProcessHash": "cf71ea33a40fe51ae5d3f820c60b8443541c2b26b7b80736f75fb9fe9b73a503" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.969Z" + "updatedAt": "2025-12-04T20:16:58.001Z", + "postProcessHash": "5bc8df77d72d6b2282fbcb639f632822f8bfa6d0b062e4139b90f896d4b25491" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.560Z" + "updatedAt": "2025-12-04T20:16:58.021Z", + "postProcessHash": "ab1e627cce36c65d2a1c6d3900ea47a855dccf9d31afcf84b1d31909a9956acd" } } }, "2a50f26ed5a74514a1bb5535e77a1e4295586acbc14137eeb91bebd950369fe9": { "77daddd248c06a3945d845d9935148cb7d185c9ace0f5a7e2b8d9a52649050c4": { "jp": { - "updatedAt": "2025-12-02T22:57:53.581Z" + "updatedAt": "2025-12-04T20:16:58.053Z", + "postProcessHash": 
"4ffe534d95e818b7357616ac46cc74ce7c89fbe2b6aa93c5e111af88c60e16d0" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.300Z" + "updatedAt": "2025-12-04T20:16:58.074Z", + "postProcessHash": "8f00191672b21a6af0ab47c7df09efd914dacff94527cf4ae5f04158d416178d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.301Z" + "updatedAt": "2025-12-04T20:16:58.075Z", + "postProcessHash": "86d2740e52d953bafff30de6e68fc79c019ef306b47815a0aacf1e0bf9db67cc" } } }, @@ -7637,13 +9347,16 @@ }, "adee3628812a2e0169c7c436f7c41012c6b0b856ab91c598890be0b181284e63": { "ru": { - "updatedAt": "2025-12-02T22:57:53.574Z" + "updatedAt": "2025-12-04T20:16:58.029Z", + "postProcessHash": "213f0f207a0893e3a39a5242a240341dc968dc69ce384067732f88e96facf909" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.574Z" + "updatedAt": "2025-12-04T20:16:58.030Z", + "postProcessHash": "aa51f1af1693132225fbf4e588d805adf8bc5159825d82b544320e8c067fce0f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.575Z" + "updatedAt": "2025-12-04T20:16:58.030Z", + "postProcessHash": "bf8ac9363e6270e9a1f5f44d066c76673c998c3a63450ad2512cc726b12dea2b" } } }, @@ -7661,65 +9374,80 @@ }, "5407c15b69d4e3b9d265c317ef087192cfd42cc503f7ec6c3e963b8594948b4c": { "zh": { - "updatedAt": "2025-12-02T22:57:28.971Z" + "updatedAt": "2025-12-04T20:16:58.001Z", + "postProcessHash": "322d8c926080f19f39eea33a93d98f6279e5ca75e7412fb2847b3a1ed1509e96" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.566Z" + "updatedAt": "2025-12-04T20:16:58.025Z", + "postProcessHash": "ebb41c4517bbd848285548589b30073c54d75dcb964410c13b651866a67a9393" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.569Z" + "updatedAt": "2025-12-04T20:16:58.026Z", + "postProcessHash": "17b8cd6f50e878623e6579928aa5f75fcbeb3a6d0815adf31768a86b5fb1ee65" } } }, "6e73db155b7c6964fced099cd2a329a54c570e4567c1e741e45991462993ff89": { "d1aadc2b06df5561a41ec6294f8ba38c60368402b06032d12e12420507c14384": { "jp": { - "updatedAt": "2025-12-02T22:57:45.300Z" + "updatedAt": "2025-12-04T20:16:58.075Z", + 
"postProcessHash": "91ccace05551a62e4db7403d1135491a4dc62782065b20542d7eea9d44fbc86e" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.582Z" + "updatedAt": "2025-12-04T20:16:58.054Z", + "postProcessHash": "029797c9970ceb58176ff0b5eec5b8d64103d25d60bdf9048cdc70d357672532" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.579Z" + "updatedAt": "2025-12-04T20:16:58.032Z", + "postProcessHash": "44eaeba901890c41669c7c254aa0ddbf64d39cc3eaa7f2766d8158243f012f4a" } } }, "854411037d5e91dafe4510e3bb749eb29c1405966f5c747972f003bea369b464": { "2f5dd362e6719f95a9f300225eac5ed8491245ba11f15bda272d36325d991c01": { "jp": { - "updatedAt": "2025-12-02T22:57:53.576Z" + "updatedAt": "2025-12-04T20:16:58.031Z", + "postProcessHash": "d607a690ec6089418526a6d1afc5a40c7d31013ea9c54f3c817d875fc0a20c91" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.576Z" + "updatedAt": "2025-12-04T20:16:58.031Z", + "postProcessHash": "b3d4d15b140673459f362a7fc6d871dce14130ea308a0901a5b660aea468f6dc" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.578Z" + "updatedAt": "2025-12-04T20:16:58.032Z", + "postProcessHash": "c10e46ee9ab5eed5f2de0b2f9e9586e687690ea56418ab09be51ab32c3521d10" } } }, "906c5c00462e8461e0b7aa1cffaec1f44d3cc275066f474f9ab70cccbf9e9d8d": { "661e85a9d5e8d39ed88218a74a7029ed28519c2e3ed3213707133a5bb6e243c6": { "jp": { - "updatedAt": "2025-12-02T22:57:53.579Z" + "updatedAt": "2025-12-04T20:16:58.034Z", + "postProcessHash": "5b323aa156c252f86ddd1d422410574abbda5c869ec45d97ee20f5289251d1e2" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.581Z" + "updatedAt": "2025-12-04T20:16:58.054Z", + "postProcessHash": "27a112cb6374c2c50a972a52ce8bb594fb0e851b7330fab2711b925b7b091e1b" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.580Z" + "updatedAt": "2025-12-04T20:16:58.052Z", + "postProcessHash": "006f38567d562634b32fe016360d4e2e8786e334a364340228c0ac2b3e4898df" } } }, "9acecbbe697d2e6d2e334b3b54c514cdcf0ed3d6c83e6748104f8f3b983abbd2": { "4b6046e5cde03661005f0be0ef3f23e778a948c6c005456f94af71b6ea2e484b": { "jp": 
{ - "updatedAt": "2025-12-02T22:57:45.299Z" + "updatedAt": "2025-12-04T20:16:58.073Z", + "postProcessHash": "c062e664697ae262626750bdabf1d0375adb1317ad45ebc65afd2b2b43892323" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.581Z" + "updatedAt": "2025-12-04T20:16:58.053Z", + "postProcessHash": "da665acd45c127d63572ac978745b7b69d0421095d709b498f427190817bf946" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.580Z" + "updatedAt": "2025-12-04T20:16:58.052Z", + "postProcessHash": "dbc1e521b6f1c5da0c761f6101045fbf8e2dca6fa361707b34c020b0819c0cfd" } } }, @@ -7737,13 +9465,16 @@ }, "aa95ed0a0f75936c9fb958402d78b88a1b649e02c12c5724474d96593d8ac8e3": { "zh": { - "updatedAt": "2025-12-02T22:57:53.558Z" + "updatedAt": "2025-12-04T20:16:58.020Z", + "postProcessHash": "6f18b298db5c2e5fd2ab4240bbdcdaa7eb3881b8d6d3acec0a4e75b362adbc53" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.571Z" + "updatedAt": "2025-12-04T20:16:58.028Z", + "postProcessHash": "1cc3d771fa720b210196aca77ebdbdf9bc67742727845e0ba3dfd4a9353da4d2" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.571Z" + "updatedAt": "2025-12-04T20:16:58.028Z", + "postProcessHash": "3f79081a8dcd817f0e8be51a9ef94cb3b49556a0861685aa18aa8ed259576798" } } }, @@ -7761,13 +9492,16 @@ }, "21a411586384e33979dcf970a5a3e351863fabcdd6a2f2d9ef948c7c72e29308": { "zh": { - "updatedAt": "2025-12-02T22:57:28.945Z" + "updatedAt": "2025-12-04T20:16:57.999Z", + "postProcessHash": "04cb51f7f5bd84cd9558fe88285c856ae8765b9ed4ffef21680da790b71e17c4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.945Z" + "updatedAt": "2025-12-04T20:16:58.000Z", + "postProcessHash": "9d65259554be5c5fe878dce367fa4974a099b9baca8fdbe1759f7b0ddfed7cbf" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.968Z" + "updatedAt": "2025-12-04T20:16:58.000Z", + "postProcessHash": "45ee3495253d53c5a40edf1775459cf4e37b41f3ebb590a5aaa4f8b9a217c1d3" } } }, @@ -7796,13 +9530,16 @@ }, "b89bf73d46ab4c8681fe3343c9174975288ec170fa6f21f23a4befddc5ff80e3": { "zh": { - "updatedAt": 
"2025-12-02T22:57:53.568Z" + "updatedAt": "2025-12-04T20:16:58.026Z", + "postProcessHash": "5b1cb8796509fda146b1766c4fffffa902b8fc8cefd36c512920cb3a3aa4b0a4" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.570Z" + "updatedAt": "2025-12-04T20:16:58.027Z", + "postProcessHash": "43fe1daf1a18f2860dc959170a0a4d61a7576a9a256a371ff8e9d58f11238152" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.573Z" + "updatedAt": "2025-12-04T20:16:58.029Z", + "postProcessHash": "e6b0fe173f3d619be02eea82ddebb68a4c11d3f35931a587672577910d398923" } } }, @@ -7820,39 +9557,48 @@ }, "7e8c22f8384e0f5e1604ca164094faaf9dcf3be660b10e0ab4b447554527eeb9": { "jp": { - "updatedAt": "2025-12-02T22:57:53.561Z" + "updatedAt": "2025-12-04T20:16:58.022Z", + "postProcessHash": "6e8700da9b30c8d7fdfe25c6a51ac7643e669c9b521a58904b4dad2d55004edf" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.559Z" + "updatedAt": "2025-12-04T20:16:58.020Z", + "postProcessHash": "dc9a9805ce23235f9c66aed4acba3ef682b3f2b7b4328a7195e680c64cdc2587" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.563Z" + "updatedAt": "2025-12-04T20:16:58.023Z", + "postProcessHash": "7efe74461eee360ea2eda05eeabc0031c6e70f6086d04c341ac8788096e815ae" } } }, "caf9155f2ad3c6bb6165f0c5a837f80ca0f324d7821ee36716d6a44981b32432": { "c9a20f8ca6d2167945584243cb48aae584ce849963b883da031cb1fa3b57b9d3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.301Z" + "updatedAt": "2025-12-04T20:16:58.075Z", + "postProcessHash": "e1ce38f7762d0f1c0c8ffdce7b2d273f74c5f80b45359a558e06a2c9efbdbb5a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.298Z" + "updatedAt": "2025-12-04T20:16:58.055Z", + "postProcessHash": "06922756440fc5d77c4523a0144e6cc83d96c437edea4fccfb0e1eb55a482401" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.299Z" + "updatedAt": "2025-12-04T20:16:58.055Z", + "postProcessHash": "51cc39d7a472bf11bd45fa334d1f0ae0ed0a08299a4639150a170882f565be1c" } } }, "cbb612322707858e39d9de4d0c9cc540429b50cdf2909447e753d421fc3212d0": { 
"4a7d4ef89d791edabbdff46a2878745843ca285c2985ee018c727274960745d4": { "jp": { - "updatedAt": "2025-12-02T22:57:53.553Z" + "updatedAt": "2025-12-04T20:16:58.075Z", + "postProcessHash": "a34784249f318a831aee38aa0541ce36aa9bf31c7adae0890da64f90bf68bb6e" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.550Z" + "updatedAt": "2025-12-04T20:16:58.053Z", + "postProcessHash": "ab1f650657f26a1fff11097ad35bf85b32e7696b8eec9514f2da73983338932b" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.547Z" + "updatedAt": "2025-12-04T20:16:58.050Z", + "postProcessHash": "712e48de063a592fc3204cdc320ab5125e38236103d1e1e827f00e180a5cff71" } } }, @@ -7870,13 +9616,16 @@ }, "36b30907a627eb9f2751c13b7b41989e2375737efd3afded8087c21995e25c37": { "zh": { - "updatedAt": "2025-12-02T22:57:53.566Z" + "updatedAt": "2025-12-04T20:16:58.025Z", + "postProcessHash": "0dea9c410cd04d331146163c0fc333fdd372dd0cbfedf0bec5f7e9e40d94a4e1" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.568Z" + "updatedAt": "2025-12-04T20:16:58.026Z", + "postProcessHash": "5a07f02c8e2f54822350e619f6112c19362ca3e9fd9840cc1da64c2e637df149" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.572Z" + "updatedAt": "2025-12-04T20:16:58.028Z", + "postProcessHash": "78333a3b03d4d3f90bec73bb14ba4ef462cecaa31269dbd924053420087ab965" } } }, @@ -7894,26 +9643,32 @@ }, "2887b5d2961f016569ec5457d21265144b81c9010f1ed1c13a8b666a47f53526": { "zh": { - "updatedAt": "2025-12-02T22:57:28.969Z" + "updatedAt": "2025-12-04T20:16:58.001Z", + "postProcessHash": "c21b0a975eddf45479fee1faa0d7c68a2b5e6addbef746f2d473f53f61bb91e9" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.558Z" + "updatedAt": "2025-12-04T20:16:58.077Z", + "postProcessHash": "d0f6f12483f436f0c0a31193b3b8b46d4ac43ae7181fbefb81f8e49091f97807" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.559Z" + "updatedAt": "2025-12-04T20:16:58.021Z", + "postProcessHash": "7e2e288f31d52b9a2c6beaf1a3b82cf6090e61a09f255c0c777ae43a26479b02" } } }, "eedd808236db61e2b28ca3ea587227703d2be3b1ced3ffbe6e92ba89ef707e94": 
{ "20f04dac4a93b0fdb3374aba9dc0994fdc280c6ebad124568bf3fd2f999185f8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.975Z" + "updatedAt": "2025-12-04T20:16:58.012Z", + "postProcessHash": "e784ed3b8b985ba5ca0b2b0efd3d696380cc7db20c7e653eaa839bf7185b594d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.946Z" + "updatedAt": "2025-12-04T20:16:58.000Z", + "postProcessHash": "98e3d5b93e2df252d5645a4aa3a5ecd7841d4f1194304c16f08a2c0f84d9829d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.975Z" + "updatedAt": "2025-12-04T20:16:58.012Z", + "postProcessHash": "8c2b9afe1e2cf1d13cb0bd28be83a651666da9982206753d6960d6733b267d5b" } } }, @@ -7931,13 +9686,16 @@ }, "f0ee8735181f3f9addb0d2b3c24aaf4c8616df76b27f36d37500110520089609": { "zh": { - "updatedAt": "2025-12-02T22:57:53.557Z" + "updatedAt": "2025-12-04T20:16:58.002Z", + "postProcessHash": "26dfd5e121b40fd4aad824e26210ccd1ca365090e2827510638bcc285bdb2f74" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.560Z" + "updatedAt": "2025-12-04T20:16:58.021Z", + "postProcessHash": "7225cf702321922403471898f5eb34c847d346cba7e9e0f147509f1268ac995f" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.563Z" + "updatedAt": "2025-12-04T20:16:58.023Z", + "postProcessHash": "50a4d326f98196533e96110837072d17a0aaa070d72832bd30a9023ad1ebdad2" } } }, @@ -7955,117 +9713,144 @@ }, "be7f4e3331c3fd409e0646bffe9b6357649ebe66e4221085977b0cbfb8bd4a24": { "zh": { - "updatedAt": "2025-12-02T22:57:53.573Z" + "updatedAt": "2025-12-04T20:16:58.029Z", + "postProcessHash": "7b3a807290980061f18d5dea10663f5c6a5827b2a05c685fcfc05c7281ea9019" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.574Z" + "updatedAt": "2025-12-04T20:16:58.029Z", + "postProcessHash": "3c7f6c424dcbafb41b80e54e8fc0434040a7085c451703724b856afe0c0d3151" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.574Z" + "updatedAt": "2025-12-04T20:16:58.030Z", + "postProcessHash": "1230ee4f7c5a6318d4c2204b62d6aabd977498735fe6be81c242da1d6d02e395" } } }, 
"115c23898dca6a5bd85fc79980e071e10196e3e3295527809805baad03df1e8e": { "cc5d85e7940e700fd5d3f8fd7641a3e19d24a033b3c45b51595134cdc91659d3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.324Z" + "updatedAt": "2025-12-04T20:16:58.076Z", + "postProcessHash": "ac817453b63e295349104e21aa6df88725c2a7f05cf37daf324c0134e3d18185" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.315Z" + "updatedAt": "2025-12-04T20:16:58.070Z", + "postProcessHash": "cecb9496e8c754ceda455ff709262dba1347aa3a33a0356657349e420154ce99" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.321Z" + "updatedAt": "2025-12-04T20:16:58.073Z", + "postProcessHash": "5dc5378c02ce0b9cf474cf5f937b12d4b4f287b9bc321a07361ce27b6e5c2870" } } }, "25fa138ccb807e454af6642c2ed448968e7de55919fd0d0a4ecb3f6e490a397c": { "ab68507bc825afafe53c6d1d0d6f08c53621f3a95a39deec3a4dad7ef103b2c6": { "jp": { - "updatedAt": "2025-12-02T22:57:45.320Z" + "updatedAt": "2025-12-04T20:16:58.073Z", + "postProcessHash": "f8c058112d113f83402c43b58120c54637979513a20df4ef300b77095582dd48" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.324Z" + "updatedAt": "2025-12-04T20:16:58.076Z", + "postProcessHash": "bd4d60aba3c2b928c8cf4b9b821f678a36746773b8a7c64b596617feebb57bd6" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.322Z" + "updatedAt": "2025-12-04T20:16:58.074Z", + "postProcessHash": "1e6490413523f9afb535494f50fe6fc3fba1b0ff9687b9243424a5c504fc7376" } } }, "29098b8e3f1e1a679a5ddc94379ef95f05ce5d74ad32854eb1f4dbf472997cd8": { "a2fdefeb5c115c0929ae0f70cb0135e6ff4857188e411761888474889ae1edda": { "jp": { - "updatedAt": "2025-12-02T22:57:45.319Z" + "updatedAt": "2025-12-04T20:16:58.072Z", + "postProcessHash": "b72667dc5ad8d23111581ac5db2e4b42b90e66f3fa005fa10f2c17ad093d5700" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.320Z" + "updatedAt": "2025-12-04T20:16:58.072Z", + "postProcessHash": "9207b56c528a98dd1b23cd288fbdf91a5c5d8fcbce10e7fe39c0aa15e5c2f44c" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.316Z" + "updatedAt": "2025-12-04T20:16:58.070Z", + 
"postProcessHash": "bd0671085273b9f85ab97fafa1bc01bcc1ff100d41a7a222ddb54009a7af06a3" } } }, "2e279d80c8ba84fded6bc29580d38a57165294e3bb9ec5ac3177d8fa43594ce7": { "c32887dbd37129abcf60580789e56e42295b227409b866e8d6f639ccb4436f91": { "jp": { - "updatedAt": "2025-12-02T22:57:45.322Z" + "updatedAt": "2025-12-04T20:16:58.074Z", + "postProcessHash": "048b0063a0264ab1608476982daeca44ff7a01b1ced2dec7b1a71427c17cdcfa" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.322Z" + "updatedAt": "2025-12-04T20:16:58.074Z", + "postProcessHash": "4c4e415e4dda21d27b7cb98af881042d977390109dfc2658733ffcfb0ab757dc" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.321Z" + "updatedAt": "2025-12-04T20:16:58.073Z", + "postProcessHash": "35da0a54e833e72c20073dea21ec99b636d5fa1cbb86267872ce43f97b749396" } } }, "509f6ede51ab34e339503f91928010a06f04655f9ae29650958c5b6768752931": { "b15b0f51d35014ff5faa6f96548eae990708c240d294f1b231da328da35a7588": { "jp": { - "updatedAt": "2025-12-02T22:57:53.579Z" + "updatedAt": "2025-12-04T20:16:58.069Z", + "postProcessHash": "08f3b3fd5166654ced00f267dcf18b27d2c2c5518cbbe79b70cd53c2dab5e383" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.577Z" + "updatedAt": "2025-12-04T20:16:58.062Z", + "postProcessHash": "c28fbdbb1bf0c0347454a4e11ef49a89e9d9f016741606861e30ff338a94e5b1" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.577Z" + "updatedAt": "2025-12-04T20:16:58.061Z", + "postProcessHash": "ce5a99eaa06c11704e76eb88344ddb9c290c9cb178f98912e571b307de2d3366" } } }, "521e12e9546adbbc16980431e680a5ef21ea7b5b3b9b36afb8a2521aa6b377b6": { "6e547ac81c7773f9acb16ff8e8b7c7388a98727bfc4319c29909249791e4ec09": { "jp": { - "updatedAt": "2025-12-02T22:57:45.307Z" + "updatedAt": "2025-12-04T20:16:58.062Z", + "postProcessHash": "bcec8c15259ddafc2516650cefe07e4f1efc9de175704f4726e193c7b2427d4b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.312Z" + "updatedAt": "2025-12-04T20:16:58.068Z", + "postProcessHash": "0b1fb47774f91e0f883b88007d9d5a54c6aa0752e2fd8c3d0339d65dac622879" }, "zh": 
{ - "updatedAt": "2025-12-02T22:57:45.308Z" + "updatedAt": "2025-12-04T20:16:58.063Z", + "postProcessHash": "835f8b90cb0838ba6d1e417d3836d7aeb21dfdc1b60add158bc1313250a8ebd8" } } }, "543fafeba882f7e65ffa713c52cc503e06a45708cf5d17f53ac0462449accbf7": { "10b537976cc0e91e97a168611992f05f85e4ed7084a47e4cb1a2f920f41380ac": { "jp": { - "updatedAt": "2025-12-02T22:57:45.310Z" + "updatedAt": "2025-12-04T20:16:58.066Z", + "postProcessHash": "b1e0f3c5a244f7f462e899744be740a341dc4252f42690ed77e8aef88de7aa43" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.305Z" + "updatedAt": "2025-12-04T20:16:58.031Z", + "postProcessHash": "1342208607b46f6b69a13c581d60d9d15eb7efc89d4aa8145138a70fe9d71a78" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.308Z" + "updatedAt": "2025-12-04T20:16:58.064Z", + "postProcessHash": "e88767abe65858da7bcb08af0aac5b8e9f5813dce4a0cb7cc103d2dc898c1678" } } }, "700af028231b046bfc9ddd5cfa321b3be5e023aaaee235d4d7d86453223b3fdc": { "5feb43870c53151fcd38f8407b9a14613518ef335101c53aa526f6a23caac7ed": { "jp": { - "updatedAt": "2025-12-02T22:57:45.306Z" + "updatedAt": "2025-12-04T20:16:58.060Z", + "postProcessHash": "f8e4d25d75b7e5a0e5893f8c3fb8a07cab5d40ab707232921cf51f0699699cab" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.312Z" + "updatedAt": "2025-12-04T20:16:58.068Z", + "postProcessHash": "c47830e545bfb7925413922eac5917eb39a8cf3914d42d844312e22e5343cb48" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.309Z" + "updatedAt": "2025-12-04T20:16:58.065Z", + "postProcessHash": "16371d551b326cd24174dfbed2387a7c3489fe338b8ca81bf4eec8eec8e0e6e3" } } }, @@ -8083,728 +9868,896 @@ }, "7daba956dd3abe4b27ba6749d7f47b4f15037a1481f6abae28e2aaeecc9ed552": { "ru": { - "updatedAt": "2025-12-02T22:57:45.304Z" + "updatedAt": "2025-12-04T20:16:58.030Z", + "postProcessHash": "9a0b3fd7fcc1c9f9b16c35aca2669498454ee88f9c4c4505437c9e504fbb9567" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.304Z" + "updatedAt": "2025-12-04T20:16:58.076Z", + "postProcessHash": 
"a93519e8a64af5a036f51a4f38a3353c869c6647d4df02156e3556f7d19c18c9" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.575Z" + "updatedAt": "2025-12-04T20:16:58.030Z", + "postProcessHash": "2c9ab68c76fc07f1126d61171b4d613489af2e2695ce51e0a112ffb743174112" } } }, "7eb439b32a67cfb0aa3624c9184253dc089e7da15d7e10a23f668083dcbbdb63": { "d75745d1b46f0de5b2028a881660f2bd2ddadc7ddc0b54286beaca30e215e44f": { "jp": { - "updatedAt": "2025-12-02T22:57:53.556Z" + "updatedAt": "2025-12-04T20:16:58.019Z", + "postProcessHash": "5e19c09ce46094f71bd81732c6b49436eb0b89d136e27875a8652482680f9ba6" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.323Z" + "updatedAt": "2025-12-04T20:16:58.075Z", + "postProcessHash": "d2435f5d3c49d800adb61ca7a1ec93ef3ef67778b19c263bd203aedadb0bb200" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.324Z" + "updatedAt": "2025-12-04T20:16:58.076Z", + "postProcessHash": "9b1acfeb6d4653a7ba148534f3e94c1c53b096c8512dbe25593c800c776fcedd" } } }, "8cd1456e58e9b0f32764599fe1b3c08b4549cd901e4ebe5d8ff994983ffb18dd": { "be2df94d3de1df0b087713bb38516d1a78f6b4313e8daf18309af45c6beb735b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.316Z" + "updatedAt": "2025-12-04T20:16:58.070Z", + "postProcessHash": "e6a13381b2148ca388ac8aa2188e0a31626258da8bccf0b6c23884a079506c83" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.312Z" + "updatedAt": "2025-12-04T20:16:58.068Z", + "postProcessHash": "a90f69f9d8d96ab09f786c4c053a5b4df519e6591a94fc77552c53ea64f27f33" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.317Z" + "updatedAt": "2025-12-04T20:16:58.071Z", + "postProcessHash": "40b9b55b1c8341fded05333327e49c5e87290f496d0dba0072acd18329d60b19" } } }, "8dbad11f22f37a8dfbe5928d8a4733fffad030ebf6032dcfecd084e9101dba52": { "f92ca8e97f1895ba9a62cdd9bd09b067b16fb3472cb748d5ec26c6d2830bdcc3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.313Z" + "updatedAt": "2025-12-04T20:16:58.069Z", + "postProcessHash": "af208136a0142d889583cad0ab9bfc3f294c5c0161e5b27a169be515780a7d08" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.314Z" + "updatedAt": "2025-12-04T20:16:58.069Z", + "postProcessHash": "8df6816fd33939238cfdaa97087931d871505e3e2d83533c0e08671faacf4a6d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.311Z" + "updatedAt": "2025-12-04T20:16:58.068Z", + "postProcessHash": "e568387740f56acd92ca26c5215b98935493c5693842cadf364a3bee3f1b1960" } } }, "9606738dfb47e926dbb72401f97fb8dcdca15e8e7e4c7c8e0b1de1923f128ebd": { "f38bca2728a4ec18acf3801a37e29bd6ce1663c505004c92a4ef0fb8bcfab83d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.309Z" + "updatedAt": "2025-12-04T20:16:58.065Z", + "postProcessHash": "4b09186d1f8d9a5f23488c38fe7ea91bd3306e6ef6e45f6a3f762a1c3c016e12" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.307Z" + "updatedAt": "2025-12-04T20:16:58.061Z", + "postProcessHash": "3a26cf8789bc4697dd50305855f2b4ae32495e75e401de44158a6cb679a644cb" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.310Z" + "updatedAt": "2025-12-04T20:16:58.066Z", + "postProcessHash": "fec436520e380d231588ed9d132243eaff2e9dc6fdc1187402743648c67fc1dc" } } }, "9879a8ecb21ed941282ca62ac8cd46ca90a2e07bea45df3014931af580b18b1c": { "1cee6eed8b351ab527a9d9c859764f01e20c33109d8796baaf74d0bfe5e7498a": { "zh": { - "updatedAt": "2025-12-02T22:57:45.313Z" + "updatedAt": "2025-12-04T20:16:58.069Z", + "postProcessHash": "ecba7946e8ddc074b59e9cc44adf33305137b709c8cab02c63b15915f53053a1" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.315Z" + "updatedAt": "2025-12-04T20:16:58.070Z", + "postProcessHash": "5dd159fbcb7df23b6bf55661224d6b4f6abb0434c17748ee17ea18b920ba1012" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.314Z" + "updatedAt": "2025-12-04T20:16:58.069Z", + "postProcessHash": "98b25b03f032b612502b0e5491217972c1555c4b5ded4ea67af99ad8a0a1ac4b" } } }, "9c64eb3f63ed2f4471f8cc3e3a16b5d6f44f4c39e15dce1c2c911d1a94e1a018": { "4af09e0c2db5842c3ba3437a58d8012e6ed6971aac46840180567463da4f8ce8": { "jp": { - "updatedAt": "2025-12-02T22:57:53.557Z" + "updatedAt": "2025-12-04T20:16:58.020Z", + "postProcessHash": 
"b1efee257199b0d44577402676f52a237486a0a8f63fb88a327a0b6d81c1c77f" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.580Z" + "updatedAt": "2025-12-04T20:16:58.051Z", + "postProcessHash": "3ecf8383f063105aac10b065b0ddfdc0925233ae219cdf2f2856e25a761d8772" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.302Z" + "updatedAt": "2025-12-04T20:16:58.076Z", + "postProcessHash": "cb0f338207364ed68725cba42473d90df9c2d1579f9da9740b7a5098d2b48170" } } }, "a2fd395ad42270710df1127e0482607ea48ccfe81a62976bedb63b46c8ceb860": { "67cbbbbf1e4f7f85554eebfe9fb09a5afe145f060eefe6aed1c811dfc5891361": { "jp": { - "updatedAt": "2025-12-02T22:57:45.313Z" + "updatedAt": "2025-12-04T20:16:58.069Z", + "postProcessHash": "5e0b6f27d2e083ac2bea40ad713f1632b91086b806ce3f7f63c1acec3909af65" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.315Z" + "updatedAt": "2025-12-04T20:16:58.070Z", + "postProcessHash": "d47df8292f9cdd5ec5f1bbd7287ed48ad6a0b7c5c08b8f4490c2ec72ef9e1c1e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.310Z" + "updatedAt": "2025-12-04T20:16:58.066Z", + "postProcessHash": "1052e2bccf5c044dea5b229e947ee351895be43236a7972e19a5b81966827dc3" } } }, "aff3738ef426bb03f782516f0c962dc0d4f1e8b1e75422276233e8a61abcbbf9": { "62fbdd6dddf79ab74c534883a022557ea5c732ed713d1fc244291ba771204269": { "jp": { - "updatedAt": "2025-12-02T22:57:45.318Z" + "updatedAt": "2025-12-04T20:16:58.071Z", + "postProcessHash": "31e69d90f59f66975f7e5b26ac6be2a647ea9ac5cb93f4db20cbd6d4a993577c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.317Z" + "updatedAt": "2025-12-04T20:16:58.071Z", + "postProcessHash": "99eae06a104dbc0e1a3edc4f78803ea282e49f0de7260ae56a88af0b974163f4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.318Z" + "updatedAt": "2025-12-04T20:16:58.071Z", + "postProcessHash": "07ab2a551fac06cc977f00b5514b6fcc0b56e5bef5271098752c814ccaa5c32d" } } }, "c61ff854a1d65abf94d196412aea9f3db52e099f903e0aec1c8dbda684f0ee4c": { "6725d42405abcd2763e59c5af20b80e294c49a24e5dfded57358991054e676ae": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.311Z" + "updatedAt": "2025-12-04T20:16:58.067Z", + "postProcessHash": "3ea16d49609af1ccb90ffac301a7d59193b74893e47a8f6bb597aad96b8cc098" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.305Z" + "updatedAt": "2025-12-04T20:16:58.031Z", + "postProcessHash": "45875ab36b35dd3602b95009102040ec47db3e875d047ecbe941828774d5965e" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.556Z" + "updatedAt": "2025-12-04T20:16:58.019Z", + "postProcessHash": "fb65b59c212e6140ba990b590fac2be9f7d366144c0196a56809cb4438a952e5" } } }, "c85b0e977e47a5de069cf6bc2a4c3c7c368f637081c6c7a74c2b3f09f541da76": { "6a1875203c3c11a5ddaeaf844592c8aa66c906a5f10d8118af659f3188166f2b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.311Z" + "updatedAt": "2025-12-04T20:16:58.067Z", + "postProcessHash": "079ceddcd87e9eb6c43fe73273e0f8f8e54c336065000eff0aee4d9aa8d40ee7" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.308Z" + "updatedAt": "2025-12-04T20:16:58.063Z", + "postProcessHash": "eba651172ed8c4aa484b9730e6ceb0c4ac8f3985368ba6afb9dcb70ea3679496" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.309Z" + "updatedAt": "2025-12-04T20:16:58.065Z", + "postProcessHash": "f818b480dd0fd432847676d43fb196d9cc11892c10636f2f039c0b387e7d96b4" } } }, "cfcb155375b8c7dce0cd7951038c468106245eabdd22e87ceb685a86ad5787b1": { "4f1c6f9f3c784ede710c284000e57bbb2570ca34ccf377e55bb0aa62d9575fb3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.323Z" + "updatedAt": "2025-12-04T20:16:58.074Z", + "postProcessHash": "dc9df8eacb9928451af33cbeff02eba788a99459d87f27b157411543c445bbd9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.319Z" + "updatedAt": "2025-12-04T20:16:58.072Z", + "postProcessHash": "ef74d1c40db1abdbd7185fd8f66fe6f09e739ea9fc19b5280d2b269e2eb32eb6" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.316Z" + "updatedAt": "2025-12-04T20:16:58.070Z", + "postProcessHash": "20a6cc625aa0d0f78ac7e09772aa3b971de78f687604669663ea14fd753f59bb" } } }, "e771f00ee03a6b8ac3a2fe4466ecae0a0ef5fa4a1c06261040efd4c71c7df8ca": { 
"afaf81983280a59e7aa1584371969108a9f08bbf39abdc8489d3da2cc68c29c7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.319Z" + "updatedAt": "2025-12-04T20:16:58.072Z", + "postProcessHash": "50aeede48cd2f83b32ec8cc46bdaefb9a6d17031c86ad549e6efbdc6cc773f67" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.321Z" + "updatedAt": "2025-12-04T20:16:58.073Z", + "postProcessHash": "4a12e10c7b455cdec2309c15fb1bc7375912995353a4a7280f4c2c49d0cd549f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.320Z" + "updatedAt": "2025-12-04T20:16:58.072Z", + "postProcessHash": "6da014082767c05649bed6d6a7cf3221c056a07db654de746b31c515ca4b94d8" } } }, "003cc65643f9d9786893e0bde4fee0fde5fc25de83cb44c9b184c9f67f682330": { "7bfbb7c49650987bfda71358fcdb6c75e10f3775e57dd80dfa998cd9df1e42b1": { "ru": { - "updatedAt": "2025-12-02T22:57:28.981Z" + "updatedAt": "2025-12-04T20:16:58.088Z", + "postProcessHash": "6319b6ad03fe6fc1de2c6cae1b2f9ce230f80a1703d550090e5e9440df1d14fc" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.987Z" + "updatedAt": "2025-12-04T20:16:58.095Z", + "postProcessHash": "ad7a13b5fea2b7dc81395cb6e5a1c10e1ee1d93300ded73ae933a549867f5b5d" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.303Z" + "updatedAt": "2025-12-04T20:16:58.059Z", + "postProcessHash": "d4f81ca0d2809d049dd909bdc3d7f6f58185d07273a3cb439e06c18358203133" } } }, "0bf287012c3e4a1823f4a6d9af97b4ff2ebf50382b88f6e446f2d2462ceff028": { "6fc59c979e71f5ef7d01dffb85d9c0d52f0f7d9af3f0d2364ea573c821dfb4a9": { "jp": { - "updatedAt": "2025-12-02T22:57:45.332Z" + "updatedAt": "2025-12-04T20:16:58.065Z", + "postProcessHash": "bc4fdbefc26a99dd79c21dbc43d173dd2fb950b6849961eea452e1bab8c55864" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.980Z" + "updatedAt": "2025-12-04T20:16:58.087Z", + "postProcessHash": "c535bd7ce2f0768a16c68ba0302bb1918b240dac57b0664abbfaed73e77dea5a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.333Z" + "updatedAt": "2025-12-04T20:16:58.066Z", + "postProcessHash": 
"077617f331d3b34a1cbc3a0c47a898b3b5ef1c917e727967fc06efc8ac54e7d5" } } }, "12e31ec44b3dcf65828805450d562ba22004284c24e16fe91cc2a8306183626b": { "9894639ef964614d3ef8027f22a7deb78a5ccec89d41e007f288e7db21591494": { "jp": { - "updatedAt": "2025-12-02T22:57:28.981Z" + "updatedAt": "2025-12-04T20:16:58.088Z", + "postProcessHash": "e6a5eb4ea058b296065928822b39c5210a907eab18fe157bc575f9f1eb0ee4f3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.986Z" + "updatedAt": "2025-12-04T20:16:58.093Z", + "postProcessHash": "cd586823ccb2c76e1e11e4f4a4c11ab8f99a782d9b24aceca3e9c9c2883471dc" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.333Z" + "updatedAt": "2025-12-04T20:16:58.067Z", + "postProcessHash": "7a0d62fca82651e948e25bf2e95024356b0853bd1ca1d04b82fe4bb45c8dea75" } } }, "1a8c3dc523efbedd8ceca5a1bf0b315be2ac1dcf90f08530d461bd213eef4f7c": { "da9e17112c0ec79d1fa82ab5f0ca3db1c53729e70e3fd6a2c4370c03691b292c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.983Z" + "updatedAt": "2025-12-04T20:16:58.091Z", + "postProcessHash": "be2cf650b060caab7e111393e3d4c67f740eac95acb894ec68da295be7729860" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.984Z" + "updatedAt": "2025-12-04T20:16:58.092Z", + "postProcessHash": "4c9756137c70e9d3bde68d61368737490b6052ae5e164910e2dc7618f248f152" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.980Z" + "updatedAt": "2025-12-04T20:16:58.087Z", + "postProcessHash": "db749271cc51a1b38c2135f134a9ba31a9315b1fddc4d6bc093d41e73c00bc5a" } } }, "227d51f47fc957dea766831ea43b73c58c9e450c7aadba923fb55f27b830acd0": { "88ce0d6c08629f221dcfe109d7e8a09898443472a62411ee8e84cd0cd4e77851": { "jp": { - "updatedAt": "2025-12-02T22:57:28.977Z" + "updatedAt": "2025-12-04T20:16:58.084Z", + "postProcessHash": "210ca9920c7cf1c3454375d5f2acb7007bd9d7cc8f46673bf52bf2f654d0969d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.331Z" + "updatedAt": "2025-12-04T20:16:58.064Z", + "postProcessHash": "edb3ed7bcd4edc610d8ba351c0c070d4de768a5f26215694ae235537f785c42e" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.976Z" + "updatedAt": "2025-12-04T20:16:58.067Z", + "postProcessHash": "e1ad453cfc16346cc84f5cc867eb44c28446d4d5f99134fe399a51b948c86589" } } }, "2788d1737d33b8bd86e0aa8f0dbd2c1bed226411e50160a1554ab9361f7532d2": { "d0cbc85c85d4d71c67952d11b3d238be8fc75b6ea16860b09935bd9f96add653": { "jp": { - "updatedAt": "2025-12-02T22:57:45.331Z" + "updatedAt": "2025-12-04T20:16:58.064Z", + "postProcessHash": "410b829158f080e958cb44d2ab071c78a178d5ed71a28b2c2e6b7e826b2c4ac7" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.306Z" + "updatedAt": "2025-12-04T20:16:58.060Z", + "postProcessHash": "b79e51183c68f6353af8a4abfbc7692c5a0bf15b114091c0f8d5212f63fd913f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.331Z" + "updatedAt": "2025-12-04T20:16:58.064Z", + "postProcessHash": "ab776a5478094f30b8399dc9b6f5e7cafef5cc020e8dbe926b82ddacd806e887" } } }, "3b065a4f3fc6b25a5184da43b7b0221b5aeccf7b81e1255bd8a6d2a6b86a8ae7": { "c88ae622109bfb3777e96a49c9bfa5f9889a8187d65d687676ef5de1bf070514": { "jp": { - "updatedAt": "2025-12-02T22:57:45.318Z" + "updatedAt": "2025-12-04T20:16:58.091Z", + "postProcessHash": "af60e7ca25646187c01c1623dcd5ff94f3decabf3e1b1483d3a0bd342bb5f798" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.321Z" + "updatedAt": "2025-12-04T20:16:58.094Z", + "postProcessHash": "1b98331b276ab78f72089f9ca8816d69922d15cc548babbe01deaea9e03eaf62" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.317Z" + "updatedAt": "2025-12-04T20:16:58.071Z", + "postProcessHash": "9fff5b47c2022b7492ec05827b4174ce97a006b152d4302ac171c84a3ad7e5bc" } } }, "3ffea18e4142d273a23435211934d60695e426723e88ea42a887c753673da12c": { "9135666001d3b0d949ff7db424b18a4b655d4b8eebcafa75a9e472d040fbb808": { "jp": { - "updatedAt": "2025-12-02T22:57:28.982Z" + "updatedAt": "2025-12-04T20:16:58.090Z", + "postProcessHash": "d0a6fd2421df75f192fe8cac307a2fbe88c17c98a49074b9840bbdb6a0f190e8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.984Z" + "updatedAt": "2025-12-04T20:16:58.092Z", + "postProcessHash": 
"771ad81d5fdb1d4be89e9cb93857c75adc6bce7bab81bb2166502fe84b062131" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.982Z" + "updatedAt": "2025-12-04T20:16:58.090Z", + "postProcessHash": "936b1a04466f567d022b75e6dd3f2be6399522bdfb0514e876715d3dbd6260ab" } } }, "6ae9dde7cd947f044ac422d9819b807221ad5825d4f6859ff2c72f3c22d7331f": { "f17b1d4769177c8b7b3260aff487e581de4450f37dd2fbeff3e0a899b7559706": { "jp": { - "updatedAt": "2025-12-02T22:57:45.330Z" + "updatedAt": "2025-12-04T20:16:58.063Z", + "postProcessHash": "f53602bdf01194b0c0485bf07724bf623a2aa195c0212d517219a41dd53c3e6a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.976Z" + "updatedAt": "2025-12-04T20:16:58.084Z", + "postProcessHash": "0ab37a0f2cb2506503cfe78e1a149d65353c1f7509ad424aab3608d8179a7183" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.977Z" + "updatedAt": "2025-12-04T20:16:58.085Z", + "postProcessHash": "f10e507d8c198886c980b6cab91e5fa3f7026c745f76ea3035c506730cd4c743" } } }, "95219024ef9522a55be4e6513f75defbb49883b4a5e32a05d187bbbcc9f53c16": { "069a5c20a99f64397b1d13060b06470148c26b5072a36b8e1b16d746b0e4ad7f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.978Z" + "updatedAt": "2025-12-04T20:16:58.085Z", + "postProcessHash": "83aeb74e87cf7735240ec3923fe59b0d713f4a6bd5f83a383312b05d797a3f59" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.302Z" + "updatedAt": "2025-12-04T20:16:58.059Z", + "postProcessHash": "117bff0ee84e502cea527bbc36b3ffada0d890cbe782e76cf6a18b51c8f47626" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.330Z" + "updatedAt": "2025-12-04T20:16:58.063Z", + "postProcessHash": "43b2db3a2028d93fd08d6a6be0157b6bf696438a5bbcbbb3960d23dee30472cc" } } }, "9829e6d504f03f89cf3f9782f9238f3dec6efd6d3810dd672ec15bd67e65f810": { "e59e26adb9705f2e6456ed1518f0aefb7d0cf0e3b13b040fa78b4a590a1181c2": { "jp": { - "updatedAt": "2025-12-02T22:57:45.329Z" + "updatedAt": "2025-12-04T20:16:58.062Z", + "postProcessHash": "fce1bd0ff2f97509b56a72c10ede4bfd5ff12655c9c3066043893fb5318d3ef5" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.329Z" + "updatedAt": "2025-12-04T20:16:58.062Z", + "postProcessHash": "67afb50837504ef4df2562e835effad24359a67932b802993c8d6f228eeced15" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.330Z" + "updatedAt": "2025-12-04T20:16:58.063Z", + "postProcessHash": "8042cbfaa003f6d43a3043eef0baf04c4f6c2d1c7e4afa796937d29415c24cad" } } }, "9f51461ed5499a8b1f450f23f773f55200d3922c76578fac080589c6d4bdb7c9": { "eaded5b9cc370f7c0893d58a270227dd93fe67bc1568a6b674bdee429e92ac10": { "jp": { - "updatedAt": "2025-12-02T22:57:28.977Z" + "updatedAt": "2025-12-04T20:16:58.085Z", + "postProcessHash": "697fd5fcf9e4abf4fe7745a76cc11ea810fcf8a135df2e6c873bf7f2ad787bd5" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.332Z" + "updatedAt": "2025-12-04T20:16:58.066Z", + "postProcessHash": "58364e384ab60314a9654f75f5ae23672245de32f60bed8a146bdb4ddde8af46" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.981Z" + "updatedAt": "2025-12-04T20:16:58.089Z", + "postProcessHash": "63e586e5ea1a612afc7f696442cc4055299743fd74bd782205eb53d735eba5a2" } } }, "a4186d2152fae14c248c1297810d8ae84b17536d8f68513f586c1e2d378d79fa": { "da62d5ba1b9b52d86fdf52ef9a5a5fce77010670db44844630fe457d0a64dfda": { "jp": { - "updatedAt": "2025-12-02T22:57:45.332Z" + "updatedAt": "2025-12-04T20:16:58.065Z", + "postProcessHash": "0b0cb3ecf7e2562e72bc77e8f0c53f0ff60121ee4cb20e71d01f9ee9261c6e35" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.331Z" + "updatedAt": "2025-12-04T20:16:58.065Z", + "postProcessHash": "7a3248ab00069cbfd5bbedb95a77b5143d7c6fde20db857c5d5c0a7cdccc76ce" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.979Z" + "updatedAt": "2025-12-04T20:16:58.086Z", + "postProcessHash": "e8863ce31e2a17f66fa0012f05e74be5b93f4459cbea29e80835979e7f16b4ae" } } }, "a98f06f78a3ec0f29bb4f078dbb0c37f77d01618cebf2733ff11b32c497f7b24": { "a9a69fd4a89753f57c102accc6affd4752db865e189ae4cc4e551815c20e9964": { "jp": { - "updatedAt": "2025-12-02T22:57:45.328Z" + "updatedAt": "2025-12-04T20:16:58.061Z", + "postProcessHash": 
"671e35f78a99dc438e05fe68d155520228e0506ef958ccbca87bcf938b7010cc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.329Z" + "updatedAt": "2025-12-04T20:16:58.062Z", + "postProcessHash": "5060f77334f3d2e2d28e5ff49d6a1def93a4474b2cff440cbb8f2ae4d4342b92" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.327Z" + "updatedAt": "2025-12-04T20:16:58.060Z", + "postProcessHash": "a3490f5c8afb9ce38b9a84b74518f84d3f245861ebdc938db10d365918dcbac7" } } }, "b0f8d850504855a8481784c04ab4b0c0a35453e0ccfb3fd1251528b4f77a8b8f": { "0dbab51aa36f5b479c39c4f615a8a9b4493aeae6b1e482a4ccbb9064901d7f3b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.986Z" + "updatedAt": "2025-12-04T20:16:58.094Z", + "postProcessHash": "49f4100c782aa90982be9b97284171abe31a650b5a439d5990d426ad6398ba55" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.983Z" + "updatedAt": "2025-12-04T20:16:58.091Z", + "postProcessHash": "b8de354afdfdb2f9c2f6e921b93f375ca467e6f109d53efbd6cb87ff2d73ec7f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.979Z" + "updatedAt": "2025-12-04T20:16:58.087Z", + "postProcessHash": "d36e53b3e5651f5add8f28e1e52d5b074dc996451404b04f3e2d9ccd39470c24" } } }, "b7f8c2c6c3c0d8cae21834a515d86c9ba6864e0aa9c968e945adf28aff1bd428": { "bbcd7ca2f8d136d5cdb1c28f0c53253dd6f2040d23646bfbb062d85161da4e08": { "jp": { - "updatedAt": "2025-12-02T22:57:28.988Z" + "updatedAt": "2025-12-04T20:16:58.096Z", + "postProcessHash": "d989d3d6c01e541294a196b85a3f7ab92bcabbb8db5f8d256aa12fea1ab39362" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.984Z" + "updatedAt": "2025-12-04T20:16:58.092Z", + "postProcessHash": "4d9453cc58c5b8f6abe5f57961e924e2c1b332a0b9abc1be4cfaff73620ba4ec" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.983Z" + "updatedAt": "2025-12-04T20:16:58.091Z", + "postProcessHash": "a88db46b54f3c6c94b382a1058b76e8357f2a0f1b7f5b7e459971b8bf955665d" } } }, "bc18991124499a7f66617eb5b243033498a2376e769bee9084fac4cef0b7c045": { "d62f4767bf6ec9661415c60e24e41a90ba047d383b9bfbb29a327253f604da58": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.327Z" + "updatedAt": "2025-12-04T20:16:58.061Z", + "postProcessHash": "d0d57e1aaeb9ff28446404199f19b0fad437776a6db1e3172c0e0cbd6deb8d90" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.328Z" + "updatedAt": "2025-12-04T20:16:58.062Z", + "postProcessHash": "6ef2b9a52cc970612ed8d1b8acdbe588b78f11e82fd29b8d9dba592b68200d7d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.305Z" + "updatedAt": "2025-12-04T20:16:58.060Z", + "postProcessHash": "55599ba9ae2ab045d4116daeab5255f98ce0b50e17374f83bc7a8ce0e502a41d" } } }, "c23421b71aced0ac75a1f6d4e5a8b8ae239e457c02367e880b6d1a4ff7277e3a": { "4719e0b0aa1afb513dbea43054775d5c3e22f6638707c72a91d88a4237b487bb": { "jp": { - "updatedAt": "2025-12-02T22:57:28.988Z" + "updatedAt": "2025-12-04T20:16:58.096Z", + "postProcessHash": "2e9b2544e6ae3b4162b87d3e9dfab6dd0f36449006f389c40c189fe279786d11" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.988Z" + "updatedAt": "2025-12-04T20:16:58.096Z", + "postProcessHash": "5db18982cf8e39c8e92c7fdbca028589601da242ea3ee48fbc943168a9e11937" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.985Z" + "updatedAt": "2025-12-04T20:16:58.092Z", + "postProcessHash": "54dccd61e53425e0966b9f324b6677689c261f2af2b60bf92cf24062e5d364be" } } }, "c621962c4e9a6c1f2dcb4ec8f98b33faa0d771e9aac97195014471b0f353099e": { "8e462b2a96c9f45baf5c523e8a97e3ffac3676c40724d42a9c5109d5413a54bd": { "jp": { - "updatedAt": "2025-12-02T22:57:28.982Z" + "updatedAt": "2025-12-04T20:16:58.090Z", + "postProcessHash": "9321e8d5b126599076342cb82a51b96316248fbd2430bd337bda7ce2febb0865" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.978Z" + "updatedAt": "2025-12-04T20:16:58.086Z", + "postProcessHash": "78fcdfe39d68856ad3c231d910026e1b397e36ba3cd125af42a29f145c9465e5" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.980Z" + "updatedAt": "2025-12-04T20:16:58.088Z", + "postProcessHash": "808cd4d713885f6824145b9f9aa1a880d2af3fe9d3c82139062ef6407d099db2" } } }, "c6addfcf4c2f183d0885f78b6bee455eb175ed28099b76df7d58a87ff79c231e": { 
"0bdad070e3c15637e1941843f067e2a8ab54f34932a6197c4b57662a1ab08586": { "jp": { - "updatedAt": "2025-12-02T22:57:45.329Z" + "updatedAt": "2025-12-04T20:16:58.063Z", + "postProcessHash": "92fe330183d95bba737dba89b09b87aced21260d5f4713f06e26716dfe893ce9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.333Z" + "updatedAt": "2025-12-04T20:16:58.067Z", + "postProcessHash": "29183797f829a317a22669a5946df588a6f32339cb201a51cd4824452f351131" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.328Z" + "updatedAt": "2025-12-04T20:16:58.061Z", + "postProcessHash": "9e29e451853b78cfb9ab34c5ce1655c3b0b4da64b68b7f34681fd1e4b698896a" } } }, "d0a117042cd54d2d897e9ff128bb30722802674d738351bc727ad6a48d97c13a": { "ef198e4984503045b3061df3df5083cc081e20ea251352bf6175ea0983742b28": { "jp": { - "updatedAt": "2025-12-02T22:57:45.302Z" + "updatedAt": "2025-12-04T20:16:58.059Z", + "postProcessHash": "eb99fc2aaf3c2437d7619366a6c0f8e5c8b8ba01b1e93892facd669f2bf3aa89" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.978Z" + "updatedAt": "2025-12-04T20:16:58.086Z", + "postProcessHash": "869d4039ca997fd59b3d6dcdabcebac46d42294e6c0464c8c5b0947c3b4d12b3" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.314Z" + "updatedAt": "2025-12-04T20:16:58.087Z", + "postProcessHash": "5a5a863e456eeff7ec2fe80fe83517f9df6cd934c37abce6ea958733e1e11ad6" } } }, "216d22e459b5339d73b5e5f5efd10ba0d324035b56ffd8c09aca8ff6053e5be7": { "4347cf6fe8d3643c0bc778bc6d6e1a2d7728b22b55e566913fa8326c720d6e54": { "jp": { - "updatedAt": "2025-12-02T22:57:29.005Z" + "updatedAt": "2025-12-04T20:16:58.086Z", + "postProcessHash": "2ac74d44b94c4878915fa5197b56a0d32fe8336e76db34b30344f87ceb0d5c82" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.326Z" + "updatedAt": "2025-12-04T20:16:58.058Z", + "postProcessHash": "1492cfc48f29f44c63f19a53c046d4436da1fd40325e05269b8817dac3f959f9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.325Z" + "updatedAt": "2025-12-04T20:16:58.057Z", + "postProcessHash": 
"04cdea08801d2fc8faf5a3b9a23213bb192432ed7814cb477c9d4b6099489f50" } } }, "2f2c64962247267011454aad885684dd07f5230293d18c996004e9a086a48a9e": { "de25513083b27abcf3a1ed0793d26139ab348f9ddbadba05a87914373d86d034": { "jp": { - "updatedAt": "2025-12-02T22:57:29.000Z" + "updatedAt": "2025-12-04T20:16:58.082Z", + "postProcessHash": "f0919108f1e3444d7ad0ed2ce7f757fa4fe95aeab95e3e41b77ff6afcb737ad4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.997Z" + "updatedAt": "2025-12-04T20:16:58.081Z", + "postProcessHash": "2ef3e3402969643755291c71765ff496ffd60a25cbfd8124c5d53d85b2ca4578" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.001Z" + "updatedAt": "2025-12-04T20:16:58.083Z", + "postProcessHash": "1f765a10bdca6b8f3cfe9a764e7705aa0bb2f56074962111e2022c0be547c62b" } } }, "3b502bb7173f6131431ad8322b576ef99ef5e91d3612beb68e0f4ce3b6053bf9": { "c7797285e4835ab50d34203593f5308bddaddec5d13f14f4f6d7be4be2239eb6": { "jp": { - "updatedAt": "2025-12-02T22:57:28.999Z" + "updatedAt": "2025-12-04T20:16:58.082Z", + "postProcessHash": "6a403b8211deea85215986079f966f71866a9413b05528656c732fb75ecef771" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.999Z" + "updatedAt": "2025-12-04T20:16:58.082Z", + "postProcessHash": "3244bce27aba0c03411916d10837079230501af1c5f5d7f87675136726e364c0" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.005Z" + "updatedAt": "2025-12-04T20:16:58.086Z", + "postProcessHash": "23356880c8bbeccd027a2e5f615f49006000e6679f3d7e343c36459d4829c4ae" } } }, "3c55f6319b00bb5e571612e6f740d049975d5c3de127e0de80d0f34889dd8b12": { "08f719dba95186bb05f8277899abf3443e7cc9fca51a32ba8727dadf82c77879": { "jp": { - "updatedAt": "2025-12-02T22:57:28.993Z" + "updatedAt": "2025-12-04T20:16:58.079Z", + "postProcessHash": "31d5018c03e2275aaf7b62bc09999ecebbcb3faa1d61648f09967dacab8bee31" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.994Z" + "updatedAt": "2025-12-04T20:16:58.079Z", + "postProcessHash": "f7f3315104dbfcc334fe3f80bcef41346b32d77c45ef19876b65838ff944147e" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.994Z" + "updatedAt": "2025-12-04T20:16:58.079Z", + "postProcessHash": "dc897f8f56e725e54f16ff04fbe4e56fb2a8ae345118b1e1dde9ce7baa26e649" } } }, "40ddf7122cbd5708445d09282a9aaaa01b51f15847138bd583939c6bee63c5a8": { "1efde3a11aa977a804768bd9d231b648a793e9638453375585e0f62486abe9f9": { "jp": { - "updatedAt": "2025-12-02T22:57:28.995Z" + "updatedAt": "2025-12-04T20:16:58.080Z", + "postProcessHash": "8461b209658fdc5e6cfd3c58634d875c62e37217cbc49c5dbcc876f2f97aa586" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.996Z" + "updatedAt": "2025-12-04T20:16:58.081Z", + "postProcessHash": "b8fb28e2e7781fb925ee3ae32dc4d36681b48e4c9dc4f0b4e8e6911e14fcc0d3" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.002Z" + "updatedAt": "2025-12-04T20:16:58.084Z", + "postProcessHash": "d063aa4e649762490a9151ae3b8a695b234ee95aa16973848dccfab3a5da7555" } } }, "44e6428941aba89bd0fd45e401717504047bf2582288d528651664c68b5860ef": { "3dfaa8d64c4eec1438f9e2fdcdf95885e290daa7a1d6f9280e7587fbde343224": { "jp": { - "updatedAt": "2025-12-02T22:57:29.007Z" + "updatedAt": "2025-12-04T20:16:58.089Z", + "postProcessHash": "34cf5b0a58eec96dc3a359ea62f542c67c24971e366f6603f7911744c001a748" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.010Z" + "updatedAt": "2025-12-04T20:16:58.093Z", + "postProcessHash": "0799b2eaca1f6b687dab2f5ba247439f8cdf9bb153deb0f34d866dd8d344ecf5" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.011Z" + "updatedAt": "2025-12-04T20:16:58.093Z", + "postProcessHash": "ed66176600379d1a34c24753487c81f771ee346d411907e7d97ca04f672b8fe5" } } }, "5034a9cab8d174bbba4fcce036fa29d5dc6bfa365274ed3cc44a0e5ff13c4738": { "c73720aff6e3013b19ca923ea6650c5399c7cce59157340fcac3ecb68f255f4b": { "jp": { - "updatedAt": "2025-12-02T22:57:29.003Z" + "updatedAt": "2025-12-04T20:16:58.085Z", + "postProcessHash": "d6488e7f0d32e03a063c00d418aa05c3f1bbab9eb3e2a1e094e4c1360e23c7f4" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.325Z" + "updatedAt": "2025-12-04T20:16:58.058Z", + "postProcessHash": 
"40711d5cb0c75022c52ee21f5bb3cbac97aa8888b4275ef4a7a8a443bd576af5" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.009Z" + "updatedAt": "2025-12-04T20:16:58.090Z", + "postProcessHash": "e9401bcbf58e98e101504336b0f707148971d64bc196b40c23fd351a06a83c83" } } }, "541698242c872ea29127fc2dfe64cbea8b6e3ad3471aea2ac19011a37d71e754": { "08b7c30758e175cbf2a1d09a301193f88562d6e7ab18b078ab6c4b805b81620d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.987Z" + "updatedAt": "2025-12-04T20:16:58.096Z", + "postProcessHash": "0c8e1918cdfbeeb7bb96fc9ec72c55b9365fe6cc2ce90d0ccff456b97a5d8f5e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.987Z" + "updatedAt": "2025-12-04T20:16:58.094Z", + "postProcessHash": "c251bf9a5d0942e93aab0752a9636317fd8ac7e7aff5186ab456711d6c234df7" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.986Z" + "updatedAt": "2025-12-04T20:16:58.094Z", + "postProcessHash": "77a44a9aeb60f1efd9b28186e8814226531ae051cd02af60b36fe4fa92e7a0a5" } } }, "67199cb0b07db7b73e9d48c3856e7a80fa64a401ac9356f38dd56f0ef6af4f87": { "2a193532f966a6fea5015f9758bc034a7cbdfaf8b91c7431fdbc29b0d020b9e8": { "jp": { - "updatedAt": "2025-12-02T22:57:29.002Z" + "updatedAt": "2025-12-04T20:16:58.084Z", + "postProcessHash": "2c495f57690460e9a5e402bda07d617e9616499622c2e557f1afd6f2e1af85f0" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.009Z" + "updatedAt": "2025-12-04T20:16:58.090Z", + "postProcessHash": "5247e92de187ae72c32e6cba5e98bf41935bcaec99447f61fd4e0ca141e398b5" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.011Z" + "updatedAt": "2025-12-04T20:16:58.094Z", + "postProcessHash": "5b79703980fdc54b387535e9caf3cdc82a99b89a0655294124fbefb9d4304c75" } } }, "74f8cb35854e4cf151ab34a6587a3b0c76868a99d06b7a1b7eb88bfdd101dcc2": { "9431057902d3a29dbfbbd44c8cc88c4dd2b703331d32f31fe7eab5675d5d047c": { "jp": { - "updatedAt": "2025-12-02T22:57:29.000Z" + "updatedAt": "2025-12-04T20:16:58.083Z", + "postProcessHash": "68c4e00c2a17ac04b349b275f9c3e5e89ddb956660222f0aaa430dc8116e7b7e" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.999Z" + "updatedAt": "2025-12-04T20:16:58.082Z", + "postProcessHash": "5e2fb4643f691329d1883a61e36d1ad04fffc2b471bb06756783024d9a46e21d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.998Z" + "updatedAt": "2025-12-04T20:16:58.082Z", + "postProcessHash": "02292a0bab75803bbf04e38bb3ea652a26f8dc86844234a2fa689baff86812b7" } } }, "7e0dc4543c81b33bb19b9b0222c533c95884214b5877d7ed6c08d6101f73935f": { "4d2ea53c6c8b773cda0b23778f9e67b35379e9de8b35e7412e470060aa209fbe": { "jp": { - "updatedAt": "2025-12-02T22:57:28.998Z" + "updatedAt": "2025-12-04T20:16:58.081Z", + "postProcessHash": "2fa08315740c9fdab4ff06e9843b6e1ed2d56cbf2d140128c132f7ebf62020aa" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.990Z" + "updatedAt": "2025-12-04T20:16:58.058Z", + "postProcessHash": "aa4d2aeebb06a0cfbb399863fa41a00499255fcbba17dad58cc18320257f038b" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.001Z" + "updatedAt": "2025-12-04T20:16:58.083Z", + "postProcessHash": "9140dcdbb081da6bad53efc2d3289135c892a0471617ec145c9a2147ca912edc" } } }, "885b5d789ebf32a2edb92bc498ab9f2e881afed86ef284b4892ee15109bb1321": { "b7053e1130cf6901ba2d93962cfe71528955b54a3427effb3f8dd0cb63a10854": { "jp": { - "updatedAt": "2025-12-02T22:57:28.996Z" + "updatedAt": "2025-12-04T20:16:58.080Z", + "postProcessHash": "15ffa66b2ea41029d262aae8f3241ecbc70b838af51f0bfc6980e18f723df370" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.997Z" + "updatedAt": "2025-12-04T20:16:58.081Z", + "postProcessHash": "bb295f2965f3d1bbd769cfc8c63bd1b07159fc6249f9086d99ddaef4b7a27dbe" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.996Z" + "updatedAt": "2025-12-04T20:16:58.080Z", + "postProcessHash": "16db44ac9565c7051e60ccbc99ff25a35000445d34eca425f439abbf1f813855" } } }, "8c4025d67d4f83f1787b2935a24ca235fcca456bc7505ac9ac478e5351ad8297": { "3cdb2c61028a51f468d7e958cbdb00bd91b81a31123aacd0a6e4c0f676f159fc": { "jp": { - "updatedAt": "2025-12-02T22:57:28.998Z" + "updatedAt": "2025-12-04T20:16:58.081Z", + "postProcessHash": 
"cd47e67e319a18008b96a97a358120935647dc6fb1d7cc30b8052210a70caba8" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.326Z" + "updatedAt": "2025-12-04T20:16:58.058Z", + "postProcessHash": "839e89f91a549fe12739ee55617279b207012391fbf6400959e8556094d2bbc1" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.002Z" + "updatedAt": "2025-12-04T20:16:58.084Z", + "postProcessHash": "0cc9fc706459c43dd39fd3c837ffb186422805761b5cf1a823c620d46906255a" } } }, "9f2ad018997a5b2a59f6bb176b61937bfa9cd7e81143b53306fe58e2c41400f8": { "79e16644830172d488a3acf805a5b9fe0f8b79fdbba1afe39d5495d561479ee9": { "jp": { - "updatedAt": "2025-12-02T22:57:29.012Z" + "updatedAt": "2025-12-04T20:16:58.095Z", + "postProcessHash": "61c4a533910b44e5211e87f658690e0f9615262c412803ae6e8f11cd35c05dfa" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.005Z" + "updatedAt": "2025-12-04T20:16:58.087Z", + "postProcessHash": "02ed596efd2c934ce511e920505a8c77dab2ecb8ac37bd3a55bfaa54ab683e11" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.991Z" + "updatedAt": "2025-12-04T20:16:58.059Z", + "postProcessHash": "56bbe4dedfee5bf881c36764a73f419181828f296e4e3ab9fff48accaca389c3" } } }, "b2c15bf0de452ad7ec7d6015900f40d41f66f8080e473ae5d92a9e398fdedca0": { "a7e5cb05a26913f4d5d6b8e23e33097010b909a91fbc9015096bd23deb3ef019": { "jp": { - "updatedAt": "2025-12-02T22:57:29.008Z" + "updatedAt": "2025-12-04T20:16:58.089Z", + "postProcessHash": "4d6f830f8cc608345e883dabc3b9ffcbf83783129c9a4186eb528ef00993d543" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.006Z" + "updatedAt": "2025-12-04T20:16:58.088Z", + "postProcessHash": "674b5a7c921a94ca25ace6c796accb670e67b1c58939f1ed1fb63cc549307673" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.004Z" + "updatedAt": "2025-12-04T20:16:58.086Z", + "postProcessHash": "fd9481ebdf85a6ea0ab70acacd1e29c0867476d8acdf2b0d9c6265ffedce962a" } } }, "bda6feaa2f751d257d0e9bb7488f846a2998fca7dedddf3b4830683849ba2b58": { "2afea7889acf8ea5044a0d33842f100ab65c6cb7f1df295cd1f21f7e129776fe": { "jp": { - "updatedAt": 
"2025-12-02T22:57:29.001Z" + "updatedAt": "2025-12-04T20:16:58.083Z", + "postProcessHash": "88d078238d5b7aff489f9e50085b246d117036ece9d257236803fd105054824d" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.000Z" + "updatedAt": "2025-12-04T20:16:58.083Z", + "postProcessHash": "a1f43f0a18a770089269c648567ab706cf67ad4878f91059f5292901cdca1168" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.992Z" + "updatedAt": "2025-12-04T20:16:58.060Z", + "postProcessHash": "5721a55f87f25b2447cb3c77858d581b8c8c0c129010eb19c34f2ba98ff45ac2" } } }, "d032d67a58a6623fab2b1b66938ad265d806211c7e670b710006fa88c0fa60d9": { "4c0a1b6590854c3a88fa162f08d4611049c85780870affbf3d49f61a3e412fae": { "jp": { - "updatedAt": "2025-12-02T22:57:28.995Z" + "updatedAt": "2025-12-04T20:16:58.080Z", + "postProcessHash": "7a85009e38145aa06fe6905c9feea101a61b6677eb16a6a975c85e3fb0ae9953" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.995Z" + "updatedAt": "2025-12-04T20:16:58.080Z", + "postProcessHash": "db5e64d44717d31dce9e9d326febfe86efce17d964abd29e9fa99015eb1dc537" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.994Z" + "updatedAt": "2025-12-04T20:16:58.079Z", + "postProcessHash": "be3712768ae0265d3da8e2e895ad437a621e22da81912ec673c7c592e53616eb" } } }, "db88afafa5d929b34cdf925862a67b324309a0e6c76686a5ccfde2ba82d5226c": { "e4a92a198d90a6cd53c04928fa4fb9c381359603f0c986e9d59a15fa39407592": { "jp": { - "updatedAt": "2025-12-02T22:57:29.010Z" + "updatedAt": "2025-12-04T20:16:58.091Z", + "postProcessHash": "b0344a5a18422cb6fcccb70c17fb41d166ff7f4557bcf4656b72eff0204afed6" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.008Z" + "updatedAt": "2025-12-04T20:16:58.089Z", + "postProcessHash": "973fd83765aa78bddff81e0288de805b8bfb04eb00118968618b7d81d7f23fed" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.007Z" + "updatedAt": "2025-12-04T20:16:58.088Z", + "postProcessHash": "753986fe52248966cdf1c9db66a1aa8b8aff34fb98208f8e105bc7a497790fa1" } } }, "e18abf4c56dbe146fa998f9070622acba484b5011490469cab0c1e16bc156647": { 
"58bb991322769280e6d10291b76e02c6a2e7231dc177a9f01f4c729dbe75cc7e": { "jp": { - "updatedAt": "2025-12-02T22:57:29.013Z" + "updatedAt": "2025-12-04T20:16:58.095Z", + "postProcessHash": "4113e64674b7af02134d6a45910ba1db2b383153d16659a0875f9ed7e27f41ab" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.011Z" + "updatedAt": "2025-12-04T20:16:58.093Z", + "postProcessHash": "40a02872f90edb1a424f9ab315c099cb34a123d7c66762a83f831cf0c841db7c" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.004Z" + "updatedAt": "2025-12-04T20:16:58.085Z", + "postProcessHash": "723a339f6530f337c839d0f94db645a210c85b45bad3a43f5235ca55d82a7334" } } }, "e5455b8e71ca0240dbae9ace48f312b2859517718c9b5597790152f5c5e4c55e": { "70f5e4c518ecfa04a597a86630bfa6b7c13859702dbefa84f43a08c628bb9c6e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.996Z" + "updatedAt": "2025-12-04T20:16:58.080Z", + "postProcessHash": "f3761c5fc409fe25da8fa803f4c374edd5c1f2ec5a6d5eb46cd1466cfec110e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.327Z" + "updatedAt": "2025-12-04T20:16:58.058Z", + "postProcessHash": "f1f10d31a8ec8748ab61153c74d5e543bf46635c94fa567bae435be5adc46c7d" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.003Z" + "updatedAt": "2025-12-04T20:16:58.085Z", + "postProcessHash": "945315cb0689c7d02b52a51f53e9cefec557e3700f53ec6db86c6bc11b13c9ec" } } }, "f0b04860378a97e43a484e7cfff527be98a82a04b75ec9ff8b95b88bfe017c21": { "4d6e6128d8cb69272312bc10969428b2d7ec14e93843e97641bd6ee1b539f104": { "jp": { - "updatedAt": "2025-12-02T22:57:29.012Z" + "updatedAt": "2025-12-04T20:16:58.095Z", + "postProcessHash": "fe705a27999e5e8f95e313afd2d0a1b80a330f947f56e123b25c14031de6f53a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.303Z" + "updatedAt": "2025-12-04T20:16:58.059Z", + "postProcessHash": "39d044971443b21d59439600d34d409d96ce8af02c6f8e813cfab5a40a7ff8d9" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.985Z" + "updatedAt": "2025-12-04T20:16:58.092Z", + "postProcessHash": 
"4d83f0604e5d0e47b7ddc39febbacd9a98013464f2d6f2104254ac7ee57cfce5" } } }, "0f826dda16a017686da9cd258a7b36a8a0fa9bf0906faf288ac5dc07e8293c8b": { "7e91c488285e13a646cf4e0be8efc95cc3461d1b565495878e5ce6df5241454f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.343Z" + "updatedAt": "2025-12-04T20:16:58.107Z", + "postProcessHash": "66f9b3e1e73fe2458654a49e1795278b6fec5fc475fa9359a7570d3748b50abc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.342Z" + "updatedAt": "2025-12-04T20:16:58.106Z", + "postProcessHash": "1bea8ffb75c8298563ba4e2930de282570283d6355dca2855985910299350044" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.346Z" + "updatedAt": "2025-12-04T20:16:58.109Z", + "postProcessHash": "dd5a86d85979d4e5bf40a7977b27a65dcf8f13b07310bb703646c425646e2d13" } } }, @@ -8819,31 +10772,51 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.348Z" } + }, + "aa22e9603b0ee7d5f78dd9d88e3596885f852cd20bbfe4597ac83fa1c1be2b0c": { + "zh": { + "updatedAt": "2025-12-04T20:16:58.116Z", + "postProcessHash": "d624c2e6a9965ea0ac98f15b951ddceff00f7326fee3b3f9f48967afa801a456" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.116Z", + "postProcessHash": "de018d2d6b4a80634792a7eaae5b3dc6c25960d89acfe55e4b460110e4a5910b" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.116Z", + "postProcessHash": "1ff5c46e9f5f6ff7ca79742bc18eb36867e8f10609d3ff8f7f8ca38102fa08d9" + } } }, "20547e4692854c30843291c8c4b85cbaaa2473154a503ada089b47a286e119c6": { "add80eef63fea1cd539d2ca896319743cd0debee7952a9062ff15a5bac9cc978": { "jp": { - "updatedAt": "2025-12-02T22:57:45.356Z" + "updatedAt": "2025-12-04T20:16:58.113Z", + "postProcessHash": "67eb2b054df268b108c3a0faf3270ac109dc3489f3bcd78344641306c8e53936" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.355Z" + "updatedAt": "2025-12-04T20:16:58.113Z", + "postProcessHash": "20fa9b3888430ed9fc54b78cc0d6b0bfc4e4660292e07c64b1b891e14660d554" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.335Z" + "updatedAt": "2025-12-04T20:16:58.078Z", + "postProcessHash": 
"95b3745ce1ffc3445d96eed03f87d92bae03afce90e2fc5afa594ec5b8add65c" } } }, "3f80767faa69da876f277f16dd9152d0f1e8aba3db884130fa4c7ea029eb17e1": { "c8ca096e88fcce6dd3218a70cf039d6d7d8ebfe91be1b6c3b85f141fdc1feac1": { "jp": { - "updatedAt": "2025-12-02T22:57:45.341Z" + "updatedAt": "2025-12-04T20:16:58.105Z", + "postProcessHash": "3576f73d5ab569b5dba6610c41d0eddb549fece6a945768138d9994ef1bc74f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.345Z" + "updatedAt": "2025-12-04T20:16:58.108Z", + "postProcessHash": "ead4a6005aeaaea290b4a0f2e5ad1e3d7314773b444c2294f59cce3485eff989" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.343Z" + "updatedAt": "2025-12-04T20:16:58.106Z", + "postProcessHash": "5b3e7681ce098fd818559a203225d53b9df6c60c08eee658a98860182900798c" } } }, @@ -8858,31 +10831,51 @@ "zh": { "updatedAt": "2025-12-02T22:57:29.003Z" } + }, + "a3e43704023aad4b3b8addc9c912c674a1ac0bc95c91480f4d77b6627040edd6": { + "zh": { + "updatedAt": "2025-12-04T20:16:58.096Z", + "postProcessHash": "c614501e69bce800b2d8cdbb7fc155cfd9327699aa5c896cc0a95515be54fcfb" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.097Z", + "postProcessHash": "3db126a7376e537ee4e26fbc19fc5b27db24fe400f30e1ccf8912a0ebfe8bb99" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.097Z", + "postProcessHash": "b3811ac6df428419c2e46b8c1c3b410d59c8bc449319718af138c4d541de54e9" + } } }, "4f944066028f36b0a6f28232fe75a6ebde995b969ebfd8a3c379cd645f0ff366": { "8ded3d0fa9f33ae122022672fd02b631471b5177e76c368607b554bbb3efce22": { "jp": { - "updatedAt": "2025-12-02T22:57:45.352Z" + "updatedAt": "2025-12-04T20:16:58.112Z", + "postProcessHash": "1fb05ebe827d9b4e89e118f388720ae6200e71c8e4006802948ea9efd7afc06a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.338Z" + "updatedAt": "2025-12-04T20:16:58.115Z", + "postProcessHash": "b4af8004a27cf420e961ee606f31c9d122395cdbd1410f7bab003e83757bd2f5" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.355Z" + "updatedAt": "2025-12-04T20:16:58.113Z", + "postProcessHash": 
"8acfb0da232bfeee15ac3bbafe4b8e27e14466496bb1e96d66e424d3e1702f9d" } } }, "74dcbdc993f03875931c0ef548e27e0ecdd4c39c4c084edc6eaf3237a562817e": { "a9ecf8d346bd106208732038ad37c4f2b9861186a25aead51cc7057a47bf2cd5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.346Z" + "updatedAt": "2025-12-04T20:16:58.109Z", + "postProcessHash": "918ef54ebe85ad246c386f779552f8e5a38cdea34faee26ddd62088c8cf0bcf8" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.345Z" + "updatedAt": "2025-12-04T20:16:58.109Z", + "postProcessHash": "fcd6f74a5f74437c31629f46622dbcc5393538c89688fecb02045e96c72aa46d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.352Z" + "updatedAt": "2025-12-04T20:16:58.112Z", + "postProcessHash": "91f8ff05d0bb794e576aaf9988e9e6f241c790bbba782ff1593e5e0685bd3e64" } } }, @@ -8897,6 +10890,20 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.354Z" } + }, + "81865e2b87cbf670a23ecb1eef682c7e5fe88b937dbc2bc5d152964b74cbaee3": { + "zh": { + "updatedAt": "2025-12-04T20:16:58.116Z", + "postProcessHash": "a9a61c6157f81a96088d7aa0c1761e42324aa33ac6266f7a6613347dba121332" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.117Z", + "postProcessHash": "bf6c6a821425f0fa33aca59c6ee1f9069d3a6b32ba007c2da5070c9614ff1be7" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.117Z", + "postProcessHash": "b24d21d28a41aa1cecdacac7eda8c2149c8b34a74af5ddc488e019b5b66f8792" + } } }, "7ecc3a4ce272f64e4527753ebb952d46d33a160afd6add3fc9a4a9b08d7fae1c": { @@ -8913,52 +10920,64 @@ }, "017bfeb518def6458017f0b534b81d2e14329dc98b0278460a1c0b8fb485b0fd": { "jp": { - "updatedAt": "2025-12-02T22:57:45.358Z" + "updatedAt": "2025-12-04T20:16:58.114Z", + "postProcessHash": "94f161be146a29e86ad008b4d31bf19826124ae2ecbaada62eb022da7c033b03" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.358Z" + "updatedAt": "2025-12-04T20:16:58.115Z", + "postProcessHash": "56780738b94e347b5bf42537057470b4c2ca3aea8cc079fd8c6064f6377d4c1f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.359Z" + "updatedAt": "2025-12-04T20:16:58.115Z", + 
"postProcessHash": "005506485c4ac08284fcb6777390ae06795f64c4ef4398732ea9a6971cb2e2e2" } } }, "7ef33beb95b850b8400aad8ded966f28fd1eb3b61c5de7974983f2270d2b4f7c": { "501d9df3106342436670302f74dd2270b110ee24da435123cc0a1b51633a2284": { "zh": { - "updatedAt": "2025-12-02T22:57:45.375Z" + "updatedAt": "2025-12-04T20:16:58.101Z", + "postProcessHash": "29abdc29c9f6fa9dbd1257021470308138f5297cc8f0e142a98a6f70276b1e5a" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.382Z" + "updatedAt": "2025-12-04T20:16:58.105Z", + "postProcessHash": "a14ab5675918b357ee97f986941a1a82b9e69a8b3747df7b2a7aace0bee94c99" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.377Z" + "updatedAt": "2025-12-04T20:16:58.102Z", + "postProcessHash": "51585014626eb743a0e49c6d092eb76fc5a76ff10f0d648006aab95988aceaaa" } } }, "81154bce9be97a0fc523001b189f4c093458747ff4e9b7f5cdecde64d9163d22": { "126e1bba0f10751cf028401cc1a0f3a944780e4a87fe9b63fb850c58b7d7510d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.352Z" + "updatedAt": "2025-12-04T20:16:58.125Z", + "postProcessHash": "acc98d237660d385dc0496ddd1cbfda95b928c680fcf771d8c6295c1c628f7d4" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.351Z" + "updatedAt": "2025-12-04T20:16:58.124Z", + "postProcessHash": "81f8104cac3ac6016d30a4a6ffcefdd4c7ca222f5c3ac0ca908839a5ad16608d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.992Z" + "updatedAt": "2025-12-04T20:16:58.077Z", + "postProcessHash": "ff66624c1fe09811ebdc485a5d415ff51046670a00b61365d89c395f552250ca" } } }, "88d029b112f5fca5e4ba3d06b8c35a6d55e5b557663ed600c6f1b98f59f8ae20": { "1393aaf825d4dab45a6acc1ac4db09d138970e7008f8c78dc434242141a483ba": { "jp": { - "updatedAt": "2025-12-02T22:57:45.344Z" + "updatedAt": "2025-12-04T20:16:58.107Z", + "postProcessHash": "56f3a5e3ecd8f623a5c79c29d894e4e85f188ec86613d5e5d18583800f5fd0b3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.342Z" + "updatedAt": "2025-12-04T20:16:58.105Z", + "postProcessHash": "adc9f0a054e23894106f1c441732e83341f60c111bea7ad2b41164dd902fbe84" }, "zh": 
{ - "updatedAt": "2025-12-02T22:57:45.344Z" + "updatedAt": "2025-12-04T20:16:58.107Z", + "postProcessHash": "070d25a59aae60bc22d736dd862714043a6a8200791dcedea1516f4b15cfb05e" } } }, @@ -8973,148 +10992,195 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.356Z" } + }, + "a0b0d47ba92766ad59fa384ddacc05a960e2836e2c3cce1d9c34ac868b117166": { + "ru": { + "updatedAt": "2025-12-04T20:16:58.116Z", + "postProcessHash": "b0026fe3dfa10fbfe007635e54812d75288def01718ed77d778464a7995d5069" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.117Z", + "postProcessHash": "caf5358a6b4a8131e6a52b054c4e91590f855bab0686bc545f9a78de3bf8a633" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.117Z", + "postProcessHash": "39fb72133ec16e3bbe6e7fecf8028bd6e2462d9cb175ffe123cdb570e8006e1d" + } } }, "9b041aa508f2046ee0a4f84858531b8c2507bb6a6989db886c2dd4ea0c11a002": { "23dc86ecd0cc50924f5ea02d06b16b4e395c8e0f2fd73bd76d547ac864d42f36": { "jp": { - "updatedAt": "2025-12-02T22:57:45.386Z" + "updatedAt": "2025-12-04T20:16:58.106Z", + "postProcessHash": "b52c724061f8a1742a87bee7d6ef39c6ab20bbc016127c7dd7c88ff16ff98a3e" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.393Z" + "updatedAt": "2025-12-04T20:16:58.125Z", + "postProcessHash": "39ba9ce49d6f9c60016493652cb79bec9940b4539c99d6802caaa8f6aabc93f4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.341Z" + "updatedAt": "2025-12-04T20:16:58.105Z", + "postProcessHash": "5ec8683eb4da60cc741f969daf4522fc8d27678564eec7c6d930701ae409a87f" } } }, "9fdb709a96f96fb011d844ca13cda88bb361212284a327821501551223a4aa9c": { "064e508fcc9e28910cd94c862392084ac9bfbb28d99941ea8a6c7bf60aa11b79": { "jp": { - "updatedAt": "2025-12-02T22:57:45.336Z" + "updatedAt": "2025-12-04T20:16:58.078Z", + "postProcessHash": "cfbc8f754e3b40f1706dd59b915ed1a98604baf2111caf7267e52f3c311ed69b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.348Z" + "updatedAt": "2025-12-04T20:16:58.110Z", + "postProcessHash": "46671661c130fc3cc842366c6f328b3c895b014f3c869bfe5f8d9f8fcd85751b" }, "zh": { - 
"updatedAt": "2025-12-02T22:57:45.347Z" + "updatedAt": "2025-12-04T20:16:58.110Z", + "postProcessHash": "7eb31a8a8855904c945f4baa617af1a8391ed4b24bbcef87f3f1f1f5f0a3294b" } } }, "a08c904ab61d1c76fa622a160e0956711547c3a01e8aa50f73e5c58504c9110b": { "65bcd1c2b5d5887e042c81d6e5c21fc2a0db88c65574a53d172b1f40ae25058e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.343Z" + "updatedAt": "2025-12-04T20:16:58.106Z", + "postProcessHash": "da1a481a59fb5d602cefe6b3743327edd06ccf0ca63f680e8586ece2ccccc4e6" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.342Z" + "updatedAt": "2025-12-04T20:16:58.106Z", + "postProcessHash": "0d525bff644aabcd7dbd6c5a3be09c748357ce9314be13a93e4234ca92da324f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.344Z" + "updatedAt": "2025-12-04T20:16:58.107Z", + "postProcessHash": "61ac1caf19a3f7b833ea229508daa0cacc3439e1892c68d10d3c8e87019ed08b" } } }, "acceced538fb290e4499cdbefd4179c4f4d347c0ccfd60840e8eedd522602b6b": { "124022bd2cc51265ce8f1c49ed73363724b1580a4bbe5d35e3c5d6d9b2bb7c01": { "jp": { - "updatedAt": "2025-12-02T22:57:45.356Z" + "updatedAt": "2025-12-04T20:16:58.114Z", + "postProcessHash": "8d450135ce2e4678b7f5ef26a20285f803a83e1569c34e3c1d92408103cbb5b8" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.357Z" + "updatedAt": "2025-12-04T20:16:58.114Z", + "postProcessHash": "3c6cd02b15932c7a65f68de896c38105e21d2af94340c564b04440a40e48f142" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.353Z" + "updatedAt": "2025-12-04T20:16:58.112Z", + "postProcessHash": "b628a8573fd7937b6d1eef68cad4a5248fa234470b6e7d94abcf2d351b9f4e02" } } }, "aeecbc80fabcf65b76af1cc65dd769022e4856381588c8501d1a59b206c10326": { "0ce14d2631d2a24f63a66e4f8b06f82fee405f818a0bcf369ea6485c8ba72681": { "jp": { - "updatedAt": "2025-12-02T22:57:28.991Z" + "updatedAt": "2025-12-04T20:16:58.077Z", + "postProcessHash": "4ff5d6bc66d290848ccb9621681a791fe2629e7b762987cc8a6f2270f8212540" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.354Z" + "updatedAt": "2025-12-04T20:16:58.113Z", + 
"postProcessHash": "4b6efd4cbee1c64507ad176eefffaa950fdb625f3788e4ca3b4a30848089ec91" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.337Z" + "updatedAt": "2025-12-04T20:16:58.078Z", + "postProcessHash": "1c6eb5df23694d44726af0c873107f964b5332282056c534f6dac64c5623bd5d" } } }, "b5543674ee59dc5d80ec783390644aa03c6a1b7c91bbff001eda92fd5198a064": { "dce1dfac5e498639b6f080315eaf0ea6f42c51bef46d3fb13e621234a36cb996": { "jp": { - "updatedAt": "2025-12-02T22:57:45.351Z" + "updatedAt": "2025-12-04T20:16:58.111Z", + "postProcessHash": "e47d5939caf357a9faa594309819704dad52d6db8fcee830d99a0e1e91c88f10" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.349Z" + "updatedAt": "2025-12-04T20:16:58.111Z", + "postProcessHash": "e0402180a5d5666a052e5eeedcb798851abd1708db41e9b9dced14152fa3cebb" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.349Z" + "updatedAt": "2025-12-04T20:16:58.111Z", + "postProcessHash": "6004bd688be54222f0337a7bb5c00853e0b53b9f20c74bde32a4e091f9a0efb9" } } }, "e6ce65cbfbbe441fc30cf64ab1c1d1dbe1699d91ca18dfbe615e4a83da7007bb": { "366a43842989bf6846e76c26bbf2e87e00bcb25564dfb7941a416bb6c279a332": { "jp": { - "updatedAt": "2025-12-02T22:57:45.350Z" + "updatedAt": "2025-12-04T20:16:58.111Z", + "postProcessHash": "8a603ea14b88d4af53886e6a795c4b40b5b50b4baae36c508c06a75422bb5542" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.347Z" + "updatedAt": "2025-12-04T20:16:58.110Z", + "postProcessHash": "00d3b20339fecd220ed9d52b3cd48c6ce1e125af0f9ae532507a644c1aefcb37" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.347Z" + "updatedAt": "2025-12-04T20:16:58.110Z", + "postProcessHash": "ebc28202bf7e41e9000dc3d9e886f312a69383f0e8aa920135cbd744cde7d547" } } }, "e8bf7b4871a3b921003161fbe9fb3b3e0df205638abb6aa707688886621c9715": { "15aca606b9aecbf11a3de4acfdee9f33ff548522f3411df807128a214f52bae1": { "jp": { - "updatedAt": "2025-12-02T22:57:28.993Z" + "updatedAt": "2025-12-04T20:16:58.115Z", + "postProcessHash": "47f8c30a63fbe6c9c67fcd5bb4e599aa7e88defdd75e45c1189eb03e06606d21" }, "ru": 
{ - "updatedAt": "2025-12-02T22:57:29.006Z" + "updatedAt": "2025-12-04T20:16:58.112Z", + "postProcessHash": "117a7b1fea2d46072d7d61c2ad54f755fabb9ae9e2676aea9a52084b9c885eae" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.007Z" + "updatedAt": "2025-12-04T20:16:58.112Z", + "postProcessHash": "0aef755562b2f21b003ae571d081e65a32baa856f4b53420d94c77e28c04479c" } } }, "e90559dea9545d48d4ab67dc41647c74245d5af3f7450472a6a52017b58aaa6e": { "a15b882337784575046360aea947e55fbbbf97d76e32f32fb9e275a833afb47f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.347Z" + "updatedAt": "2025-12-04T20:16:58.110Z", + "postProcessHash": "46465f4f40422902b63789a51fbf8986fbf9b0be36213e00f719b970173a991d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.351Z" + "updatedAt": "2025-12-04T20:16:58.111Z", + "postProcessHash": "e7699fcc6ef51e4b022e99b7830ffa5cc94689cf024b83d75d9dc5920c1cae55" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.345Z" + "updatedAt": "2025-12-04T20:16:58.108Z", + "postProcessHash": "9322f0d5f0385c08e5fce2a476c12adaebf38e3e59928ad71bd07e7d55518637" } } }, "0b209462f1ec411886fda57e810cd3eea5efebe202ca2b4f5dc9f1fb3787ccfb": { "5ecfaa73c3cc92aee3ee2825b0bb89bc857721cc0ed52b56af3b10a539b65498": { "jp": { - "updatedAt": "2025-12-02T22:57:45.377Z" + "updatedAt": "2025-12-04T20:16:58.103Z", + "postProcessHash": "c5f62566932495a741ae6b618207cbad4c992b53a4c964e91308bf07a2c31ebe" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.379Z" + "updatedAt": "2025-12-04T20:16:58.127Z", + "postProcessHash": "a7261962fc0368e1546aca4e9ea3797a2552fa4a5d24843b7683a0c397e91e9d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.379Z" + "updatedAt": "2025-12-04T20:16:58.103Z", + "postProcessHash": "78ea3d6b6c126cbe3e05168f370cd3d6565cce15ed85e087a8687ba93b37d63a" } } }, "1d14e004d487902f18fc6c1de04f1ef911152e4d8c2d76455e4956d9cccd132b": { "435800632f77c2f3a43f62396007c869bf0e3310b946c504cec9c7661f101c78": { "jp": { - "updatedAt": "2025-12-02T22:57:45.394Z" + "updatedAt": "2025-12-04T20:16:58.125Z", + 
"postProcessHash": "12e545b47285102bde384a0918c92a8d42bf79f1d1afcd9d441e2ad287c22f22" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.334Z" + "updatedAt": "2025-12-04T20:16:58.097Z", + "postProcessHash": "b64f9cd2de62bdd21ec0bfa9759f2b2d1898288061cc8368efd3ab23489011f1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.334Z" + "updatedAt": "2025-12-04T20:16:58.097Z", + "postProcessHash": "fb37e6d45221abba2b724c388e9bee86cd7c1c418f0b60ba606e0044f995db98" } } }, @@ -9129,70 +11195,99 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.371Z" } + }, + "12f741b625eeb871e319dacb79266cdfdb0c145a2d7b6a3ad9c2bfaab4835c14": { + "jp": { + "updatedAt": "2025-12-04T20:16:58.130Z", + "postProcessHash": "18f832c11e0803ee4f57075a40c739e7e670ed48558e5a098467862fdbe3eccf" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.133Z", + "postProcessHash": "a62640c407fc16f3276415425e16d18caaea9d02cdfe5c3a3dc78ada75c3e5a1" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.134Z", + "postProcessHash": "cda3381c6466e150661ac237bdc1d75d4124084669153b8dcc9c419f8118748b" + } } }, "3429435d33feb194cd2815db45da2e05b63eefb419c7039d15215e40384643ba": { "918e8abb0c6066b88bb4b0bdf464c3907f836aae0d601ee87bc91ad879720c8f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.375Z" + "updatedAt": "2025-12-04T20:16:58.128Z", + "postProcessHash": "e3c46ab4ce64ac41b792c409a3282d6e83c5def64ca807f625d2619f4649334a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.374Z" + "updatedAt": "2025-12-04T20:16:58.101Z", + "postProcessHash": "3de20cbbf92383240692cafe77aa9512a9dd9ce2e608ceb4bfde23d69f5a6d32" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.372Z" + "updatedAt": "2025-12-04T20:16:58.128Z", + "postProcessHash": "632127b3e258e806c218ab606238ec171ac5f6906be0d719c5b93bc63c378e68" } } }, "3fdff0c8c92ebbc95447e7244075da88510e0c3d4966e3b72af95a6e4c3d8e8f": { "1e45c8cfbc59d4c2fd364a34eb2e7afffd36ea4f0b127f873065e2b176a0133c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.392Z" + "updatedAt": "2025-12-04T20:16:58.124Z", + 
"postProcessHash": "9d4a691bb1ed89ea3226b0bb90a4875cf5db3dc0e6709ee46a3db3da5233cea9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.388Z" + "updatedAt": "2025-12-04T20:16:58.109Z", + "postProcessHash": "634269eae10945b9df42027becbd520ec5a7609d0dff547f7e3348f0a16a5888" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.392Z" + "updatedAt": "2025-12-04T20:16:58.124Z", + "postProcessHash": "e98a23a17f011e9f5851be0d6ab95bfeca6efbf84870077ca0554e80a9dbfa39" } } }, "4ff60f576a90647ac6859ba05f56e594f54029ca4beea54b1e07f27ee5acfc94": { "b991af90c327a458792ab1640e608a8704cbde6a6f1373636c3d4a5c3445b766": { "jp": { - "updatedAt": "2025-12-02T22:57:45.381Z" + "updatedAt": "2025-12-04T20:16:58.127Z", + "postProcessHash": "803e8681ad39691ad155297ca76a467ababac229fe5bd7da0d68ffec5775ea57" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.376Z" + "updatedAt": "2025-12-04T20:16:58.128Z", + "postProcessHash": "442ba75a91c9be455f774a80909e57643cd0cf044d6603a5f7687b36fbaae563" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.378Z" + "updatedAt": "2025-12-04T20:16:58.103Z", + "postProcessHash": "348f5b71d063a06e70bbad899d4b9b48ddf0c0cee59e9de6489ce4815986f697" } } }, "5063b2b4bc9b2899fab5998a2b281df0229add76ce268451423a1dfd2ffa5f2c": { "d2af9085fbf80701266de277a6a67f2400d823b5ac0d2ee3f5ffb2eb0b4f0294": { "jp": { - "updatedAt": "2025-12-02T22:57:45.379Z" + "updatedAt": "2025-12-04T20:16:58.126Z", + "postProcessHash": "13c1a12c727097ab797e1bcda7cd05d3df27bfef92501a2be245d3aeefb3aabf" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.377Z" + "updatedAt": "2025-12-04T20:16:58.103Z", + "postProcessHash": "545c335253c0cff1525ff24f542b93c1609b9d65561a13519b6d1645eb7e9184" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.380Z" + "updatedAt": "2025-12-04T20:16:58.104Z", + "postProcessHash": "83415bb6011ee5ab96ad8d823123828d80ca71af42a383a7adf19013cbb61980" } } }, "53e5bb2209c16605d7273edd1079563619f7fd4e6e5bdfdb95988af1a4694755": { "19b750db7b91f72b4f9666d5cd502557bfaf69581d6fb96105e239e437635657": { "jp": 
{ - "updatedAt": "2025-12-02T22:57:45.385Z" + "updatedAt": "2025-12-04T20:16:58.106Z", + "postProcessHash": "455d489272641fe322d064fe2e5dd7d2ef08ba62f17c3ec01442dad507d57288" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.389Z" + "updatedAt": "2025-12-04T20:16:58.124Z", + "postProcessHash": "b5337b0128bb33419afda954188fc2854728f124f106933eb00511703b9d60ea" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.387Z" + "updatedAt": "2025-12-04T20:16:58.108Z", + "postProcessHash": "e131311ebe85d80b085088d2c0d50e302f6f77f14bd3fb0df4d84018fb8b889c" } } }, @@ -9212,52 +11307,64 @@ "611b2b0d02709490f3fe4e3331bd31828428e661cdc38d633fd487daabd3cc1c": { "cb8d66eeaa9869bc4e4a0832238beb5e4b8cc2ffa0e3483678d79606c472c326": { "jp": { - "updatedAt": "2025-12-02T22:57:45.372Z" + "updatedAt": "2025-12-04T20:16:58.126Z", + "postProcessHash": "06cedf0444f47bdbb3872bb8da86938439b257b9f69e9f6a877560a7c0286ee4" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.373Z" + "updatedAt": "2025-12-04T20:16:58.126Z", + "postProcessHash": "4478bfce8b787cd05a4bfd7b0f48a33c46835d05e889f2f83fbab2fb40ed3a9b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.373Z" + "updatedAt": "2025-12-04T20:16:58.101Z", + "postProcessHash": "f5c5fb798666d739551af0f0b911d8d65a9fabdb866299aabdb4cabe3309f998" } } }, "633a4ffa471ca2244e6ef5a3022d6a46f51861f23239b9b4594d8cac210cc0b0": { "011445c96b51faadcc04ca2af74b4a9de574446918a704bcb7648036f25d38a7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.388Z" + "updatedAt": "2025-12-04T20:16:58.109Z", + "postProcessHash": "2bb78765b095c76c532c526334e64871eba70e46b95b6ee70feb979b67b4b1f3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.387Z" + "updatedAt": "2025-12-04T20:16:58.108Z", + "postProcessHash": "661b2f38cb4beff8279dcaee312e3461f6d61a3a3a1edcc1df09236a15c24adb" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.389Z" + "updatedAt": "2025-12-04T20:16:58.124Z", + "postProcessHash": "d7ae5f8356820c6d5250fc660bae5a50c7a59b423630b08542d122ce6d168f33" } } }, 
"675843b51c582122de910ed4f222f211176c97af172b7f849c0b8ecd0dd2b190": { "a27dbf65b4c9c2e9891bbf450b7163614f6940254a6ad1c1db78fd18c3795fe7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.375Z" + "updatedAt": "2025-12-04T20:16:58.101Z", + "postProcessHash": "0666c5b4879afec446929d7a66effe66f8c0a8faed399242f7c6b8fa1c00b489" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.369Z" + "updatedAt": "2025-12-04T20:16:58.100Z", + "postProcessHash": "fd9b827452d7e74ef314d529718102605f9039eedf4a2efc18c7fa3c07c4402a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.373Z" + "updatedAt": "2025-12-04T20:16:58.100Z", + "postProcessHash": "48365e9ea4a29a749a38d9c2a82328eb0479bc1ecfda54456b028aa544b5212f" } } }, "798d0e3eca2e56d6aa7658d85b9a41657e3aacf854913976ea97d89d8865966a": { "767118d90c94b77855b18cc08229cfbb4dd47ceb560ee656c0882c9192c24418": { "jp": { - "updatedAt": "2025-12-02T22:57:45.376Z" + "updatedAt": "2025-12-04T20:16:58.102Z", + "postProcessHash": "f3f86613e11e9fa1fed2a7bae5e3939d010b2ad7a13f02920546f01a42ba6e1f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.380Z" + "updatedAt": "2025-12-04T20:16:58.104Z", + "postProcessHash": "baff7bd8a8153e567bbcb03654b245ca208fcddc7d83467d088a031eee149f5f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.380Z" + "updatedAt": "2025-12-04T20:16:58.104Z", + "postProcessHash": "78a8577db95646e3b9770b85e2219fe739fc18bfb2d047669e9a8d7d6056013a" } } }, @@ -9272,70 +11379,99 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.338Z" } + }, + "41c49c63971a2f9e6bd3fd7c7d57ce1afb19c85181799f072d28c0b669914f00": { + "jp": { + "updatedAt": "2025-12-04T20:16:58.132Z", + "postProcessHash": "4d643781883c1ce4d469d1d7e31e5a9ed597752a41bd2e8f0d1131a027e314c9" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.133Z", + "postProcessHash": "c865bafa751f6885f58390589e807ccaae8c8b57b1157f585df83b196d59cb1e" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.134Z", + "postProcessHash": "767ca0c85513b39e58b5657a7538b251313a89d353c7dd42b79e3e078c39bf1f" + } } }, 
"991e27fab22b52bb4b08b4ae04fdec89d5e6553dc7110f7d24b73408fff315c1": { "a03618c42cb58f95e7e03a4057880d077e66e088f5502749a604eaca3e70f464": { "jp": { - "updatedAt": "2025-12-02T22:57:45.381Z" + "updatedAt": "2025-12-04T20:16:58.104Z", + "postProcessHash": "8261ae74672376cb23f75e6ca6627a703641cfbeae2cfdcff99296a65ef4eea8" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.378Z" + "updatedAt": "2025-12-04T20:16:58.103Z", + "postProcessHash": "55b16ca599942863b2719eeaa820a0dced581dc805f582d05b28eb08ff1d7a4f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.382Z" + "updatedAt": "2025-12-04T20:16:58.104Z", + "postProcessHash": "b7c93bd5850431bd62f09c9778909a4fcf84b55fafc13a3e96c2eade861bfc72" } } }, "a6b9d4c5cae0464959192ad659ed2100cebdeb8bc49e4c041d80a9c6a804808b": { "e888d9f5660cbc8a94390f0efc75e38b61355c7aed5b560ba7c55138aa191993": { "jp": { - "updatedAt": "2025-12-02T22:57:45.339Z" + "updatedAt": "2025-12-04T20:16:58.098Z", + "postProcessHash": "06e8c3469e2445580b09a3b28ebc1ac039223ac81dc207438c12292db2c93adc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.338Z" + "updatedAt": "2025-12-04T20:16:58.098Z", + "postProcessHash": "5e6a5cc39eafbf39d999441e9e7e081c57c8fe1d64b3c30024bb5f5cd1cc8fec" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.339Z" + "updatedAt": "2025-12-04T20:16:58.098Z", + "postProcessHash": "249f577cddf532bba4917821d15b2dd459aa9cec2babb00e50b420b4d7bad721" } } }, "b7a5608a851a55f00f22ae8d517987b946c9c3eb543370562dc786dab3594714": { "88a876337f46351c9ccac93457f33dc4fb23d9aab3760cae91e020811ac6f19e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.387Z" + "updatedAt": "2025-12-04T20:16:58.108Z", + "postProcessHash": "e22c9e0f62c46be1fb0613fa6e2ce4bc3a00c029063cac380a0718439a8b8892" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.386Z" + "updatedAt": "2025-12-04T20:16:58.107Z", + "postProcessHash": "70216443ce86a0078a7e43cf6fe47fbbef7c520fd8a646e18fe35617c2259e34" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.394Z" + "updatedAt": "2025-12-04T20:16:58.125Z", + 
"postProcessHash": "33596428ebd76998ffe1a57fee4274e249443a6788ddec273f62c9250d343dc5" } } }, "d9b29cc47744c8dbd75014c191b2d1b6a6cbd834e8f58800d7ab54ee3b380193": { "790a9a7598eac524933e6837e62602bb54c548d8cae162ec8f67203a8285580a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.374Z" + "updatedAt": "2025-12-04T20:16:58.101Z", + "postProcessHash": "b171ee89fd547cb0a22e443cdb48856851d3a06c5fc1949fd7414e1ae632d692" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.371Z" + "updatedAt": "2025-12-04T20:16:58.100Z", + "postProcessHash": "393ce02045cd6f41ad9321327a0ba145a9eb24cdbb3ebac0202b3c1c488843f3" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.370Z" + "updatedAt": "2025-12-04T20:16:58.100Z", + "postProcessHash": "0b55fe9d381743ce9dac352369c0d33dc69779904f1e16fd9d652bbc06d17c90" } } }, "ec3ea94f6a821f3d66e7dc9993bc4fc2b65580f3ce729e89dc7d1d6e9711078e": { "078157aa36205afa5c6e11fa8f7457d8696fb79062fc79c709121c33ed2a7d52": { "jp": { - "updatedAt": "2025-12-02T22:57:45.389Z" + "updatedAt": "2025-12-04T20:16:58.124Z", + "postProcessHash": "e2f6c84e39d78121bee0c5c86633ecd81a0173071bd394b0377f909765344a17" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.393Z" + "updatedAt": "2025-12-04T20:16:58.125Z", + "postProcessHash": "c56896757e52e329728d36c52b5bdc59ef02091ee1b1e4317f2fcef2b9767f13" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.388Z" + "updatedAt": "2025-12-04T20:16:58.123Z", + "postProcessHash": "905153647f3b37b66c0f71d880391a6b13f6e9e1069840b9d9b902211d13a6dd" } } }, @@ -9353,13 +11489,16 @@ }, "97e44c5927e52bd9073aef11610c839f64450b48880fbf9a450f43177e163506": { "ru": { - "updatedAt": "2025-12-02T22:57:45.371Z" + "updatedAt": "2025-12-04T20:16:58.128Z", + "postProcessHash": "ab563c45f22fe4912f1c0ca7f8d771a0762978cdac649f0ac88919372fcc2765" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.369Z" + "updatedAt": "2025-12-04T20:16:58.127Z", + "postProcessHash": "0020aa28f9ed0ed3692f02a5dc8f872042e1432425cc884e8004d6429bf6e123" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:45.362Z" + "updatedAt": "2025-12-04T20:16:58.128Z", + "postProcessHash": "899c9f049b976aa1e06c7d40193296888b011bfd4d70de17d3efd74798f275c6" } } }, @@ -9374,18 +11513,35 @@ "jp": { "updatedAt": "2025-12-02T22:57:45.418Z" } + }, + "bcde795f789e675b001ddba783f5f330dea1d9b0156a229711c8ccc9037887c5": { + "zh": { + "updatedAt": "2025-12-04T20:16:58.131Z", + "postProcessHash": "cc27afb2bed48115d9080e952001bec145097eff07692868599324aff79b8ed7" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.134Z", + "postProcessHash": "0b41f8c11253d2339680f0dfb51efd8660f0b93e75ba715709e2a55f98e52681" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.135Z", + "postProcessHash": "048a3385d8a57977d223ffec2c8506518cf97142e9ae1d8c041d2748d8371414" + } } }, "0788f71f3701d95084837950d519aaf717087552402cd82dfcf4236628f15af7": { "1840d9cc80dd9c8c8cc0209074557de0b8c1bf9c2ca33bff6ab6effea03e9a16": { "jp": { - "updatedAt": "2025-12-02T22:57:45.397Z" + "updatedAt": "2025-12-04T20:16:58.099Z", + "postProcessHash": "9f8e5414e2db4974242cb61830b80369ceb6db03b4a9c1f1550a8cf6c8a60411" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.362Z" + "updatedAt": "2025-12-04T20:16:58.099Z", + "postProcessHash": "bb4a8b2a60aab8aa9609be5894ee934311807e5370320be0396455802035fe36" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.361Z" + "updatedAt": "2025-12-04T20:16:58.099Z", + "postProcessHash": "b710f5a6b142ce49a40818b641b59d222c3028da1c9627a5c02784331529ef6b" } } }, @@ -9400,31 +11556,51 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.408Z" } + }, + "48ea57d74cf0991e459a1a6ef7ad1bfb5bfe3f7325c793a0a4cb78626a6a37c3": { + "zh": { + "updatedAt": "2025-12-04T20:16:58.131Z", + "postProcessHash": "ab4e13587497dd76e68361e979244061166f0013c48e00477dcff7ba71647f14" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.131Z", + "postProcessHash": "e88f00b71f8cd5c2e1f37d28e7746f6f7611fac5f5b463ed40810c881977e8a8" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.136Z", + "postProcessHash": 
"d7ce52873e7f4702f72734e4fb18d53fc2154294b364f31bffdfb2948186400c" + } } }, "178c705b2c62c01727a80327a809f6535d583c870ad39995375a10a363a1d727": { "1589f225c754084b804cb1b7c426921b7979e160c01fe65dd00c2a0a3e3ae3f9": { "jp": { - "updatedAt": "2025-12-02T22:57:45.397Z" + "updatedAt": "2025-12-04T20:16:58.100Z", + "postProcessHash": "f4db6d78a27b8e213d70ce11df2aa7499f3b79c7b7a4b86ce2740d19067538eb" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.360Z" + "updatedAt": "2025-12-04T20:16:58.098Z", + "postProcessHash": "7a041037b23f125f83ac4accffd30570640dcc5bdaa8d46ba1154267e16c1f85" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.406Z" + "updatedAt": "2025-12-04T20:16:58.120Z", + "postProcessHash": "f83540f6b2f7e7a9df51656b020786f24b4193d04275ee9fdb9ad9d580d72021" } } }, "28de08e6f00a1c0b51895715997e43dbe463c1c4cff9b002dd9014edc5579dcb": { "46a435b4ba73651faf0dfb756e1f9ac1d59de7e580a40c74411bfb41b7c958d9": { "jp": { - "updatedAt": "2025-12-02T22:57:45.408Z" + "updatedAt": "2025-12-04T20:16:58.121Z", + "postProcessHash": "40408bc7a6a6cfab815971d41826b6903cb21c51fbc4950c62b31c777a90149f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.361Z" + "updatedAt": "2025-12-04T20:16:58.099Z", + "postProcessHash": "b37799d8e57f89d6ee90d1acc0214b04d98187e406ff1595ca11c14ea9287479" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.360Z" + "updatedAt": "2025-12-04T20:16:58.098Z", + "postProcessHash": "6fb2840e6cadf6c6188998005a9e4c898af8f7a6622237c48f5871c52380964a" } } }, @@ -9439,31 +11615,51 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.402Z" } + }, + "ac01e30e5af0046a52cf7276e30904b5d7a7930becd3ab5c3a9353eca589dddb": { + "jp": { + "updatedAt": "2025-12-04T20:16:58.132Z", + "postProcessHash": "963e063807ff88e8eb1429fc85cc7aee85d05e7fdda3a69bf50e85c928fb76bb" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.135Z", + "postProcessHash": "3f07dc5e5122a5571443fdc5e70b47a11f53e9eb2a50c000b1e5e5f2a980ffd2" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.136Z", + "postProcessHash": 
"2716ae27985f4783a04bfe50f4a68f2d9002eb2d76a7e5409e766c4e8d7cbb79" + } } }, "349400436c332178133e694bcd47dc9ccdf4d729cfc274f1f99bf82b54fde8d1": { "312b11780c641636863655dd7b1fd8b57e6cba8bebf6946857812ca2c3afe479": { "jp": { - "updatedAt": "2025-12-02T22:57:45.403Z" + "updatedAt": "2025-12-04T20:16:58.120Z", + "postProcessHash": "3b140290d6898eb1e7154e206d6f7cfe03f0e03ecfe0add1a814b42eaad02427" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.403Z" + "updatedAt": "2025-12-04T20:16:58.119Z", + "postProcessHash": "e01dca3ba6112b808bc6c11cb81d07c7ae5d729b69c3ccf123f3cc4a644409c1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.396Z" + "updatedAt": "2025-12-04T20:16:58.127Z", + "postProcessHash": "baf4e8deed4b7547d31d0e928dd5db2b78f03040639d8e98bd18cb5afc96a674" } } }, "3798000812ded299b4f0b685fe4df133730a45cf67c87419cd05a769b727f03e": { "1810bd73113c23ab353e5810beb46fbccbee2f443e979883aaf33e93c1afa116": { "jp": { - "updatedAt": "2025-12-02T22:57:45.396Z" + "updatedAt": "2025-12-04T20:16:58.099Z", + "postProcessHash": "4f09b7ae979d76b23ebde92e73a91c1d4874d43b931f21fe20672dbbc9ebfbe2" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.399Z" + "updatedAt": "2025-12-04T20:16:58.100Z", + "postProcessHash": "4610e4155c7be83d91d348ad760a5d322adc7c86fd4dd60f748b2b76a8f9d8f5" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.404Z" + "updatedAt": "2025-12-04T20:16:58.120Z", + "postProcessHash": "4ddfa7dceb72f49edf6ece638b7c5ae84f6ab21dfe241b464876c46b9b6e391e" } } }, @@ -9478,6 +11674,20 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.410Z" } + }, + "50217ecf6da1c9fa34dbce32b178808ba8ad58e9d6d7984a3cb4706dbb39d58c": { + "zh": { + "updatedAt": "2025-12-04T20:16:58.130Z", + "postProcessHash": "1b04dcc26d760567e16f81ccee97496b0d4ad8bebb8115f19adc2618b76d3b55" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.135Z", + "postProcessHash": "e0f19f1ae350f54da9778ac92aa34f0a2a4c73ec8c98d3ddd54fd4e3d30295c6" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.137Z", + "postProcessHash": 
"71f42998299b40045eae45a062f5a5ec5df5d1290df561e50ae413ecbb56a3d7" + } } }, "560a771a88f358751a343c140ad56fb357e898496988479f8f38d70c5e6abd73": { @@ -9491,6 +11701,20 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.398Z" } + }, + "0ef50f0b9a30bdac285fcb77d98560ae1537e2fb0a7be091a31b18662025042c": { + "jp": { + "updatedAt": "2025-12-04T20:16:58.131Z", + "postProcessHash": "904c7dd7de72816bbea9a5e02ddbe3f22a9a01cbfdf3595006be6fe32db2e659" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.134Z", + "postProcessHash": "bdec87856dee99be0d7c772f2515ea849f3fe4e4ae39d32400f4b0e464cf2e5a" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.136Z", + "postProcessHash": "e07f2d67c243e53dcdba25e5523b79b129ce1e0a5621b222c8008c88d2b61725" + } } }, "5c678b3a4246706bfc999afc97cec53d8c982fb87363f87665082241361a5d73": { @@ -9504,31 +11728,51 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.412Z" } + }, + "a8d446334c20df806169f3e6b58a455522b91e97933d922e12de4ce6b3c63f57": { + "zh": { + "updatedAt": "2025-12-04T20:16:58.130Z", + "postProcessHash": "08eb335e116e9fa325f0176ac6aca99229efe9607102dd2d9cc13d6f200f39b4" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.131Z", + "postProcessHash": "547da346276a79ef32d24761d28fc52fe328372cdeaa9cfa1103c59d680a12c6" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.137Z", + "postProcessHash": "de4e0c669aa2ac406c79cd8cbf1c4d203de3489e4dc7e172c690192393c22cb7" + } } }, "6021378296fe97cd32f847567e8226b5b01ff3e70c1eaaf35a828c9d29135ea8": { "a116f2580c016c233d50250b989b32bbe09ddafa83b8dc9dddec1dfc676909e5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.407Z" + "updatedAt": "2025-12-04T20:16:58.120Z", + "postProcessHash": "82d5f0fd03b979180d5372cd0aa473d0a098cd4676fb4df67bbd51f9e9024f4a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.403Z" + "updatedAt": "2025-12-04T20:16:58.120Z", + "postProcessHash": "705fa7d5ae9a07009495bdd6dade2adfbbd6d6b2a19e5c5dfb9ea50c5d8a8083" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.411Z" + "updatedAt": 
"2025-12-04T20:16:58.129Z", + "postProcessHash": "94bf145436d3dc5fb180c1348c56bf5cdd02f9904a662e0113c8647523767f5f" } } }, "773e022e6828901db117df504dcb5f22c010a9943c580fc510044d9585197e57": { "b629f3340f4e22116ec115e53eedd044eb499d902c10c1c5d836dbbd184e23b7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.409Z" + "updatedAt": "2025-12-04T20:16:58.121Z", + "postProcessHash": "94237bde05be33dda000cee59604d472fab4836462a9b3a6b2114a607bbe97e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.408Z" + "updatedAt": "2025-12-04T20:16:58.121Z", + "postProcessHash": "1ae7cc2d967c3cf3345a60dd8540422b9ef576d7a906da83b30fc8e684db71d3" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.406Z" + "updatedAt": "2025-12-04T20:16:58.120Z", + "postProcessHash": "c775c8b1d406f4ba664bc1172660a77e2613a33710f867dda6538d40a66bb723" } } }, @@ -9543,6 +11787,20 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.414Z" } + }, + "3f3a386316e023eeeaa7d4b4aa2d14b972061dcbb89731b34614a1eb9c1462a0": { + "jp": { + "updatedAt": "2025-12-04T20:16:58.136Z", + "postProcessHash": "8cf550e87cefcac7259d28477e1a286590c4037c2952d865ff0c29df18fa851f" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.137Z", + "postProcessHash": "cf837f89c06a10710673d4c96ced28da43222da9fbe399d04eb912bed0f13f78" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.137Z", + "postProcessHash": "24af7c2f180d0349510829b306b10d87d423f9fd78f5edb39d86e80ad30cbf0a" + } } }, "9011c5f16cd48892d19d1f38ab22564d7b0466e5517619ebecc2bc7e71bcaed8": { @@ -9556,18 +11814,35 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.415Z" } + }, + "0a84a3ef199a9a0709da7fb05188f57584d91dc202199a554f1169865ba579cd": { + "ru": { + "updatedAt": "2025-12-04T20:16:58.133Z", + "postProcessHash": "8a5f02c357ce5c5b1b285167f71eecdd17f1de631c71f2d06b086a5c6b2d5750" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.134Z", + "postProcessHash": "1edda774f07519155ae861aa23165a35ffa1eb54e03ccf0dd332bab2396454a7" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.135Z", + "postProcessHash": 
"be0acc661639da97209b6b0acf8876c9c96371c3446f75cbe61e6b937d750014" + } } }, "93c3b152cbce0393c9c6f97cf591c3711cbf3f81789b2925cea760c4c5cff676": { "a411bf07df5d4b27d21c51466694fc824b2f2422d09fd22f63570974bf2e2f9b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.407Z" + "updatedAt": "2025-12-04T20:16:58.126Z", + "postProcessHash": "1540c6b3765e78f588d6ce512faae698e5ad4fd6d07ce3e9b992e92ea7aa1f29" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.402Z" + "updatedAt": "2025-12-04T20:16:58.126Z", + "postProcessHash": "7b9ec757f931cece364c483c81d3e2cdc67af95cccbfbf2affa57963c0e1b867" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.414Z" + "updatedAt": "2025-12-04T20:16:58.121Z", + "postProcessHash": "1b75bb04f4eb8ac539d44141b5a8a6eae6ab21d19f8e35d0a8baba2aea105fed" } } }, @@ -9582,6 +11857,20 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.417Z" } + }, + "c783d09e7857a7848fb43d49f6e4743088006f96c6f8cac250e8b578e1a078db": { + "ru": { + "updatedAt": "2025-12-04T20:16:58.130Z", + "postProcessHash": "40f243e8c92f0ccd19cc8dc9fc1f5124ebbc145b735d4071399a4d9e158f8d48" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.130Z", + "postProcessHash": "c3a827c5f504feb9cfb0a8c1226fef246771e9e3b9c3bb37e5526f9e34be877c" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.132Z", + "postProcessHash": "4a80bc0724d7db956c0c584eb9e41ce7b30223f914175533e1502119a250da94" + } } }, "be8dc1cd18e614d45a58deaf1024b50c63f3b407079c8195f643d25501e18a86": { @@ -9595,6 +11884,20 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.360Z" } + }, + "163f1c5c9cce107d4f530475caacec149a18adb4ae0f9309c339bb02c385b9f1": { + "ru": { + "updatedAt": "2025-12-04T20:16:58.182Z", + "postProcessHash": "03f6f00abf2e2cb5bc95c31c25f368ca56f6e674addfb378ef7719f5bf3d7d53" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.183Z", + "postProcessHash": "bb9b0f9caf7d055d362fbc74fd5fd4bf32bc5271dbf89ed68432aa281e8ee733" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.183Z", + "postProcessHash": 
"4b7d95893d42d45fe136325b1ae16d746fe52cfc523930f36ece8e9383174579" + } } }, "bfd7f9d45ae0b09ed83b9f435d6a0720b54f9af64528eea7b98d70d76a9a29ba": { @@ -9608,135 +11911,179 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.416Z" } + }, + "03a1b2ec675680c9d09f2bf57e7e2788958b6b2b98782821ebb8691f3266533b": { + "jp": { + "updatedAt": "2025-12-04T20:16:58.132Z", + "postProcessHash": "ee2b6ed65dc98f27b6093149c85121afad7bea1968ac3f9361c7e14e2f510186" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.132Z", + "postProcessHash": "1a043a16036a31f132be97c8995deb93cf2ae3de783ff7a7669b6e37d5a8e6c6" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.133Z", + "postProcessHash": "ec2fad4e59c622b004f61d83cb278b83d5d241a88e401ed71c269433a273dd35" + } } }, "c3760dc390d98e6e5ed5a4a74c5c609915d05a7a63963656f7715607d65ae092": { "2904d7fb4addff8bf173ca68df5b540945453ef8fa9eec76a35d8f9b92ab8b87": { "jp": { - "updatedAt": "2025-12-02T22:57:45.421Z" + "updatedAt": "2025-12-04T20:16:58.123Z", + "postProcessHash": "78b3487aa538390ee13d21a34cf4682a670bfdaffd61bea46632c3a16d139603" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.420Z" + "updatedAt": "2025-12-04T20:16:58.123Z", + "postProcessHash": "b8cf694b4595c7c760bb71d762878a2713d1b837615b69bb6ead52676eaf9d79" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.421Z" + "updatedAt": "2025-12-04T20:16:58.123Z", + "postProcessHash": "9b7ada0d1eecf348e6b87a19733054f3035bb81749d1ce6a77979f7853b403e5" } } }, "e7312c644964f4d389a9171edabe14341e5e6fdd852101cf9f16a264088857b7": { "2904b07971746b903763bbcc8b60c7bc05a984fd6692a24f60eeae21856cf64a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.420Z" + "updatedAt": "2025-12-04T20:16:58.148Z", + "postProcessHash": "775c2541160bca09901528544da0511731108813b896bf63429b58d01dd6bf2e" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.419Z" + "updatedAt": "2025-12-04T20:16:58.147Z", + "postProcessHash": "8a3a54fb1e1728bdf86231fdda8dfec99417389c9b3fcbc5fb88a86293424362" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.420Z" + 
"updatedAt": "2025-12-04T20:16:58.148Z", + "postProcessHash": "caa10d46cd72fb5f892ba8f31d7f543fd140d44a280032c8edf603802afbc6a9" } } }, "f5e8eec3fa4fdf1b4c16b4b71f35b278d41db6e5586c66a42fe590521942f347": { "f9704f3dd2bb395a82abdb0dd1b7b09ea97a4499075e9bc8ecfcb0ead44a1d69": { "jp": { - "updatedAt": "2025-12-02T22:57:45.422Z" + "updatedAt": "2025-12-04T20:16:58.156Z", + "postProcessHash": "5a8601576d8bd351ca8eb621bf9362bdb3ba5ccfacd7c60b35da307b4b644d6b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.422Z" + "updatedAt": "2025-12-04T20:16:58.155Z", + "postProcessHash": "e2e8ffc4cfc3a07e15ae4ac3a4bda9db663c2c109bb1b24d7d27e0e879fdacb8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.422Z" + "updatedAt": "2025-12-04T20:16:58.155Z", + "postProcessHash": "cf1414281668dcee75fe5b11b0011c173700d66ae4d61522dcc5060063033d9f" } } }, "0c9700318afe07f773f3675286dbd1308302fb5c993fc403ead5ee2c2c311f85": { "26bbf167b8a8bdd6e415d3cf429c935f63ed38563bdb8697297248361bdeffad": { "jp": { - "updatedAt": "2025-12-02T22:57:29.027Z" + "updatedAt": "2025-12-04T20:16:58.154Z", + "postProcessHash": "2db42029b0e585f854889f7ad8c96c1b70f5a612a3a6547107f6e27f278387b3" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.018Z" + "updatedAt": "2025-12-04T20:16:58.150Z", + "postProcessHash": "0053f16676e49702d213b29478b3661ea8916be71c88e78ccf7a58effa9a71c4" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.023Z" + "updatedAt": "2025-12-04T20:16:58.152Z", + "postProcessHash": "77e78c20f7e721918a2a7963109df30c745fd5ef960b81d406facf2e19b83f8d" } } }, "1205bf7e48133304fe346efa0309af05787e80fd6f83623b178426d0d89e43ab": { "7a4af08a1b17f2a86db198129d22bf1a71494ef3425bd28e8251e46075a27288": { "jp": { - "updatedAt": "2025-12-02T22:57:45.419Z" + "updatedAt": "2025-12-04T20:16:58.121Z", + "postProcessHash": "3b4169ccb6fa470897f95984e5e4d7c36ecfacc36799c2c78b00637713cdadfb" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.014Z" + "updatedAt": "2025-12-04T20:16:58.123Z", + "postProcessHash": 
"1deabca0dc30190ccf35a36e8fea27956eff1bbfa0697ca86966f6a6a24dbfe3" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.014Z" + "updatedAt": "2025-12-04T20:16:58.122Z", + "postProcessHash": "3a7d76d598e8016821d68327d3f3e3ff4d52bccb50ac7c3ae7b227c4fc244638" } } }, "3c59f44959e6e9ca6acdb43ffec9355d9485257cc402a212ce1759a0064bb981": { "1ec8c112466533ed9f452783ba3194be3a2def5e0e028ac74fb3913e56cc2f4d": { "jp": { - "updatedAt": "2025-12-02T22:57:29.029Z" + "updatedAt": "2025-12-04T20:16:58.155Z", + "postProcessHash": "f22fc3c42de3b1253b21f5acabe21ec3e1881181fe75e336f80543c34af0d488" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.031Z" + "updatedAt": "2025-12-04T20:16:58.157Z", + "postProcessHash": "e8614eb7b979f611442f92a4f12177a0aad454a9fefc1b8918ecd58d6795df46" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.028Z" + "updatedAt": "2025-12-04T20:16:58.155Z", + "postProcessHash": "27c6fbf289c8c64d4e044c4128e5a130718ad31f764d68b82b5c8b65f67ce5da" } } }, "401161c2a7c478b5179bcce758b06f32dba0fdbf9bc64188f2d7f79dac72dfa0": { "f590407909c6eb71eb806ee84a3e8ff079ef373b53b7e0b97152b2c9ea18f318": { "jp": { - "updatedAt": "2025-12-02T22:57:29.031Z" + "updatedAt": "2025-12-04T20:16:58.157Z", + "postProcessHash": "19d5a9aff54bcf96dbbc7dfd097be760cfdf00d810b6ba47dadf6f1588990066" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.030Z" + "updatedAt": "2025-12-04T20:16:58.156Z", + "postProcessHash": "5084b44332c8923ecd89022f7ce8f5a87bedc43e957ad60d5c29166e3131b6ab" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.026Z" + "updatedAt": "2025-12-04T20:16:58.153Z", + "postProcessHash": "220b2a82ee37d8f2660322604f1685cba98b74a17c384324a0ae06a2165a86d5" } } }, "48ca9336c96e6bf5d7264d6ae62d5ee29644e6c214dc339d83a610716c484ff0": { "6e9ef6dfd8e741fb723339409fd3ec6e0e74d8c83d08b37cb60190c4e83a6762": { "jp": { - "updatedAt": "2025-12-02T22:57:29.015Z" + "updatedAt": "2025-12-04T20:16:58.146Z", + "postProcessHash": "c31f043d91fa86b75264845c64b520f42ab6799775710cfd1e84dd9d274356af" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:29.015Z" + "updatedAt": "2025-12-04T20:16:58.123Z", + "postProcessHash": "de2a813f350670fe63969b57d5c3d84697ed08edb9f295d9dde142ae02ba0a27" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.017Z" + "updatedAt": "2025-12-04T20:16:58.149Z", + "postProcessHash": "f813bad8ac31feb90963c92ba27f77a74f4c81eff97ef0cb248ca14a5df79ebb" } } }, "51b2a652cdf591979b38ae330c8306c66fd1186f911c915e5aa9e108a6876603": { "a570b7d389235335a4604eebb1f1ee2a84cfb547848012a3f76c135f5a18e3f9": { "jp": { - "updatedAt": "2025-12-02T22:57:29.031Z" + "updatedAt": "2025-12-04T20:16:58.157Z", + "postProcessHash": "6221819924a634bd237f2e1d71627ed52060bbf7e772b3fde60dc329306a2205" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.030Z" + "updatedAt": "2025-12-04T20:16:58.156Z", + "postProcessHash": "3e95b0e05d5afb55b7e6e8cd82ecde7656b06bff3e4c0bdd3fdaf062d3b1c21f" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.026Z" + "updatedAt": "2025-12-04T20:16:58.154Z", + "postProcessHash": "688d8064a1f8a119a0dab808678cea2e7da387cb9bd60a32e02513a513695beb" } } }, "5272155dbd5220decd129a5e4b559edddbdf6ce43e7a6b8b33c93f39ff269597": { "976786fd43e7ab3db7efe0f5493c2e4b732add2abc4ca3639e54d6dba7ea3e9c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.425Z" + "updatedAt": "2025-12-04T20:16:58.122Z", + "postProcessHash": "487be067c8aa6535a54a90041692a596c79eec4dcd3bce169cb23531f73af273" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.425Z" + "updatedAt": "2025-12-04T20:16:58.122Z", + "postProcessHash": "6241169d5228811057271c6026586e81e469491f3c371a8dde989455a88b01c9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.418Z" + "updatedAt": "2025-12-04T20:16:58.121Z", + "postProcessHash": "1cdff9979240f85d72430709a5efb12169bb4beec5957ffcd7ffeca3b0a52a1a" } } }, @@ -9754,13 +12101,16 @@ }, "4a8ab65da6d22806e27f1c0235a64e17eedef58353dd5f027ef06129117f60b2": { "jp": { - "updatedAt": "2025-12-02T22:57:29.035Z" + "updatedAt": "2025-12-04T20:16:58.178Z", + "postProcessHash": 
"ff5c54c9d0a3a8f9b1783ec7ec9262c672a0245ed307fdf0c64ca587b6390090" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.036Z" + "updatedAt": "2025-12-04T20:16:58.179Z", + "postProcessHash": "6690ad676ddafeb0a2da8e041bbad3216cb1e3091522612dff3015becfd3d19f" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.037Z" + "updatedAt": "2025-12-04T20:16:58.179Z", + "postProcessHash": "17a2ab901ec1651182d62ad7265968bfd0aa8a0aa3922e4d69ae9d81eff7d5a4" } } }, @@ -9778,65 +12128,80 @@ }, "3a04105dee92510060ffbb0e51a8b69619bf2a41b598379b28951f1a94eef00c": { "zh": { - "updatedAt": "2025-12-02T22:57:29.034Z" + "updatedAt": "2025-12-04T20:16:58.176Z", + "postProcessHash": "4183ed3c2db67ec9cb9e8f9d94517cd08cae13d15f17844d3158f6ca8fd673c8" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.060Z" + "updatedAt": "2025-12-04T20:16:58.177Z", + "postProcessHash": "49256a1d63e90849e9830b4e12182aff3565acdd2768869fc05fe764457a0489" }, "jp": { - "updatedAt": "2025-12-02T22:57:29.060Z" + "updatedAt": "2025-12-04T20:16:58.177Z", + "postProcessHash": "42730a82569bd00eff64b862db2fea6999103e907d6088f5b445c839676edc0c" } } }, "693961a5d1fa6bea79f69b1d2a739db59c41797c8d322ac4dea99c908e8abe46": { "b20f75da8b934d12c4230b89c1dd8cd4bb4b57708832dd5f413fd6ddf12d4434": { "jp": { - "updatedAt": "2025-12-02T22:57:29.018Z" + "updatedAt": "2025-12-04T20:16:58.150Z", + "postProcessHash": "b04c2b5831baee54a48fe8887e132433f1b6824b185242351235837468ef1450" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.019Z" + "updatedAt": "2025-12-04T20:16:58.150Z", + "postProcessHash": "6d4320a6a8b4149d556a8d8ff6d602790266dc413d9110ab3440d97c95da43df" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.018Z" + "updatedAt": "2025-12-04T20:16:58.149Z", + "postProcessHash": "611a57677a5b86d3bfc43c859da45b64aa99d0b7348b7b210acb325c642ef776" } } }, "822e90a8485f6ba254a1b6b4f89bbeea67771bd3cb9f9d6241558e4b9f59e8ca": { "3442662c930110d3e163429ea57e15d27f6132307f6bdd86dd62fc64d01d1c48": { "jp": { - "updatedAt": "2025-12-02T22:57:29.016Z" + "updatedAt": 
"2025-12-04T20:16:58.147Z", + "postProcessHash": "c85c8a464e477b8071b02f233a393e5429d4651221cc99ec2b92520354a20c68" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.013Z" + "updatedAt": "2025-12-04T20:16:58.122Z", + "postProcessHash": "536b521389199086263efc05654006011b73801d6d280a6874d1f5d17c9b53b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.014Z" + "updatedAt": "2025-12-04T20:16:58.122Z", + "postProcessHash": "5bdd0c323f16bdc8fa99589faf2a95916f0e3b9f4029cf672eea725717f3fa81" } } }, "8fafd060efa9d7570d6665629f29f511b108ca76567a0f8ab9320536cf4824a3": { "95dc2ad2c072c0167726cf92eb31cd7af87b0eb4785b0fb839363d03a88ae8a5": { "jp": { - "updatedAt": "2025-12-02T22:57:29.033Z" + "updatedAt": "2025-12-04T20:16:58.158Z", + "postProcessHash": "f6b6596553703dc5fccdbe28c2ab97539f740445d87b55508e731adbe580c468" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.029Z" + "updatedAt": "2025-12-04T20:16:58.156Z", + "postProcessHash": "ff36af709f413ef2e3875184ef2c3cc9e3b03045a9d31f116afa0b8735d7cf2a" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.026Z" + "updatedAt": "2025-12-04T20:16:58.153Z", + "postProcessHash": "8c0272078d83758d7c3cf764b57f3afddd954814e16a837f9285f3db179d85c9" } } }, "910c09772c30498ccd96c4a7059798706b5861119f5ae8e46d899e9a4da807d5": { "419e68f0fe31b19a72d7bfd6b1b28c27298c6d38904baf049d3466be88aac0ea": { "jp": { - "updatedAt": "2025-12-02T22:57:29.021Z" + "updatedAt": "2025-12-04T20:16:58.151Z", + "postProcessHash": "3091b4e4e779031f8addd4073e87dc3826b0ebb248ad817a451468ccadd74fec" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.022Z" + "updatedAt": "2025-12-04T20:16:58.152Z", + "postProcessHash": "522330e6a8a495c02ab489af2dd3f276ca67f1afe1a9723e03f01443983a0a19" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.021Z" + "updatedAt": "2025-12-04T20:16:58.151Z", + "postProcessHash": "c29fba8b1a2fde675a2f1fe70fcb68a1fa04ddb126f6855bd356cd509c7dd634" } } }, @@ -9854,104 +12219,128 @@ }, "a529f5735fbe960afa2324179342c53aec9a55e1f4f1b1fd791033ae87f5a6fb": { "zh": { - 
"updatedAt": "2025-12-02T22:57:45.395Z" + "updatedAt": "2025-12-04T20:16:58.118Z", + "postProcessHash": "78027c95e64715fac5302dba905a303d295a1ab5a35cb474bd2baf9044eafa1c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.395Z" + "updatedAt": "2025-12-04T20:16:58.158Z", + "postProcessHash": "03488c12c2cfac7eff222f8be4bef0f0f37858497afd231ff72b2841ed4e6ecb" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.396Z" + "updatedAt": "2025-12-04T20:16:58.158Z", + "postProcessHash": "b004d3ce996eb120377ac13e4c53c8b8ee36d503101e49198db88a44130b070c" } } }, "a0c24fe635a0e43acb3d80e4a7fc854ecdfc143306eb5b0b77baccd6c4ef9468": { "e71bd647b55a5bb2a691655582237e95f9ff246c5f45f2f2f663e74b62968fa9": { "jp": { - "updatedAt": "2025-12-02T22:57:29.027Z" + "updatedAt": "2025-12-04T20:16:58.154Z", + "postProcessHash": "5c452a681cbb771fdf126a51b0d88abce1dd6f87b7c80acc355df45f242adb44" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.032Z" + "updatedAt": "2025-12-04T20:16:58.157Z", + "postProcessHash": "59ce7ce283d91095b939355f1b9a7379196e8611ed95cf6f734cb09f7413bf3a" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.028Z" + "updatedAt": "2025-12-04T20:16:58.154Z", + "postProcessHash": "8b170abd619bbe25ea02211f7a27683e30dca441ba6e1f5167eeb33671d96fb1" } } }, "b60ebbddf877960af38c601bbdbf000beb3124a60fee1f8c23fed49149d1c527": { "a5cf8d2eccddd9b6214fa12aac2b98dd4e514d569be5e26938ee9a3b11a0b411": { "jp": { - "updatedAt": "2025-12-02T22:57:29.025Z" + "updatedAt": "2025-12-04T20:16:58.153Z", + "postProcessHash": "0d22f68c26687c6e11d3ccc9f0a66a7aac55d927d0193ef716c4467b92b3298d" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.024Z" + "updatedAt": "2025-12-04T20:16:58.152Z", + "postProcessHash": "d63efd4d61573bd3ad4b86b731c3425925d7d68406480edd57b587150995147c" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.021Z" + "updatedAt": "2025-12-04T20:16:58.151Z", + "postProcessHash": "f0363c5f89d4ecde29b785f84657450c3eb52b4565e2b3b35354260706b2f5da" } } }, 
"c11fd5cd4c0e0c76b50b836fc0585b7d897d5c6e619c8530f61e70fb13e7d1cc": { "1fc6d064882a931f2ccd7ae4239ad068568c65a8bef153bd6264d39d45bdf340": { "jp": { - "updatedAt": "2025-12-02T22:57:45.419Z" + "updatedAt": "2025-12-04T20:16:58.122Z", + "postProcessHash": "4522b7efc32819ea16972c95f2ce99ba56305acfae119eb4c98edc7715085ad1" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.013Z" + "updatedAt": "2025-12-04T20:16:58.122Z", + "postProcessHash": "c66dd1cc242c35e71cda37f7f97d750be81983e76d43d9e50125f6c6234111bf" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.016Z" + "updatedAt": "2025-12-04T20:16:58.148Z", + "postProcessHash": "c0ce15be35aa928161f93bd53f1d62caf4053ca2e71fc4de141f4e67046bff91" } } }, "d2f8552b7911ad6c378a02252cb789aff8287601b71f6571a0f6e1b7a8e78d04": { "94efb582e11f5b80d588c2052e90bfd70b816f556e6039e851019c86956b10da": { "jp": { - "updatedAt": "2025-12-02T22:57:29.032Z" + "updatedAt": "2025-12-04T20:16:58.158Z", + "postProcessHash": "a9f3d894b73a5e8fbfb355b615986f6554982beaa5b936b72d02d705053e786a" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.033Z" + "updatedAt": "2025-12-04T20:16:58.158Z", + "postProcessHash": "5c00236ca9842e274e702a44e1994f85189a107174b02d1ed13a8e5afd727476" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.028Z" + "updatedAt": "2025-12-04T20:16:58.154Z", + "postProcessHash": "91ca53f5e13b0879413261dabb6aa1482fe5da4c89bcd5e759dc619f60162775" } } }, "eb48ea9cc55a5f79da9d6053e1ddc3e175fac421ecfbf7cdd1fba7409a5937c6": { "4bc78345ed8b814098932537f3fc29577489a1bf65318ccf523d0e7979227a78": { "jp": { - "updatedAt": "2025-12-02T22:57:29.023Z" + "updatedAt": "2025-12-04T20:16:58.152Z", + "postProcessHash": "cdb1bda224b383474c02d61a269ea1e223b06fafd0c11fd6c6d3d8d9f0c45983" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.024Z" + "updatedAt": "2025-12-04T20:16:58.153Z", + "postProcessHash": "c356b333abc195b3e8534cb7bdc5ec98e3543b86e1e4405e54683fcb123d5f57" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.025Z" + "updatedAt": "2025-12-04T20:16:58.153Z", + 
"postProcessHash": "6c445591fbab30d7577c48b9590716968a86683cf4d982a5e12bf1a0eefbb35e" } } }, "ec7b68e24b7e06a320d9daf21b92c0a61d2d196ada1c8350b443b492c228b245": { "f255bcf4104dcca52f9807566387c0fcfe6d06f436994fee4179bc40d148cf94": { "jp": { - "updatedAt": "2025-12-02T22:57:29.020Z" + "updatedAt": "2025-12-04T20:16:58.151Z", + "postProcessHash": "b3fee47b47dc2290e87a50d1f4278c3e481e4240e496558a25e1f1048ee04232" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.023Z" + "updatedAt": "2025-12-04T20:16:58.152Z", + "postProcessHash": "f6c7c9219121e18e781b866bfe788b2a54acde8c102ad2c6072e17d340df89f6" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.022Z" + "updatedAt": "2025-12-04T20:16:58.151Z", + "postProcessHash": "6f3dd083e77e0617e119144f31c923c51ef132142196b6309807f50abeb5a9db" } } }, "eefff94e72ae2ff41e6e3bdfd308882739e2e719c94cb06245a0ddf4866a91d0": { "1a4e25f6cb4dbccbb5205a184e3f9417ca1d8398e86e5433534abb2f3af17825": { "jp": { - "updatedAt": "2025-12-02T22:57:29.019Z" + "updatedAt": "2025-12-04T20:16:58.150Z", + "postProcessHash": "47d1faf543fe73ac7fa9bea9082ebc4c7708f2096d68b4110b3b4e35f8d2d4aa" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.017Z" + "updatedAt": "2025-12-04T20:16:58.149Z", + "postProcessHash": "e0c1399f6444e8c2cd3a0448caafd93b609c7f666e849e21477e72fa25aa9518" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.020Z" + "updatedAt": "2025-12-04T20:16:58.150Z", + "postProcessHash": "5f9148af290b8e332f4945188616cdacdc4400d1fb854ae50a49dc96ed9dd428" } } }, @@ -9969,260 +12358,320 @@ }, "d539a2e6e29c28f2a0f9388521c72cad291ebf89023c8cd11cbcfe918314ba5b": { "ru": { - "updatedAt": "2025-12-02T22:57:29.034Z" + "updatedAt": "2025-12-04T20:16:58.178Z", + "postProcessHash": "ffd4ab29f644bac86b9ad1da8e700768882c525563cfd6deb294a32c452c00c4" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.035Z" + "updatedAt": "2025-12-04T20:16:58.159Z", + "postProcessHash": "8f0041399a59000d4c60b36fbd11227f1b485defce1eb516df73378642cc3f7c" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:29.037Z" + "updatedAt": "2025-12-04T20:16:58.179Z", + "postProcessHash": "47e97c8b6b5ae5faf54dfb5af47097f6084715d5d6d64d3527f8b46df1ca24c1" } } }, "0488cc4c783adb013176b8dd5553d28bd7e7ce03785fd0038e3b2b17e6bdf549": { "718aa60f3c8b05491fd8687c867ff950c98134aa648057ef2a403f78f1807100": { "jp": { - "updatedAt": "2025-12-02T22:57:29.053Z" + "updatedAt": "2025-12-04T20:16:58.145Z", + "postProcessHash": "632b97abdbb009312e6951f9c9f878035b18b973c75d4d4f866a2d65dc768a54" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.424Z" + "updatedAt": "2025-12-04T20:16:58.118Z", + "postProcessHash": "3be43d8fcb031bf4bbdb15285c2a606bbb9dbc5f31fcdb81df882419e639729b" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.041Z" + "updatedAt": "2025-12-04T20:16:58.139Z", + "postProcessHash": "e84914e2f57ef8708bfeca376261aa24fe75eb4c152d98989a2f7c7ab667c05f" } } }, "1310e9bab89510f4aedd870fa23492b42460b27cc53beb62388b8527a75b4abe": { "5d36c63b3cd649e6c42f53a7e1722d9058261e3c5703e736f85a4081ed299d22": { "jp": { - "updatedAt": "2025-12-02T22:57:29.046Z" + "updatedAt": "2025-12-04T20:16:58.141Z", + "postProcessHash": "c26f3a6f63feb987c73c53ea0211cf7053746654e915cda9808291c1d88f750e" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.049Z" + "updatedAt": "2025-12-04T20:16:58.142Z", + "postProcessHash": "e28330739f8e294da253009f27c46947dbea83da5cd17f5e1eba4b184e730b5f" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.054Z" + "updatedAt": "2025-12-04T20:16:58.147Z", + "postProcessHash": "9a5dfec87714972506ff3625ae31550e67a7110f7bf4e935720cee4082a0ea08" } } }, "313f2f3a2287ee9166540ad792489898b8322355b28ee91e96ba66cf781aac35": { "813cd15c21bab5b5ae060ddf42a770163642046d3681ff5dd1dd8a48b6578a17": { "jp": { - "updatedAt": "2025-12-02T22:57:29.044Z" + "updatedAt": "2025-12-04T20:16:58.140Z", + "postProcessHash": "779d0aa00ea9f874e9ba40490b5109d067fc381fb1dec7a2202223373bb1ec43" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.058Z" + "updatedAt": "2025-12-04T20:16:58.174Z", + "postProcessHash": 
"69521e80448bda580f8100c7e23b03830220179bded07c0a5de1abf92f039c11" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.059Z" + "updatedAt": "2025-12-04T20:16:58.175Z", + "postProcessHash": "8bc37f939cbd3a674a5250f7809afcfe37fd26a5e3e3b98bde5e54de7eed0755" } } }, "38d86ec85c1c8aaad845db190de05e50994d1a3c494195da910589c64b052751": { "3cff21a72fb101c7dc507cfac07bb03d9d16b6445213a5a7553e646f024ba71f": { "jp": { - "updatedAt": "2025-12-02T22:57:29.054Z" + "updatedAt": "2025-12-04T20:16:58.147Z", + "postProcessHash": "da0986bd34eef0e1315058496c916c41298b77d4820a97bef50f3deb226aa0aa" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.042Z" + "updatedAt": "2025-12-04T20:16:58.139Z", + "postProcessHash": "7698f002940daed3c558e38fc15ac644549954d1e79e06d76c43275daf2d8e29" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.425Z" + "updatedAt": "2025-12-04T20:16:58.119Z", + "postProcessHash": "77a2cb97a23703f71c39b94b4c686d726cb1716c48fdaa414a58712ef0dab68a" } } }, "47aae18e89fdc913969ad0dd021c6affb6a825d67862170fab9bf412e150d04a": { "7845706578879f0d6235708b243856e2005db4e602dca78be25078cff83676ac": { "jp": { - "updatedAt": "2025-12-02T22:57:29.059Z" + "updatedAt": "2025-12-04T20:16:58.176Z", + "postProcessHash": "0f678d59a25e779457f32b639c8820a502dad5844946663151826fdd6292c533" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.058Z" + "updatedAt": "2025-12-04T20:16:58.174Z", + "postProcessHash": "3c8db17b36ef36c42d90a4b20e062b851f3ae4e7a3d62b58e85eb0b31c2a7da3" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.045Z" + "updatedAt": "2025-12-04T20:16:58.140Z", + "postProcessHash": "ae6f375cd3e3ef70a68d3048240a24157632e16b3f92d5353a19971ec2e69d0e" } } }, "4a1e810e51a719b0c246d3a43e6419bd4b987b2e7623567a865586ec6ed3fddb": { "ed63b452ccdcc51644ab26c7e164fd9c06b4fb9dd0f29123b7c142d640dfd731": { "jp": { - "updatedAt": "2025-12-02T22:57:29.049Z" + "updatedAt": "2025-12-04T20:16:58.142Z", + "postProcessHash": "ef7cd524f2fdf6d90e1858b5d23db17318500f976a5a7a2fd1e5586717da19e4" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:29.043Z" + "updatedAt": "2025-12-04T20:16:58.140Z", + "postProcessHash": "80bd11959669fcd52e73651a4421f670b3debada6cfa53fe7fd89d67a5f5510e" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.045Z" + "updatedAt": "2025-12-04T20:16:58.140Z", + "postProcessHash": "e4e3b0a4c2291fa43e4826fa7b93c070968a8172742a5cf99d056bead067878a" } } }, "50a5598ee25c450c5fb03f18bc79c9f33c4b2d45dd82d93378770a029449765f": { "d681b2d70ad9048fc005dfbd39784bf38bc368cbb6e601d7be30a81c02aa66d1": { "jp": { - "updatedAt": "2025-12-02T22:57:45.423Z" + "updatedAt": "2025-12-04T20:16:58.118Z", + "postProcessHash": "7a37713449cf19d0d052e393040b23052ce7438d72710a0dcc8517980a765db1" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.040Z" + "updatedAt": "2025-12-04T20:16:58.138Z", + "postProcessHash": "c0bed528dd331f0ec70fca32e12e3d3473f860e2f2a2e763b58621292706cb65" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.043Z" + "updatedAt": "2025-12-04T20:16:58.139Z", + "postProcessHash": "97593d93d626e7aeda4debf478235ebb58a33de0553d97723ce614e2b878f0ee" } } }, "58592583285bade083e9bb2abfe89113954c980d6a63fd9134c60920badad2d7": { "8b688b902eb485da1cd904c9a534e7c30138ddc8fe157648544914cd332f7701": { "jp": { - "updatedAt": "2025-12-02T22:57:29.047Z" + "updatedAt": "2025-12-04T20:16:58.141Z", + "postProcessHash": "85ef404ff476e76c3e7046ef1dc469f1942820db60fa2d6d3e793ceb69977f91" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.045Z" + "updatedAt": "2025-12-04T20:16:58.140Z", + "postProcessHash": "edc04fb5c50ecd8ef1fc5ff24172f0e494faaafcf86f204c74b229549b211541" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.055Z" + "updatedAt": "2025-12-04T20:16:58.148Z", + "postProcessHash": "6ec018018df8764805f33ffbf95356549971a61713481f4be4e30622d4c326b9" } } }, "5adf48b603a73cafc111898833bb810f6f9d985906f5a28a6b5510c4ad5ed9df": { "ded6c22af292aa253dbdb1b8bcdd3dfedbd38430db398f57c83b96b8b42647f8": { "jp": { - "updatedAt": "2025-12-02T22:57:29.040Z" + "updatedAt": "2025-12-04T20:16:58.138Z", + "postProcessHash": 
"a13164d17abd4104449490474446df9be775913c36f63421326a313b32728ee1" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.424Z" + "updatedAt": "2025-12-04T20:16:58.118Z", + "postProcessHash": "9ff90027bc5fc935d89f378837cfcb01114ea7601541d14bf0c65c25c8db786a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.423Z" + "updatedAt": "2025-12-04T20:16:58.118Z", + "postProcessHash": "142c8d402e3141bd4bd175befffbdb4d1632a9617ab17b3aec92afe937195092" } } }, "7d16493aea7d06c09ef802117a0be5f6d628751d0a5c7a7b03ce8eb9dc409bf2": { "5c7a7e89cebe18dd07de910c107fbcee8795947ad29a2d17a6d6024a235a658a": { "jp": { - "updatedAt": "2025-12-02T22:57:29.042Z" + "updatedAt": "2025-12-04T20:16:58.139Z", + "postProcessHash": "9f22f5c3850a168dec64c983aad647c877a101fc8b65b6022b9fd9c61eb9d705" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.048Z" + "updatedAt": "2025-12-04T20:16:58.179Z", + "postProcessHash": "bbce2f4caff9f05c6c142c7e7881079a26d8ba55d8c38fdca955b58a5327949d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.424Z" + "updatedAt": "2025-12-04T20:16:58.119Z", + "postProcessHash": "992d088636ad4c139fe770f107937bc5317252cef47c3974db07eda981207ac5" } } }, "948b9dc936c07fa8b4472138f98721317baa561958a48a6445780ecfc6a1c485": { "2f113bab1b3e6819aa420803e0868837c5a60eed370a5c0708d29084e14f6cdc": { "jp": { - "updatedAt": "2025-12-02T22:57:29.039Z" + "updatedAt": "2025-12-04T20:16:58.138Z", + "postProcessHash": "6f038c456e76bffe9a21a1f74f613171f02a645e1b38c5cfd99579bcd2de0d17" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.039Z" + "updatedAt": "2025-12-04T20:16:58.137Z", + "postProcessHash": "f86ab8839e9c68cbc5149e9860687ae5ba705f12c0d9a0fd53f151e756db3204" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.044Z" + "updatedAt": "2025-12-04T20:16:58.140Z", + "postProcessHash": "7b5fa6bbf16491bcf0f92fe63e3427c79b02369af278e54b6333c789d06aca01" } } }, "94df9a623cfec05c2c5b489fbed533e510d65ccbf937bed27f852c60f3a24b6b": { "4d71c012d9187781ca8fcfad6d17272ce0479d7a403fdf6f4e13744b2054c414": { "jp": { - "updatedAt": 
"2025-12-02T22:57:29.056Z" + "updatedAt": "2025-12-04T20:16:58.173Z", + "postProcessHash": "c6c1a1016168d3362e0650a40a18add96be7a6b8497778c458ca9ed78d995b84" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.058Z" + "updatedAt": "2025-12-04T20:16:58.174Z", + "postProcessHash": "eb2d09a582a79e751b1d025725543d653f2a94e82504db1fb2d2ae7f83601bad" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.053Z" + "updatedAt": "2025-12-04T20:16:58.146Z", + "postProcessHash": "cbb18bddeb9d7591ead8dd615a3443495becda7ae3ff5d844902e19a3270d570" } } }, "abde8721a04c3899ef6606633f77be77e9d032a8fa7f37d834ba01a23fe119b9": { "580c03f819a51524be5321c5af5b976bf750d39a4e3a64a3dd28f32805924089": { "jp": { - "updatedAt": "2025-12-02T22:57:29.041Z" + "updatedAt": "2025-12-04T20:16:58.138Z", + "postProcessHash": "f4c816471ebc4f1384a73a0d7d5ded6de5a6eee864abe32a1e8bd321d78e7938" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.038Z" + "updatedAt": "2025-12-04T20:16:58.119Z", + "postProcessHash": "0eac5f33026edfee783765bc2f2855e0ca9f4742c4b433e198f9f59decc0f630" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.043Z" + "updatedAt": "2025-12-04T20:16:58.139Z", + "postProcessHash": "07cb5f61e01181d1cf23fe740f9ba4cea3819d32b56b7561c48687a568a9ea69" } } }, "ae86875e3a4deeec5b623e90f58d3191bc8e79167da17320095d45b7aefc2243": { "8e8b9e7eee69658acfb5be5d7837a6c6af0457a30ff7676b0d57099a5399ff0e": { "jp": { - "updatedAt": "2025-12-02T22:57:29.048Z" + "updatedAt": "2025-12-04T20:16:58.142Z", + "postProcessHash": "4d6481d4bd406e143d705595c747c5f4306281b4fb360cbf62e9de23655a9618" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.040Z" + "updatedAt": "2025-12-04T20:16:58.138Z", + "postProcessHash": "3aebaa11a3f071b2ffcafa63093eaec0b2b9d3489ca657d83ca958c1debb76fe" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.038Z" + "updatedAt": "2025-12-04T20:16:58.119Z", + "postProcessHash": "332a61d48acbdc12e2c0f2ed974751749a1b053361148a9d3599cf761ae9ddd3" } } }, "b5d0eacaaf66596432fd2e0164bb5f867e3cac16623e968148a4d757d106c3f9": { 
"dd01468833f830dab589b0b46480f9b998ba99103d12ff19ec3c342a9f0a9138": { "jp": { - "updatedAt": "2025-12-02T22:57:29.057Z" + "updatedAt": "2025-12-04T20:16:58.174Z", + "postProcessHash": "f4177c0137f7cebec30e8622284443325e99ea3adb7b8b536173098bc535b66c" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.048Z" + "updatedAt": "2025-12-04T20:16:58.142Z", + "postProcessHash": "425b878e68b61701ed73af11b376055dd3b00cda6fcb29eacb22e35c9a49c19b" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.057Z" + "updatedAt": "2025-12-04T20:16:58.173Z", + "postProcessHash": "cdf3c1e2da318e9d95a99b7ee28b5c3797b029a2aa5eae72edd32d18ba8f5a65" } } }, "bc185d41a81a462e3988685f733423500e79d9186808359cf876254dfc1df6b9": { "873f51e584f0fef0ed5ce12f52eacab370768a902dd8b25575c46c3ea3925c19": { "jp": { - "updatedAt": "2025-12-02T22:57:29.057Z" + "updatedAt": "2025-12-04T20:16:58.173Z", + "postProcessHash": "759623204fecd566039646ced48c47c6307fd4ab7c2ea910b5a6ad266a6cb5a8" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.046Z" + "updatedAt": "2025-12-04T20:16:58.141Z", + "postProcessHash": "83b1a3353f3a86135b994218793586212f5b612a7af8f8a6da73f7fc66ce8fac" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.056Z" + "updatedAt": "2025-12-04T20:16:58.149Z", + "postProcessHash": "dbe5b8804f3996df76da28cddf8b5663f930e62267e01cc1c3105231e32831cb" } } }, "bdf609022e3136bdae1def5400ec2932bb8f17ea8d7d49a273b0293defd3affb": { "f21711f3b2d080fbcd8a0d170f14659f54ad538d7c534cc91bee92cd96943824": { "jp": { - "updatedAt": "2025-12-02T22:57:29.047Z" + "updatedAt": "2025-12-04T20:16:58.142Z", + "postProcessHash": "b6a7c099c7149d9b5face6880b1458f996a1863b68b3ebe873d5b505fc15cd3c" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.055Z" + "updatedAt": "2025-12-04T20:16:58.149Z", + "postProcessHash": "bf5740b780e901a615f3975e67d265a483819fa291fdcd97237a81a94fc408dc" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.047Z" + "updatedAt": "2025-12-04T20:16:58.141Z", + "postProcessHash": 
"da5c131e5e0f960e2380f2d74893f76a23dc6cb22d8e763f918284456b4cbc6b" } } }, "c5084a09df628aa86f5f8693b10a55f9e8af3ba5b6e50ed69ff474a580871673": { "639b5a2d990e67de3c2c8aab1480380f68b5cffc8cb43a13b0da74c601a38749": { "ru": { - "updatedAt": "2025-12-02T22:57:29.050Z" + "updatedAt": "2025-12-04T20:16:58.143Z", + "postProcessHash": "b863133cf50fecb93ed71d20426753326b56e807b0b46492a8c0051fc86cd1eb" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.052Z" + "updatedAt": "2025-12-04T20:16:58.166Z", + "postProcessHash": "11c4ec5448b4c21abeb56c3c25677a025456f572957f8b87ce884ba2d45a1bc8" }, "jp": { - "updatedAt": "2025-12-02T22:57:29.072Z" + "updatedAt": "2025-12-04T20:16:58.164Z", + "postProcessHash": "e7335e538de3266b83331757d84897746766e4845c07f689641073a5cd8572d9" } } }, "c6970f5399e645bf58a1525ef6209242f22690e314c9ec2676aa0f609e60850f": { "857e9c3ca17f16a6e7331b2d62e9f15ea308a426462699ae488f7fd808b8bedf": { "jp": { - "updatedAt": "2025-12-02T22:57:29.055Z" + "updatedAt": "2025-12-04T20:16:58.148Z", + "postProcessHash": "a5600e69ec330b6227d2ee6243bfcdc77380be62c8bfc053f310a25977afacca" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.046Z" + "updatedAt": "2025-12-04T20:16:58.141Z", + "postProcessHash": "0affb16fba306af18e9cd5f516bf6a95970850945ea367dae7fb1606b8ce732d" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.053Z" + "updatedAt": "2025-12-04T20:16:58.147Z", + "postProcessHash": "4b40d221407cddb54ade0938d462e12a4d9f287383a1690e6081c45263e9a10c" } } }, @@ -10240,507 +12689,624 @@ }, "0dc53c77f2243cc7e9fdcc853fa5aa06a0cb8fe3a7812be06951c1daf123b21c": { "ru": { - "updatedAt": "2025-12-02T22:57:29.060Z" + "updatedAt": "2025-12-04T20:16:58.177Z", + "postProcessHash": "2af1ec335db4fc6abf6f7bd2501376fb9a92a4c7466a26be441beb1d48f08f7b" }, "jp": { - "updatedAt": "2025-12-02T22:57:29.061Z" + "updatedAt": "2025-12-04T20:16:58.178Z", + "postProcessHash": "ddb1a5994207670d8dc0c57de2d5a3b3f1888702976faa7ddde1a1a735f175a4" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.061Z" + "updatedAt": 
"2025-12-04T20:16:58.178Z", + "postProcessHash": "1b48782b35b6277b8e397ddf707c73d3f9f479adf18167d3f10053737eb9de51" } } }, "fe52a1835874eff99646b2ecbf9812aaa4ad459489ce76c856750b021e1969fb": { "44b9e40b3ed21a0eb1effa1387bbd83dc88cf7259bae3bbf2af2a134b07516e5": { "jp": { - "updatedAt": "2025-12-02T22:57:29.059Z" + "updatedAt": "2025-12-04T20:16:58.176Z", + "postProcessHash": "7e1f718327561af8008188dc8ce48c45109a6cbd34fa9c7e40c0f386f623b00b" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.054Z" + "updatedAt": "2025-12-04T20:16:58.147Z", + "postProcessHash": "10193cbefbf0c299a15813b48e351fb7f64eb8fe1ddb2f1140a3653c6f4e6bfd" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.056Z" + "updatedAt": "2025-12-04T20:16:58.173Z", + "postProcessHash": "3d572f273c228b7d335ab4b69475db422d64d85b819f9a2413fd086027f7c7a1" } } }, "077683f76fe06aef19e3361bceab4bc549399e0723b4d9d14415d78c7b29cdfb": { "fe9570de03d2029f3efd3701f8a9844fa8bb91810ea7c58923ee8d0766854adc": { "jp": { - "updatedAt": "2025-12-02T22:57:29.087Z" + "updatedAt": "2025-12-04T20:16:58.167Z", + "postProcessHash": "f8fa7d6eae0fcb6e997eab540e6fb68cd3d387424fb2f5f09b6addde83b04402" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.088Z" + "updatedAt": "2025-12-04T20:16:58.167Z", + "postProcessHash": "c668afd1a4504ca87fc6a690b79e72bbb195b73894ed32c4223465d4c5a42e52" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.088Z" + "updatedAt": "2025-12-04T20:16:58.169Z", + "postProcessHash": "0fe4d49dc0c3f4bc0ca8b91b508572e0d80aeeaea08cd70739089bb141a1e665" } } }, "1d4d6e77bcbd23d001d1913843fc6c9748753173b9770ce333d87441932130ec": { "30da2cbfe92790be7c2f95f485c2ea63c4ff423ade0453d52e65f78a6fe652c0": { "jp": { - "updatedAt": "2025-12-02T22:57:29.078Z" + "updatedAt": "2025-12-04T20:16:58.166Z", + "postProcessHash": "d11512251fad1e69f2b21d67dc0af2c6b5181df9e72f88a5976070010a1be125" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.083Z" + "updatedAt": "2025-12-04T20:16:58.169Z", + "postProcessHash": 
"78d25fbbc48ea834f6a45d8443caa404c8d3ef5995cdc2552d6815efee7f4d4a" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.069Z" + "updatedAt": "2025-12-04T20:16:58.162Z", + "postProcessHash": "86be66d6a1b72899180a27fffad0cb4243dd940c76020f188281b83081fead4e" } } }, "37b9937d3f28ea06521af2789937cb6974b4bb1da71a4e0e38cd433452943f4b": { "ff41c613f12a073c7cfef1f537c5bef8fc0820fa48eaa7f6ad0cb887283d047d": { "jp": { - "updatedAt": "2025-12-02T22:57:29.072Z" + "updatedAt": "2025-12-04T20:16:58.163Z", + "postProcessHash": "523d35936181e624930a8ae3bfa903e8b5058d7cfae8f9729deff0816643a444" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.063Z" + "updatedAt": "2025-12-04T20:16:58.144Z", + "postProcessHash": "7d9963bd6e0d42b12da47d8a8d0b0292e841d7ff0f57a47fe95809372a490574" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.072Z" + "updatedAt": "2025-12-04T20:16:58.164Z", + "postProcessHash": "ceddb2e60b6e749dd8b2ed1e5ac5a05bb3d2fe7b208d4da32dd78a54d81cf2a1" } } }, "4503f45c726f639e1a6502e2fa738700aac770245105ecbbc3d6006506fa8e7e": { "b3e6deff1b1839f01fe2fdfb7c34b1a485c8bd5be58b682ad09b971716acc42c": { "jp": { - "updatedAt": "2025-12-02T22:57:29.077Z" + "updatedAt": "2025-12-04T20:16:58.165Z", + "postProcessHash": "70d36a01709459b2e2f77f7917869a49b93fbbfd9ac9bf69bbf957df53886d57" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.052Z" + "updatedAt": "2025-12-04T20:16:58.159Z", + "postProcessHash": "405d1837eb75bcccd57ff2938776ec65658a0a8ea0df5b8626797029bd3149ba" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.078Z" + "updatedAt": "2025-12-04T20:16:58.166Z", + "postProcessHash": "f21607efcda7c564974769933667df0761a9f89c253951317d7ab69af11370e2" } } }, "45c696d22175381569602ddc4401df16a7d32249c5f9994c4b98bf2548350315": { "65644f5b55fd61d8fdbe8a764e605ff7e00f6ec53fcdfa43f484a5638a58d2aa": { "jp": { - "updatedAt": "2025-12-02T22:57:29.083Z" + "updatedAt": "2025-12-04T20:16:58.169Z", + "postProcessHash": "38af28789f3d29a5ea1f37f1a934ef5645acc48a9b4377bc0f807cf61889114a" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:29.085Z" + "updatedAt": "2025-12-04T20:16:58.171Z", + "postProcessHash": "2c85d8fdf93c2dc3ee0a6f86b4a93a192ecf72ca8d8c018da5bf1505144515d1" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.086Z" + "updatedAt": "2025-12-04T20:16:58.172Z", + "postProcessHash": "c2ba60b96c9b02fa2d7323c8e7e30d2e38c839ec16ecc8bf3794dcb2a00a03c6" } } }, "4d6593bbb881e0a74e7a089539eeba4aca7019f581c7caeadeee04c001000773": { "d16ded5082885b0eeb5b28bcee5bf878c87a2cc092934fcfc328a1e535effa1f": { "jp": { - "updatedAt": "2025-12-02T22:57:29.082Z" + "updatedAt": "2025-12-04T20:16:58.169Z", + "postProcessHash": "16889dec3075a18d3d5469749678ccaedd6b679062012fb1828e7a1b4fc2ed49" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.070Z" + "updatedAt": "2025-12-04T20:16:58.163Z", + "postProcessHash": "bebe19339ee3a367d8d4626c09fe986370e30f0469424b308830b71c94b493ad" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.066Z" + "updatedAt": "2025-12-04T20:16:58.160Z", + "postProcessHash": "0d526ab7a4087a20f0c6dce99d2b36b169b1e08cf7f95ebaceb933cc07ba793f" } } }, "5c12094be2a10a85a875ce129adf37c46bdae04160dbb85b3eb63b9c69e7f6ac": { "bf9cdc73e3b5ca0e62d14af59e1854dd6d45176f362f34533c815c278385d1ec": { "jp": { - "updatedAt": "2025-12-02T22:57:29.068Z" + "updatedAt": "2025-12-04T20:16:58.161Z", + "postProcessHash": "32dd1631155cbf710c91a8c37de93a8831f9afe9782490bc3c554849b5aabfb6" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.063Z" + "updatedAt": "2025-12-04T20:16:58.144Z", + "postProcessHash": "c5617798209da1162b29a36cffc88944166b5d9916565f215352bf2be355bb77" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.081Z" + "updatedAt": "2025-12-04T20:16:58.168Z", + "postProcessHash": "6b7759991d69bdcd85d8a7efa609c7250120c8cf903c5a0bc78b1e032168a3e7" } } }, "62508936e6b3e7a6b965ed3df755188b154e45270320ca734cb0df2e29a942a9": { "9a4adbb5e86533b1fab803147ed4539c344e121c9526ce249b8e3c49744c7702": { "jp": { - "updatedAt": "2025-12-02T22:57:29.078Z" + "updatedAt": "2025-12-04T20:16:58.166Z", + "postProcessHash": 
"0fefdba4959b4839fe6686c62dfbcef3b9c2bbae07f189d5857bb555d915574a" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.064Z" + "updatedAt": "2025-12-04T20:16:58.144Z", + "postProcessHash": "f240b4c5a707eaf28baa58a414fceacced14c036f1e7768bec72eb8af3cbf3f9" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.071Z" + "updatedAt": "2025-12-04T20:16:58.163Z", + "postProcessHash": "1d7f8ef90045452c9a84891045542f8756299f309596fcddb05c72052dae4057" } } }, "7a1451fe8363988c04d1df2125cc6a560940a7c034905f5e75da236ab427774e": { "7f9fa8dfaab48853ecedafd465b380359704ea83aed218c677074831e1cc0932": { "jp": { - "updatedAt": "2025-12-02T22:57:29.065Z" + "updatedAt": "2025-12-04T20:16:58.160Z", + "postProcessHash": "7229a743d3beacf865a7cf3e442a68be5ca52ffbc900a8f6441d163fd5c024c9" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.070Z" + "updatedAt": "2025-12-04T20:16:58.162Z", + "postProcessHash": "2e6c21ea55498e3d0e39615cb9eb6a6d96c45c25d43720af3fea0d6c72e9338a" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.071Z" + "updatedAt": "2025-12-04T20:16:58.163Z", + "postProcessHash": "eca517bc8e4eb8181c5bb6a31333854af51c91defdc77e305c25dedd5c98e697" } } }, "83523c78b37179282ea3d0f8a98cd8c0e917e50caaf74f38e237b1b1f1fd7dc1": { "7f172e3eb258a3b4cd3c132303859997ffb354f24a60481f04ae0f80fefe2147": { "jp": { - "updatedAt": "2025-12-02T22:57:29.070Z" + "updatedAt": "2025-12-04T20:16:58.163Z", + "postProcessHash": "54f0c8d74362c22d466fc4085d165bd0be24b341de6247d47c34f419124ee192" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.080Z" + "updatedAt": "2025-12-04T20:16:58.168Z", + "postProcessHash": "e1e4c0420f3a53e01bd0ad2ed32939a2842c474acf70f9b0b615a18124520999" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.086Z" + "updatedAt": "2025-12-04T20:16:58.172Z", + "postProcessHash": "8dfe09633abe12acae3876a03cf6f27b54a23146cb0e609b75ea9381797c06eb" } } }, "89a6cf75614ffde882ea0e38b857ec20bc3415e924373b586ee53a84d81b8dac": { "212ef1dfe191daf73ed51386731e37ce6d4ca49f4472b7e471586979e69a9a9d": { "jp": { - "updatedAt": 
"2025-12-02T22:57:29.076Z" + "updatedAt": "2025-12-04T20:16:58.165Z", + "postProcessHash": "d0e28756001cd552d56ec19b713980e157d0d09f242d24f3e7f0c412ab242d4a" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.088Z" + "updatedAt": "2025-12-04T20:16:58.168Z", + "postProcessHash": "caf22f8f81da3c866444336e4789ec33e690584e0a3c424e857d08bb9747fe93" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.065Z" + "updatedAt": "2025-12-04T20:16:58.160Z", + "postProcessHash": "1a9b3699a64f26b7446a06465da2140f76585264ea31d76df087d751ea8cddc0" } } }, "8e4e3758c244f276a3f91f720f08400f7d3280b2729ed2535fe4b0a244bc1eb7": { "a3356389fc2d7537a8464f2e1646f8f51af66a2d715df1807a2fd4184083a70f": { "jp": { - "updatedAt": "2025-12-02T22:57:29.051Z" + "updatedAt": "2025-12-04T20:16:58.143Z", + "postProcessHash": "1168933247f82be55fba69eb4de3623fde2faef8e7a84ceb7de95c6c43cc6e36" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.063Z" + "updatedAt": "2025-12-04T20:16:58.144Z", + "postProcessHash": "fb4736d6c394b7d5d0baa0e6fd901b321c0b2f5876bdcd28569370fb3d2e3139" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.084Z" + "updatedAt": "2025-12-04T20:16:58.170Z", + "postProcessHash": "6c5d824573119f4fbaac03e57f92eed4a05f2839280990dbdd8a606fd4d0ff53" } } }, "92c4e15d1b1edd5a34f950168fa129302400e9f6ef4fa378e3c7af3ed6ec8227": { "3c3fcd6c5352af3e3f90c0a4d954793388177b9bbb34b975eff1c8f384d445ac": { "jp": { - "updatedAt": "2025-12-02T22:57:29.080Z" + "updatedAt": "2025-12-04T20:16:58.167Z", + "postProcessHash": "f7f2edfd9241efc698996c78310c0cda4b199faaaafa4ff9d64401fc3afee05e" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.052Z" + "updatedAt": "2025-12-04T20:16:58.144Z", + "postProcessHash": "0df6ec8783ec183fa4b9d701e20d95432e6fb7146bf9a96ccd48980d450cf33a" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.086Z" + "updatedAt": "2025-12-04T20:16:58.171Z", + "postProcessHash": "973f6cbe0a381ca3e786bef4aac7be506d9f189af9e41de6f556b7fb1a861078" } } }, "a14794c89d955458a9f5af44d7aaca8d68a05b6880e98e008a7c081604143ab7": { 
"671b0a57421a638325cbf9c110626a9d5b734267bb8f974814c03393141cf7b8": { "jp": { - "updatedAt": "2025-12-02T22:57:29.049Z" + "updatedAt": "2025-12-04T20:16:58.142Z", + "postProcessHash": "7a4343b184ce07814bff511938279ea087ec20fb2bb44f109bf21ba7a8151bf6" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.064Z" + "updatedAt": "2025-12-04T20:16:58.144Z", + "postProcessHash": "85cc8cc7723a6b402f93d55a33cebfabe020667d895e1ec8c1020d05691ca3e1" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.067Z" + "updatedAt": "2025-12-04T20:16:58.161Z", + "postProcessHash": "b0265d2d074be875c1475182dce6f9e1b242ed57b782c9fbb22400ff60bdb1d9" } } }, "ae39080b133df67d8884d7a8d76cf775ef202d9bf2efb43947344e07462aec23": { "4c42c112034c378e6000b6c987744ecc184d4c90582c11dc33f577b3f2ee44cd": { "jp": { - "updatedAt": "2025-12-02T22:57:29.079Z" + "updatedAt": "2025-12-04T20:16:58.167Z", + "postProcessHash": "38e4458959a4a356ec453de42734eae7ed88f9f6a85e180d96b2e488440fd98a" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.069Z" + "updatedAt": "2025-12-04T20:16:58.162Z", + "postProcessHash": "3774b1056c7615cc021b7d2c0f959eb6994a1208d265a132292b1945654b42a8" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.081Z" + "updatedAt": "2025-12-04T20:16:58.168Z", + "postProcessHash": "eab82b22b7bd67fb2676c421df09dd0ea831aff5b8186319a7d751fac3be5ac4" } } }, "b3e8c57a2ac90416a86e93c4fc87cc9fc69b9ee772adbd854463142bcf0ad103": { "78a6c5fa33437b43f2619fdc05ba1a3ff266f89bafbeb1b78bc71a0ed76a0496": { "jp": { - "updatedAt": "2025-12-02T22:57:29.090Z" + "updatedAt": "2025-12-04T20:16:58.172Z", + "postProcessHash": "c58808769b8271d5a90cb4fabde6280c7720e58e28e5fd36db0611e2d6f70ea9" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.089Z" + "updatedAt": "2025-12-04T20:16:58.172Z", + "postProcessHash": "28f1a24ac694ca1ecf61eff07eb3ba64c6510630ce51bff6fcc9bea3525f5621" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.087Z" + "updatedAt": "2025-12-04T20:16:58.167Z", + "postProcessHash": 
"0323c6ef269128ff9f7aea15eabc1fd734a5b65352b9a3b4de12716f96d27352" } } }, "bfa5f357797593cffea8aa625d31e79d5f58effffe1213f1bbb7b709e0c951e9": { "9dbe571f5b98f8fb6c1fe7c120e80cf8fe72a659f77f22e8b74282600d4e9325": { "jp": { - "updatedAt": "2025-12-02T22:57:29.069Z" + "updatedAt": "2025-12-04T20:16:58.162Z", + "postProcessHash": "e8682b4e5860e08b4c6d566e101d8453d9129fa6bbbeed91c125b0a954470cde" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.065Z" + "updatedAt": "2025-12-04T20:16:58.160Z", + "postProcessHash": "dcef040734194d5b58042404fe8427ad8667e58c284008c9b13083c4eed7f03f" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.069Z" + "updatedAt": "2025-12-04T20:16:58.162Z", + "postProcessHash": "70becfa0fc800bc3c41899791b9524ab6454d7db5734a5187b86a82eba835276" } } }, "c593a21ae24f2adf1116e2099fe2cac24733672a1fdacfbb7d9be523e674a070": { "3888654c7ba7da0474c2c33ac3100faa58509581ecb5ff97147be80f6c3ddc7f": { "jp": { - "updatedAt": "2025-12-02T22:57:29.050Z" + "updatedAt": "2025-12-04T20:16:58.143Z", + "postProcessHash": "841b45db78efc802422c48634083d446d21f12af1be3841b6d09d26ade5e1926" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.082Z" + "updatedAt": "2025-12-04T20:16:58.169Z", + "postProcessHash": "23d6a1f5929b2955a22a427fa11c2b6c8d9907ed63bc5d2fae552a00730b35f9" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.079Z" + "updatedAt": "2025-12-04T20:16:58.166Z", + "postProcessHash": "55433cff9eec68a514235c2b83584b48d05a8f76cc1007fc9259665c2c8ca535" } } }, "d2d56d1eccd2d86a90004069292a4cfc31251986d8bb238fa00ba3a4aab4a56d": { "dc92ad8afa44196810e06c60223ea9ca5b982c40325ac54b37fd95a9f450fdda": { "jp": { - "updatedAt": "2025-12-02T22:57:29.077Z" + "updatedAt": "2025-12-04T20:16:58.166Z", + "postProcessHash": "2e4d5be033432a02f9387fdb20794bf1e9de155b9ddcb759a32d376a13225352" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.065Z" + "updatedAt": "2025-12-04T20:16:58.160Z", + "postProcessHash": "7db0f57f06e332d374c91d66e82a4fa43ecaf0b41d7ec8d3b9a2d203f8740386" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:29.068Z" + "updatedAt": "2025-12-04T20:16:58.161Z", + "postProcessHash": "e3eaeb6d96aeb4e6db6d643ffb5ffee066927be7cccb319d4e80ae82ccc3a833" } } }, "d36f1827c010ce2e2dcab5998b4c489e963acbe4c2d8322885eae6daf7d3e446": { "2d4e379a75efd761f80eb533b3cf33859ee34ab855d930fab99c5091b13fa5a3": { "jp": { - "updatedAt": "2025-12-02T22:57:29.089Z" + "updatedAt": "2025-12-04T20:16:58.170Z", + "postProcessHash": "3f86850820fb03b68063eff6b91510044c806973eadbb77d6bcb023072740747" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.062Z" + "updatedAt": "2025-12-04T20:16:58.159Z", + "postProcessHash": "13a8e3243e3f41097b767df0afc75594ff242e809c51d6bfedbf2b4bacf526a8" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.071Z" + "updatedAt": "2025-12-04T20:16:58.163Z", + "postProcessHash": "2c9c58fe857b0ca1e03057ff7e0c18955bbc1ea94f706c8729ee16a0a9047b25" } } }, "f27af8909a343bda58696e815f4b50b00101d0dcd66b99619aa579b381a444cf": { "929021d21964c8a27df287754f3bf673b1e9e43e5b78df9447405b8197530ab2": { "jp": { - "updatedAt": "2025-12-02T22:57:29.085Z" + "updatedAt": "2025-12-04T20:16:58.171Z", + "postProcessHash": "fda61ebc6c2f51fd4ce0a9029a307f737603d5cc238f66ed2b4a95f329ee28a0" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.080Z" + "updatedAt": "2025-12-04T20:16:58.167Z", + "postProcessHash": "7a67f90ee6ce51d89d1321f446ac34cc49f9e0673e8435d344024ce33eb59d99" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.081Z" + "updatedAt": "2025-12-04T20:16:58.168Z", + "postProcessHash": "c9feea73bbf8a6d4bcad88242d5e2b23f3f7796520562dbfde0da9f7acc49186" } } }, "1d24065c2e7fca3ac3f26d0a2b7ccd04f7ff1ae4faa321c7335a8e84eb0ac0de": { "e323f890710302432f3ba708412993f1d391acfb58bf585c82e91d8c3c5b823a": { "jp": { - "updatedAt": "2025-12-02T22:57:29.096Z" + "updatedAt": "2025-12-04T20:16:58.182Z", + "postProcessHash": "5c9a4904d8a32a58e205bb09b951f15c6b98a6f67d41f51ccee29d44ccc872ee" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.094Z" + "updatedAt": "2025-12-04T20:16:58.181Z", + "postProcessHash": 
"8009e581c9e050266543070adb2b80456a830f766665a7239e5d0a2a5a083ff2" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.094Z" + "updatedAt": "2025-12-04T20:16:58.180Z", + "postProcessHash": "7e66de8cb29014fa12a8b405cc2e6b0f24bdffa39493f4b8ab312c8c78ea2e3f" } } }, "34f6accb938658c99a83aa179d1dfe75fe3f844b0e815b1a8d42a512eb830f06": { "c43e5de4e7fa4afd53423adaa427167edd9077fd3af0bcd8e16a72269e83116f": { "jp": { - "updatedAt": "2025-12-02T22:57:29.097Z" + "updatedAt": "2025-12-04T20:16:58.182Z", + "postProcessHash": "7c0997b879c91b59a7afd3789c35c0ab518fb5ace177f8e9bd3956d002008b9e" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.095Z" + "updatedAt": "2025-12-04T20:16:58.181Z", + "postProcessHash": "946dd8d62c5fc978b2550a24137c95325a9c50ef3e703ac37040af59b648ff25" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.096Z" + "updatedAt": "2025-12-04T20:16:58.182Z", + "postProcessHash": "3e7acacf1fe13983412ee30aadeb06b0f1fcbfd8fe8a4f0cb4db4d1c4be2a9d1" } } }, "45e2237668cc8f027e43e52ef4443b8a53d2c07dde3b858205c9c43057f4cb8b": { "66380f0ef83c18a16b8296671ad4697deea2b60436ad4259cd3c3df09895bbfc": { "jp": { - "updatedAt": "2025-12-02T22:57:29.062Z" + "updatedAt": "2025-12-04T20:16:58.159Z", + "postProcessHash": "8b953e74eaa1401e371ba66ea3b8fcae81ca61e3df63b65a1ebafa3f9d7ed79a" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.093Z" + "updatedAt": "2025-12-04T20:16:58.180Z", + "postProcessHash": "631d6059e85280e29d29d89fa2cca076c1a46d6b9bcd746bf4dcb5eec80ea897" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.092Z" + "updatedAt": "2025-12-04T20:16:58.175Z", + "postProcessHash": "76385f665c1cd89ad706ea9c3da2feaf7d773d50d975fdc9ba754b2a2b749fa5" } } }, "4e39f1cc2912db452edc06d93f7f0bfcc091c2888f064a3281bd99e46645f722": { "48a7640cd750631e03fa4c3747cd09af737c4ed39ad0a40e22ebcfdbc24b9872": { "jp": { - "updatedAt": "2025-12-02T22:57:29.090Z" + "updatedAt": "2025-12-04T20:16:58.172Z", + "postProcessHash": "e8555139a467f6ce32c1dd2fd8fab77fadd52543f8bd0f041a4398da83af34e3" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:29.087Z" + "updatedAt": "2025-12-04T20:16:58.165Z", + "postProcessHash": "edc217e5d5ecabf1a87cfc24212e220b46416d1b41edc5f456bf5222288614e5" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.089Z" + "updatedAt": "2025-12-04T20:16:58.171Z", + "postProcessHash": "ff9033995549b9129425f0f17947776e738291fc7aa1969cd249a604824d3725" } } }, "990553ca9f9ae4591aaae11318ecec98a52d743479ad68505f33d7437ebdcfe5": { "6706062fa424eac816c221cf4a0ecb23afeca8ecbe3f4830da0cee49f3af5b55": { "jp": { - "updatedAt": "2025-12-02T22:57:29.091Z" + "updatedAt": "2025-12-04T20:16:58.173Z", + "postProcessHash": "dfabbecf3f57e3c3659f34df9383a18a4abefe44b35da6382063b424ae305569" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.093Z" + "updatedAt": "2025-12-04T20:16:58.180Z", + "postProcessHash": "5524d7a418a936c38e85116e1f20c0b9bba94eac496fc032df1955684e5599b7" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.061Z" + "updatedAt": "2025-12-04T20:16:58.159Z", + "postProcessHash": "4dbe8b3c11e58ab025805f7b97473e05f92a25e20084c8b80f3c6082332129f9" } } }, "9b3d838535466c0adcbcf2c1821542686b5932d55c219ecd4c54a8d3d723b617": { "b968225991ebd30f1600f3ad485919d0badeecf3a3e60c5cb52b71a85c5611c6": { "jp": { - "updatedAt": "2025-12-02T22:57:29.095Z" + "updatedAt": "2025-12-04T20:16:58.181Z", + "postProcessHash": "73338eb1300d9c0e58340b8b12c3b8fa73d79ae75ab65428054d59c600ae2e42" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.092Z" + "updatedAt": "2025-12-04T20:16:58.176Z", + "postProcessHash": "06243742ef0121a58ccdd59f9ccd7829bdf98956e50d202ee6567102ac39d03d" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.091Z" + "updatedAt": "2025-12-04T20:16:58.175Z", + "postProcessHash": "39afe0d0a47207b615c72fb01a44b079982234b7d2c5ef927ef61c5a84af37c5" } } }, "f8fa9a1c93f8857620e1d2d6052e2a540a9827ab07e015947b84e6fc066cf05a": { "27bc228b35212b29d55733663b0d676059fdafc2d49a527814889b3aa40f6e10": { "jp": { - "updatedAt": "2025-12-02T22:57:29.094Z" + "updatedAt": "2025-12-04T20:16:58.180Z", + "postProcessHash": 
"28355fc7e2bf804f9a183ad0de03836957fb86d0a41c1ff298da069e1a2a3a64" }, "ru": { - "updatedAt": "2025-12-02T22:57:29.091Z" + "updatedAt": "2025-12-04T20:16:58.174Z", + "postProcessHash": "718f32fc6ad103ebac3cd4c7785fb03ea3942461258ab30f220b74c6c039426b" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.095Z" + "updatedAt": "2025-12-04T20:16:58.181Z", + "postProcessHash": "669c32622edf10c63f6f344a2c8212c6300f0dada140dd2d54b9432a08f02de5" } } }, "11aa99a1bdc8390230a974032f545ad7fc914b9d7d7512e6f3d523c3c3315925": { "25ab99f304def64235d114ed61495f4a871f63a473b431f04505d22d84acd92b": { "ru": { - "updatedAt": "2025-12-02T22:57:29.077Z" + "updatedAt": "2025-12-04T20:16:58.165Z", + "postProcessHash": "0ce05ccba387543046db4c36ce5ebc255b39e9b64556b8a91bd1d3c0d6d94e16" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.066Z" + "updatedAt": "2025-12-04T20:16:58.161Z", + "postProcessHash": "73db27728b01e076fad4a160940069640057ba1705d7954164259153762368e4" }, "jp": { - "updatedAt": "2025-12-02T22:57:29.084Z" + "updatedAt": "2025-12-04T20:16:58.170Z", + "postProcessHash": "0f5dc14bba65bb3b26a69ac0b4707e8d7cf615ed2ea5f70f9ee14896c8f321db" } } }, "15a59bf1722e4b12c28df70766e0baab4b9d5a6f0a0473fcdaa0c562dee3986b": { "38c435040eaac3147a4b165e8f2e2eea100525b71769ee62c7de7604c2c7decd": { "ru": { - "updatedAt": "2025-12-02T22:57:29.073Z" + "updatedAt": "2025-12-04T20:16:58.164Z", + "postProcessHash": "a6a4188aa3a525bbba1a8464fae1bb9269408acc426711a46685bbbb4f9b512c" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.051Z" + "updatedAt": "2025-12-04T20:16:58.143Z", + "postProcessHash": "af70d5f5fb0d75094b995504b5773984cc102e9a70021f65deb16315633b628c" }, "jp": { - "updatedAt": "2025-12-02T22:57:29.067Z" + "updatedAt": "2025-12-04T20:16:58.161Z", + "postProcessHash": "bf9ca63a11639d2e3204f263ae2c63fc01ce3d022a58a0e9bedff53a8c94c989" } } }, "a840f2497ddf7c24e3414728a66fe927662c74a0473293e11a93429df3ef7e1d": { "14417b042f80b8359063dc1571b796f4f9775e28a90c36436b10c493b04268af": { "ru": { - "updatedAt": 
"2025-12-02T22:57:29.083Z" + "updatedAt": "2025-12-04T20:16:58.170Z", + "postProcessHash": "7761c83a538e1cb01297a8bfaeb6fe8a2b0b5592db62330fb300552506f633a3" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.084Z" + "updatedAt": "2025-12-04T20:16:58.170Z", + "postProcessHash": "d9a6f4e2ee43023defa11d89c875a4585710585a163f028a246d257485ca60b4" }, "jp": { - "updatedAt": "2025-12-02T22:57:29.075Z" + "updatedAt": "2025-12-04T20:16:58.165Z", + "postProcessHash": "f35f29fda8fc5b48d623dbe3a5a1a31cb7e2e5bc229bcf5e2ea6c0b9cbd09eae" } } }, "e843b874a573838613448a25478fe1be3cfe8e1a5c23c7d816af626567769147": { "8cb205aa323de3c2fa63f58b08365d61b559f9ba1b8554ec982b293d9a83f80b": { "ru": { - "updatedAt": "2025-12-02T22:57:29.068Z" + "updatedAt": "2025-12-04T20:16:58.161Z", + "postProcessHash": "35e31c50e23248eb82f207190c0e7c3339422588ab353ab860babd2c6daeaf22" }, "zh": { - "updatedAt": "2025-12-02T22:57:29.073Z" + "updatedAt": "2025-12-04T20:16:58.164Z", + "postProcessHash": "08f3c2ce89d66ea32d7e6a83355b0a83a1def485fe33c532e458647eb65495eb" }, "jp": { - "updatedAt": "2025-12-02T22:57:29.073Z" + "updatedAt": "2025-12-04T20:16:58.164Z", + "postProcessHash": "6a032e4938a01bc4dd69c599e1f08f52d38b885c7403c17ecc56ed09532c0197" } } }, "3177435d774099d4ba686628bc971ccc42a54d0a0a211c8a4424bbc544e08540": { "f15d74887e89dbc77f9957e1568c4842460915108734894efa6e2f081275d68b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.567Z" + "updatedAt": "2025-12-04T20:16:56.997Z", + "postProcessHash": "dd71f578d9df84661571b80bca5395e9f12f766763975bf802b712756a87313c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.565Z" + "updatedAt": "2025-12-04T20:16:56.996Z", + "postProcessHash": "9e2561ce5358c26ec9269ce12757b02a9fe08f36b88aa5c0088c7f58c3d220bd" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.567Z" + "updatedAt": "2025-12-04T20:16:56.997Z", + "postProcessHash": "0fc5f392b8bf53f1108896c0e4530cfc2fcd583627a4ba694191e54311b480d5" } } }, "3caedd95aefa51553be1069772560367e021728814e3e4cb4e732e19460e0502": { 
"c808220f60eb5bb176af1e26539836830b9934b93a9bc1e1e62fd9b90ce36bc8": { "jp": { - "updatedAt": "2025-12-02T22:57:53.405Z" + "updatedAt": "2025-12-04T20:16:57.162Z", + "postProcessHash": "8796decbab36ba86e3d455752eb407cab50c8943a1c8d76e1457a968eaf33619" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.405Z" + "updatedAt": "2025-12-04T20:16:57.162Z", + "postProcessHash": "aa43e8b8e06e43ec2da3b781aa1cdb75c050e66fc1ef768571c0993e5dbd4a3d" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.404Z" + "updatedAt": "2025-12-04T20:16:57.161Z", + "postProcessHash": "96424bd47cdff529cdbd3bd14f8c8e20bae50f04987705a792851f781461ae36" } } }, "853246cca55f655f764269048050edb509e178c1ed6b34530b7a3aae600ec2b8": { "0a1abce96f2027f1611f7096e0422a02de923c3698460cb2c242ae3092e25c81": { "zh": { - "updatedAt": "2025-12-02T22:57:12.899Z" + "updatedAt": "2025-12-04T20:16:57.099Z", + "postProcessHash": "e8b5fd322f7b429948f73087023d5e5b3076f16a39ca914d6438260062130a55" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.399Z" + "updatedAt": "2025-12-04T20:16:57.156Z", + "postProcessHash": "5a08d8f32df5112fc6695a64a3d358349fc989650bfa949ca45342b0147b2b22" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.400Z" + "updatedAt": "2025-12-04T20:16:57.157Z", + "postProcessHash": "0e563c9a1a564226c8cb68c731d874e577301fdd3030f036c34f85fba07d538c" } } }, "a030bf426b6662b4674be21ff621cb7fabbfd26f971ddb89ac770557065aa0cc": { "f732d015e8ca7a50761bad6c4404360438b7df18567a96df59faad98662b6017": { "jp": { - "updatedAt": "2025-12-02T22:57:53.399Z" + "updatedAt": "2025-12-04T20:16:57.156Z", + "postProcessHash": "7517b19570b6999dd50ead4d040a8a1c6288754a5eb9aec94cdb2b143b4f68a4" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.899Z" + "updatedAt": "2025-12-04T20:16:57.163Z", + "postProcessHash": "ffce358a94485570f0367847577bf65176a77f4ce3386865d3718f0e810b603d" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.898Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": 
"2cff0f21de21ed5667353cd6094ce9fc8894767ce538ab89a0dc566f900f604e" } } }, "06c88066bda47d4a934bcdcd6f121c4c1e22b06d73242fdfb1ab310a2564cf7a": { "f10ca14dce06ec46cdd4e21bcf3783e50fb8f8e2c7873cc6b828db0e89c91024": { "jp": { - "updatedAt": "2025-12-02T22:57:53.401Z" + "updatedAt": "2025-12-04T20:16:57.159Z", + "postProcessHash": "8c981965acba3a66177110ea9fc5929f4934fe5fdda1c49cb77c7c40199de078" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.403Z" + "updatedAt": "2025-12-04T20:16:57.160Z", + "postProcessHash": "eeefb59492e422fa8d8904cc5902021a2492f92e4323b20c949e86c06354c798" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.403Z" + "updatedAt": "2025-12-04T20:16:57.160Z", + "postProcessHash": "04494158c3750e308777c1a794c7e04ea499e3ac07bbe9e2621da456eae4b68f" } } }, @@ -10758,91 +13324,112 @@ }, "fd960e0ad4a4e719414c642095987287a615859dcdfe78dc5e4ade0ad15a3dc3": { "ru": { - "updatedAt": "2025-12-02T22:57:12.900Z" + "updatedAt": "2025-12-04T20:16:57.163Z", + "postProcessHash": "de9e5d3a3fc3d0b24dd3e40520dc2e7d28c9d76f67d3ae200c0b1dc9f15c02b3" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.900Z" + "updatedAt": "2025-12-04T20:16:57.163Z", + "postProcessHash": "5d9f114fa13568a67c76ce493e2288a6293b889cc80ab511c465c58a2f015c3d" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.901Z" + "updatedAt": "2025-12-04T20:16:57.163Z", + "postProcessHash": "e7b7d1403151bdcb36e3e579a0be2ccf971ca1fa134a95c51ad9649819301825" } } }, "48bd4337b75cd02afdef9e5066ef37aa097bb2376a0997cda1862ec2672e0bb6": { "c01428e3868677f56a7361089108618d1aa1b3f64f9d078f8a9dd079aeceadf1": { "jp": { - "updatedAt": "2025-12-02T22:57:53.426Z" + "updatedAt": "2025-12-04T20:16:57.161Z", + "postProcessHash": "424addae32033b2a0fe83b680bfe721272d556e9dd2e811fce25110f1de972c2" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.425Z" + "updatedAt": "2025-12-04T20:16:57.161Z", + "postProcessHash": "4781a0fc0800cf15e2f875aae96e5fa88ba5cd07e147c671b572f4a121dc8f56" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.426Z" + "updatedAt": 
"2025-12-04T20:16:57.161Z", + "postProcessHash": "5969687196357a380481f33399c17910e6f5c05ad89b00e846e219d95f968bc5" } } }, "4a871b3501c8910734e45bfd046fb170eead507a557e7fc029a9720169d74f60": { "a1bfd48d5bf528dd7d49ff5929721a27fac3e265e20a187bfe5603465299248f": { "jp": { - "updatedAt": "2025-12-02T22:57:53.400Z" + "updatedAt": "2025-12-04T20:16:57.158Z", + "postProcessHash": "6be608dbe9f18cb926f5216bb3d4f497f39c60829fb115e70f7dbad5cbb77f8f" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.401Z" + "updatedAt": "2025-12-04T20:16:57.158Z", + "postProcessHash": "b0dd9e9084f4b6b0613992fc4fc5e8f960b334ef82377921c4170788df8f6913" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.399Z" + "updatedAt": "2025-12-04T20:16:57.156Z", + "postProcessHash": "f5df3d50fcb89c63390e2709f128078b0c2b936987573565da483c4e8ca83fa3" } } }, "50f0ba5685aaf3e9d2d05dffeeaa45f47b7ed622dc20465bd6aa71e7192a1a6f": { "430792450e0e247081db5645bfe27bcdf7c5efb4c46fb798c742aecf01bea55d": { "jp": { - "updatedAt": "2025-12-02T22:57:53.417Z" + "updatedAt": "2025-12-04T20:16:57.154Z", + "postProcessHash": "84cdea7d06ae59f48d3278ecb8170fb4d5660eff15718d8d35333970f77de7b7" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.423Z" + "updatedAt": "2025-12-04T20:16:57.158Z", + "postProcessHash": "520024e8556f853641e4cb6b761c2ae78b487c2c8797415b184d1770e1a9b85e" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.420Z" + "updatedAt": "2025-12-04T20:16:57.156Z", + "postProcessHash": "8fd63c717473529c1807bf9203e07784428eedd44d4945fcde1365d53bfac19c" } } }, "5929e4805377229948887e5ba720274840b70d5c8448deadfee3a33803c24777": { "4923fea66c23915a7ee88662e5a25bc88b6e63399b5f8007edd0a604f6ff29e9": { "jp": { - "updatedAt": "2025-12-02T22:57:53.396Z" + "updatedAt": "2025-12-04T20:16:57.147Z", + "postProcessHash": "e50c2665cf797ac88af43503c41ef01a299088da95fb349a53949fae9f78f227" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.395Z" + "updatedAt": "2025-12-04T20:16:57.147Z", + "postProcessHash": 
"90e670779c0c43af0125de3ce220ca51be039e5eb4e9968e2940de873e46fccd" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.397Z" + "updatedAt": "2025-12-04T20:16:57.155Z", + "postProcessHash": "4ec0025495c157bca7990bb35e56a4fc68fb47d87800eb829de780c5dd713213" } } }, "7f4f10424fd5d15211a9b2e7f5376cd61876478ca1e288c42f77a9d27815ed3b": { "49a85cf8c399228a66495a6ff70df4eb90e968fc2a6386b6d0c3a47d1c6934c0": { "jp": { - "updatedAt": "2025-12-02T22:57:53.427Z" + "updatedAt": "2025-12-04T20:16:57.162Z", + "postProcessHash": "946d5177f6df23105f0d6620f085009c727fc143455e76c9bd53e4c50aade380" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.427Z" + "updatedAt": "2025-12-04T20:16:57.162Z", + "postProcessHash": "2ac6749d48928b548395c4566f9f4c191ee4996773b64da145c6c648dc0f018c" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.404Z" + "updatedAt": "2025-12-04T20:16:57.161Z", + "postProcessHash": "392cad1d59dc0bbd82970d14971de1e1d30d9e7c89b92e5894d8814d620c36b9" } } }, "8fac3eeff35b863ef1c1a857ec5cc7ec6c5e04a3ba1b53c0613d799e0ab40033": { "cff3cef9c9971227c006470a36ab779082e9292add9a0d6480da3c2873a882cb": { "jp": { - "updatedAt": "2025-12-02T22:57:53.398Z" + "updatedAt": "2025-12-04T20:16:57.155Z", + "postProcessHash": "31298791f8653adb81dc0964705b1bedfb1ae4edc334fcd4507bed24682494bb" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.397Z" + "updatedAt": "2025-12-04T20:16:57.154Z", + "postProcessHash": "34cb122e0d0ae04ca4742a7f92ea07a3fd122cfb77a453352fa3c3b70554ad32" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.402Z" + "updatedAt": "2025-12-04T20:16:57.159Z", + "postProcessHash": "90ce8c109cf2713bb988907738f5284ba86da3651820afe446251ae5832c8e08" } } }, @@ -10860,39 +13447,48 @@ }, "ffc6e2c25867e91947ebe1d8e03113d4066168fa2d6eeb0262027942d80e056b": { "ru": { - "updatedAt": "2025-12-02T22:57:12.901Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": "61a926b7dd43be4d276530370905d9f703533cf6de4af4cdcb498439fea92f7d" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.902Z" + "updatedAt": 
"2025-12-04T20:16:57.162Z", + "postProcessHash": "dd312618c278b8facb6e3e598487f96e066872ac35c65d473082779c6a467667" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.902Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": "b808fdf94558bcbc89290379565cf468612ed9e0f0952a4d30da78e645ebab9b" } } }, "d15bbab335414d4d8b8963bf84d8e6840415a3fc839c797f41e13afb347c0e66": { "7eff53190c5a3759339978f7f7f8df28a9281bca9df3218c5f48b98aefdb5e9b": { "jp": { - "updatedAt": "2025-12-02T22:57:53.399Z" + "updatedAt": "2025-12-04T20:16:57.155Z", + "postProcessHash": "7f054dedbc40d52563247297566ab09817332197edd3aac18b4f76d230aedd3e" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.397Z" + "updatedAt": "2025-12-04T20:16:57.154Z", + "postProcessHash": "00165eb2c6bf794bcf465d2a301f830cf43d64e01a8aafa22cf8743813c09b53" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.398Z" + "updatedAt": "2025-12-04T20:16:57.155Z", + "postProcessHash": "04b8f497d5c9a626c981a5869ee7321cbac06429949da2371dd53041bc5e8e42" } } }, "e524f82a69f9ba0c9ca77d93ce6f9a713d13f108480d3945dba1962f5772ee46": { "fbd98a73453eb2fe0d0b40e9e69f2c6435180be06375fe9f19e1bb909573407f": { "jp": { - "updatedAt": "2025-12-02T22:57:53.398Z" + "updatedAt": "2025-12-04T20:16:57.155Z", + "postProcessHash": "e34913504c59d9d00453d522dc9e285a4762384fc9e7d314d031a54976d54967" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.396Z" + "updatedAt": "2025-12-04T20:16:57.147Z", + "postProcessHash": "130ce3ac785e32658b9454bfeedd103bc67c5a1dccaf50b870364b9032207920" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.396Z" + "updatedAt": "2025-12-04T20:16:57.147Z", + "postProcessHash": "5a3bdd74597e1936ca9090bd1990f11c00219d5aafc669ad88fd0eacc95c6874" } } }, @@ -10910,143 +13506,176 @@ }, "0d09d70848bc3db09e2e67fdd516909f6d48129455d42ae148932d9d2a956682": { "jp": { - "updatedAt": "2025-12-02T22:57:12.901Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": "d8103664b1a847fa6bef521ff704815613caad78d790eaba020f8faaf548de79" }, "ru": { - 
"updatedAt": "2025-12-02T22:57:12.901Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": "edaffbf281788a8e7d6a60da73af2efc9346abf6b0c1cbaefa9ba064b217b866" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.903Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": "99444020d4481fbebc7ab3f960ea44866766506c6451ee7fbb2ce234316e2cd7" } } }, "e9001fe7adae3ee521c4e8d3e207693d2c40ab3153b629428457ad95a126e11f": { "c925c5d3c0431c9ee3487e60721536bea2826b1bda255f0e4e9add7b81f2f4d6": { "jp": { - "updatedAt": "2025-12-02T22:57:53.404Z" + "updatedAt": "2025-12-04T20:16:57.163Z", + "postProcessHash": "5ab560c10ed18615d03091f13219d0bfc6bb0e86abf14df5ec04a43cb02ea110" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.403Z" + "updatedAt": "2025-12-04T20:16:57.159Z", + "postProcessHash": "12e2a94017300a31ed02ec9fe4321a2a463a798411c87490ddeecc764e4138d2" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.402Z" + "updatedAt": "2025-12-04T20:16:57.159Z", + "postProcessHash": "034d210ad3d995d2d075b26f187bb2e46f1e6fdfc31fa74ad0527e5443f3f197" } } }, "fda80bc8aec70f334713b34cf3411a9095abb45fdde35b4e250c9120d32dc223": { "9447f95299ab26e2dc058db4d4939aabd60236798a21696a52feac53fd714475": { "jp": { - "updatedAt": "2025-12-02T22:57:53.402Z" + "updatedAt": "2025-12-04T20:16:57.159Z", + "postProcessHash": "a205c133f70a480e619102e007cbecb8ae1d0e3942b37177b8e312b49fd4e8b6" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.401Z" + "updatedAt": "2025-12-04T20:16:57.158Z", + "postProcessHash": "e5e70e609c379e76709d149c68915a89be74d3f8ced7b8fdf61af7bc466f3559" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.400Z" + "updatedAt": "2025-12-04T20:16:57.158Z", + "postProcessHash": "8d72e6478a2a73f96e949aa7f819faf503d59ac817c2bbded1fe84355bd6dcbd" } } }, "14ced74ae89aced82700fb3f104dd8a0694c5c0ea94167d31d43f1c1be2fb09b": { "cb8ca75fddc3df71a3d63cbd9d7f7fe682786749238844ed9083730bc07d7cec": { "jp": { - "updatedAt": "2025-12-02T22:57:44.732Z" + "updatedAt": "2025-12-04T20:16:57.176Z", + 
"postProcessHash": "a58334ab040db4d023ac0cb8c7655f0cb17e078595a3f63bd97d76aae7d6eba0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.736Z" + "updatedAt": "2025-12-04T20:16:57.178Z", + "postProcessHash": "80dea245dd61d7d35c2d015f80179a6c5b18a280d15b5cf1754d8e61fa94e2f3" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.730Z" + "updatedAt": "2025-12-04T20:16:57.176Z", + "postProcessHash": "3ef78f1c5de52962b99b1ee78cfcad8a40de97313a0adae3ed96248f9b2d061a" } } }, "1e26f8437fd2a7483c1f29a2f11a909ff981448ebd08fd7cdce54aaa31e8a511": { "1c028977ab28be717baea644e55afe62584b4eec751926769c92c424bedadeac": { "jp": { - "updatedAt": "2025-12-02T22:57:53.424Z" + "updatedAt": "2025-12-04T20:16:57.160Z", + "postProcessHash": "71fa796e023cd3cefc4517779a07090310944aee7fa3e51a1f289aac72c9fd57" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.424Z" + "updatedAt": "2025-12-04T20:16:57.160Z", + "postProcessHash": "bb1c53e33258fb6e45d1fb486c27cea40edeebe7f8ec0ebe34892050aadaa666" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.424Z" + "updatedAt": "2025-12-04T20:16:57.159Z", + "postProcessHash": "9bd1173ca982a71caa682aa32febca106e12eab5bca7ba96375b098838fd1914" } } }, "1f59d80250965bf621c38a8cbddf5b2e006f67743e1775da704d9729e6c40a23": { "e842588d4a31eebd61f2d17142402ac96b93d20ff0258eb94f82f5209a3ad2a1": { "jp": { - "updatedAt": "2025-12-02T22:57:53.414Z" + "updatedAt": "2025-12-04T20:16:57.153Z", + "postProcessHash": "d31ab2ff3f69894d1ce674ac6760dcfd7ded8a2cac351fc924548b9d919f6170" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.422Z" + "updatedAt": "2025-12-04T20:16:57.157Z", + "postProcessHash": "6647d7d8cec76a0bc190bd245a41465dcf8dafca760914496f04ddf337245732" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.423Z" + "updatedAt": "2025-12-04T20:16:57.159Z", + "postProcessHash": "696ff4969a8a9e704bf0b3ade0a0d12e767c6076618f7095cefca065288e9cfe" } } }, "2024dccfa8cbc20dfede60ca15d6ac2b721789dba97415066cafa133d075bc71": { "ed44ffe66e8c1a1ecf0ca6bc07d18f43272ec161a9e95d0e798e64dfe432b703": { "jp": 
{ - "updatedAt": "2025-12-02T22:57:53.419Z" + "updatedAt": "2025-12-04T20:16:57.155Z", + "postProcessHash": "3a50b457e8999517e7792832c9229d589e1d18cb06e42792f3938f5c76d1c8ef" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.418Z" + "updatedAt": "2025-12-04T20:16:57.154Z", + "postProcessHash": "24d82ad1d826bd77394e21b2da590ab151fe326b6dcc5fe3feb72a3397e80175" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.420Z" + "updatedAt": "2025-12-04T20:16:57.156Z", + "postProcessHash": "cb97308039c57e661051b3ccea1bfba2b9a766b6a7c6ab4d26d64d73d19af3e3" } } }, "44c5128a3edb936a9926b4b513a202ff12d0f2f99f0c07bcfd8be1cc4723be33": { "ae80526735bffb74b999220357c744c052668c14fe1ac555f4b49132850620f3": { "jp": { - "updatedAt": "2025-12-02T22:57:44.724Z" + "updatedAt": "2025-12-04T20:16:57.178Z", + "postProcessHash": "1304dcd9cbc9d8e52f15f4fe9b8a9fac97ce8ebdb4df2843fe80dd3d35763e22" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.428Z" + "updatedAt": "2025-12-04T20:16:57.148Z", + "postProcessHash": "7c82ae28142bad07261113e1d0f731c6aafd028178142f7cefb242b6b1fd169b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.723Z" + "updatedAt": "2025-12-04T20:16:57.171Z", + "postProcessHash": "b491f395f6680f58fbc4cfbf80279e64e861faa11995fa9801af896c80b37150" } } }, "46ae531d604a943d4340ae2e6288c87ed68270217d4e05122a7521d428519ef3": { "fe9a9e2137d1cae06dba9ff8e83ecaa3649ff47e77c5892e5e7eb1529b298c64": { "jp": { - "updatedAt": "2025-12-02T22:57:44.723Z" + "updatedAt": "2025-12-04T20:16:57.171Z", + "postProcessHash": "7cc3686db7c556b7c3f92eb257a9ae5cb97a4969417d27e4c37da984b8d7729b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.727Z" + "updatedAt": "2025-12-04T20:16:57.174Z", + "postProcessHash": "75ea797e8b92c03fd5f31fdaec80e4dcc1853604d614bca0f0a1d88507f9cfeb" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.723Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "872e7f1d97e987b22516ef07e7e41714a3780d3b18e07be8ce72ee76f9a5cfd3" } } }, 
"54e53c16ab3f42ddf8578d9835bb9d7b843c7a55b19f498defcfab1724ec045c": { "35a38f29e12929f2b225b703480bed8e37445662a61cc1d374ec38bd2400c7f2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.730Z" + "updatedAt": "2025-12-04T20:16:57.176Z", + "postProcessHash": "8af1bfb8892506b27b9ead1f444896519ec5957ccef8ea3c1324867deb441e39" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.725Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "e97a29400eef6e87767305ec21b2fd1ab030c6b460853b3bb084f9f5190da3e5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.727Z" + "updatedAt": "2025-12-04T20:16:57.175Z", + "postProcessHash": "576fbafbcfba7f48bf8a1bf0f53b52625dc0844d681cbdabec1208d2351a9083" } } }, "687b783276b703fe2b34bfa19c6e6eaaf919ae2edb3b092772cfd3710319c962": { "355157027a1047c82f7755ab15b218d98a8e5232865d69edf8a51337a364b541": { "jp": { - "updatedAt": "2025-12-02T22:57:53.416Z" + "updatedAt": "2025-12-04T20:16:57.153Z", + "postProcessHash": "961ff026ae9e9ee2d2304c4e6c5365995d44ce20d40b4c19ecc836e9fbbc6502" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.419Z" + "updatedAt": "2025-12-04T20:16:57.156Z", + "postProcessHash": "e74cbc09a3bee2d8c936354f918ace0da7063a617e4486dfa3ce3d21c56af465" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.422Z" + "updatedAt": "2025-12-04T20:16:57.158Z", + "postProcessHash": "d5da0760b75c5c07e0eb60b743dfbfd78c849283e75b8d309834993a14cf5d58" } } }, @@ -11064,156 +13693,192 @@ }, "744f86f0af840c394271f9f85293e3266bb9cf9887e2f173377f48cf9eb8cc0c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.406Z" + "updatedAt": "2025-12-04T20:16:57.164Z", + "postProcessHash": "a8642bbdb2fa77fe24857c491c932dbd3c5866ca8b53cf85fa99845d085cdaeb" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.406Z" + "updatedAt": "2025-12-04T20:16:57.163Z", + "postProcessHash": "3d4dd68d7796517134d63a08a1c05b9b319b896a9e06f9558c2264e30bb11396" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.407Z" + "updatedAt": "2025-12-04T20:16:57.149Z", + "postProcessHash": 
"65fa71a0cf535da99bdc6da8a88dc2577a592aac904f7da02efee28946c50fbd" } } }, "afabce4e754c0f29b7593d81718787eebb8e7a9f5199eff6f15750cdc8d874f1": { "b814da04f0b9e71448b22e3ba39231b2c53371ce962656e59fcc8215b53d94b5": { "jp": { - "updatedAt": "2025-12-02T22:57:44.724Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "4820aaf6aec729c4f75ea92b6ac755eb097d73ce8b10634d450a2138e71e3d2b" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.422Z" + "updatedAt": "2025-12-04T20:16:57.158Z", + "postProcessHash": "8f898a3c21366104ba1dee25137a2264a680814dc7f724750bfd9a4505c9f1c5" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.417Z" + "updatedAt": "2025-12-04T20:16:57.154Z", + "postProcessHash": "4b7d3fd65c5f1f3991c3207c6ee4f0436b070910d28b90c643917d0320a2dcfc" } } }, "b27a2ee952ba7dd3cf6b2ca009a0cd8b86282ef4f43897856e759dafd88540fe": { "32217bcd37f2a7cf5746ec775356d9800b981082b7b912e76343ed0600518f76": { "jp": { - "updatedAt": "2025-12-02T22:57:53.422Z" + "updatedAt": "2025-12-04T20:16:57.157Z", + "postProcessHash": "ba458c770417c3b87f8ffd8f8101ae11ed4bd4e23ca7d935733f9f7bd475b0af" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.416Z" + "updatedAt": "2025-12-04T20:16:57.153Z", + "postProcessHash": "00e136f806691a817b04bc0d27d7d3671a7cfdfae9b4cc35d0776546c4627c2c" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.416Z" + "updatedAt": "2025-12-04T20:16:57.153Z", + "postProcessHash": "b80b5ce824e8cfcc974ee8ec4e52b11ba5e30ccac522a1a779b9f1ef322068b9" } } }, "d1dee74d727375041e64ceadd78df956b10784ab7e1b4ac16460115a7e9d4ef8": { "469305bed4de1b5eb391960ebef6f0f5096cd86b537e42c0f37ee9f35e087a4c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.420Z" + "updatedAt": "2025-12-04T20:16:57.156Z", + "postProcessHash": "32603e9abfde539ff3dbb3dcd3ba8ac87b5f5e0f65c1d777cf2172526dcbdcb8" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.418Z" + "updatedAt": "2025-12-04T20:16:57.154Z", + "postProcessHash": "0873de98dafe03bd79551c6738857a36ad3f0f75c55ce7e3d9b893418326e4d9" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:53.418Z" + "updatedAt": "2025-12-04T20:16:57.154Z", + "postProcessHash": "62c9b82d42ffebf9b7cec9adc60515e2493f2828c71b562f4cdf51b4ae2897c9" } } }, "d536aa9054b9ba72e39103f4f8845be09447ae23a9e70d3baf478d3d2c2b8737": { "74f18e7520467c6186fd7fa39a49176648617574146477b17ce7062d7698f2df": { "jp": { - "updatedAt": "2025-12-02T22:57:53.425Z" + "updatedAt": "2025-12-04T20:16:57.160Z", + "postProcessHash": "c89fa664c40433fab3d8d451f19b3b07c7aeeeec723ca2ab0b45d6fa51fade20" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.426Z" + "updatedAt": "2025-12-04T20:16:57.161Z", + "postProcessHash": "18f222f4b50764f867713c6520f2d7158e8028d745bc3bb335c06de12921df61" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.423Z" + "updatedAt": "2025-12-04T20:16:57.159Z", + "postProcessHash": "c44be70621f1eb90c168ec209765b0a77655679de1670fa62e56fe1a4d96f839" } } }, "e56710647bd4bc05384aa8d37b2b94c4c5fe719ebbc36c598369a813b3fab06f": { "7fdb5ba5a1e64258c7ea2522a25a1d7238e73b82d6eb92fdda33bd598193863c": { "jp": { - "updatedAt": "2025-12-02T22:57:53.421Z" + "updatedAt": "2025-12-04T20:16:57.157Z", + "postProcessHash": "920a572810dd072379d529b51f6b657b73a6785336d0d711eefa1a28b0836d9d" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.421Z" + "updatedAt": "2025-12-04T20:16:57.157Z", + "postProcessHash": "0803a17052e5e3b44dedeebdd10bf6959e2c332b335b0735a58fa21cc002b168" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.420Z" + "updatedAt": "2025-12-04T20:16:57.156Z", + "postProcessHash": "7b871a227fa56f11e560136c070b360873555c7c2f8d1e1f8f0c7b6766395f3a" } } }, "e631f7491be262a58a274692876ad3b89d0729e8d0294ce9b3dfa4ed45f95788": { "f3609e7b117bdfa85ee3d950a8fd8f7afee96311aea43d1a833a253a135d50ab": { "jp": { - "updatedAt": "2025-12-02T22:57:44.725Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "08d3b9a5def68da4a98b354aec6686e0f704ae8f78389449af2c76072f10eabf" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.722Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": 
"a5923c42720b44df7af0c4b80eb685938829f695f1fcf271fd7fe49402b5c93a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.722Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": "1fb8911e1cf29f5a85d088d6fdb72604a4f2e8517fb6b79edc94ce6676b6e04f" } } }, "ec626ca92c333df76f0e8018ae7bd3a25ac79ecb449e8db31702fb29bb04506d": { "ec424602c359c5773d3bb1eb5b167bdedb80fb98f907e5848b487a5b40325f67": { "jp": { - "updatedAt": "2025-12-02T22:57:53.417Z" + "updatedAt": "2025-12-04T20:16:57.154Z", + "postProcessHash": "831479ae3f3f891aa475766ec35de4ceacc08443bf2d935faac9efbac2ce75dd" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.419Z" + "updatedAt": "2025-12-04T20:16:57.155Z", + "postProcessHash": "155c19e936c48aba8fae4d860dcf7a1bad71f64537babab122141ed61d540a5c" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.419Z" + "updatedAt": "2025-12-04T20:16:57.155Z", + "postProcessHash": "c98d7cb5941a8791f12122cd88fcdaac0001790ea6f3691764f9d3992b30aeb4" } } }, "fc2a90cf202e8e1844cfa26c61201f10a6c234df6585fc1c8aff86d125238563": { "5680229b7edd18b624f3a4822177aadd2e3930de72a0edd50a0d2924b785a146": { "jp": { - "updatedAt": "2025-12-02T22:57:44.735Z" + "updatedAt": "2025-12-04T20:16:57.178Z", + "postProcessHash": "b32f50acc7646ea86a88d446de828f1371378e31c7984d284df9e9744247ebf1" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.734Z" + "updatedAt": "2025-12-04T20:16:57.177Z", + "postProcessHash": "a6045143f1a80019140158603205e46c72216ab86cc0cac5bd443114f705fb55" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.736Z" + "updatedAt": "2025-12-04T20:16:57.178Z", + "postProcessHash": "ff828dac78825c976627d6f0c204d33c273ed6b0d4a26b54f8e1fcd3152fa2d0" } } }, "1646d3380fb5c92ec41482a9d98b525c37462130d6b01f32e1855b0e5f91c39e": { "ee6d9f1af26926d6377c040c2405ae576469664c532845e1d506079f9a027314": { "jp": { - "updatedAt": "2025-12-02T22:57:12.910Z" + "updatedAt": "2025-12-04T20:16:57.174Z", + "postProcessHash": "3cd9564f93929f9f9e53661082227b2f8dd7e1a308dc761bfef3aa374bf22073" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:44.750Z" + "updatedAt": "2025-12-04T20:16:57.173Z", + "postProcessHash": "db3da653aceb91fbd5cd9fe15efbabad6beec429685b767829e0df442057eced" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.912Z" + "updatedAt": "2025-12-04T20:16:57.189Z", + "postProcessHash": "f2739b13c7d2c01e9fb03504146ed30901b2d231814c69973dd7fc8823356f5a" } } }, "1ca8dfc5de116b6a2aecfd00677ce016075dee9e46cc6f57c85776d3ea9b3bd5": { "e84e0b80c498c3151e15f60e104f2cb38c6e40319081435e228dbfd13acf010e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.729Z" + "updatedAt": "2025-12-04T20:16:57.176Z", + "postProcessHash": "524f3b14f5f1b1077ee5f36835ce25929439b048d582d99f653541098d0df3c6" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.726Z" + "updatedAt": "2025-12-04T20:16:57.174Z", + "postProcessHash": "d40ee2981bf311256964d4caf6f73800cd1f1efb91d57bf6a3d1778e04cc9e6b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.731Z" + "updatedAt": "2025-12-04T20:16:57.176Z", + "postProcessHash": "eeb9f2b7c2feaeb28ad8ca95f200bb4486dbfeb66fef4499cdf13e5fb123d2b0" } } }, "1d1e36aa27a61854f94b1f60418f1a1d666d53319de3e83255d9388fcdfb4069": { "a0e30e85a93f908ea864b663f52f1dfce2a0d6a87372b01c7bf971316d114876": { "jp": { - "updatedAt": "2025-12-02T22:57:12.916Z" + "updatedAt": "2025-12-04T20:16:57.190Z", + "postProcessHash": "04047bd2159c019e594318463a01bb92061f631eee6579cf8bcea05ee7b848dc" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.914Z" + "updatedAt": "2025-12-04T20:16:57.190Z", + "postProcessHash": "3f1979f3bafcce587d9094d5710776a858fb415674ad88b9f42f17314e350a97" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.748Z" + "updatedAt": "2025-12-04T20:16:57.171Z", + "postProcessHash": "4081be5dd96b98dc3cc08002dc95f7b4a36bb50ea10590545833125c10e36b18" } } }, @@ -11242,195 +13907,240 @@ }, "e73593ac8091e2275dd3b89189a683806e63bec8a294b91cf59c5af40e70da83": { "zh": { - "updatedAt": "2025-12-02T22:57:44.739Z" + "updatedAt": "2025-12-04T20:16:57.165Z", + "postProcessHash": 
"bc93c3ce1bcf6f45ac85f74cdf6081de6ff0d264250905ab94a541da2fc8c35c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.740Z" + "updatedAt": "2025-12-04T20:16:57.165Z", + "postProcessHash": "a2217b3e8c26f8d077cfd933e95412ae71c554425c9c2be1e20714634f5a01a5" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.740Z" + "updatedAt": "2025-12-04T20:16:57.192Z", + "postProcessHash": "c12199a2942607a0d555bcc95ccdd42efc6339bc7ff4a37f1682686143b74ec9" } } }, "4d9f585a978f327ccfb66e8db78fa87ec8754d231c098b1a8929e4f912be5651": { "f0713cc147455f15a45af300160f8c01445b53f171e027819d998a3df1dc3b17": { "jp": { - "updatedAt": "2025-12-02T22:57:44.733Z" + "updatedAt": "2025-12-04T20:16:57.177Z", + "postProcessHash": "6d79e66e85d907da1ba44e4b6d167289b923943a7dd33794d94db1b2fdb9e6a2" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.731Z" + "updatedAt": "2025-12-04T20:16:57.176Z", + "postProcessHash": "0b2baa0a619687d87e6bcfdc21317c76b7ace172ae5b43d73031778b009da43b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.731Z" + "updatedAt": "2025-12-04T20:16:57.176Z", + "postProcessHash": "2ae59659caea63f6a2277d208d31b2dfaa0d2df65cf89d96a6eed412663d74d2" } } }, "509c73a63f9d009e86d97956ae4e1701003ed2be70dd32b5c56c66bd65c22609": { "c01d58d811ef80a75a56846d05c7b54259075a78eb6a2deb665f4405f861a7e2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.750Z" + "updatedAt": "2025-12-04T20:16:57.173Z", + "postProcessHash": "75fb4840e26cb5d7199604998c3376b1b7f8106ceb766d41e3def0db881e2ef8" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.913Z" + "updatedAt": "2025-12-04T20:16:57.189Z", + "postProcessHash": "827b865c8161983df7961751cb70afc5f376a6323a433ab5c2ce9686ad6003f3" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.751Z" + "updatedAt": "2025-12-04T20:16:57.174Z", + "postProcessHash": "bb9d2c48895c3db64be46fa1ad1ee71c45c18239a259c53bfe56a95ef416802c" } } }, "5bd267d7d3d49be2e95b491604023a269bf78bee49b4a83eefa9352690913107": { "9e71d3c2fa185cdf2d0231b06c410ed213fa00b972cdbfefe21a9aa8916bf03a": { "jp": { - "updatedAt": 
"2025-12-02T22:57:12.912Z" + "updatedAt": "2025-12-04T20:16:57.189Z", + "postProcessHash": "6fafd8b9e34a1659f6aadefd22e5d9a5a80072941adf1dde1e3c3f2210b991fe" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.911Z" + "updatedAt": "2025-12-04T20:16:57.175Z", + "postProcessHash": "a752877d3db7929f07ffbe44cf04ec6d45c318be512c59aa2f49a0d8d1833385" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.915Z" + "updatedAt": "2025-12-04T20:16:57.190Z", + "postProcessHash": "2f4c65d86ec65bce7c404c804fc4dc59f99af08332d7413b7893ed9a95a227d2" } } }, "66dafb9b646deaa517a7b992eec446570c152c02802db14e18047fc0fba7a0b1": { "f246fb415a6d823d2e1229aaf83e9eb73611213283605b91a0a23a1dbad24f50": { "jp": { - "updatedAt": "2025-12-02T22:57:12.911Z" + "updatedAt": "2025-12-04T20:16:57.174Z", + "postProcessHash": "452d13e6a1c161a9a66de42797062a81da272025534b7efc8642eedeb11f0e6f" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.911Z" + "updatedAt": "2025-12-04T20:16:57.174Z", + "postProcessHash": "b5bec1ed0f0616ccd4acfe250cac7b1f176c45be28851a4f30dbb65be7fff3d6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.748Z" + "updatedAt": "2025-12-04T20:16:57.171Z", + "postProcessHash": "22c2ff77d64151ec71da4791a540260a62ec1647236dbab90c1be6a68fa3821d" } } }, "7d4c81a663e077a5e75150c0e14d27c4ec51b540adb7aed379113d299f3c76bf": { "9a1b6a07af2168ede1ef0940be49f9f7462ec53241267251f36458e33a1bd688": { "zh": { - "updatedAt": "2025-12-02T22:57:44.746Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": "24398af82ea9b903083a0e791016e7ec0d013924ae50d4cd453e43f835d6d6c0" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.747Z" + "updatedAt": "2025-12-04T20:16:57.171Z", + "postProcessHash": "8e0e7df65dca44d5e506fc47d287c7ba57cf5f00be92512b339dedff94a56f4a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.744Z" + "updatedAt": "2025-12-04T20:16:57.169Z", + "postProcessHash": "6e272cadf693a029e8adc6d1df57f46a6aeaadb59a76f139d97ddee8525bea02" } } }, "8b2242e50cc879742f4d4efca957625a1106cb09f45a18de469646abc82467e7": { 
"343ceb09449e64360e7e7fca397cfc927ac8e348304b9893b3946e0ca65d8fae": { "jp": { - "updatedAt": "2025-12-02T22:57:44.729Z" + "updatedAt": "2025-12-04T20:16:57.175Z", + "postProcessHash": "cb84e2a3a41791a9a3578bf3ceac68629f3a071bbf50af453165978efa31d18f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.728Z" + "updatedAt": "2025-12-04T20:16:57.175Z", + "postProcessHash": "f2c27e0567fc4cd7c88b7968e0560af22f673fb655410cfc414e184a1c4102e1" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.732Z" + "updatedAt": "2025-12-04T20:16:57.176Z", + "postProcessHash": "5d731d40b0bc2834df96321a79cc69df8e78c9e6069cd79b31e86204af640ede" } } }, "c02bec6d7a15ddb4727d64f0c82f001b4a6994e6095794f3b35c713c1c69cd75": { "f05e5879650490f810241a7e1f46402021938daaf4688d3368c183eeb6dd5b65": { "jp": { - "updatedAt": "2025-12-02T22:57:44.747Z" + "updatedAt": "2025-12-04T20:16:57.171Z", + "postProcessHash": "8079c98ff4bb6b652aeacf13588c202ccea659d061366eb1efffed43720f9c9f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.745Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": "cec43996a94208098088ffd2bb27ce912a7e75540eb7dd4971835693b53a4e48" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.749Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "4053e0868abc9232ea17abc85ff8c6257330a80055f68059e39df250fc751709" } } }, "c35a4c218452080886d36470ffc05c5a0554e095f00432e0d7735900c7ad9435": { "9e5d4bd1e5379d30156d61671b947abb64b0c0e6ce551d838d6da2c7907d2ff3": { "jp": { - "updatedAt": "2025-12-02T22:57:44.749Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "54a2f92406a9b4374c14312b2276a533cd4f5d344fdb02f14da4f5d9bd6b2241" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.743Z" + "updatedAt": "2025-12-04T20:16:57.169Z", + "postProcessHash": "bdcf0dccc560a3518d729d39784f91e5a6e035cb7fd4c4dd06e73c7d3b35fb33" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.748Z" + "updatedAt": "2025-12-04T20:16:57.171Z", + "postProcessHash": 
"2d040557545d069d5be6e14e76f9bce8e556222d423fd0700896e6e46530a448" } } }, "c97fb19d4fbdf784a9e8916b6965cc8a3ea8fe90f09cfb7c399b3b59efc788a6": { "7b99574846f0eeee45a44964ff5ba57e7c06ca117dc6786a3b1b13201c58cc4b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.726Z" + "updatedAt": "2025-12-04T20:16:57.174Z", + "postProcessHash": "82154cf8758a7f6773e716b69705f77fb7b583e7840ce91a6158b6b478ca98e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.734Z" + "updatedAt": "2025-12-04T20:16:57.177Z", + "postProcessHash": "cf95ff5663136844d227530b1831d5b50e6eb779ae34506486cd99569e0f5ba4" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.729Z" + "updatedAt": "2025-12-04T20:16:57.175Z", + "postProcessHash": "a572a478c5d5764d5e45f002e3bb77821ecac441f393adf01b5e8447f4cd6047" } } }, "cfcb90141d0b37458f1c2f25a8e506c7c2ceb4cb4e4c27f996e474f6e8c5b159": { "2e2c5497230ef2998811f833ae91e6403540c85762a127d81135370dfbdb4e46": { "jp": { - "updatedAt": "2025-12-02T22:57:12.914Z" + "updatedAt": "2025-12-04T20:16:57.189Z", + "postProcessHash": "6ee4cfadc5db84433f762bdbf9818c5c0922f5a33143499afe0c5dcc90b90f90" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.751Z" + "updatedAt": "2025-12-04T20:16:57.173Z", + "postProcessHash": "282c689ca4cba17b66b30e5373fd7138a849a296d6d99e60f25dfe10a20ea01d" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.912Z" + "updatedAt": "2025-12-04T20:16:57.189Z", + "postProcessHash": "89961fc8aeb413c5f98b4ea1167c31ac285c0ea313bd6f2249be39d4e0eb5f13" } } }, "d5c4d2aff5bcd49a39c5a8969a66d9058ea8a6641de98e1f49a707b2a5eb6a06": { "c0bd7005e30dbceab4454c02004199f159d34c9dec509a5c13f2a23d8b720cff": { "jp": { - "updatedAt": "2025-12-02T22:57:44.726Z" + "updatedAt": "2025-12-04T20:16:57.173Z", + "postProcessHash": "0d0e0854cf1ee375d3c17058e36c1b2cd01431c869d02bd74344d95893aa5e1d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.725Z" + "updatedAt": "2025-12-04T20:16:57.173Z", + "postProcessHash": "85125b17604871fd054ca55d8b0e9064b0d97026abafa9b21f46b7e6a93768b7" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.724Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "0c5e301c8b0d9abba8127519bd9824ae13297a9b244bff4cc6cbfb573ab82c99" } } }, "eac3b18e7887fa005afb72b037867082f68f247bb61d91f3260e28d28cb1e85a": { "d2aa320a8841951470c1da7b5a35b1b69bf507d11d9b795481a4e587ec4b7bdd": { "jp": { - "updatedAt": "2025-12-02T22:57:44.732Z" + "updatedAt": "2025-12-04T20:16:57.177Z", + "postProcessHash": "4713abe9114293b350a795658f94cfb2ae89e54a6f761bfda6d200054045e84c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.728Z" + "updatedAt": "2025-12-04T20:16:57.175Z", + "postProcessHash": "f9ab1d9381b23f6dcafdfd9e0330d7132e7849ceff808f6bcc1fd1e9ac808a75" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.728Z" + "updatedAt": "2025-12-04T20:16:57.175Z", + "postProcessHash": "f322b93a81389be0d11970f72917a1d2ac098e53e427cef70c7237dce19064d3" } } }, "211a9e255fdac9865968252978823dbe623bf314b09a28779424fb52243ba37e": { "267373ee71eb85826ed3e41dfc0938bb71fbd6c83484df63fbdce933b1a28d1e": { "jp": { - "updatedAt": "2025-12-02T22:57:12.917Z" + "updatedAt": "2025-12-04T20:16:57.191Z", + "postProcessHash": "805fd19ff4c602f2850082a3788e91137da590b0a5506e624d1191c2a3055797" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.917Z" + "updatedAt": "2025-12-04T20:16:57.191Z", + "postProcessHash": "0aac739559f8d67c117483f607fb237d769afcff8df1a6622eec36dcdf51e8c4" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.919Z" + "updatedAt": "2025-12-04T20:16:57.192Z", + "postProcessHash": "e5400f54f86d01733e333f888a8adab5ffb2851b5f7d61481635c1d2e3318450" } } }, "4ba1eac8610621c18306898ccbcb9d4eaf5521b4b230d99cc774ec22219c9a28": { "1aafbee1019940fc3e073990ae3817e08af6f7e2ec670ece7d26a194827351bb": { "jp": { - "updatedAt": "2025-12-02T22:57:44.746Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": "6d3449efac91033dd32fc88ed893dde985acb51de8799a9f3738fd9ee65fae8d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.746Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": 
"7a33e7c9c658f1291a1e0fbf52e67bcba5373a24f7cedf800a5106e9aa8649dd" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.745Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": "b6cbbe9f23e9499168c94fe6ab435e7bbe3835b0d4ae9ffa92a2f227c94c7972" } } }, @@ -11448,689 +14158,848 @@ }, "6b408329e73e771ef16b20b1ea0ca91a113a919ef9db09031d4181b038bc38ec": { "ru": { - "updatedAt": "2025-12-02T22:57:44.737Z" + "updatedAt": "2025-12-04T20:16:57.164Z", + "postProcessHash": "e7d05f74b3724d6e7be3d48780f29d73d4d3d4323736422d6d4efaab99ec505e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.737Z" + "updatedAt": "2025-12-04T20:16:57.164Z", + "postProcessHash": "842927dfad9484fbd66753af6a2d8719a4f415ba0bb9d12552872a2cbf590bce" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.738Z" + "updatedAt": "2025-12-04T20:16:57.164Z", + "postProcessHash": "252e785a0b47023f5874f3131a853a2cc2353f8cc20bad895e33b5a70ab50c84" } } }, "67ea1760ac764890c103f9795d76f618a583b0bbbe0d32ad38a77c020d119d40": { "9a32d6666fc830213628b9c378f0039bc1280491f729f8bb75dd81bd764f13e5": { "jp": { - "updatedAt": "2025-12-02T22:57:12.919Z" + "updatedAt": "2025-12-04T20:16:57.192Z", + "postProcessHash": "35a28ff1a0c297409ece9c1c4995682f8ba8fe7dab5aa29cf0b35ab7c012d555" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.918Z" + "updatedAt": "2025-12-04T20:16:57.191Z", + "postProcessHash": "cb33ad53f402cf52d03d7dcecd869ce79f4b06152ff6c69cfd77de18bd5354a7" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.918Z" + "updatedAt": "2025-12-04T20:16:57.191Z", + "postProcessHash": "d0e05b542760a8088a91dff0db3e43252f7630f29672115ddcc53e05d8b2b0fc" } } }, "71b7871a9e60b8462bb9bc1ee2ff376b1641403aad826100b88e087426e5841f": { "3ad40142a5980106f0b667308b9b61cd075b9a565aa267c085988df32d9f9d20": { "jp": { - "updatedAt": "2025-12-02T22:57:12.915Z" + "updatedAt": "2025-12-04T20:16:57.190Z", + "postProcessHash": "8652d24892b17fb4ae0ddb7c7c1b75120f52a6e06752088bf1be94b92e92a420" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.916Z" + "updatedAt": 
"2025-12-04T20:16:57.190Z", + "postProcessHash": "1400c0e00fc4c7f99a37a9e7f46b79c2f823e84c98a3bcd69dcdfd43c95bd8e0" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.913Z" + "updatedAt": "2025-12-04T20:16:57.189Z", + "postProcessHash": "7d78a2bb50ab6d832348a7f4c107f177ed483e14a6b88a4c9dc0140c6f4e6cdf" } } }, "a9dd86f5f7da605aa9337f714a106fa513a631fcf9a168aa7b4e9a3b7ccaa531": { "ea6fc6dcc9635bc1877901795f75089be17712230ae183401a7e6eeaa9cfcf78": { "jp": { - "updatedAt": "2025-12-02T22:57:12.916Z" + "updatedAt": "2025-12-04T20:16:57.190Z", + "postProcessHash": "113fa3369921d52a6237371b71a586e3612bf94e1f430822f6237332145dfc34" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.915Z" + "updatedAt": "2025-12-04T20:16:57.190Z", + "postProcessHash": "3643c461c3e93e4fcb57e6e774a9db110432c960867d5386baf8b8aeb7761853" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.914Z" + "updatedAt": "2025-12-04T20:16:57.190Z", + "postProcessHash": "c0b514a291ed65edec996da0ae5c2147f87747135722c243d37c6b252eeed478" } } }, "b4b5cab881a02e5c4333f93e3149c6242284e0666d745952f3ccdc86593f7b52": { "112d13bcf3046cf70aa9ad7b11bd473fb40eb530504362a77d2a53dd8f9adac1": { "jp": { - "updatedAt": "2025-12-02T22:57:44.744Z" + "updatedAt": "2025-12-04T20:16:57.169Z", + "postProcessHash": "385f087637b7541c5c174ec209ccdd0bcb0c2de109d6c6ec9b15d3efda97d15b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.745Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": "c55eb2afa6c1d084d1ee2725110c8958902567088d2a127becfb2e46b357cdb6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.747Z" + "updatedAt": "2025-12-04T20:16:57.171Z", + "postProcessHash": "8820ca4e726b05a378d4d79899c770c42d1beea478570fdb19d2b594c838e555" } } }, "e21164b6c8802133bb1a3d2aafc3fd517ab74e6f8d293b7d293ae968782a8bd6": { "04d3d33fa3cda8a0df74a6fb806ee0f2d01d7cd25cf9f21c9e07d1830f9a9a6c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.739Z" + "updatedAt": "2025-12-04T20:16:57.165Z", + "postProcessHash": 
"c03769baed1aaaaa4e6d2aea30b44000231e3104d58a9984918ebef515c94ede" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.738Z" + "updatedAt": "2025-12-04T20:16:57.164Z", + "postProcessHash": "ba9b3ff73fd4a3705c7b56351246beba00a0c0f96425631864a2e1ce5a145a0c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.739Z" + "updatedAt": "2025-12-04T20:16:57.165Z", + "postProcessHash": "8a96d883bc49b687bc4272814c04e60d7f155e7571442f81826f5421206236bf" } } }, "f9aa45e8fc85d0cb2d4c76b0e287f8743a40e6d92257f98ad0691dbde7bc3a9e": { "4866f2bf5a753196ff65a8b94a288fa39116ec9e4deeb7ae77c0598af8d582d9": { "jp": { - "updatedAt": "2025-12-02T22:57:44.750Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "8c6422e1872077a7a927e0da1f8eac09e648dc006f0da3d34a6f572879b9fe78" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.749Z" + "updatedAt": "2025-12-04T20:16:57.172Z", + "postProcessHash": "91505f8ea4479a5ab7792f93a4d113b64c60ef80a974a2eea10734c68efc62d5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.744Z" + "updatedAt": "2025-12-04T20:16:57.170Z", + "postProcessHash": "7a748a661b3084e76eeab5cbcc9eb176235c66d5a4fcb787d4a98564c1088c32" } } }, "3e29eb5aca75381e4ec8ade4e6a0cf7d26b53d4a25cb26660ec2c44647941a73": { "c0bfc76e21aac5582f52b976b44aa4baf44b8f76caa3d562ec73e6e4ef161a92": { "jp": { - "updatedAt": "2025-12-02T22:57:53.434Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": "151bcadd4c923b9862325f9755823cdf859bc84eb95ea470313fb9f022da7c1e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.776Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "5d58c65f13a89b24ac4448806db4d00cb7ae43a013c6f47955c0a00afe3de57f" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.779Z" + "updatedAt": "2025-12-04T20:16:57.196Z", + "postProcessHash": "79ed65cf20bd3d7d227ed95257b609076bb608941f5d29beae688b15945e751f" } } }, "4b875d4cf08501af46c9a0dc4af0b755918205b50ba44a03d48aab3f7f49ac54": { "658a06aa55917c46e77861ee9b9b9643be0049c255c7052d4f6ae6166e655b01": { "jp": { - "updatedAt": 
"2025-12-02T22:57:53.438Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "69d32dddd32fad80576c63b57429ecca3a122b6c8009136c668dd9946aa770b6" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.454Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": "fc47ddd4cda9e7fc1901d0f088e327914edcea2e41fa383a95bd3611a9416eb3" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.440Z" + "updatedAt": "2025-12-04T20:16:57.207Z", + "postProcessHash": "b824f32e760efa617a347035036762a9b00955e347aa387960eb67d1484fcad7" } } }, "50ddd976e3ab8042db7b5db277b40561a4de66f66d7343d572a7ddd20ad31bd7": { "0aacc185d8105f7e3ea27585dc11ab225da3bb6c1db23c8daa11af166d8e972a": { "jp": { - "updatedAt": "2025-12-02T22:57:53.437Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "09ae37a0cdc40e470fabff8c64efdbfe23b955cb23f9be779d27345ff60fd8bc" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.437Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "35912aaba657807fa31595e1cb9ec89e18e127090efd45a5f43655032ae45408" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.436Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "19dc8b8e7de78c7a302c55093682c1d34826dc25467a85442ed96d91ffbc13f1" } } }, "54e7a0d28f060089af44ed7367d75f254a6d1b252f6ea6274e58dbe249470b30": { "4ced947fe881a2f40e14c2be1395d6c2cc3e15fe93e42e71df52ec929c2dcea4": { "ru": { - "updatedAt": "2025-12-02T22:57:53.438Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "a0ab3ff0502f3a558e579e695e3243f4f3c27cee9e8d0d061f8226ef8751b75e" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.441Z" + "updatedAt": "2025-12-04T20:16:57.207Z", + "postProcessHash": "1393bd754272ca3eb031a5db62ed1a53aef9b0d691bb60a4c2c567b7682fc617" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.454Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": "d712c11950de830a233a80929ae7ffbbd8376af425cf5ef4c38d66da59e7733a" } } }, "7a97c0a8a1d7a2b7124253b37f3cdff0f274d654965381e7ee3aeb4db3323631": { 
"ed2621c01542cd6c73825e5fe7639beff16cce375577d0d908b8b02c4bc1371b": { "jp": { - "updatedAt": "2025-12-02T22:57:53.435Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "2d4701dc2da306ad1a38d1c666f0339affcb3cd3c24b15a4af221090d505091a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.771Z" + "updatedAt": "2025-12-04T20:16:57.193Z", + "postProcessHash": "06cf81681c478bed652df00bedcc52ee5084a7a5561fef99e7a7a73e5ed37f97" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.771Z" + "updatedAt": "2025-12-04T20:16:57.193Z", + "postProcessHash": "7e4922313e57d9e6a00ec88700dbb3c7dd9d47a1a75d3573ef7f4516340216eb" } } }, "893f6ba96a900463e4a20bfebef45d262bc3a3e1452bbe2f889f333b52e5fee5": { "b3a0a7a9c4f2e4c526bb71ba0bc5e6dac553aa232350b1910ad7fbf035734c06": { "jp": { - "updatedAt": "2025-12-02T22:57:44.774Z" + "updatedAt": "2025-12-04T20:16:57.194Z", + "postProcessHash": "d3a1fdf2682c6c39fda93dab6220525c675e6ad3fcd8a788873cd77cff17d94d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.772Z" + "updatedAt": "2025-12-04T20:16:57.193Z", + "postProcessHash": "951b342d1ccb8881976bf1e2f9b5fe8d97e575852ff31ea4331b0663f995d63f" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.778Z" + "updatedAt": "2025-12-04T20:16:57.195Z", + "postProcessHash": "934c6204deb40439d15110cfa2ed5240d945d9d2ec72c5a6b2d74f63bcbe0eab" } } }, "95a73804027437518f6cb49fd17638db0b1d6b9361ef329c1d59b49231f45112": { "e13f5fe9c753ab5e1cd5c3b9ef8db4c7e56caa299572d07d0368d8af887e99a3": { "jp": { - "updatedAt": "2025-12-02T22:57:53.440Z" + "updatedAt": "2025-12-04T20:16:57.207Z", + "postProcessHash": "9c2fe3ffa79ba72b45bd928c07adacaf95bae456c7d8868d03bc2be9f2d5c66f" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.454Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": "7b037e53d83390fae8ec37bb8d0ae2a79543ee2e3d5b68302fcd4afa77f32847" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.440Z" + "updatedAt": "2025-12-04T20:16:57.207Z", + "postProcessHash": 
"c9bb4d56ed8417b134cfa3834f4b13600f4f0d7c32be5bf23abc887654212945" } } }, "b624c3e0df3b6286b5d61538607b9030a6cd27129246f0485ab94c5f1b0efd7c": { "b4c584ccbf84daf8b7fe6aae9e1c393e8220224a9cecec6d5d2024e0cb7aa654": { "jp": { - "updatedAt": "2025-12-02T22:57:53.442Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "76be40baa6f7ef3f4f007909fe6a3cdc98fae142dfb129e3869f21a675dc67d1" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.443Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "8a5ee8f30cfcfe49679fe5442caa8eaf1c01015da6c413d9b51b5f20aed952ea" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.437Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "f08af1c70cd966738ff927a3b82a076aed152b1805ea25755bb9ea2f61c43983" } } }, "e210bad99f1e8a957566f3f34d0853651d4ef532d83ae50fc1fb032d24e2dd28": { "0b6791886d00299fd2b8b71cf58d276a85916e6880c408cdbef78333d00f1d3a": { "jp": { - "updatedAt": "2025-12-02T22:57:53.435Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "19cff1c4cda721d059771ccd6ae2ff3b21a89225bf13aec95f44366453497d01" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.442Z" + "updatedAt": "2025-12-04T20:16:57.208Z", + "postProcessHash": "3453a35b1230df47ec563c21dc722846556e8fb25aa1a063d2471ab6057e83b9" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.454Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": "d587718d4bec41d6390e88a1fe9cd6d9632c97ae6b53ed345d41b131645337a4" } } }, "e77458d405603be885e941ab39a2c03ea7c893b38a1ed1b1c4a5beb9a703c04f": { "f78ef201b8464bb62128fd17fb1bcf8d3f42f167e5b4f4c8547866c5ecfbc7a9": { "jp": { - "updatedAt": "2025-12-02T22:57:53.436Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "c38f352c88fab87be0d6d50ed903032019effeb9700132af50c672ca1232f866" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.436Z" + "updatedAt": "2025-12-04T20:16:57.206Z", + "postProcessHash": "9e35d3b55c465a9923e1594398dcef60f24e46cda8e7da109ae6b1cc9eb55e31" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.771Z" + "updatedAt": "2025-12-04T20:16:57.205Z", + "postProcessHash": "31e05df97c316b12e89a4fd2a480db22e36d391d3d214675bf1149b2a5aa06d4" } } }, "f38d701b9050913191927978c4e261dec48019c2bef4c6a3139c4059c71afaf8": { "0e1ad7c4e88f314e2b810b6f27ec43ba78bfe09eca3eec7d023374756f07bc64": { "jp": { - "updatedAt": "2025-12-02T22:57:44.778Z" + "updatedAt": "2025-12-04T20:16:57.196Z", + "postProcessHash": "d49e24ef806f1cecfed3b6a1177aae70fdfb8686ae43af0ebf2b5f7396d7ffdf" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.774Z" + "updatedAt": "2025-12-04T20:16:57.194Z", + "postProcessHash": "f170fc99d41fe81fb7d14f4b2e50e9d6c4f31d15990c9183639c2a3c0d6f9d30" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.434Z" + "updatedAt": "2025-12-04T20:16:57.216Z", + "postProcessHash": "43102884836f2bd297cbf28fde00ea42c8092398a1d4321d49262d1f8a460479" } } }, "06b6f9b31956eb6e3cebe7421e22abac9ad0de32434585b3bedb572ca22fe779": { "ac6f44e72647bc384df3ba5b105e8bc37e9ce25a9c1c104570232ed738108026": { "jp": { - "updatedAt": "2025-12-02T22:57:44.858Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "4ded16e84324c21d80cccb0f32e2b59dfb524128ed95374b8afd9b82be3703d0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.861Z" + "updatedAt": "2025-12-04T20:16:57.286Z", + "postProcessHash": "8ba7d5b07a4398585ffe22c12885f9971a129242ee810d2aea1b550b9ed60302" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.824Z" + "updatedAt": "2025-12-04T20:16:57.266Z", + "postProcessHash": "8b9694119bc72b519f92c1cbe5287c592044ef0c31a3c6fe1c7892b736965230" } } }, "088f126360fc7b556a09516cc41a4880d4599464d2cb1ff9f6ea02417c6df429": { "04f510d66c9b376ce9989e4858fb9d1204bb45b666002f527435e252cc2dc4f8": { "jp": { - "updatedAt": "2025-12-02T22:57:53.530Z" + "updatedAt": "2025-12-04T20:16:57.293Z", + "postProcessHash": "26d429dced3e924be0288b69b409282a280615f75a8cf50793820c7daa3cc52d" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.529Z" + "updatedAt": "2025-12-04T20:16:57.292Z", + "postProcessHash": 
"747b02623a400edbd379f0b7729a6fbe2717367aeca268a4df43f9264240c091" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.504Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "f60c967a30c0c2541cf3df47e0a4a7cd435e15368c3f841cb3c170584e3d8d23" } } }, "13195f1c973faf9aadf39f45b6a4df596efad0f6e4df019051e13dc77eb9fdfa": { "948846a8743f4a90ac77c6ba53e93f5386df8d5310a4b8182265798313dc6dc9": { "jp": { - "updatedAt": "2025-12-02T22:57:44.824Z" + "updatedAt": "2025-12-04T20:16:57.266Z", + "postProcessHash": "0d3cdbc8ac0e21e20fffba486861b7f728cca6daf83deb19df843409a743b595" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.498Z" + "updatedAt": "2025-12-04T20:16:57.288Z", + "postProcessHash": "16b8959c03562365a1cbb3d2e917410e0bf6a6b35a08deda432c22e0cf44405d" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.500Z" + "updatedAt": "2025-12-04T20:16:57.289Z", + "postProcessHash": "f1c9902152d0303586d4dd07c2a44651e1cebf0c014ab691c4976ad1526dcc39" } } }, "2505693dc142fd4f445b3882dc548fa0cc38adca662a63dbfdb437b0f67776ba": { "f86b0dd8e53eca99c2eba408e02d7d92a906e77aee88846c9e24a2d79f1d998e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.853Z" + "updatedAt": "2025-12-04T20:16:57.267Z", + "postProcessHash": "1614c93a944a1b328134e1a37678a167d2ee301a6c1adc398ed63391518a4d0c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.856Z" + "updatedAt": "2025-12-04T20:16:57.278Z", + "postProcessHash": "0af0e47384c13918e148385055ea9d888c45e5b1ecf275e68288dd9112a243f6" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.497Z" + "updatedAt": "2025-12-04T20:16:57.288Z", + "postProcessHash": "7bcb9cf41f28d7fd33a8c0824cf439fdff3c128b07fd614d554aa8b795cbf553" } } }, "266e0dc9c395c310374563d981fa2685a69b11a4eb800352e56423b5bd7e2901": { "d344c46f769e848e76522e3e0e64f31e4c4cd999a3de3ea3cc10400f0b2826ae": { "jp": { - "updatedAt": "2025-12-02T22:57:53.500Z" + "updatedAt": "2025-12-04T20:16:57.290Z", + "postProcessHash": "424c76a162d1f5caf1035de5528fac8fdf26545a0217250ab5b46f9d9e84e986" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:53.501Z" + "updatedAt": "2025-12-04T20:16:57.290Z", + "postProcessHash": "7577f7638ec3ed790705f38fec1295a344323dff3261850bf81be20e7a721196" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.502Z" + "updatedAt": "2025-12-04T20:16:57.291Z", + "postProcessHash": "2bac3dcc2e99832a1c653bf9fa7f9325f8b3672bf2f8307aef79be22faf7782b" } } }, "3c3cdb595236de7ad8f9d05838ec2b8bf3f90caa6bca9eb1dbb703fe9b2c5f67": { "22c4567427f06c4ff596058d0963e1977f619d426a1cb0b04f22ad1721307091": { "jp": { - "updatedAt": "2025-12-02T22:57:44.853Z" + "updatedAt": "2025-12-04T20:16:57.270Z", + "postProcessHash": "bf81837d3d819fda276ea80241f91504fbc6cfbe4c75b8dd6ecc52949ee7f35b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.853Z" + "updatedAt": "2025-12-04T20:16:57.270Z", + "postProcessHash": "f1948e89bb71bbd33a13b1ae57fc73d67c7ef63f3e92b4875cdd66c820db4890" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.854Z" + "updatedAt": "2025-12-04T20:16:57.270Z", + "postProcessHash": "ab03c6921615fe1b548e0c8c69c84cda066a90a6c5be52d6ed8263351870c3e2" } } }, "3cb2ac954c25f39475156759f2f4f8c8714328c659aaba596322bf83f3e3ecf3": { "da8c2bbfc6c34aa9551b3e0a532d71ec831fc09659ffc38734155072f907743e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.859Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "7eb6445aad7bcd8741a2821a620cec91b44f09467c7e75cb58b50554e6ab4fae" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.822Z" + "updatedAt": "2025-12-04T20:16:57.265Z", + "postProcessHash": "6b0ec5ab30c8e3aaa3382eb694064c52e4375af00d987965a5f15354fc18eb64" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.856Z" + "updatedAt": "2025-12-04T20:16:57.278Z", + "postProcessHash": "e62d98a0f9241d7b429e28d56a1cabab265cd42680e9d149f5bb8358ff4aadcb" } } }, "3f5009534c38cb29edcc48a3b2c5b50aa0363797569ad9ed3c962e075be3d711": { "e52f05211d11daf47cbab45322de5fb579805427116030493d255d74a6de33e6": { "jp": { - "updatedAt": "2025-12-02T22:57:44.833Z" + "updatedAt": "2025-12-04T20:16:57.271Z", + "postProcessHash": 
"da6647a3338357109a9e71c98a281c675ff0ccc6eac2400de7410101fda587f2" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.834Z" + "updatedAt": "2025-12-04T20:16:57.271Z", + "postProcessHash": "c6b0508aba9c34b85142a2a03705452eef4fbd8b77b5dc6f3316a800b20586a3" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.826Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "23e348e3f6e0a858bb1bb6bd286eb5b8c39786f2e30bb4f2259975d423d756a0" } } }, "51d439a5ad94546b36a253aeeb85868911bfe6475f4fefb30756a75f43e01dc0": { "c9a05803f13e75801b4f09b8c52974299028da9cd5533d505c572edbdd11b9f8": { "jp": { - "updatedAt": "2025-12-02T22:57:44.831Z" + "updatedAt": "2025-12-04T20:16:57.270Z", + "postProcessHash": "05f56f30ef04b9535382e0b2a5b47624a9cf4179c72d894c0ca77a7416d2f4b0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.831Z" + "updatedAt": "2025-12-04T20:16:57.270Z", + "postProcessHash": "01acbb5125a2932ea1b56279f1fa56a840a63098eefa1dd3d061ba0c192bcdc7" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.831Z" + "updatedAt": "2025-12-04T20:16:57.271Z", + "postProcessHash": "7207b8f6046f7d321c5ed5d109961b0da1dfedb7c130964859c836335974b2f5" } } }, "5227584ef900ca7684b844bf9b013a21d6faf12f8833191ac40e941a5fa9878f": { "5405382560ae38c848c605acfb1a4ec134912ef6bcad95aab5381530689e735b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.834Z" + "updatedAt": "2025-12-04T20:16:57.271Z", + "postProcessHash": "db291d623c82bf0be6cabd34ec09c0e7287f1b3dcdf7417fb271d06b7398e946" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.835Z" + "updatedAt": "2025-12-04T20:16:57.272Z", + "postProcessHash": "4ba80d3d0d5fef5310f4c71708edc047767302d2b2accdf9d1f6e72b37eb321c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.832Z" + "updatedAt": "2025-12-04T20:16:57.271Z", + "postProcessHash": "c630912128d52047b6387f1ad77d02f71b1201662810316e0c4453c1b197f54c" } } }, "a5397922ad119e6b298a6b4b378a68f864ea43c8323107a35954165809de0589": { "488ca0a5b4cba0af7cf4ca440e3733d6860db7e0e1beb8403ae74e4cfd8e7753": { "jp": { - "updatedAt": 
"2025-12-02T22:57:44.823Z" + "updatedAt": "2025-12-04T20:16:57.265Z", + "postProcessHash": "ee7fe5118f1f915918e457aaf37b8151034d094a2ec845c538cf1f02de610b6b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.858Z" + "updatedAt": "2025-12-04T20:16:57.278Z", + "postProcessHash": "259e3fcbad2e423f4f95e2598e8197a9d37fe98388ac2f5f8a008c211559c140" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.856Z" + "updatedAt": "2025-12-04T20:16:57.278Z", + "postProcessHash": "5c96f03aa28e39c27f05050e37d3b1dcd8f7a56d84edec0469cd482116ff06de" } } }, "c6e56f828d1b34579ba790f93abaa05b29fb89f9585497258413971007a3a246": { "c2f203731c8694cfaf84b37109a789c0a0167657339f75db8fc7b685f948d2ea": { "jp": { - "updatedAt": "2025-12-02T22:57:44.834Z" + "updatedAt": "2025-12-04T20:16:57.272Z", + "postProcessHash": "24f283c9cd476c2a48c3aee5b8417cadcbb1fdc05f9b0e152d803f325c11d748" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.826Z" + "updatedAt": "2025-12-04T20:16:57.268Z", + "postProcessHash": "52a21bdc476e9374ec2db4171ebb16faaa59f8b78149d4f6fb62ffe8e0faa3f7" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.832Z" + "updatedAt": "2025-12-04T20:16:57.271Z", + "postProcessHash": "29eff4c8022671e8d9b8a0bcfce281233a22ad86b770dea408ce5c5a68cb0995" } } }, "c8b0b34a39a4f363d421259bdd17b9dd8d0d01f815eda9607f0d9ef245895275": { "1126bfe846bb5fcdc4b0c7c2bfd10807cc64d6e12d190d2c824329258baf5efb": { "jp": { - "updatedAt": "2025-12-02T22:57:44.854Z" + "updatedAt": "2025-12-04T20:16:57.270Z", + "postProcessHash": "09e9eddb32ef60c33303976cec19f0d6b0e1c8b45cf92d5f6d24a2975ebd17a3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.832Z" + "updatedAt": "2025-12-04T20:16:57.271Z", + "postProcessHash": "3bd8eaf7ffbf5db593349877d750fc1a8bc99bdd6792c9fd762eff7f4adc3503" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.833Z" + "updatedAt": "2025-12-04T20:16:57.271Z", + "postProcessHash": "3ae5a119484a39bfb8ed0b66b973c99466b5b6e788b6f5feb698307d73356728" } } }, "ce10e9c3dd234b8bf0fa7265cc3f51606b9f80563a4be89f36f9805412c6a452": { 
"f80ac33db9f2499ec8763473f9aaab8f92e4f89d4fbb898fbee33da6e7d210d4": { "jp": { - "updatedAt": "2025-12-02T22:57:44.859Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "a2be5c3064b066096beb34893ba76a1d73d0761328dfc9e5f63ecc8d61976698" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.500Z" + "updatedAt": "2025-12-04T20:16:57.290Z", + "postProcessHash": "21056c6da95bd69c391c6ddbbac23d7fb05deaca91fe4ddb14c9d4d725bfc117" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.499Z" + "updatedAt": "2025-12-04T20:16:57.289Z", + "postProcessHash": "76bee9bac3d6392fcb69b57a0be48e8c03a8d37efe5708929e39dd4339a47437" } } }, "e8941cfe3ebe51cf895d37bfced51319951864655bb65ed34110cfbbd542b577": { "1724335ae6c5171c92d1126311524dbb7f3ba7d451a7907320b5c0cbe7ebb3aa": { "jp": { - "updatedAt": "2025-12-02T22:57:53.501Z" + "updatedAt": "2025-12-04T20:16:57.290Z", + "postProcessHash": "2f22dbb256b6604c5eea6702491fd864115d50bae9e62d76fe52eefeba90f72d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.857Z" + "updatedAt": "2025-12-04T20:16:57.278Z", + "postProcessHash": "3d7bfab33fb3beebc2f47d43d3e73564e8785737cfb98e27af85cd0d18311ae3" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.498Z" + "updatedAt": "2025-12-04T20:16:57.288Z", + "postProcessHash": "4632dcc47cf57386beffab1bc32b061d35a434d723cf247c9778e0d56f0d2f49" } } }, "ee1d174b1119575726aa2ce11719dc7482af9a58eb1e4c20075010bcc5bc200a": { "85b1114daba44b005630b9c50a7b4b79dec7d53f4ef54586f1ecd92f3f5c5d72": { "jp": { - "updatedAt": "2025-12-02T22:57:44.855Z" + "updatedAt": "2025-12-04T20:16:57.277Z", + "postProcessHash": "8759b322119c72c85670392c3d59d9d0656f281e25bdc1634c37365240ab22da" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.824Z" + "updatedAt": "2025-12-04T20:16:57.266Z", + "postProcessHash": "7e175bd3056ac9f209c0715b0617049aba1e96debb0bda42038f955da0d47c3f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.497Z" + "updatedAt": "2025-12-04T20:16:57.287Z", + "postProcessHash": 
"5b61ba70e8514da298d0c329b632814a34b6bc2248dca28c62f8ecd6d3d7c6bc" } } }, "0cb711998b74fbafee6f9dbe1cf42999cd6bf81fb67aba52da75f9d6e7820916": { "1b31920ed434089b5c438486640b5af358c740bf6e33cef31bc59a7a8cf7708b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.578Z" + "updatedAt": "2025-12-04T20:16:57.309Z", + "postProcessHash": "2e1b8c5fcc8a474d50beb165e736ce2693ecc66b5782c127a36f4adf5f1da57f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.577Z" + "updatedAt": "2025-12-04T20:16:57.308Z", + "postProcessHash": "9229db9a058c9ee3310583fc1641a345d5f9192be849f1c882fc37d235115518" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.576Z" + "updatedAt": "2025-12-04T20:16:57.305Z", + "postProcessHash": "ddf2dcc8559d776bcf2965eb2508cdd20e2beb8c0af525a720890322d26d07e8" } } }, "0de197a09c02a6e7de6b2120720f01b2f26dd69cc09e57640234c52fe619cbe1": { "a3b2b2da1705264e477035d4c4f93d27e7c159e13c8fefc67fdbac404fa1df2f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.577Z" + "updatedAt": "2025-12-04T20:16:57.308Z", + "postProcessHash": "0eaef6fd9ff13bab0d54d7a3ef3ea3001fc8d429659cc1898d26bb35c50396ed" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.578Z" + "updatedAt": "2025-12-04T20:16:57.309Z", + "postProcessHash": "efa91f04ae6e3c902352ecef6b1d4cc8ca244cffc6058b42322aa90b28484aff" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.571Z" + "updatedAt": "2025-12-04T20:16:57.299Z", + "postProcessHash": "99ded51be520e91023b89bc582d9fbd164ff49269b48877bed7911a74821422f" } } }, "39f0108c94bbc9ceec29295e4a5c4a30bc3ed66e79dcf055c93bcb5e07df95b4": { "f14661437615304886b90084f8db1b8e50ccb8718cce1d8bb57271192cb3f924": { "jp": { - "updatedAt": "2025-12-02T22:57:53.503Z" + "updatedAt": "2025-12-04T20:16:57.293Z", + "postProcessHash": "078db896105ca6ec4db4bc231126e5c66fa6bfdd68113a50c4860cda8ffae0f4" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.503Z" + "updatedAt": "2025-12-04T20:16:57.293Z", + "postProcessHash": "8d86c67345a9ea56aaf8520bb073f8da4617b4c8bdd3869394546ac6d8ce297b" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:53.504Z" + "updatedAt": "2025-12-04T20:16:57.293Z", + "postProcessHash": "783d7db53fd2d35af5e455fe0cf632085577d1e0042ecb08da164093f170b087" } } }, "4511c24ad879085d0713bffa28b8695c1a87d24872ce30015bb857f43c961627": { "f33dc7dd4c81c9ff62d672ddd22da52fe2b3790feef29653e27d7dbf105dacdc": { "jp": { - "updatedAt": "2025-12-02T22:57:53.498Z" + "updatedAt": "2025-12-04T20:16:57.288Z", + "postProcessHash": "f70ee990ec09fb26a5f18f4e100ccbb8860c3fea679934c49d180427f3ee413e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.859Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "7d76dec47d71b8ec7819f956c69e3c6417601ea853816c8f9bee880c630c4954" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.858Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "a34f63191b6be3b7343907180388ccdfdc122dd49221329ade5cffc057a87135" } } }, "7209b7ddab6e5d0aa0edb6dd2a9d28893ced1fa4a5e84eca66e18a12cbc9a471": { "b55f055c6ea298013d180b87459ca4cbef2d564e3a47054885bf85eca5781ed7": { "jp": { - "updatedAt": "2025-12-02T22:57:44.851Z" + "updatedAt": "2025-12-04T20:16:57.266Z", + "postProcessHash": "9d7c6b091070d70ac59a7804d5c5355e0515a83464a2e6d21c2f7ff864dfbb91" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.497Z" + "updatedAt": "2025-12-04T20:16:57.287Z", + "postProcessHash": "ae02d1de33f53b9fa36346ad18ab138a8405cbce9223d07b05b7d3269acbbe23" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.496Z" + "updatedAt": "2025-12-04T20:16:57.287Z", + "postProcessHash": "fbcb85e48a30800d2da009a4b6803780c9be4fef72b2dc0c153bb927d557e745" } } }, "8d5ac58622d05dc878c50a9901e001b81276e5c37349076f70389f7ec8731cb4": { "2a5bbf839d622f7ef15b7a5b8575e42dcbd0d1ab16bf6f98ab233f94cdbd68b3": { "jp": { - "updatedAt": "2025-12-02T22:57:44.857Z" + "updatedAt": "2025-12-04T20:16:57.278Z", + "postProcessHash": "09a413c47ee6e7afdf5f2c95a169a52f19468d7c352113cdd49546e7d5190d36" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.500Z" + "updatedAt": "2025-12-04T20:16:57.289Z", + "postProcessHash": 
"0bd343513020712269ce0d65704dc4dcbb3b0d3a68d4a9b1e522fffb9786e5f5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.860Z" + "updatedAt": "2025-12-04T20:16:57.286Z", + "postProcessHash": "a202b6e2237fa09f8a07e1220dde750433b60c3951c744b6b58f17dd9a437e60" } } }, "9da34b15afe0cf84a2c73d8d1acfc85dae89be8c90605898caceecbc4626da99": { "ce873407eda99feac5ab7638cb9c330da28e87de5b88e7f7e35b3b8dba2c1ffc": { "jp": { - "updatedAt": "2025-12-02T22:57:53.499Z" + "updatedAt": "2025-12-04T20:16:57.289Z", + "postProcessHash": "e92f062fe32ed6db1f6cd8ab641480e969463c92588a305eb3418d6f57f79083" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.855Z" + "updatedAt": "2025-12-04T20:16:57.278Z", + "postProcessHash": "0128fd0a6c6d0679bb03f47ccc10268b47c2c952a3a93e9244e2ff1d1e55cf66" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.855Z" + "updatedAt": "2025-12-04T20:16:57.277Z", + "postProcessHash": "3c8581f194bd4a595619f28cd1d6f8f5c0eb99bd330ae6852db48de914423b3a" } } }, "b1eb4813b41c7ccc174d13cca2cec0592da899961dd7a66e59673fce738d90ed": { "d63a4009d7fadde4213a7f160c8741c105b3a63db320d984e375579df904dfc5": { "jp": { - "updatedAt": "2025-12-02T22:57:53.501Z" + "updatedAt": "2025-12-04T20:16:57.291Z", + "postProcessHash": "f0ee7007f32a86cd3400d49e8a84dfeab84e0c15494475f8812a81a3246e671f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.860Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "1b74ac3d8a145e5aa16b591b3e0cfbe44811728dcba71efb679e74bdfcc80e78" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.502Z" + "updatedAt": "2025-12-04T20:16:57.291Z", + "postProcessHash": "fd2522a37aa373298ac79b4e831ec56e16f3475a1ecf1addba7d1de98326bb4c" } } }, "bc635d7f6a9111bbbc3d31c625fcda3adb9eadc78253335799d1b3a12a509df7": { "b7a3734788840b662f127af66b64815bd7c85bf39dd4cf42306c85eb6f392d01": { "zh": { - "updatedAt": "2025-12-02T22:57:53.526Z" + "updatedAt": "2025-12-04T20:16:57.290Z", + "postProcessHash": "f668c1906ecc471622866253444bce4dbbba81ee303815c59461a39cb723e65b" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:53.527Z" + "updatedAt": "2025-12-04T20:16:57.291Z", + "postProcessHash": "85a44378f4f3cc33d096e0e5e4e9a1b39e47c1bd19196f41121eef4d52cda876" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.528Z" + "updatedAt": "2025-12-04T20:16:57.292Z", + "postProcessHash": "a7fa40f6a14b41717889837c056e03602a03fcc685634f9d6783b496d6e4b88c" } } }, "bdf357b395b129f57e836477b2fc57675705bcf48e1acda08c190ab17a75951e": { "3a0381755f449a5032606d2fdab638ca733950978814b42e1aceb74203a2235b": { "jp": { - "updatedAt": "2025-12-02T22:57:53.507Z" + "updatedAt": "2025-12-04T20:16:57.281Z", + "postProcessHash": "e733fde436e4d1cdd1b16a3a850b017590f1252e03c72d3b1c4fd59bbdd4021c" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.525Z" + "updatedAt": "2025-12-04T20:16:57.288Z", + "postProcessHash": "903f4c0dbb76a9524b16849a407d8357e0ea37f163e6edc0c6a1fa55d2f472b5" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.508Z" + "updatedAt": "2025-12-04T20:16:57.281Z", + "postProcessHash": "8bf4c2fbe0c3987eafe627ccde29067e14ebbf8c233a82a46cc1671c961564fc" } } }, "c54fab4cf7043c79b8ce701279e089e154ad852ea3c4248cb2c8da671cbc17db": { "b6e7b7146868d159e85bc698be8dd009a8755c7a8c993e4406163a4d71a408a9": { "jp": { - "updatedAt": "2025-12-02T22:57:44.857Z" + "updatedAt": "2025-12-04T20:16:57.278Z", + "postProcessHash": "41a6188cc374616d128c18df81d3140869c5e20978970ea3aafb1dda27b766f2" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.496Z" + "updatedAt": "2025-12-04T20:16:57.287Z", + "postProcessHash": "ab963f9ca2421adabcd658f0f70ac9373a95357be12483dcd252f44b6ac1c080" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.860Z" + "updatedAt": "2025-12-04T20:16:57.279Z", + "postProcessHash": "dad5f4be538885d3ffc44200c5dfbf6b1b8625c672101ec45ceb13f43a4aa2c2" } } }, "c571247fa3e091098d027771a55d5ebe774d6d531b2c5384736de73837552959": { "e5aeca6ca592dd8ef3c7bcf54b278d64dd04a95cd012f8594105429290303c21": { "jp": { - "updatedAt": "2025-12-02T22:57:44.851Z" + "updatedAt": "2025-12-04T20:16:57.266Z", + "postProcessHash": 
"70317a6694045b71857ce268d875cb7550889448d2993dc3d3fc7b6722c93a24" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.501Z" + "updatedAt": "2025-12-04T20:16:57.291Z", + "postProcessHash": "e2b642d977e3d55608fd20f37d1a7c6a3cef4c43bc0196a1f39921edd2b4518d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.861Z" + "updatedAt": "2025-12-04T20:16:57.286Z", + "postProcessHash": "040da56fdae7b56e6f28228c7ab6fcbdfbd21c9c5b7ee941c67cedac3dee016f" } } }, "cc311a7d9ae7be3e04c62efd5c5b7aa8cb9d6075749b29e99939d01baa76e3fe": { "3de10984a294ee3ab3e7105d5ba6c42208236c0f01721e7189efb0af99ca2490": { "jp": { - "updatedAt": "2025-12-02T22:57:44.852Z" + "updatedAt": "2025-12-04T20:16:57.266Z", + "postProcessHash": "68de6ddbd11919eb90e17cb859f090f9fe4826b765351cd9abd6bdc436a68e27" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.495Z" + "updatedAt": "2025-12-04T20:16:57.287Z", + "postProcessHash": "b0421b22dfa0f35205b9ec3448f3acea71aa7b428233172862f4c7dc8935d0d9" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.499Z" + "updatedAt": "2025-12-04T20:16:57.289Z", + "postProcessHash": "ab5b892070b71822fb1e850e318bce95aa4040ab6e27568ae4ad69b24a0b10df" } } }, "d49e422d3649c455dff8cb00cabeffadfc176bab2340583e83d8416f5fbb799a": { "551eaa35224112c0edb3f9e68292a528790f07b1ea2fe15b67e97ec37689af33": { "jp": { - "updatedAt": "2025-12-02T22:57:53.522Z" + "updatedAt": "2025-12-04T20:16:57.285Z", + "postProcessHash": "9047b1a1ce5ef529083a4f4e66f87dd1e65d1f11d61173a167a0432f21f63cc6" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.524Z" + "updatedAt": "2025-12-04T20:16:57.287Z", + "postProcessHash": "93d9250ce7cce03e33c56bd3a435c7e1f78e70319a1149e5f1283c48b1068ac4" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.524Z" + "updatedAt": "2025-12-04T20:16:57.287Z", + "postProcessHash": "7517cd395271d6ddcc3d8462629d643227ad67dd9ddca3997d7d4b60849b9e68" } } }, "ee343f5a3bf00722c8dacdf2e096fa970da83e5102fcb1446bbc99a4b089a390": { "72f38826fa27979a73a67e5413b3854cc5f5f2bfca9f1efe2890e20dc90a5020": { "jp": { - "updatedAt": 
"2025-12-02T22:57:53.496Z" + "updatedAt": "2025-12-04T20:16:57.287Z", + "postProcessHash": "7cb4776f767afa600d4d14aaa1468bb99a7f2e914c6b97795ed805decddd953d" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.498Z" + "updatedAt": "2025-12-04T20:16:57.288Z", + "postProcessHash": "ce78cf6ab45e1585e2e8922d19e6b8353de38bce29a1cdc806abeb1b57491ba2" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.823Z" + "updatedAt": "2025-12-04T20:16:57.265Z", + "postProcessHash": "89b770c93118e6faea3bb41e26d286f89d835e2f041b8d274aece6110c93ca66" } } }, "fc30da7ebddc5996d940ca4f9540cee6fa6b79f9c37ee5aa6cd56665488a65e6": { "20ab3ac2e587dcfbf842ef0e2dde364c4fac02225d76cf6a5a4b6a646b77e4d6": { "jp": { - "updatedAt": "2025-12-02T22:57:44.852Z" + "updatedAt": "2025-12-04T20:16:57.267Z", + "postProcessHash": "c573a67fbf84fd397879e6ad586a7af22f5c3eda91cdf71217de47c8355efcd3" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.502Z" + "updatedAt": "2025-12-04T20:16:57.291Z", + "postProcessHash": "0a7babdb599d6f9f73df290e765fe3e6052b89dfd37d43b7489583b9879b48d5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.852Z" + "updatedAt": "2025-12-04T20:16:57.267Z", + "postProcessHash": "17db5702fdf78b723e8c55866b796e8cf761aa9974781c611aef97cc8e628d9a" } } }, "fc92ad70da88c48808fdb53f68400c73f6f900eca6a95d544909915d2b22d9f0": { "16c47449f52759987429555de611585f7f1f6d6770d4c1ced0d74ae244ab45df": { "jp": { - "updatedAt": "2025-12-02T22:57:53.524Z" + "updatedAt": "2025-12-04T20:16:57.288Z", + "postProcessHash": "313471062279195612d59ec683acc31c310600f389d7a3eb32134b97c60aeadc" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.522Z" + "updatedAt": "2025-12-04T20:16:57.285Z", + "postProcessHash": "9ca4c6bba584c6effeecbc9feeb4f2ba429797f14f57181a757ba9f7db7167f5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.850Z" + "updatedAt": "2025-12-04T20:16:57.265Z", + "postProcessHash": "53681e067e663881ec038a649582e0291ad872c6f0ef52be1a328e15d58ee3e2" } } }, "fd2a3635e203221890fdb75fdb12cad083607f12a05af6e46565b58b28626a3f": { 
"69e391ff6463d09b09730e7e4366b4c486d3bb1759441114546febf2e97601a2": { "jp": { - "updatedAt": "2025-12-02T22:57:53.513Z" + "updatedAt": "2025-12-04T20:16:57.281Z", + "postProcessHash": "459fab8c49fc5cfd4b7051ab1e3046ffda0eefb9e597dd4203225fa9ce33666c" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.513Z" + "updatedAt": "2025-12-04T20:16:57.282Z", + "postProcessHash": "6551e263181aaa1489502382270c64bd30c4ab0186d3b55f8e9a7e72d8e83718" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.514Z" + "updatedAt": "2025-12-04T20:16:57.282Z", + "postProcessHash": "a6ddfcdbf2ca3d0a02579a9ceabfd35ae2b3d7e23066340fff77852f91425ab8" } } }, "01730b1ec82c24f14c646ea28a41edd24f15dd6258eadc1f9401688d51eaad3a": { "303c8891cee64e9b8765b1a9436de1274fd2bbe91f843406837e41ba74c6c318": { "jp": { - "updatedAt": "2025-12-02T22:57:28.593Z" + "updatedAt": "2025-12-04T20:16:57.302Z", + "postProcessHash": "7b515082c0f5e5420daa3650fa3cc380a73a2574df6744b63416525eff25855d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.568Z" + "updatedAt": "2025-12-04T20:16:57.298Z", + "postProcessHash": "e5854213d96361777bb240e739aebd92cedf07458f92ed47950c11166a299a65" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.600Z" + "updatedAt": "2025-12-04T20:16:57.307Z", + "postProcessHash": "fa1230b67b96a704a904f1de343f1a12cfdf4350092fc16e05a0e77cf977ab27" } }, "99dd663f4b6f1f7c866a09ecc4aa890f3ab244afe08834a22596edafef952ca4": { @@ -12148,26 +15017,32 @@ "0f88f2bd27c6a3bc5b20ffd358c1599368da4a7821aed81420035a719675f40a": { "947a7d558e471c72cf79437a217f341c9e6e2083cef8d20956a3839b9c085fa3": { "jp": { - "updatedAt": "2025-12-02T22:57:53.525Z" + "updatedAt": "2025-12-04T20:16:57.289Z", + "postProcessHash": "509b0e81381ceba67a2bd7f38851ded5dbfb6064ced6cd0f0bc026e3c3f9ecb8" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.514Z" + "updatedAt": "2025-12-04T20:16:57.283Z", + "postProcessHash": "5523d7cf4a80df0f9d9cb6adc390111a1159565579e5cbb5ac58b04f49e3aaf8" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.523Z" + "updatedAt": 
"2025-12-04T20:16:57.286Z", + "postProcessHash": "7ce9f7d60f73fc99710842c473069d5d61eb676797ac216df205a465957fada4" } } }, "2407e7afa831d557309ac0a87b9eae46f720441353201d2e2c78ed3e2510ab2a": { "1e84c0fe8d2d9789e16132b70f9a5d9a1b2fb7084db35c324173b1b69a5e7848": { "jp": { - "updatedAt": "2025-12-02T22:57:28.600Z" + "updatedAt": "2025-12-04T20:16:57.307Z", + "postProcessHash": "c13fd145bf3ec22a4f5049d3786c53a4a161c327530faeb5662d7a05cf9b8e9c" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.604Z" + "updatedAt": "2025-12-04T20:16:57.331Z", + "postProcessHash": "1231e26d15f813af1da5526b490d75e644d69a24c384c47d74d853c41e4620b8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.601Z" + "updatedAt": "2025-12-04T20:16:57.308Z", + "postProcessHash": "f904f1242fdd4c130610f3c5e7152cc280b07888349f0edcbd8b0b08917fbcfc" } }, "634a6e8cc715dfe8bb5f0ed37757b192d5e78bbd3d002d9e758fc5e3428bf252": { @@ -12185,13 +15060,16 @@ "24f89815412a9281c45be003f0d9b1edaffe253b9fb6e44d0b69114de2a8bb5c": { "856a0875860cb4e9fdc7fca531785d1b4ba67b93fdace5421889ea8cc500ef1f": { "jp": { - "updatedAt": "2025-12-02T22:57:53.513Z" + "updatedAt": "2025-12-04T20:16:57.282Z", + "postProcessHash": "3f29c0edff8c076893b1653c96c169ac81e6b32116d3230a094a15c098c4148a" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.514Z" + "updatedAt": "2025-12-04T20:16:57.282Z", + "postProcessHash": "82d77fbfea0f0d8d28e675d335cb2703c17ed4a0a9324290baf01ed4fb3e5168" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.514Z" + "updatedAt": "2025-12-04T20:16:57.282Z", + "postProcessHash": "56b0d0c90b1ae721ea921022009cda51c86067ab538d454be5bd8b2414a22c77" } } }, @@ -12206,57 +15084,83 @@ "zh": { "updatedAt": "2025-12-02T22:57:28.580Z" } + }, + "6d9e97bf1687b499e1a9a27d241d573cc5e28e2662548216fe4371463c54c4ea": { + "zh": { + "updatedAt": "2025-12-04T20:16:57.312Z", + "postProcessHash": "580f12acd5b3f1e589a4e86de3a1b6124ec4a9a4a03d7a3cda396eb212dc7d13" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:57.312Z", + "postProcessHash": 
"d47f76de738a3be1a5f05bc9753bf630bd4e3acafeb5635bf7502a65d56d96fd" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:57.312Z", + "postProcessHash": "2374a6cc4ff589f2118d5fc19846d46da79cbb296ea0ba250e586a4a5f824047" + } } }, "417572f3f0c0dee81daaaf436d03f842c9160631b01f165595d06d9e99f3c6c0": { "bedae71b49b3c79b70e3ad0767d167ca7bf7f0cf3792f2786f3be6e243ac41f5": { "ru": { - "updatedAt": "2025-12-02T22:57:53.507Z" + "updatedAt": "2025-12-04T20:16:57.281Z", + "postProcessHash": "9704c671cb50cce74276808cd8e85b877fd221e56b8d65175d61f86869cb36e2" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.539Z" + "updatedAt": "2025-12-04T20:16:57.281Z", + "postProcessHash": "05c5d7b1230bf7f5e6d2a0f63e011de905257e29cddd316902cc65c9e91dc96c" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.574Z" + "updatedAt": "2025-12-04T20:16:57.303Z", + "postProcessHash": "d4c5adb58897d54fc6aa74a6a3eb46f90f2581a8f24fd2175f825e56cc477405" } } }, "453e82594457c450e00def5b4a049c6817c1f11b3242ecdc0c113a4fe824bda1": { "3e341e3a84064fbb72d1f07486692fcc58eba4c23ed96700a8697e160736a689": { "jp": { - "updatedAt": "2025-12-02T22:57:28.566Z" + "updatedAt": "2025-12-04T20:16:57.297Z", + "postProcessHash": "3647f93ac0a6b4d5009fde88bd7a04ae473c4842f5ff004389c0e7368609a945" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.568Z" + "updatedAt": "2025-12-04T20:16:57.298Z", + "postProcessHash": "fab5d69bca9099677cd5517333080a40ff1dc876a91651a4fd4e8c2f4114396d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.571Z" + "updatedAt": "2025-12-04T20:16:57.299Z", + "postProcessHash": "98004e873cdacda8371ab02000c139bf001cd68fabdeab41f6be62a83738c6bf" } } }, "4f6f1a6da73f8d186f0a18ad4c69138ec62d12a6b38064449c0eaf1293c82145": { "19880790e9525db190f5e72d85ffc766a344cde65183576c30c03ab560c76bad": { "jp": { - "updatedAt": "2025-12-02T22:57:53.505Z" + "updatedAt": "2025-12-04T20:16:57.280Z", + "postProcessHash": "6ec53797367c3ebc2ce4c7165fd57e48d81d4ace1805ab4fe3e24c813411b27e" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.528Z" + 
"updatedAt": "2025-12-04T20:16:57.292Z", + "postProcessHash": "8d142e45fc03591e71e1a5c124ecc0a690b9387b8bdafcf9e872705af35e6ec0" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.529Z" + "updatedAt": "2025-12-04T20:16:57.292Z", + "postProcessHash": "5f8376dcb03c2d681af52a9674195e471f2a15b060c1c35bd18dd28edbbce8e4" } } }, "544e14c8df8e9aeba587c7a01debdb6de7b8d0dc480e2a471b321fe3cd637687": { "56a8436026a55bc58795064c90dcf48eb1783d7c4aeb6e25f3c6be910d52bfb0": { "jp": { - "updatedAt": "2025-12-02T22:57:53.506Z" + "updatedAt": "2025-12-04T20:16:57.280Z", + "postProcessHash": "3309ac26454d7c7a591bb12a8684a11469bd8819568940e1102b21e376b25163" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.526Z" + "updatedAt": "2025-12-04T20:16:57.289Z", + "postProcessHash": "e5099c2804de3c48feb8fb4e320f1436e267e05af979556d5e1d37a36d5a1495" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.523Z" + "updatedAt": "2025-12-04T20:16:57.286Z", + "postProcessHash": "e90dc6143a0c62219cbee394a9345870a2220ed6e1feaddb05d00a8148c06b93" } } }, @@ -12337,44 +15241,67 @@ "jp": { "updatedAt": "2025-12-02T22:57:53.538Z" } + }, + "c82ff44284c408b2ded721e502858442d03a3ddbae238dfdb3853b6248d03c00": { + "zh": { + "updatedAt": "2025-12-04T20:16:57.314Z", + "postProcessHash": "c99e87eda3d6cfce7bec036222adf14d2267f2cab17cbfd52e0d95c191b94b58" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:57.317Z", + "postProcessHash": "762ef504005d5e2ed61bf443fcdad2c2667d78599dd4440f13dfe8e5a02067b2" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:57.318Z", + "postProcessHash": "3363a370fe68b10e6d232720d7787ecc43c2290e528ed76a69be56ba21866485" + } } }, "596b0a954cde794b5e64f8babd89a81c1359843a6046215dd00cba539357857d": { "af24567e7b2b1b9a842510afc1c41e6a4f7a9634fdd16e4176a76bc4b3c3e091": { "jp": { - "updatedAt": "2025-12-02T22:57:53.523Z" + "updatedAt": "2025-12-04T20:16:57.286Z", + "postProcessHash": "88596ffc0ddd693e986f59d9a6079ca8b4a5d728db48c1951819240549fc3f86" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.526Z" + "updatedAt": 
"2025-12-04T20:16:57.290Z", + "postProcessHash": "1a8766ed83b6a019bde334db8a8f4973de29f13e052742469d7b405efb17e26f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.504Z" + "updatedAt": "2025-12-04T20:16:57.280Z", + "postProcessHash": "037f1e27b831a82caa36c74dd376abdac207d5f98eb1f96fbb860880c44b9709" } } }, "65351c23daaa6ae3579c1740e82b1f56ce6eb541ff65d23ed1f890694f6ea440": { "b999ab8a06deee210039a2eaf91d71da758c776e64c8fc322d876e73e8db2861": { "jp": { - "updatedAt": "2025-12-02T22:57:53.507Z" + "updatedAt": "2025-12-04T20:16:57.281Z", + "postProcessHash": "e1cbbff414f33459b26bf0893207408ce2903dd105839a75ee86c91b16743ae4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.569Z" + "updatedAt": "2025-12-04T20:16:57.299Z", + "postProcessHash": "0cf732da495c32436559f4f2ba0f04c042ded5dd61e10f9ff44c1bc10511223f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.524Z" + "updatedAt": "2025-12-04T20:16:57.303Z", + "postProcessHash": "ae6e23df23eb8f2b90b785ded2b8d1f0a1c61c640ad2d6540c39ce2d13b34557" } } }, "942eceae58e0a094962eb7383ca418c7a0fb355bbdf35ed09b1fb271b8ef0622": { "a06cd352188c57c4dc80e07b3511cf0c55b644a5eac9806b52fee16a901321cc": { "jp": { - "updatedAt": "2025-12-02T22:57:53.505Z" + "updatedAt": "2025-12-04T20:16:57.280Z", + "postProcessHash": "d32cd5e7f2ebf1a47c2c2cd968a1dfb8c6c57cd21f5d38abc160498b1a9f78b6" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.525Z" + "updatedAt": "2025-12-04T20:16:57.289Z", + "postProcessHash": "d6c162124e1bf92901fa87011b96b27d44b39783580fd49099df23680604ee5d" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.523Z" + "updatedAt": "2025-12-04T20:16:57.286Z", + "postProcessHash": "cb123e39f275ac0509fcd9459a9acc36afc1bbd158aaf7cb2e8b8817c27f0ada" } } }, @@ -12400,6 +15327,20 @@ "jp": { "updatedAt": "2025-11-29T14:26:30.873Z" } + }, + "1fdf9e52f42d0544e013d1add4533d1be51f9c79d10ad4b2c0ad50d18397d074": { + "ru": { + "updatedAt": "2025-12-04T20:16:58.185Z", + "postProcessHash": "fbc5811502c45f5643d3bd41ac8f0be465fcd90bbbee3a8298dd57aca433d158" + }, + 
"jp": { + "updatedAt": "2025-12-04T20:16:58.186Z", + "postProcessHash": "980b6df408ed684681a336408850f0b295fd356e27332fd0e1497855528b222c" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:58.186Z", + "postProcessHash": "70c177dccacbf27aa34239bbf9d3b0b8fcc2cdf1d4ea067826e0ff8c9ab7e2ac" + } } }, "a4265198145ae0d412153c5abd0417df656a57368c80042916957e2c00936a91": { @@ -12438,65 +15379,80 @@ }, "2a201021555f42613b72e16bcc5e7fb5cfe39d43a84cc1721f1dd53636ffac1e": { "zh": { - "updatedAt": "2025-12-02T22:57:53.517Z" + "updatedAt": "2025-12-04T20:16:57.296Z", + "postProcessHash": "0aa606fbdc8cf580ee055c7d79304a881b23437cc02365f3cce50f66b049372a" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.520Z" + "updatedAt": "2025-12-04T20:16:57.284Z", + "postProcessHash": "52aca83bd5cfafa2945813684a0ad8831500a8c7cae58bba8e378bf06851d8be" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.516Z" + "updatedAt": "2025-12-04T20:16:57.295Z", + "postProcessHash": "faff7facca908e3763249f58a4b40763d9584c8147e628ba6a333c73ccae9a08" } } }, "acaee03135e8e96bcdcf34c15546b735f613d1e5ae560184c16e47ce55501204": { "8a07567dde3044656ee0f3a1ecdd3437e3653bc1dbd011b4bab9edb2c0e04c95": { "jp": { - "updatedAt": "2025-12-02T22:57:53.527Z" + "updatedAt": "2025-12-04T20:16:57.290Z", + "postProcessHash": "2d640ddc219cfd0151320cfe6e3eabc440522d0d770a3c321ed870ef2e17459e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.850Z" + "updatedAt": "2025-12-04T20:16:57.266Z", + "postProcessHash": "ee1d334def6a104f5c2a53ab541417ad2263fd9443e5bdb31c8e9ed776db4f28" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.527Z" + "updatedAt": "2025-12-04T20:16:57.291Z", + "postProcessHash": "0b11e72fdce531e876055fb1221e1c88b7690a4b388a7641cee9fe0323e0a48e" } } }, "ae900fe149a5b14ee7a25d9f850a7fed9bbb24da3497c1861285d73a625852e6": { "178aea88d150360011d964d55863a4f9f7585cb6ddc5b56d142898d29ed03414": { "jp": { - "updatedAt": "2025-12-02T22:57:53.539Z" + "updatedAt": "2025-12-04T20:16:57.282Z", + "postProcessHash": 
"788bfff5144f6a500f40dd126b5c6d911017468dc256ed40157d1f7359d15df7" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.540Z" + "updatedAt": "2025-12-04T20:16:57.282Z", + "postProcessHash": "ca9d87765421615160d3c47b22bb049264593e0cb85ed2b79a522935b113c518" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.540Z" + "updatedAt": "2025-12-04T20:16:57.282Z", + "postProcessHash": "152ea1d9f99b3420502d46a9abede098c1bf2bf7e5f9beb6b33d1a15b04cd344" } } }, "cc14be3df8410373edcf3ea623a34273b7005b0668dcb8d261ee3fbada8f972a": { "029f36173935f1b92553f610da6f3be5d9b0976fea74e17265186d40a9f8f8b7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.580Z" + "updatedAt": "2025-12-04T20:16:57.310Z", + "postProcessHash": "a827fc513500c2d4cf2d3b1966ac7e7da2c1ab58c8ee1620ac6d1dbae43206d4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.581Z" + "updatedAt": "2025-12-04T20:16:57.310Z", + "postProcessHash": "3f3760414d5c656218fa33e985fc17bc5e69822f4e58f308f18e2b20b380d06e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.582Z" + "updatedAt": "2025-12-04T20:16:57.311Z", + "postProcessHash": "9da1c3df2f732f3e3995bba33e5452580d7e993ebb072b6a9f2d08872eed80f3" } } }, "d8cbf85de396e8d762bfdc573d415e4482bb687b9017d25d153c264728283316": { "62c5c6e1debf8e9f65330683895c791394dfa2b8f1cab9a3413558667b58ec1c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.575Z" + "updatedAt": "2025-12-04T20:16:57.305Z", + "postProcessHash": "2252db4dfc3354fb1a7dd1bdf301fb2457479a789de100b8aa96e76dbef61dbc" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.574Z" + "updatedAt": "2025-12-04T20:16:57.304Z", + "postProcessHash": "349b791ee8271e741426e4d3a9f83ed6dee72ba5563412acf1b3d7ffa9853d1e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.579Z" + "updatedAt": "2025-12-04T20:16:57.310Z", + "postProcessHash": "70e589e843a5e37904ad0c2668f382fda0cdd8663dad7d6e018049f8965b4076" } } }, @@ -12566,31 +15522,51 @@ "ru": { "updatedAt": "2025-12-02T22:57:44.849Z" } + }, + "b81c1ca4160f512d76aa66aea31b1a00513a05510d0c7431d4387b7b7e5a851a": { + "zh": { + 
"updatedAt": "2025-12-04T20:16:58.184Z", + "postProcessHash": "c023dd5b5d6567253396c86c788ea1c65d194b9bbec87c30cfc8b44fe108bfd2" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.184Z", + "postProcessHash": "223d8530b3a6a601d0aa40258e26fd75a14071446877330b3d7b00952a9b87ab" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.187Z", + "postProcessHash": "47d703b015a424176f3507ada6b94dd6e75b67cd449b639d1218486bcf5a9c77" + } } }, "f6c796e2a223d7f3109d7a0e087b02b576111cee44e1affe20a492544e19a35d": { "5c1b2453bc509571ef5a9c5a79343853e690b58e16dd273eb4fedb719f0aabd8": { "jp": { - "updatedAt": "2025-12-02T22:57:53.532Z" + "updatedAt": "2025-12-04T20:16:57.295Z", + "postProcessHash": "2bfcde4c178caaa41ee9d92cea889e42306134e9e9eae2ec9255ac17f17b9946" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.531Z" + "updatedAt": "2025-12-04T20:16:57.294Z", + "postProcessHash": "b32914dfbe7d3b083561d734bddad72bcc459fadf5b258dd9ca5abad3c43b7c4" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.530Z" + "updatedAt": "2025-12-04T20:16:57.294Z", + "postProcessHash": "362567edef24d913adc2530aa71113919db3764891250038124349e7b091953a" } } }, "063fed20736d4477e2229fcccb85e98116869443c6a55d44a629fcdf41af8097": { "7f5388455b7501e345c1421c779eb60c54a09041e604f2ab4fe8d4e90d30f442": { "jp": { - "updatedAt": "2025-12-02T22:57:28.608Z" + "updatedAt": "2025-12-04T20:16:57.346Z", + "postProcessHash": "eaa2b42206addda6ed7ae93ff880938cc9f48f9a1676073f4cf91debdf12b027" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.608Z" + "updatedAt": "2025-12-04T20:16:57.346Z", + "postProcessHash": "fcada88a349233c3bad0d87086ca14f6a52e1ec877e6172608a6a8d16c380f0b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.606Z" + "updatedAt": "2025-12-04T20:16:57.334Z", + "postProcessHash": "9021d3c4f248ba3bf4df279dbd5b6ec0446fef84e3a654bacb0745a398a53b8b" } }, "3cf60c4c63f78ca99a909e11fdd7b9ea46873225acbb7444b8eb966b6e9c4838": { @@ -12619,26 +15595,32 @@ }, "7676a41c6d1e719ba8b13b8d322ace741b11f0fe672d3b38397d5e1d23081fd0": { "zh": { - 
"updatedAt": "2025-12-02T22:57:28.612Z" + "updatedAt": "2025-12-04T20:16:57.297Z", + "postProcessHash": "1d447f51f2f16b63cc6084bd0ecf680602884c7400071565a098bc5a6ab71204" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.612Z" + "updatedAt": "2025-12-04T20:16:57.297Z", + "postProcessHash": "24662cac1f3f521f5dd2ec8b21f2f015925edd343119d56de4730a0ead5f1c57" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.612Z" + "updatedAt": "2025-12-04T20:16:57.297Z", + "postProcessHash": "d103cd2525c13cd38cd8497448b754272bf9380b8a793841d3186ec7965aa937" } } }, "18909e78fb8faac8cc03a75ea4cd8dd121bcb0a77585782c21cff69accbaf023": { "a2e6924b47ebd77edde457f769848cef9b8dad9baaa33f85fdaea23f6bf1d821": { "zh": { - "updatedAt": "2025-12-02T22:57:28.618Z" + "updatedAt": "2025-12-04T20:16:57.326Z", + "postProcessHash": "9a9628d342e8f6dc2dbe7a3a800080b1bb6c5d9a0547e1293c489adb2d090c1b" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.622Z" + "updatedAt": "2025-12-04T20:16:57.328Z", + "postProcessHash": "a8397ac4f0be546d168971e8665dea9fabc5aabafcdade39a4feed2fd5874324" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.616Z" + "updatedAt": "2025-12-04T20:16:57.325Z", + "postProcessHash": "d0bb911adc260c897a35412b423dce88479a8d0187c98e045b8b822802ad64be" } }, "4e4923ee24e6317511ddbea23c7b6f8e03c0277f9d6ac0d56eb56dd0caae3746": { @@ -12656,13 +15638,16 @@ "1c4c51a336d1e6dee310539258abd450be7834df46548255e22fae5d3686a247": { "e554f5f0de4eb19dd4293e28213211802e65c6533c3bb4dcd80a8d90c76744fd": { "jp": { - "updatedAt": "2025-12-02T22:57:28.597Z" + "updatedAt": "2025-12-04T20:16:57.306Z", + "postProcessHash": "286c16d34499436aa693a0ddee010b42ba4e113eb3c01b2f7d58ebbd8b1238b2" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.605Z" + "updatedAt": "2025-12-04T20:16:57.333Z", + "postProcessHash": "579a2017db5d0020a032fc16d80079353d49e159d028a5dabe776fbfd5338595" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.603Z" + "updatedAt": "2025-12-04T20:16:57.330Z", + "postProcessHash": 
"cd32f6b84a7796b8baf021a51bf339722643bf029d84d2becbde4d5ae3577f20" } }, "5c602e02407de45f557f89046cb7b30682c56eaedacab7a3bfc0f1d208a1813f": { @@ -12680,52 +15665,64 @@ "39df2af9870d3b0cc9ef00711b97902ed3b5f6df0419aadf0841770290785d7b": { "a18203de1411607a70e1437450eccbf17a073e8daa45c5c42ee8e0cba812d5f3": { "jp": { - "updatedAt": "2025-12-02T22:57:28.572Z" + "updatedAt": "2025-12-04T20:16:57.300Z", + "postProcessHash": "3bbb7c5646b1c313a0b2367ab05c903288e50ac49f7f9deb71f7c01520ddb224" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.567Z" + "updatedAt": "2025-12-04T20:16:57.298Z", + "postProcessHash": "86fbb14caf7aadb8d8a2818917678146af7931b697670fbdce4f9d1b7a218a99" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.506Z" + "updatedAt": "2025-12-04T20:16:57.281Z", + "postProcessHash": "72159bce6a7dabffc4c6faa94b55c651524ca392f6ec5c0aaa8b350ebd514991" } } }, "40220941c00a4eef0a2069b02906e525beca179d4a354e0a2e5a911c363640b5": { "989d53822380f38745d79c1b84562bfb045e678799d0f7110947e9bf5d599700": { "jp": { - "updatedAt": "2025-12-02T22:57:28.573Z" + "updatedAt": "2025-12-04T20:16:57.302Z", + "postProcessHash": "2082266e455a6d48c347970fda672a150958563147b5e43bf527526f0976e557" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.565Z" + "updatedAt": "2025-12-04T20:16:57.297Z", + "postProcessHash": "5caeea8e13e35337bf86babfa9f5a350f16fab4124cf08f6b4fde5f92b9ba661" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.570Z" + "updatedAt": "2025-12-04T20:16:57.299Z", + "postProcessHash": "37b44bce42e61a8d7d7a9332c88f90b4c23dd01f8235d3d3f853605ecdae5886" } } }, "505cd1f1060fe51777563a177f877b84419bab382656d36901ea1615cd4c5f44": { "0a35a92e535e80b3a150fd73abbc1751ae0fa2688543577feac7ce7f4de53ae8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.599Z" + "updatedAt": "2025-12-04T20:16:57.307Z", + "postProcessHash": "fc96c2b38f72f2b3449e782982f40cd3eae0c0ab289e27cd5f7c7229ce10572e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.589Z" + "updatedAt": "2025-12-04T20:16:57.300Z", + "postProcessHash": 
"ba5d5828317b239e5f587fa322418377292531f8ff57472751dd9ca075336a42" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.593Z" + "updatedAt": "2025-12-04T20:16:57.303Z", + "postProcessHash": "7064dc165b41d5f55589467cf9e7682d9687e1d1e96dab5ba2ac6972b16049e0" } } }, "67e57dc33f46fa357a2eb4f340d87b17225b6a975701da70873323aa80e25052": { "e431ffa5cc74af67d9f087cdd47582b18d0fa8c78bb4467cdddf3350e7c9f2b8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.584Z" + "updatedAt": "2025-12-04T20:16:57.311Z", + "postProcessHash": "4bbfe75a58a9a094e4afe17a67997cd41dd863b56c366e909278e821bc0fefae" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.583Z" + "updatedAt": "2025-12-04T20:16:57.311Z", + "postProcessHash": "f2c89a8deb708462dcc86022c87e4a3921d196635920e81fafa5cf3a3a743df1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.584Z" + "updatedAt": "2025-12-04T20:16:57.311Z", + "postProcessHash": "dd01575ef0d018272e40eada9c99645d47339060fdeb365da8be4642328e5d6b" } }, "4b25da1f59890cfa5a986a65cb021896d1be71d0919069a68ca0edb32d4bcb78": { @@ -12743,13 +15740,16 @@ "6d56ddb9a5b3ccdf4eae29f57959e9374f0ff177ac9800e0d460527344dc64a0": { "0828f5e8f879eaff39ae8686b298e753cdeacad3c2b4543b71425525219f9c9d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.601Z" + "updatedAt": "2025-12-04T20:16:57.309Z", + "postProcessHash": "6eb58a1dbaff7d05bf5ba525de6a925053f253fb92efd106d6cb7b1c8b784c7a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.579Z" + "updatedAt": "2025-12-04T20:16:57.330Z", + "postProcessHash": "a437b785c5c3eff3540f0be62d6deee48880dba3f640158835ee2b5f442fb29f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.578Z" + "updatedAt": "2025-12-04T20:16:57.309Z", + "postProcessHash": "81e8b5475f03bffe605a99f0c0690c886226e0decab9f1077ef8df0c3fd5b6ce" } }, "eed3064741cb620f55aca288e3255646f75384bcfd7a8d5d63104f69d26df546": { @@ -12767,13 +15767,16 @@ "839030474f427a460a6acfb9a8caa7662e1cd0c337e35995054bd2c956ad05d2": { "706368d544074057b5fc0c6009711a33093a9475013e238b17fc5efaa65612d1": { "jp": { - 
"updatedAt": "2025-12-02T22:57:28.572Z" + "updatedAt": "2025-12-04T20:16:57.301Z", + "postProcessHash": "9c97c196dc45ecf9fa55e6869537f8c510762e56c80e711f03dd412295366c1a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.573Z" + "updatedAt": "2025-12-04T20:16:57.302Z", + "postProcessHash": "67b8e340c665026a24c580b7982ce697638efb626d0c2b3668b3755abf8b3754" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.576Z" + "updatedAt": "2025-12-04T20:16:57.306Z", + "postProcessHash": "868861b6480309268a332e8a4d81284cf6e37f3b30970494e91deef98da56de2" } }, "06b19ed602eff6e0f4c0b38b69d416263621f402f6591d9f78725c9fb8213249": { @@ -12791,13 +15794,16 @@ "90511d719daa226bb864d0d2bb0fb993971dffcc30b3fda0d86ebc7ff7157a9f": { "888beb35b0eff4785517b52bba7c01e8651e6b39b235dcf0f4d1b64d155f5311": { "jp": { - "updatedAt": "2025-12-02T22:57:28.569Z" + "updatedAt": "2025-12-04T20:16:57.299Z", + "postProcessHash": "c9b80db8eaa6f8f4ed9f8caadc4306719738e0d1e89ee1753f562bbbfb2e9b8e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.567Z" + "updatedAt": "2025-12-04T20:16:57.298Z", + "postProcessHash": "5f9112578336a20fbb7b3ce7324b3a9313ed74178b8e520b33f9687b268a28f5" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.596Z" + "updatedAt": "2025-12-04T20:16:57.305Z", + "postProcessHash": "51bc3243fbc66b24886f9c4b5095800777a8ec8a83556c10de526244e57ff0da" } }, "1921b13e0a9667a216009da681a38ff65bb6e8e5e69fad4427f31e0dec85b1b7": { @@ -12815,26 +15821,32 @@ "a0e5cd4bbd52095f645996d5a20cc34d462aed2b014ca882138e4ede52f7b410": { "b82f6c4650551ebe5f3c0e03e15ad59d0e9d79edf78e121c65d4de264d1e000e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.570Z" + "updatedAt": "2025-12-04T20:16:57.299Z", + "postProcessHash": "351078a5538a9f6d285579f25a5fae91dd088c6353d1e46d3545616a0cde1ce7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.571Z" + "updatedAt": "2025-12-04T20:16:57.299Z", + "postProcessHash": "7af5dfa84bd1a3b103a94ed818d28d9337e89689410cfac51b8d94c264146d1f" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.506Z" + 
"updatedAt": "2025-12-04T20:16:57.280Z", + "postProcessHash": "e0570f566f1f50b92950d535c27450043b7bb8302d6bbcde7e9b145e0ecd04a0" } } }, "a65902363effe5836b0d9b1a3304f47945b1560915ce6fb45c560e678eb9a539": { "559af2b30070d162a884137e78494631c1fb2dc0f934eb9c73be1908b21f1a29": { "jp": { - "updatedAt": "2025-12-02T22:57:28.579Z" + "updatedAt": "2025-12-04T20:16:57.310Z", + "postProcessHash": "0cdf2a1059f45ae55415862aeba44b5e47925a607e4d77b012f5486531cc4ce4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.574Z" + "updatedAt": "2025-12-04T20:16:57.304Z", + "postProcessHash": "e0b1b777bdf85bf3fcbf38f505ede90154215315a38144ec37db37d9be3c1de0" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.577Z" + "updatedAt": "2025-12-04T20:16:57.309Z", + "postProcessHash": "b2c7a87f4087af51695b0055a8903a52792cae36dd0eb232877c7ed54b9971de" } }, "5dd603d0ea09ab4c18610adfac733616566b9465cbd159ab38037b65cf3ef036": { @@ -12852,13 +15864,16 @@ "b52e68b0fa137214aee6134202f0428952a2f49c83cef796e483c36598106cd9": { "804c074882c5fb62e56fb916010e71aa86f56b3a61ac2796d9e2b882c4043025": { "jp": { - "updatedAt": "2025-12-02T22:57:28.595Z" + "updatedAt": "2025-12-04T20:16:57.304Z", + "postProcessHash": "818fa8b5d240dc80887c8349e6e3845bb76058570e5be764cd801e19c9c65cff" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.602Z" + "updatedAt": "2025-12-04T20:16:57.330Z", + "postProcessHash": "66db6be77d7cbb3817249eafac2734b9261ebb2ff1aeda609243e85d438e9bf9" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.590Z" + "updatedAt": "2025-12-04T20:16:57.300Z", + "postProcessHash": "e92899b6039da3f5253b3ed7cc11394438828ed1de16cf709ea21ecbf7bf3388" } }, "6a2d3b6f6eef53b77da900c4d9383e4c650a1b67df3b4bffcf8c1c982c61e7b0": { @@ -12876,26 +15891,32 @@ "bbbf8ab907626ae0bd4c2e7f8f1e1a30e187356616b813a7f2bafdcb968b16e9": { "64de149ea6c99450b1d0a61247789522cc099815c912ed33f53b378aaf837bbb": { "jp": { - "updatedAt": "2025-12-02T22:57:28.576Z" + "updatedAt": "2025-12-04T20:16:57.306Z", + "postProcessHash": 
"c5e855930f492d837f72b3798e758cbd66b97ae48f27cc8069748399c06082f9" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.572Z" + "updatedAt": "2025-12-04T20:16:57.301Z", + "postProcessHash": "5f8cbe0541de96e1dc1605797dccf4713a5e2dbea234fb1df77d9c34dcb60122" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.573Z" + "updatedAt": "2025-12-04T20:16:57.302Z", + "postProcessHash": "050be624539858c46962c96137072950834fdcbae08498139e6ed17ecddca4d9" } } }, "bcc86da4edd4f06169f90746ecfdbbcb5c15d530af9bb41d9716c0c160095c27": { "84ddeeec966f0e2dfd116753b726d6198217ab15321a4c0ae1b82e6a8f3e1e66": { "jp": { - "updatedAt": "2025-12-02T22:57:28.565Z" + "updatedAt": "2025-12-04T20:16:57.285Z", + "postProcessHash": "2aa69280cfb0677d90703141f55f84a615224c3a0026750f0fc94d0b54642d5d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.599Z" + "updatedAt": "2025-12-04T20:16:57.307Z", + "postProcessHash": "ea45e02a54de9f6813b22f7a95b70089a788d591c03afcacb39d09a65ddaec3b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.593Z" + "updatedAt": "2025-12-04T20:16:57.302Z", + "postProcessHash": "d60f2bfdbd4e141c75ca6b3ed047c27426561db76c9d0be5cf428f82d131dcde" } }, "805a3c63750885bfc49f7e57abe2e015684ddc6eb6b23a0704a589da9585ba31": { @@ -12913,13 +15934,16 @@ "be5a795a34e525ece1f0651b8ec65280cd3f71026a239b44cb087800474d6992": { "1f1def9199237717c6e401f064f97475739dc42921597c57ebe5a793493c5a97": { "jp": { - "updatedAt": "2025-12-02T22:57:28.602Z" + "updatedAt": "2025-12-04T20:16:57.330Z", + "postProcessHash": "d66d3d33b10c080bd3677908c49e8eb91801f2f349e5148a82cfcc41e8ec78d3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.600Z" + "updatedAt": "2025-12-04T20:16:57.308Z", + "postProcessHash": "5b3b89832275f7e11cc13ba35e9588b1171d1e0c08a5a319623aec4ecb87bca8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.598Z" + "updatedAt": "2025-12-04T20:16:57.307Z", + "postProcessHash": "f37f4b5df617bf45b1c7a81b54fd0877b33e1aa1f63bbf839b382dbe53e8b67e" } }, "ab85b9c6ab0099af15f4a6be42857b06c4aac25bf43a4a5260304fb4ff3e6f6e": { @@ 
-12937,13 +15961,16 @@ "c3c4a5cfc613b8b144029f13d913022c2d41ebc3c333e2fa61ed8d2f0df5a81b": { "e66af8a0cfcf657334fb0dec46bb2c8ae44a17ff27be743255dec8ebd2d523dd": { "jp": { - "updatedAt": "2025-12-02T22:57:28.607Z" + "updatedAt": "2025-12-04T20:16:57.335Z", + "postProcessHash": "4b4746ed04b10d02094cd254d0cfae8548d7af8648413ab5655e62af5f080f1a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.607Z" + "updatedAt": "2025-12-04T20:16:57.335Z", + "postProcessHash": "7b9754ed156c08b7b2f0b0a7ab299d93790b9e2e7c27009fd53c974476edf274" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.606Z" + "updatedAt": "2025-12-04T20:16:57.334Z", + "postProcessHash": "a174619288f6c43c4957dbdb012c3fdd5d39bb9980a3bf7a5a6eb32531b6ca65" } }, "f9910c6a76a99ff49c7ffee4a3987ae9207e8d7db851b61ec2efe4f6c5c50886": { @@ -12961,13 +15988,16 @@ "d559f4bb7e0e75b052f6989f63565615397e09d8f05bc7535ae634a02281b78a": { "41bad05d87569563af64a625d8b65a4688fa0a40d7e5a6a9a09b635d45ce52a7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.583Z" + "updatedAt": "2025-12-04T20:16:57.335Z", + "postProcessHash": "3fdd00612e70e3cae7a1e39d26c5b03ee9a92b5e2ceb932c72b8c0f7c033429b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.582Z" + "updatedAt": "2025-12-04T20:16:57.311Z", + "postProcessHash": "5dfe755084c092d26a425feef43bb43801a946c6713e6c42ac8f6444b747d795" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.580Z" + "updatedAt": "2025-12-04T20:16:57.310Z", + "postProcessHash": "2d729e4c9d03043495c928b2f343404715f4dc36091cb098acd96a8a27691bcf" } }, "fdd897a9063250be652162e225953c203c05911a364b1cf87f47fa2b3ad6b297": { @@ -12985,39 +16015,48 @@ "e54eba7f7c2e2d6d452b2d73f4934f9ba018e180585b2bbdb2f9f14bb9b5510d": { "d88ed4dda50a3c9ee265b067c0abda94e3cba629d2d6c9a695d77d254c4cd372": { "jp": { - "updatedAt": "2025-12-02T22:57:53.506Z" + "updatedAt": "2025-12-04T20:16:57.280Z", + "postProcessHash": "cb3906b287649ad685240065e10ff08b549198661923fc9c00d343d61e7acb5b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.566Z" + "updatedAt": 
"2025-12-04T20:16:57.298Z", + "postProcessHash": "08e63e299dfd1eb81a8949cdc5c84ba8992dd13efef6e23cf22f27e03764013c" }, "zh": { - "updatedAt": "2025-12-02T22:57:53.540Z" + "updatedAt": "2025-12-04T20:16:57.283Z", + "postProcessHash": "051c6ba75ecb0214b36e6bbb1a617cf84820b61f2add4868164ca65f9a8d1f99" } } }, "f871545252cead274f81eec090f4a37c79aad733b302ff49eedc5242ba29b1cb": { "5ee24061522cb5a7ed68e5bfa59c658c0cb620eff70e3736f5e3800597533e77": { "jp": { - "updatedAt": "2025-12-02T22:57:53.539Z" + "updatedAt": "2025-12-04T20:16:57.281Z", + "postProcessHash": "97fa1940afda204bdfae7ef0698932fb1f20b77b3d6fb6646fa9497a9809f938" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.589Z" + "updatedAt": "2025-12-04T20:16:57.300Z", + "postProcessHash": "939fc5a4ad8e6732892b5f5dcf196d5b3fce6ba93fe2421ee64635734622149b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.589Z" + "updatedAt": "2025-12-04T20:16:57.300Z", + "postProcessHash": "efd0113977183b8a85d939d6e198e8e58912431201e2fad5d94295dabc29f316" } } }, "faffe74d9a382667c3bae357941f229fd75f36539ad2c510a09ec0603f9a2fbe": { "6b948fe473605eff70a23fa450d8b860449256191d68e3b6b1702eb2624fdd18": { "jp": { - "updatedAt": "2025-12-02T22:57:28.575Z" + "updatedAt": "2025-12-04T20:16:57.305Z", + "postProcessHash": "fce038ac7eb9a9aea16ee3f0df901d09ccc0fd593f417cba290813b2ce9f95f2" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.568Z" + "updatedAt": "2025-12-04T20:16:57.298Z", + "postProcessHash": "5ca99c96764a95d0521b8cd6739cfd4b307ca883c1f9a78b8a30a951b7be8691" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.570Z" + "updatedAt": "2025-12-04T20:16:57.299Z", + "postProcessHash": "5d303650a888e4ab0fa6aab3d5ffeecc10fb4e0fb75f0be41fa1964f7542915f" } }, "295554629ad06cadfbea57e08411547a44046d88bbc5cb20b34c13534fca808f": { @@ -13035,52 +16074,64 @@ "00f0f8e4c4cba686bdd32c7eb510c5ff9cf2847654153d708f69ef3d1fae55b2": { "4cdabdb9af849dd79c526565751107e9b1abf0b12889130ad0f45424328feb65": { "jp": { - "updatedAt": "2025-12-02T22:57:28.636Z" + "updatedAt": 
"2025-12-04T20:16:57.323Z", + "postProcessHash": "1d8f17e0cac25caf1c0fd5c0370b14b24110ea31dfb59586ea960ef62d989832" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.004Z" + "updatedAt": "2025-12-04T20:16:57.350Z", + "postProcessHash": "377925c9622a1c4cbbb5772d566fe5ac2ead1d0b8389bbac225a1fec2af160ff" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.001Z" + "updatedAt": "2025-12-04T20:16:57.325Z", + "postProcessHash": "137d9ed60942f9e09678b18d11c2cf0d0aacec4ab7b7ffbc00710b4453d361b9" } } }, "0819a7d3c5f71c0454ca26bc207870bf57571e75b815f9e6048c755eba88da5b": { "7c183351205668c7bd2a340b5ce1c7a91fbae1b7555a939a4d8e6611fda87e09": { "jp": { - "updatedAt": "2025-12-02T22:57:13.008Z" + "updatedAt": "2025-12-04T20:16:57.353Z", + "postProcessHash": "3e53250002a908508ba5bfd86b8a9ee2d3f4bda26965ef6075c0213a21787f10" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.004Z" + "updatedAt": "2025-12-04T20:16:57.350Z", + "postProcessHash": "7c340f50d95a1d207492ace22af4f5fdf05bc8d36ce267e825b8dadc94e19945" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.635Z" + "updatedAt": "2025-12-04T20:16:57.322Z", + "postProcessHash": "e68a92500a2bae965d138389a01743d8079b3a53c42959de8ffc79007fed9ad0" } } }, "0e624ceaf217ed28aa49746f8a0d8e6f11f50144de84c79c5bfc3cee61b7f1a3": { "2c646c9eed127c879e1e79d90542ee56c28b87e87984ce2e15248bed89ca7aa7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.636Z" + "updatedAt": "2025-12-04T20:16:57.323Z", + "postProcessHash": "76364bd2517931a7a74ac987b1cf2738c8bcbafac975d32d17be16f77e0c6c8e" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.999Z" + "updatedAt": "2025-12-04T20:16:57.324Z", + "postProcessHash": "a897092c812aae90c34411e0bdb591a819ab56246b4a32160c8322e9cb45b5dd" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.006Z" + "updatedAt": "2025-12-04T20:16:57.351Z", + "postProcessHash": "def4e4e96d601aa5e530a0ecf147afc5c2e13b889b6dcc5b52c7e886a890a6c1" } } }, "224c18d31f8261a76b7a8b634ae516b4db00ea56c8386a0105219e27c840b99a": { 
"50eb9caa6eab918f879445fa3068f5225b4c5a8c96b081ac18c8bcad6ac27df7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.626Z" + "updatedAt": "2025-12-04T20:16:57.332Z", + "postProcessHash": "b4e35146f8a449507e0f86cbceef0422ed1250d4c8b994feebb1dc0fc866cc9e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.625Z" + "updatedAt": "2025-12-04T20:16:57.331Z", + "postProcessHash": "0bdc36c98a8278ae81cf854218b431439b21255a23afaea6dbc92dba4901f27c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.622Z" + "updatedAt": "2025-12-04T20:16:57.328Z", + "postProcessHash": "cfc8d50fb0dfd8eb1fb0e85cd465d9ce72aa4bd03941be886681eb2fb1542e75" } }, "ee5e9f3162a1197524aca6e97cf739a11ca65612be1b597e70bf103f7727993c": { @@ -13098,26 +16149,32 @@ "2395cf7e448505fe5dff52c83b83b0eb98f08d6b30b33dff50d6380fa7e5932f": { "773ced00aebc468e3a46c4cc78b523aab8880ec08d2fdf077d970783ea2663cf": { "jp": { - "updatedAt": "2025-12-02T22:57:28.624Z" + "updatedAt": "2025-12-04T20:16:57.329Z", + "postProcessHash": "c49850125fff8ee458e8f0de4f26d446c79789229afed8976e76cb416cf0ea87" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.613Z" + "updatedAt": "2025-12-04T20:16:57.321Z", + "postProcessHash": "45c1c37276d5c33a5f5b3d6b7adc05b35510be8963cfc596a65ad38d4a373f57" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.617Z" + "updatedAt": "2025-12-04T20:16:57.325Z", + "postProcessHash": "48b20004d870affe90e46b6c2dd910d51d8ca94b6c6f348d5aa9bfa118fc4b05" } } }, "4267fd6d09b46579b7a05881566bd86bc360158a24ce3eb410c8492cdc79fb22": { "8ac15a7c8118a1f822e5766749040b60de68b96aa79518290966e5ca1c45e8d2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.606Z" + "updatedAt": "2025-12-04T20:16:57.333Z", + "postProcessHash": "c366cf98666f63abfcc503326fcb195d5f1aa6278e013dff18be4fd9ffc14aff" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.605Z" + "updatedAt": "2025-12-04T20:16:57.333Z", + "postProcessHash": "2997f04e7e25d4995a728ab6a4b82077c83dbc186b07c5d120be889f94171ca9" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.606Z" + "updatedAt": 
"2025-12-04T20:16:57.334Z", + "postProcessHash": "ac1cb46371140d2456f213d4d641429e1bf92943cdcd5d9fcb4d059c5256c9ce" } }, "b0b56f3e8d31dcc0efeac6d2feb5c7674647f73163b6e6bc288532a7e63ee696": { @@ -13135,13 +16192,16 @@ "56433df9b9399e37671c12717a7e397ab2aec3e086e226fcf8bb3a338e336f38": { "899571967dfce1a8941dff3771b1f23612d934928bb1aef923cfe5bf35044d6d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.622Z" + "updatedAt": "2025-12-04T20:16:57.328Z", + "postProcessHash": "bd379b166fc7006f740992c64b23f90eb06403a72369dafc7fc7d3046ffa91c2" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.609Z" + "updatedAt": "2025-12-04T20:16:57.296Z", + "postProcessHash": "23a3b084560a0f780be6d8859b515f2e1affc8a675ae74e6659af024a9c12a3a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.614Z" + "updatedAt": "2025-12-04T20:16:57.322Z", + "postProcessHash": "d56aefd816fdd7b0eab0fc11d9979290982a331c22f971264cf0ada5297a8876" } }, "01b3c2c46b1f5b875a5d0c20393042830caf8d92a7c7820943fa80463f760cdd": { @@ -13159,13 +16219,16 @@ "7b92c9515ab243345c2edd443a9f36e432abeb01df31d0d197db37f7733b65f1": { "5ee9cf90ed14f1a1e4f53532e4ddf568dfdf018045fcb0f4857e0860a4f59d17": { "jp": { - "updatedAt": "2025-12-02T22:57:28.603Z" + "updatedAt": "2025-12-04T20:16:57.331Z", + "postProcessHash": "161bcac18157e7016c1d54ff844edc6315fd2290d232511d44bb1ce170e372b5" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.601Z" + "updatedAt": "2025-12-04T20:16:57.308Z", + "postProcessHash": "535394d1290b9eed0e08b378daf85feefcb8bf0f956a273a2bc0fcc32f9f2676" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.604Z" + "updatedAt": "2025-12-04T20:16:57.332Z", + "postProcessHash": "310ac316a791e9d418086b595bc3147dc80c881b2490d7713532656df473cfae" } }, "d2a0556b2ff529a2a6bc6f0f1474e276bb5cf693229f1efb4d5f047dca2bba21": { @@ -13183,13 +16246,16 @@ "8150184b8463d89e5a92277a564104c399220d435ffb6ec7e6d2560672bb49d6": { "65ef97770913247f917baecc24d78605da1a7fe09e65d74352bcc872ecee75f3": { "jp": { - "updatedAt": "2025-12-02T22:57:28.603Z" + 
"updatedAt": "2025-12-04T20:16:57.330Z", + "postProcessHash": "92c9a6b4a7e010f86443edb05cde0f6ba899ce91c2b558843a1d1a5599988f52" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.599Z" + "updatedAt": "2025-12-04T20:16:57.328Z", + "postProcessHash": "83752fc424e879d7ea5539b529314ea814bd9fd9bdf67fd1433d9ad1d178065d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.598Z" + "updatedAt": "2025-12-04T20:16:57.328Z", + "postProcessHash": "8582217a8e01ee1ffa96b1f4dbfb64d0bbc5640030ebbc75079920b1493281ac" } }, "6156562270e5d113b2b835c17a4908c76e95978956ef4a57eaa61e1eed78520e": { @@ -13207,13 +16273,16 @@ "8af19e1098601767cbf89d205cfc0d3cd2c79ba5ae84fa11d9cea6cc91850951": { "1ea6cb562e40b65d31dc3a004ea35849a51555792b4b7af61deb33ec7dcef5a2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.621Z" + "updatedAt": "2025-12-04T20:16:57.327Z", + "postProcessHash": "d2bdd617c67f6e047b9d8a6df5254f8097312dcf4172407144e09bd1e2ed87b3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.623Z" + "updatedAt": "2025-12-04T20:16:57.329Z", + "postProcessHash": "4f88fc8506e03f3d67d8c61bf8fdc831ea8ae1c9ac4e9a80508c79029ccf91f7" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.623Z" + "updatedAt": "2025-12-04T20:16:57.329Z", + "postProcessHash": "fa249089ef796bdaacd00ecc2a1bb51cca8df31e75cfeca32c2a6f2c72ba7889" } }, "53c40fa63ea8e3ce50b35d7c9ab69f8ef252980df0deba29ae32d97edd799b2e": { @@ -13231,26 +16300,32 @@ "8fad6511e155deebc0c7b7055ddf993b7213668bd651d77b46f4fef11c363990": { "00a2be5a931770b44b5dabd0013f35d169228fbee45d460fc63c58245bf78264": { "jp": { - "updatedAt": "2025-12-02T22:57:28.594Z" + "updatedAt": "2025-12-04T20:16:57.303Z", + "postProcessHash": "7a9621f3b4e8a8ffbe96ba5464fe444683347f2ae00f3d0b688639c87f4aa0f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.592Z" + "updatedAt": "2025-12-04T20:16:57.302Z", + "postProcessHash": "d2b05f7c0d65da681eea171c5c4956b1a7c5fb6f7ee665e58b6afad06e39d39a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.590Z" + "updatedAt": "2025-12-04T20:16:57.300Z", + 
"postProcessHash": "3f0e8016cfe2a85865be2a746b1482dd76a2239fd9394b925a7221ae9e63c7b1" } } }, "9c4e28b8ca9495afc149856472ed437de0b32bd3bd6c56ac4a4d4e1b43fe6351": { "fac18639f9943a62c5d07663c6a681ee610f19e3b716dd3c1889c591380616e7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.596Z" + "updatedAt": "2025-12-04T20:16:57.306Z", + "postProcessHash": "58b569dba3d014cedd2df4d5d39a348da84fce12c256a227aabd07a5edb33817" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.602Z" + "updatedAt": "2025-12-04T20:16:57.330Z", + "postProcessHash": "648df7d5555e5874f5757e96751cc39ef2af1a21f1480b6829ec5fe12171735d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.596Z" + "updatedAt": "2025-12-04T20:16:57.305Z", + "postProcessHash": "389058faec7edec8862e8164ba94b80939e9868d11b2c0331710ac7e693879a7" } }, "3076a78d7676e145757a5c63a2dae4d7c0c748942ad74b8e147613b5ae9c6a2f": { @@ -13268,26 +16343,32 @@ "9fd477532adc3dadf2dfed8071d354140eb7b667bd012aceca5476a9b5aeb7f1": { "cc0409c62d9e4b650b3ab8a4a2c2ea56b508c8a34ed0235cccc67f60cb557c17": { "jp": { - "updatedAt": "2025-12-02T22:57:28.591Z" + "updatedAt": "2025-12-04T20:16:57.301Z", + "postProcessHash": "795811d806fd285a9c28b4f7f6543ac1c43c30d3e35570371977dad1a0379382" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.594Z" + "updatedAt": "2025-12-04T20:16:57.303Z", + "postProcessHash": "657bb54c5e6324ec9d266a12b27863d1aac5bce2a1bf04dca26dcbada28a1cc7" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.597Z" + "updatedAt": "2025-12-04T20:16:57.306Z", + "postProcessHash": "1b07e99d1640c3ed4002128f18be7436f5f8bab510fdc88fb0769d02a374d292" } } }, "a6ae8bc3564075f789b35e0d49da8a62594a8bb06f9690bba789ef78cfc292e9": { "1bf52ed247c8937e8664e15b1e9b1bcd0e5ac293d7ee7d115a60dc712f220e80": { "jp": { - "updatedAt": "2025-12-02T22:57:28.597Z" + "updatedAt": "2025-12-04T20:16:57.306Z", + "postProcessHash": "ec91e4bab186e82dc52ba5563d738cfac173d730298d7ab68c0c278e3ea46c73" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.595Z" + "updatedAt": "2025-12-04T20:16:57.304Z", + 
"postProcessHash": "a992ba42669168eeca7813d412f3efa67e8c9b5cff927b784f366f4de0c0e64a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.598Z" + "updatedAt": "2025-12-04T20:16:57.307Z", + "postProcessHash": "5b4ed66ab55b53826e2d247641da956d91fb8747c6d7c59308354d22872e6c8b" } }, "c338f65f8d100cd47e3a31d6f9e78ba015c1346b791cfa3ff6677795952a0807": { @@ -13305,39 +16386,48 @@ "b24da7e78415a317d4fd792bce74b8acf47ca7b376eb80c5d2a81e9b874b5ec9": { "1b40db05914f87442600e04da552a114b9d6566703fff238531bf2dce4b3fb81": { "jp": { - "updatedAt": "2025-12-02T22:57:28.616Z" + "updatedAt": "2025-12-04T20:16:57.324Z", + "postProcessHash": "0a5adcab2242c263ce324b570718ecbc305b0401480411587706453341a363b8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.617Z" + "updatedAt": "2025-12-04T20:16:57.325Z", + "postProcessHash": "1b6794ee7249b73f13435d9b86be2a71cc8363c287c58aa727fd089b938e2220" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.614Z" + "updatedAt": "2025-12-04T20:16:57.323Z", + "postProcessHash": "19c2359ec99db4e2a45dad399cffa393e9d1be86afead98aee821b60eb0ef188" } } }, "bd066e14efb9c286ea6f6324b04ea5e37363afb94dde1cda3efc2008e77fe6c2": { "ac1b069ca0882ed4666acf6095038e0b7cb288b8596cbf3b1ce1e54a9df05e43": { "jp": { - "updatedAt": "2025-12-02T22:57:28.592Z" + "updatedAt": "2025-12-04T20:16:57.301Z", + "postProcessHash": "4c4daef5f4db94d5ba3dfb3342a432d6126d936c6b24aad299abf8d454f59253" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.591Z" + "updatedAt": "2025-12-04T20:16:57.301Z", + "postProcessHash": "cd4c4a9dfad3fb0b15b19a4936f9b9d3c350bf3ee6672cd306ac3b2ee74dea43" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.604Z" + "updatedAt": "2025-12-04T20:16:57.332Z", + "postProcessHash": "2b116109b3b219cc6ba45dc1b4093ddff54f1d36d670c246283cf4b443f4ecad" } } }, "bf91c67796c6e32777f0840f1d8c0dac89f35de0d14c1139dd6dbf40f832fc76": { "e06158989a858c27f8bd73bda291193cf471adc5e5089c7af3495e699b7133a8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.590Z" + "updatedAt": "2025-12-04T20:16:57.300Z", + 
"postProcessHash": "297e98ea3aad68e9ffbccc0bf7b02257427f6c4ff887359eeb3c67e1b0ea5e6e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.593Z" + "updatedAt": "2025-12-04T20:16:57.303Z", + "postProcessHash": "664a404026abb06703cd561aa2718ad3784397a0b483ed8e10d1c1cc0838b285" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.595Z" + "updatedAt": "2025-12-04T20:16:57.304Z", + "postProcessHash": "c97127d0e2c57a240db3ce4bdae420f78a04a8cd0e02e996609158ed5b232007" } }, "c648cd50d62fa017bf9f75cb6b83f2c181571125b4792b76c86198da57d6b234": { @@ -13355,39 +16445,48 @@ "ccb6f7b23e140ff82e19fc0391ef805c0f15507170cf5f60a78b0ea7f7bcf295": { "7b7eb66a4c1f465cbb23aa2d3f377abddba9aaa6d13866786810216306d2eb6e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.592Z" + "updatedAt": "2025-12-04T20:16:57.302Z", + "postProcessHash": "1581f26ce67088f94dd7ad8c4aa97df6558268af1f7e3fe67f6d9b7e0da0dde3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.567Z" + "updatedAt": "2025-12-04T20:16:57.298Z", + "postProcessHash": "5248910e7f231cf89e4fa66478d436cb6cb245d90a48485eb1ddf1e004f32c1a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.591Z" + "updatedAt": "2025-12-04T20:16:57.301Z", + "postProcessHash": "184615b9cafb1ae587bacf076cbf14ef80f2781d19660d9337860b4329913a58" } } }, "d79bc535529875a738bd248165a718dae8d93446b748ae71439f9b822c83972c": { "1a78ff0ba0c6860dc7ce6357e1df29d3b791afd1f3ea81e2713f99d9dd8d0199": { "jp": { - "updatedAt": "2025-12-02T22:57:28.614Z" + "updatedAt": "2025-12-04T20:16:57.321Z", + "postProcessHash": "a2d7f5974b6f454ac11680a0bd63edf78ba173f1a97f889a136c1d2e83b28a44" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.619Z" + "updatedAt": "2025-12-04T20:16:57.327Z", + "postProcessHash": "daf68967a813f6644d6e2ed1d1363a0eea5c6b9bf84953ca583ad91f17ee721e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.615Z" + "updatedAt": "2025-12-04T20:16:57.324Z", + "postProcessHash": "5861a3e5141fa80c3580faa362da408f3c92042386dd8af88a9423090167520f" } } }, 
"ddfce8a030ea9a41c75ffaf59f53e35d31dae419777bf895033c3bb59f724f4f": { "208b0171b9edca76f341fabf5a8721e36aa0acf7bc2a179c676bc7f832d4decf": { "jp": { - "updatedAt": "2025-12-02T22:57:28.625Z" + "updatedAt": "2025-12-04T20:16:57.331Z", + "postProcessHash": "357e8595e939e31858b9f811586cad754a77c7cdca0a1a6c9af41affe501f19f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.626Z" + "updatedAt": "2025-12-04T20:16:57.332Z", + "postProcessHash": "9157b73a252cb4140ce2e2eee4f438d01f02b5f1ede2aed8797dd98acb13effc" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.621Z" + "updatedAt": "2025-12-04T20:16:57.327Z", + "postProcessHash": "fc92b9ba10dd79e16cb89a683c3085d94b207593160f487dc07df134243d4744" } }, "703c345dcb09b3d9c115ac6d960bc44df5ebbd21bde9ddaeb3fae08a63b1749a": { @@ -13405,13 +16504,16 @@ "f181f03d87970ee159e60beef4cf41dfdb497fd8d950cab4164f13908b4a893c": { "7faa2cfd4739cd08c7746e2c9d5cf78d1e43b82946d2327a789aa6199df45737": { "jp": { - "updatedAt": "2025-12-02T22:57:28.618Z" + "updatedAt": "2025-12-04T20:16:57.326Z", + "postProcessHash": "602b1466c243d91c9d06ec5d0c0e3a9c877613ca5c2482a39999a77787b5f6ae" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.627Z" + "updatedAt": "2025-12-04T20:16:57.332Z", + "postProcessHash": "9a1f14dca77ed7e578f38c48f48b6a383bd39b4aec0dbf0fd60eabc925cbb296" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.626Z" + "updatedAt": "2025-12-04T20:16:57.332Z", + "postProcessHash": "7558b4f8026ea47d8185df84c2f058c0ae9eeadeff6df54f3550b138e69c0e3d" } }, "01426a0b27a8046b6721227a23a347197804e15d2e71c528d46080c264354921": { @@ -13429,13 +16531,16 @@ "0d57e95520df30873578c0c59ade72141faf51c3e622951bb9026968b4b2a96f": { "7431e15bce3d11f80a9c93dd6b0ab34918f7076002de3ecf334f6d12092d564c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.609Z" + "updatedAt": "2025-12-04T20:16:57.319Z", + "postProcessHash": "346503cd98cb08839b02324043bc108b686e80fe75bbdef3c6dc2b34e83bf9cd" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.005Z" + "updatedAt": "2025-12-04T20:16:57.350Z", + 
"postProcessHash": "a86b66972b7ab41321fc046a8a42b697c9c3096e4fb94b66f2d3cb5b576d6771" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.007Z" + "updatedAt": "2025-12-04T20:16:57.351Z", + "postProcessHash": "ca93ad94377b7e2978d0b8d830b476ccb9af1c75375fc16683554df759d54940" } }, "5f3840a29e61dbec4841a38be6722a8b6d3bc7281cc7d9139a2e112a47d2f251": { @@ -13453,26 +16558,32 @@ "0f2ea76e0db5a6d5b78533ea69f6bf742d59e3c92cd69159341e1c7049a2aa97": { "9da14b2a7b04a5c4ff51174e32fb113e58f6e2c9b60265a9616f729614a2c9ba": { "jp": { - "updatedAt": "2025-12-02T22:57:13.010Z" + "updatedAt": "2025-12-04T20:16:57.362Z", + "postProcessHash": "ae56ba378e0fc4cd74cd7fc3e8ebaa9485d6d22b06f814418358b25e1232f508" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.006Z" + "updatedAt": "2025-12-04T20:16:57.351Z", + "postProcessHash": "2e966ee52dac97d0722a461c678867a698fd881e0a5287f079313c986211d2b8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.633Z" + "updatedAt": "2025-12-04T20:16:57.321Z", + "postProcessHash": "6cbf64ddba14d807ea74c79dbcd278c80c611fa7242e48b590a287878ff9779f" } } }, "10434de80b8f7631a41e8302a81a53ee7a3ba2207f6b8c0e029fb257d3df5290": { "f5fa789355a9b2e73239700688ffe0178dbe2df95ecb160944f05217ee8ad885": { "jp": { - "updatedAt": "2025-12-02T22:57:13.032Z" + "updatedAt": "2025-12-04T20:16:57.373Z", + "postProcessHash": "ec58a94ea222bccd3722e1d38c0087048f89d79e13ebd58f2d2448d5df5dd779" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.637Z" + "updatedAt": "2025-12-04T20:16:57.389Z", + "postProcessHash": "d53cdac6cc40c72ee1db4a2f45e9f98ceca360c1c3d5873c433a3e06f2ba3d84" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.638Z" + "updatedAt": "2025-12-04T20:16:57.391Z", + "postProcessHash": "2ccc2715cbec20718a7d0f36ce3519c5b2ac5f080fb3fe4413a3c4ed9e110249" } }, "b92611dd6a3b9c353cc2b1383d42601917df331aae94df51cb078400430f456b": { @@ -13490,13 +16601,16 @@ "11f2e3a49b018a860171016a699fa740752c02bc0aa8f5f79a0c57498338ec5e": { "9a24d918c600da936b99eeeae5ccbfc49470bf55f4b8fb491fe3cfe11f944857": { 
"jp": { - "updatedAt": "2025-12-02T22:57:13.016Z" + "updatedAt": "2025-12-04T20:16:57.377Z", + "postProcessHash": "e4366dce510cc4f48f0d00369e03c85adef4bfafe85989d2196dd532788d4368" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.015Z" + "updatedAt": "2025-12-04T20:16:57.376Z", + "postProcessHash": "6a465469d0d34f18f1eb8105d292a3f41535f05c48fbf4aa33662309d59ae45a" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.015Z" + "updatedAt": "2025-12-04T20:16:57.376Z", + "postProcessHash": "2fffe74e37a38312472d291a24aacab6175711851f3af240ab29002e3985099b" } }, "e5c755dea01abaf11bf73c6cfd13729ae4bfa33d43a0a52ea3e0173460d3b39d": { @@ -13514,13 +16628,16 @@ "259e682225d9b71ca3ea983216e57cd82c14b1caf25f00ea510ceadd3a70a0a7": { "e92b39158d271364bd947f52f560eff998ddd5a35107c0c0db7941eefd4f7424": { "jp": { - "updatedAt": "2025-12-02T22:57:28.627Z" + "updatedAt": "2025-12-04T20:16:57.333Z", + "postProcessHash": "2c89c66fd842068f81582782f55bf3db7be45f312e194edd1ecf24710a3c3cf6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.627Z" + "updatedAt": "2025-12-04T20:16:57.333Z", + "postProcessHash": "05a5cc22f9465e028cab2e7e34dcf83f63d324a763ea677cf20d28cef1efbcca" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.627Z" + "updatedAt": "2025-12-04T20:16:57.332Z", + "postProcessHash": "271c4a904089c31acc201d494a15da7ceaf4fe5e7fc5b7d5829a38fe7aa462da" } }, "e91d3b791b454b1a0ef05b57d40abdbf146ab1474ff1aeb70caabf9e4d32b816": { @@ -13538,39 +16655,48 @@ "3b9d54215d217a013fc4c62df11f26627fb8449a0489b74cc0f54e6b67f41ecc": { "f789cb25007915b6d83be12f4ecf35805e8a487063a7a59b47c497602ae41559": { "jp": { - "updatedAt": "2025-12-02T22:57:13.001Z" + "updatedAt": "2025-12-04T20:16:57.348Z", + "postProcessHash": "b93942550d4f13e4608e61c4919aff309878cec0c1f3b8531deae7b6aa1d0883" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.611Z" + "updatedAt": "2025-12-04T20:16:57.319Z", + "postProcessHash": "3bed2cbe08157f7e65720bd4f3d213707445a409074fe6e534276d2d247b5e17" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.635Z" + 
"updatedAt": "2025-12-04T20:16:57.322Z", + "postProcessHash": "dd6042b12bd38833372719ac100847e42452316c16514ca7774060fb1dfbca39" } } }, "45c1b7f8bb110c2b37f34cc31252826058699640eef30ff8486c08761af44c43": { "605cfdad7a54e1e2f7b6a9998f6bfa8f8ff7b6a25aaa39281d58591fed0758e5": { "jp": { - "updatedAt": "2025-12-02T22:57:28.613Z" + "updatedAt": "2025-12-04T20:16:57.321Z", + "postProcessHash": "fbf7e8253d0187de311233bddd5832411c03cd9064e4c0334022e4cb6341f709" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.616Z" + "updatedAt": "2025-12-04T20:16:57.325Z", + "postProcessHash": "b543df8a1170e4f9035430887993248daa94ef0ceec32433468a3f719cdabf62" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.615Z" + "updatedAt": "2025-12-04T20:16:57.324Z", + "postProcessHash": "6dd9f8078c3311a8a62a4ac2f43747ca728e659b147c423021a6d887ce8bc167" } } }, "4ebe189e35f19f43be4ad13fccce5f58bb2e700e31d412f381923ab97bb21792": { "11290a6b37b6e349be2a192bfb007c68cfed8458df5edab5bc48fab969b1ac0d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.631Z" + "updatedAt": "2025-12-04T20:16:57.376Z", + "postProcessHash": "f3aa7fb921a9bb6983e3ef3220919b8562cba6e543d9f210cc95c1f563be3050" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.630Z" + "updatedAt": "2025-12-04T20:16:57.375Z", + "postProcessHash": "159dd3d2341a402953f11c38a3f0c3dc82a5378a485b64a7b0883fd5e6cab857" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.631Z" + "updatedAt": "2025-12-04T20:16:57.347Z", + "postProcessHash": "32e61b90a0a20690ec1ba918fc1c6f026f77f55a2c7d9f57c630f958d7c55157" } }, "2885a781a007a7ada8f0db66ad248161e6af984d5a6d09191834d6a2543db968": { @@ -13588,26 +16714,32 @@ "57f74a21cf2fbbfbe54dc4c14d4c397429d32d51ea09651cbcba81a78f831e03": { "9aff12963c1e1db4b1b461b751a4d72394a3a26138c1713efd31eb628aa3b7c1": { "jp": { - "updatedAt": "2025-12-02T22:57:13.008Z" + "updatedAt": "2025-12-04T20:16:57.352Z", + "postProcessHash": "c38f2ec056ee4d641a31e2a3419d0f1655b3e1430a5bbb3702c23e038aa0e92d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.013Z" + 
"updatedAt": "2025-12-04T20:16:57.375Z", + "postProcessHash": "e17f998daf99f105c4b948bb0c3c451df9c8fd4bc6f5c96da5702a9d79d33e8d" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.012Z" + "updatedAt": "2025-12-04T20:16:57.374Z", + "postProcessHash": "78056a8cca964a79566a705fbab1ba64ece85d05dcd53fb92064ccd9580e7708" } } }, "5b953d0cf7f0899bb919bde841ec506eb51ea72c7ba0a6629bb4b5752d8de8e1": { "09d39aeaf27e0704134e70bcefb0e1beae4d8ae180b6967a491e3e56a5ed68c2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.624Z" + "updatedAt": "2025-12-04T20:16:57.329Z", + "postProcessHash": "55e3f290a454180d3fdebf903007754cdd354c8ab36a98108a836b9fe9ea8cf3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.623Z" + "updatedAt": "2025-12-04T20:16:57.329Z", + "postProcessHash": "c6e0f37164e3390410417375735500eccc2469268d6b5df33be2a6727c3f8923" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.619Z" + "updatedAt": "2025-12-04T20:16:57.326Z", + "postProcessHash": "ceee9f6d78a56dc582c09b2c0d3bd86a5952da0c0335803ecb67b1bdf5af210a" } }, "4576bd6efc4271a9bbfd7ff64a67e6a5cd8120b9608e1f0f96745079351d1a69": { @@ -13625,39 +16757,48 @@ "5e82ab99152b96f656e8dbc01527a2124dec1e8c721a629d4ba5aeccc219db56": { "4fe49458ceaccad1ac8e3af48d763a09070b1428ec46ac6e0a3b4c19aa2aff54": { "jp": { - "updatedAt": "2025-12-02T22:57:28.618Z" + "updatedAt": "2025-12-04T20:16:57.326Z", + "postProcessHash": "14cc4128b0b10a523a62d38a67e92df33fab8f0e8c30e0d1de1781422ba78c12" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.619Z" + "updatedAt": "2025-12-04T20:16:57.326Z", + "postProcessHash": "773aa462513cdb8bff054c5d168b838ace49fc6139e0481ef4c3cb63f416e2f4" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.624Z" + "updatedAt": "2025-12-04T20:16:57.330Z", + "postProcessHash": "182388194a1734545aab27af003dd4a42c7efa700ebd0f375787bd0eb3d5c3fa" } } }, "61901cc301281214293209e58b53b0298e1dcffad02805348907ec14f5a36253": { "9b549c4be17898687f84e0ef17ef02ef8a374450b44096f17620746288db980c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.637Z" + 
"updatedAt": "2025-12-04T20:16:57.323Z", + "postProcessHash": "bbdf4318da03da611fe8a31f48ca6010e63e573efcfd298fe76bca8becbfcfaf" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.611Z" + "updatedAt": "2025-12-04T20:16:57.377Z", + "postProcessHash": "a32ec351922de7bda0e40dadf1288310c24c77040bf83812b464c176fd479c75" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.635Z" + "updatedAt": "2025-12-04T20:16:57.322Z", + "postProcessHash": "0e2b65f8bea6a315c3a95a3143b9da2aaf5667649c9cc076a283a01d41e91553" } } }, "7da42e0930f95e1e717b1b7d09f8c9528d7652d99f593b105c4acb72f6fa5667": { "50932fd08d4699598cc688097e4912faf9007f416960c261fe76c585098ca41a": { "jp": { - "updatedAt": "2025-12-02T22:57:28.629Z" + "updatedAt": "2025-12-04T20:16:57.334Z", + "postProcessHash": "b9764312783be53293a093280762e4a4a1a97f24f855551c43380f07662f3692" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.628Z" + "updatedAt": "2025-12-04T20:16:57.333Z", + "postProcessHash": "8e42f2c096a711ff84c66197ce8f0c37d39295d71e5dac2c74cd49096cb4b8aa" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.629Z" + "updatedAt": "2025-12-04T20:16:57.334Z", + "postProcessHash": "cce7a1cb0691f50afb1b2e562535608c3bd265098dd6b39bc95d9814c506b99d" } }, "ecb585ec1fef87ae16a59e4392cad2214397ab76b4c1f98f967c69dae8f1c139": { @@ -13675,26 +16816,32 @@ "8232385318fcb8ae5ab151696b217b22f9436e7402f061c4116986347a039995": { "d6b3588b7d8f126d5702902b6c9d58f3929c5d5c37ec39e19523d2d8bfcab2e9": { "jp": { - "updatedAt": "2025-12-02T22:57:13.002Z" + "updatedAt": "2025-12-04T20:16:57.349Z", + "postProcessHash": "10b2ade9671e9b326e84a4c06046aa33e06de45d622395c9b51b9df823c2e748" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.631Z" + "updatedAt": "2025-12-04T20:16:57.320Z", + "postProcessHash": "ff255f1dc714e819a56aa6a6eb38719a9b3166987b295cdcde1fad75a89c6aa4" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.003Z" + "updatedAt": "2025-12-04T20:16:57.349Z", + "postProcessHash": "3c64b5b30f690e7bbc82f79f845431cf09db9d37bd4ed747fed72f3045b14546" } } }, 
"92dee0e9ff8e8566f9e9c069b27afdb02424f6cbe89f46ce3425000461a7553a": { "0bf98344271fffe8297f5e0b128fbd8fe78c79025f276feb745ffa937822b839": { "jp": { - "updatedAt": "2025-12-02T22:57:13.010Z" + "updatedAt": "2025-12-04T20:16:57.362Z", + "postProcessHash": "adab7cc27d91046c8b9688e59ef4d4c6540b31462a5e2fcca6c9ab8e8eb91ec9" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.013Z" + "updatedAt": "2025-12-04T20:16:57.374Z", + "postProcessHash": "241c1d1805d993fe97ce52ff50f86864fb6337c464ec3a63948f93807036a995" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.621Z" + "updatedAt": "2025-12-04T20:16:57.354Z", + "postProcessHash": "3e505cdcc2fadbf93a990a3d930aee9b9b622b44f554ba53467f21b1573d11ce" } }, "9f3903e67cba6261cb9be580a31bdd170498cc260e9c5d03c285d123770043ec": { @@ -13712,13 +16859,16 @@ "a4b6a047b28cc22275775b0dd79539a2be86c95aa7ced10a1b187f12caf79320": { "1c1a6afd778c1e57b53ba0589eef4c2e7796b728ad68d2fe6448ef393e873f11": { "jp": { - "updatedAt": "2025-12-02T22:57:28.609Z" + "updatedAt": "2025-12-04T20:16:57.297Z", + "postProcessHash": "db3ddb99e9dcb30395a25694148b2cd07f4da975c1947339f53203f1e3536cd6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.610Z" + "updatedAt": "2025-12-04T20:16:57.297Z", + "postProcessHash": "8393352f6720b065afa697e891047626b48bec1aa1ec8ce9a41e12c66bbbc093" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.615Z" + "updatedAt": "2025-12-04T20:16:57.323Z", + "postProcessHash": "e6696c664728c64b695908ef46e66ba27faff8384e25d2e4656c69b1c3b3db18" } }, "af460729a46f510430df896945c2d1083b7a1cba8c13e697cbe6e79e39464dfe": { @@ -13736,13 +16886,16 @@ "bab8d56663945997ddb532be83b22cfbee4c23f88c78951638579d4d2d0d0dc1": { "313348b374bfb31391b047b7169401cb2e79bf5da554b1b2fc8cc3592be17c35": { "ru": { - "updatedAt": "2025-12-02T22:57:13.016Z" + "updatedAt": "2025-12-04T20:16:57.376Z", + "postProcessHash": "e577668b5d021e62f0fe438888a24fcc3638ff72b21a81e4b728751baabcb865" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.014Z" + "updatedAt": "2025-12-04T20:16:57.375Z", + 
"postProcessHash": "534be383b39d8823bbab4b0204cd35646fb6e22e8e29359b248f53b7553ff03c" }, "jp": { - "updatedAt": "2025-12-02T22:57:13.015Z" + "updatedAt": "2025-12-04T20:16:57.376Z", + "postProcessHash": "2b54cec5e7a83e203f35486ff8cc4ff0346aa96c96f5f2b5f58509105751e972" } }, "8e64108c88e2501340620384ddf14dd58507b8e1c81219be6f6e610fe3d9c772": { @@ -13760,13 +16913,16 @@ "bb10891887cb78110e7cb4ceb74ff22432d01fac9a3bff7cdeeb1886f79b1a65": { "caa3bae4c975b756d6c9bef7d4ca4f1118fd3ff3418d4538a30aa4c9e33515f9": { "jp": { - "updatedAt": "2025-12-02T22:57:13.005Z" + "updatedAt": "2025-12-04T20:16:57.350Z", + "postProcessHash": "d3a4b69aec4bc641d6ac5ca52021bb192bf947527a6c168effdf177c4fb1a7fc" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.011Z" + "updatedAt": "2025-12-04T20:16:57.365Z", + "postProcessHash": "41336a080bc6c9e1bf72d61f10eb0b7e0e97d29de6c229a097af8f830ed3b352" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.003Z" + "updatedAt": "2025-12-04T20:16:57.349Z", + "postProcessHash": "ee47f6e1a1b7b8065cb762d132e35d9123919a45fccb6ea04c0d3b1817773627" } } }, @@ -13784,26 +16940,32 @@ }, "f874e3ae6b9b2413ff9c4415bbd53d217ecc53aa9c8754f7d8b43a840a56a1dd": { "zh": { - "updatedAt": "2025-12-02T22:57:28.632Z" + "updatedAt": "2025-12-04T20:16:57.320Z", + "postProcessHash": "e7bd76d537c68ab6b2e2c12672361ba0194e9257fdb22a5739c3450401bf9557" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.633Z" + "updatedAt": "2025-12-04T20:16:57.320Z", + "postProcessHash": "3e207ff861cb6feb7c9bd057f983a061043972097bd68ad51611e043f5766f0f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.633Z" + "updatedAt": "2025-12-04T20:16:57.320Z", + "postProcessHash": "accdb14b48bc008e9bdbb3adacf02fc78701b6866bcbd8b871502a567a9a5043" } } }, "d30ca35ab36b18a178b446411bd0aa3076c904d43108702bb21cd5d8efbcb5a6": { "87455e817468ce7973be52c22bd571c3c34f449207bac87d98cf83d2f67a119b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.629Z" + "updatedAt": "2025-12-04T20:16:57.334Z", + "postProcessHash": 
"f3df9a4263f5c85238bfe735afc98bf51c39122cdc1381c5346b2117b53f0184" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.630Z" + "updatedAt": "2025-12-04T20:16:57.347Z", + "postProcessHash": "33085ed160e0a70b5fc8c4752ce695612393dc5823516902a7d440f60aee6e78" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.628Z" + "updatedAt": "2025-12-04T20:16:57.334Z", + "postProcessHash": "aa2fd797c067a2f5cd73efbbefde0dcdc8a6efb9796ed6ff5301bf27d2c79f91" } }, "1487bdd2fa2f3944d2af9ce3c81c0bc560bc49e8a960f88b3b1bd574854de890": { @@ -13821,26 +16983,32 @@ "e39ace6f98adf22617bf02b8e1b5e543cc789b8aca34a357f850131c862245ee": { "18eb1c50ac74effbf464a4c046b94e4cb6fa9eb96d70864437ccfb525503aa01": { "jp": { - "updatedAt": "2025-12-02T22:57:28.623Z" + "updatedAt": "2025-12-04T20:16:57.329Z", + "postProcessHash": "2b0b57bd4ea06c8e5c4b9a6c95f831407c19fc778e4ee4b961a91f8b03e6a405" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.620Z" + "updatedAt": "2025-12-04T20:16:57.327Z", + "postProcessHash": "12faafb913455d155e5dc524d4294cff74dedf15da25a094fdb5d81032ff595d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.620Z" + "updatedAt": "2025-12-04T20:16:57.327Z", + "postProcessHash": "b0274324d40d83cd69e33b1466fc81aa54a38e58e35e580f055e9331c245b9ad" } } }, "13087be692dde9b7feef18870d3f79b1d18225ab44ef354f1e90c30366e1c433": { "4c3eaabe73297be96e394821c21a7b410f9fe4c375bf5d324f6eec00d4aab141": { "jp": { - "updatedAt": "2025-12-02T22:57:13.034Z" + "updatedAt": "2025-12-04T20:16:57.374Z", + "postProcessHash": "d15a10a3be7d4c65c36a20468a5ccbe61bd0cc1e69f7bca394aff5fef5b9a9bd" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.023Z" + "updatedAt": "2025-12-04T20:16:57.355Z", + "postProcessHash": "db2116fc9d77c22de73cdcde1355264d4833ed95a39facfea5292d3230ec156a" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.029Z" + "updatedAt": "2025-12-04T20:16:57.371Z", + "postProcessHash": "f669e2512970173554b1de46f827d0a0ed50ddad656a77ef5e44f3789fd983f3" } }, "ece9e5e8b9ede092044e89a8f22b5f4f8ad893ecb0b80c8ee957c1d036b8e7eb": { @@ 
-13858,26 +17026,32 @@ "188f9a9bc3bec2ce321905c8a56a28198b42bc1f90b417b6ac00a4d9cf3c147b": { "8e6933142a9b80421dd489117c3233c45a2645cae67fe6bbf99c75fdf827c9ba": { "jp": { - "updatedAt": "2025-12-02T22:57:13.031Z" + "updatedAt": "2025-12-04T20:16:57.373Z", + "postProcessHash": "7463a17bacbc9d279559634d120ff59ddb2d3807dbffcc23ba1b9f730f61636f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.029Z" + "updatedAt": "2025-12-04T20:16:57.366Z", + "postProcessHash": "511f5bdac0b716a96f44e0fc7b30e6bef017a6c2e1757be90b101aba7cefa1c1" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.028Z" + "updatedAt": "2025-12-04T20:16:57.365Z", + "postProcessHash": "cf770c4d100cf7c4819cdd20a1a598abd403f147ac1e30bc0dd6d22b62771559" } } }, "1c00ec1111d4c97040f8a6b4705c820bc0afe08ce75657d4021750534563cc33": { "b2e299e5c648bc6c75f661d7ddb0d415bf3f4d2d15b1b81f676f8d781e4ab3d6": { "jp": { - "updatedAt": "2025-12-02T22:57:13.021Z" + "updatedAt": "2025-12-04T20:16:57.353Z", + "postProcessHash": "90fabaa1f489aa677e3911e3e2c22ee330af3ef3357e554423c3be3efe5758ce" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.007Z" + "updatedAt": "2025-12-04T20:16:57.351Z", + "postProcessHash": "6679eec89b7c91f343e16613fa05d9707c9cf0cf5fc27e1cbbfe1c7cbbb45121" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.022Z" + "updatedAt": "2025-12-04T20:16:57.353Z", + "postProcessHash": "cb008b031e52d5740d85f2ff41dfedeaae477f00b3203483444cd17af77074c4" } } }, @@ -13892,31 +17066,51 @@ "zh": { "updatedAt": "2025-12-02T22:57:28.651Z" } + }, + "4b159102b8118d1c5f860203e158167c69c30018a911e2276f3dc980a4d34d58": { + "ru": { + "updatedAt": "2025-12-04T20:16:57.392Z", + "postProcessHash": "713919e5bfbaa1140857d38d65c0897d30bf48e642820a7f4a088871f00fc287" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:57.393Z", + "postProcessHash": "7d373b90d1e51a605faeb92e1f6e0dc09b43678a371f261392782811f6ff7826" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.393Z", + "postProcessHash": 
"20531aa7f84930db6bf5e0e3d87ee7e9985eb4caf6f6e21faa4769ec3fc9d603" + } } }, "2fe98a07a0771f0c918a105339c7465f1d1800b749a6786ae052b4f5792f8146": { "bc9d4d641f5b9a05f88360a2ee33515689607102fb6c336b63a7598960ba63de": { "jp": { - "updatedAt": "2025-12-02T22:57:28.649Z" + "updatedAt": "2025-12-04T20:16:57.385Z", + "postProcessHash": "5fae19cce12e8fd7208330ead77e8235b945eef07d04f040fcf0c57e2e6997c4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.655Z" + "updatedAt": "2025-12-04T20:16:57.387Z", + "postProcessHash": "f3218fc9c005cb6bfa695a6477f3b30951b6cccae27b2bf3352f45bd692f3fc2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.649Z" + "updatedAt": "2025-12-04T20:16:57.385Z", + "postProcessHash": "60ca586ea16fee7c71ab63035eaad4535b80cea6f6395bf8a5cbfeab5a2d74e3" } } }, "341051d81c779d41671d446f22d563f3189738d32b9550e3675fa7143546561c": { "7b5212109cba86689f00d93efb9aa6bdd2aad536bac1da86a77d78443cec95c0": { "jp": { - "updatedAt": "2025-12-02T22:57:13.028Z" + "updatedAt": "2025-12-04T20:16:57.365Z", + "postProcessHash": "b86184a1e7c45332b09c540f6875e491ef45bfc2942e77b632215d98c5e264e1" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.034Z" + "updatedAt": "2025-12-04T20:16:57.374Z", + "postProcessHash": "fa5c4556e8011f86934aed061008f5165b76068c2d058facbef9e751a7591a29" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.033Z" + "updatedAt": "2025-12-04T20:16:57.374Z", + "postProcessHash": "de1f02951e7ffb89115f8f5d6be338e93f2938c9af79037f4ffb55d751e4dd9e" } }, "75703e9986b34937934165925fc58e72a3afca84531a9442ab6247ddcf89893e": { @@ -13934,26 +17128,32 @@ "513fe6bad8509823ffdccf71f911e6632a1d6c62bc3828d6880a93c15b106872": { "8b0b91827d9a7c004ba4a826838ebb29f76a0224d429a5d945acb7d900b732fd": { "jp": { - "updatedAt": "2025-12-02T22:57:13.026Z" + "updatedAt": "2025-12-04T20:16:57.386Z", + "postProcessHash": "85d9e4f6facbf2633810ecc02b699e945a30c535ef34fd9be2e78542edb6cfc7" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.027Z" + "updatedAt": "2025-12-04T20:16:57.387Z", + "postProcessHash": 
"33d1c11d5809a56b9837c81bf9ec26ce53a2f436357b65407be02d64913f04e2" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.024Z" + "updatedAt": "2025-12-04T20:16:57.386Z", + "postProcessHash": "3d11ef68efc02d837d13d3339af85bdabf60e8aff5dfe5e6f929a07e4f30ce45" } } }, "5aef4c42e18c1cc3dc46fc838620121334ecc389acdb56926a191d15c08904ab": { "7e58d82c11edbb334ceb08f7f620a974944c8ec3e120b7373fd4e03a17ac6cc9": { "jp": { - "updatedAt": "2025-12-02T22:57:13.033Z" + "updatedAt": "2025-12-04T20:16:57.374Z", + "postProcessHash": "269f2a9dc97d55027db709439164a46d40eee85e8428a87213a3c6dbaaddbd21" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.638Z" + "updatedAt": "2025-12-04T20:16:57.390Z", + "postProcessHash": "794696d349d0f66683b62aefd92e5f920368191d0e385b56eb943145b607cf00" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.032Z" + "updatedAt": "2025-12-04T20:16:57.373Z", + "postProcessHash": "af48790cc2c5bf4c2ff094e8ec6d8dd6c7525ea9ea2d8731570099b67d8fb53f" } }, "30c6212c47a3a967e11aed7b52fa64caa3358560d58e4a0931019d75506f6232": { @@ -13971,13 +17171,16 @@ "67b2cf74cdaca50f8911af9d708d0de9b1f69f0efeab9993911fd47c8fe2f59a": { "cfb3d97b019aa58d29e5939b08fabed324c21080592e9c094aeae86aafccb4be": { "jp": { - "updatedAt": "2025-12-02T22:57:13.014Z" + "updatedAt": "2025-12-04T20:16:57.375Z", + "postProcessHash": "fe4c5a29c33425ceed7fca6c22a89acecf8496296bc64a9c5979a10428ded9f9" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.013Z" + "updatedAt": "2025-12-04T20:16:57.375Z", + "postProcessHash": "31e16e480cf8c83907826182b19bbf8d096663c69462f8afbb8c28df1ad5c1ce" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.608Z" + "updatedAt": "2025-12-04T20:16:57.319Z", + "postProcessHash": "824979aaf761c3bbcc7f7033c7f0116b6b64ec55dc0d21489b6ad00cf368b04b" } }, "47b31a12063a879f2beddb3373c4c72ffa7b8dc29a27e09156d7cf03a46cf52b": { @@ -13995,26 +17198,32 @@ "721c2734aaae37ab2cfa24011429e694a791b3fb975c20c543e63c974c336cde": { "9ecec8ec535a5264bf7ad03315791abb102815a602f895880c47fb817859cf24": { "jp": { - 
"updatedAt": "2025-12-02T22:57:13.009Z" + "updatedAt": "2025-12-04T20:16:57.355Z", + "postProcessHash": "d63b6ce8d5689c0683454cbd56a7c22959ba42fbeda3c13cf034403871cc1146" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.632Z" + "updatedAt": "2025-12-04T20:16:57.377Z", + "postProcessHash": "0103d4d8aee5d3eeb45b2dac9a015f69c69c783fcff6374d8c829f0f256c106d" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.000Z" + "updatedAt": "2025-12-04T20:16:57.324Z", + "postProcessHash": "b993dba7e880e823839ea105a29940b01d82b427ee1ab0566a84e5a2a29a00e0" } } }, "72afe36eab2ff12e14d4d91a26552fab2abc011f9a4ccde2047a84210368f35e": { "1560d332f0571ac4c9e52b7b887c8418554ea38f675716836286eda06c730b1d": { "jp": { - "updatedAt": "2025-12-02T22:57:13.027Z" + "updatedAt": "2025-12-04T20:16:57.364Z", + "postProcessHash": "22fa1d9affcdfc76f66f842d5d9052dab534ec1a8fedf5a23183648e0a9a9f93" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.025Z" + "updatedAt": "2025-12-04T20:16:57.360Z", + "postProcessHash": "d7b0596d907baeef079fb4dd561a1a72774c365c41157e080f8bb2b065199492" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.024Z" + "updatedAt": "2025-12-04T20:16:57.356Z", + "postProcessHash": "6ebd1e34c9bf8c888be12e7d8daaa9341d8de1c88b61476be479fb65759be603" } }, "28b09452901cdc50d62751f8c7e48dda24bcdeceee8080b1e1efa2058fe428d1": { @@ -14032,39 +17241,48 @@ "8315916bdb3d69fc26c0b36f0b4378146ed63f736e03228e62d22efe01d9dfd4": { "5856087df98f6740b4472f367157e174efdc961ef37e3c1247d0ced2db5782d4": { "jp": { - "updatedAt": "2025-12-02T22:57:28.636Z" + "updatedAt": "2025-12-04T20:16:57.322Z", + "postProcessHash": "468ef70d7c76e9c7923e2c1b3602529f07a1dc781c0753fe6a9d9a80311d61da" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.007Z" + "updatedAt": "2025-12-04T20:16:57.351Z", + "postProcessHash": "3465184aea208b2bc9e1adf80585b8495933f091f1e8f8fd4f23eebed4f74215" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.634Z" + "updatedAt": "2025-12-04T20:16:57.322Z", + "postProcessHash": 
"f90dc06da92b8c3823148c69fe9b8a74ad47803e07cf6afc64b21196d34e4c02" } } }, "989eb966fc80a9e76f90dfcbc66e0dea7d1236c5a18dcfc3951a22c271c46183": { "501b56f9eae0cac02eb27cad28e73a3ea80b0a3e66d207d53190032406e903ec": { "jp": { - "updatedAt": "2025-12-02T22:57:13.011Z" + "updatedAt": "2025-12-04T20:16:57.363Z", + "postProcessHash": "40fc2ccd1f620fcf3e12949ae14ef43a5e9024cbaa57ec3cc258fc40dcc2cd9d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.611Z" + "updatedAt": "2025-12-04T20:16:57.319Z", + "postProcessHash": "18532e4f329fd4c78566f3ad855ebbdcfb5c240d3e36f6ca9ab51da494b9c444" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.010Z" + "updatedAt": "2025-12-04T20:16:57.359Z", + "postProcessHash": "8624027eda0dc6de07c2011a0ac60d10ed4acf46618d11235fa3939de78b4500" } } }, "baf8aa50269f83f9ca719a087ec8ad23a44645de6e886fe94ea8e017a880d3bb": { "bcb7bc6405a8a18657a3ca87a61f1366339dfcef97912463438b8f97cd471adb": { "jp": { - "updatedAt": "2025-12-02T22:57:13.000Z" + "updatedAt": "2025-12-04T20:16:57.324Z", + "postProcessHash": "cdd00ec91fc2a003dc200ec727349362c3ddb29760570ec69b7f9cd3696d821f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.009Z" + "updatedAt": "2025-12-04T20:16:57.354Z", + "postProcessHash": "a28a956661869e60618d66bed6b6364d3a25e738b12520becab7680ebec74820" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.610Z" + "updatedAt": "2025-12-04T20:16:57.319Z", + "postProcessHash": "c8be606d38a8d8c9f6a49f7bb271630c1d8e7c1b82060751b61aef26ddfa3fc7" } }, "a85beb07457a8cd583ab4626e4a1665d17654cc132cfa7622752e519095ab48d": { @@ -14082,13 +17300,16 @@ "c3e128b68f1271e67f658e6a27e710c60881f8641ac2288d555daa3208c005f9": { "13e7fbfbc5e808dd0b360b17e6a4fd7a2ba6d1036bc640c856da5cc47ecdadde": { "jp": { - "updatedAt": "2025-12-02T22:57:28.632Z" + "updatedAt": "2025-12-04T20:16:57.320Z", + "postProcessHash": "23a0f8fa9926fb1781e90492c9a6210915691b7587335fca5cd867489ad608bd" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.009Z" + "updatedAt": "2025-12-04T20:16:57.355Z", + "postProcessHash": 
"acba7fcda4e6373005c4868374fecff027905d257203eb6b6d5d2cded48500bf" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.634Z" + "updatedAt": "2025-12-04T20:16:57.321Z", + "postProcessHash": "89aff3b9967ef3c1713f75dbda009f694ceb6abbdf2dfb17ac65372b6442c270" } }, "4626508269ebe6c1920ad10ad096b8dba568bff0279bfb0356204ebd6e971f08": { @@ -14106,13 +17327,16 @@ "c484fc5a7f3148583c4468ad2af97f94fd9cc073f7098786a953f31855eb484e": { "bd8825806b6c9a9fcfd0073d67b67df3440ace8280431d5a8c8dad6ef5f213ec": { "jp": { - "updatedAt": "2025-12-02T22:57:28.640Z" + "updatedAt": "2025-12-04T20:16:57.392Z", + "postProcessHash": "f78972114aef21607b7b8f9e1265ce628375bdc0825b895f5fcdf8f4e48bf5f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.640Z" + "updatedAt": "2025-12-04T20:16:57.392Z", + "postProcessHash": "7eed257047daa3afe04317f684615dcbf3c3b722c7d55c3e2e496cc64fec58c1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.639Z" + "updatedAt": "2025-12-04T20:16:57.391Z", + "postProcessHash": "1d3273a068587a537efee554f872470a43e5114fa400c53aa03cc4d984e9fce8" } }, "8e44081ae47c2cc39c56f929606d16db2e836b56a354be7dc6b701f4b95b4017": { @@ -14130,13 +17354,16 @@ "cb12578467473a3c801b153c6cf4d13a10cf518318fd5f17155acd1793145e1b": { "7416b573c39ce724ff3be3b7fe423ab34f30f16536bf6cf0fdf7d2ba979a33ab": { "jp": { - "updatedAt": "2025-12-02T22:57:13.020Z" + "updatedAt": "2025-12-04T20:16:57.352Z", + "postProcessHash": "90e7a938b9e834fed8f5e5a922daf1eb73a22ae0fa362c4b64930cc753b92b8f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.030Z" + "updatedAt": "2025-12-04T20:16:57.372Z", + "postProcessHash": "89c27ea484a952d813be3614eb7e4d9c964bb0358b8b3e57df43af45aee806f6" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.002Z" + "updatedAt": "2025-12-04T20:16:57.348Z", + "postProcessHash": "f623612a89e1c21d1bcd40ee3600381403e14d6ec771a414a4f59eae427b619d" } }, "0db5aa15e3dbe18b1c681a26cee8856fc9b32345cfacb7a3bf81dc6e5ea5df3b": { @@ -14154,117 +17381,144 @@ 
"d7f86ec094d4fd68c7ec3902e09e9c8d6f32e759b1104bbeace470bd65c6ae68": { "aa75faa94f785331aff5bdbe2cbf5c4d6e4d398591d7ba48c786aa44ef7c17d8": { "jp": { - "updatedAt": "2025-12-02T22:57:13.024Z" + "updatedAt": "2025-12-04T20:16:57.356Z", + "postProcessHash": "434aded90235c4991bf54e3afbdddb76eb48aa37a7d093e285062b2908c45477" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.003Z" + "updatedAt": "2025-12-04T20:16:57.349Z", + "postProcessHash": "d286d1dbc1e65775da0aaf967505ca36a440d6e5cdac34d1f8b2daa0b08b7f25" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.005Z" + "updatedAt": "2025-12-04T20:16:57.350Z", + "postProcessHash": "ca4d08725cc391e8b91c50123a23168ca7e90057c6da2c40405f0d5a2c3dec87" } } }, "dae06bb227a02c2e0c6a941ce0fc26005199e52c806d58d5e67386d3ec46f9d2": { "7b4e58d24764fbe8ed14bec5a6c802f2f143b902c16c654c45567175ea3ba639": { "jp": { - "updatedAt": "2025-12-02T22:57:13.022Z" + "updatedAt": "2025-12-04T20:16:57.353Z", + "postProcessHash": "1c4929e8188ab915f1dac62aad8013d4590790a20f218456f60ddf3d3aa1bd09" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.012Z" + "updatedAt": "2025-12-04T20:16:57.366Z", + "postProcessHash": "96f6dc17e55b3c1df0355219db62fdf87c098106bf1aff54a35e8fef356450e9" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.012Z" + "updatedAt": "2025-12-04T20:16:57.366Z", + "postProcessHash": "28b82b5de9f103c12c48b14380ce9372179a0cb89099c286debcdf49d346e0ed" } } }, "dbffe2a957cf5e50f0d77de216e876face0751f13e47da2a20400d54d5665054": { "de205edb219286909fddbd177c0ceefb00f1d4bfa1753f3d37b2539c40ccb3b4": { "jp": { - "updatedAt": "2025-12-02T22:57:13.031Z" + "updatedAt": "2025-12-04T20:16:57.372Z", + "postProcessHash": "0cead01f8a01ace2a7d8c0b96a249cb078878d7c503f81d829941dcf9dbf2ce2" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.032Z" + "updatedAt": "2025-12-04T20:16:57.373Z", + "postProcessHash": "303ecc4dc4c2d28401a06b68a156fdb4bccf9286d95320db8602aa96d620fbda" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.030Z" + "updatedAt": "2025-12-04T20:16:57.372Z", + 
"postProcessHash": "cdb0960b5ae076eda712174408733bd0b597277ed28cf292fef7101949829471" } } }, "e05629c59a8527d19506d4c60937f73b19f3d5ee1a52750f68b76b2d39b9d9ea": { "746136ea09bf1fea642a7fffc300c1227b17aefa177ec7ad998a0d64c56bbef6": { "jp": { - "updatedAt": "2025-12-02T22:57:13.011Z" + "updatedAt": "2025-12-04T20:16:57.363Z", + "postProcessHash": "4a1509f5faa7dc02934b2ee7f800ffa265f225b661a47242b0b8747269afd78a" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.008Z" + "updatedAt": "2025-12-04T20:16:57.352Z", + "postProcessHash": "a795b35a3f86fc3112ab357482aabf06b5da32ef132a1af80e45c7b00a945963" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.010Z" + "updatedAt": "2025-12-04T20:16:57.362Z", + "postProcessHash": "946d29d682818eb10140112e5d72ee23c5c1953f86d982af2d7d52d13a4ada36" } } }, "125f424723e0504386a4a184da1e7119c6a2785f018e32a19cce5e8d2b7e5836": { "b707bc414a14120fcb5707df2de39c191647cd3b486308a8a5dafb116a49cb6c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.037Z" + "updatedAt": "2025-12-04T20:16:57.398Z", + "postProcessHash": "639b3a200002ddc4f957d2e1e67b2646d8269ada508a506c25e76fbf94dd6fcc" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.038Z" + "updatedAt": "2025-12-04T20:16:57.399Z", + "postProcessHash": "fffe3424bef38267c6f10dce60a8584f27f8cbaa3e9261599ab0d451bc4bed3b" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.035Z" + "updatedAt": "2025-12-04T20:16:57.383Z", + "postProcessHash": "dceda4085b9a0cd9acde6ad7c6b3139aa5c09a1006f4bced81b9f3232ee62b70" } } }, "19dc76f171fdf3b0cc1a3933538a1ce4395d12a9b9640597e4903ce3f6b18874": { "de4790564f72c39fe581e10e8ac3237721217d6c3c4ea4ad3cd07779bcc8dcf9": { "jp": { - "updatedAt": "2025-12-02T22:57:28.645Z" + "updatedAt": "2025-12-04T20:16:57.349Z", + "postProcessHash": "6566d398aae4da435dfa2f5bad0fd623411d3ea587e872970252d1db6659bbb6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.646Z" + "updatedAt": "2025-12-04T20:16:57.384Z", + "postProcessHash": "ba40f655448d8a0310ef4be8087c54f30d3f862f5ab2e829434de4deb513bff7" }, "zh": 
{ - "updatedAt": "2025-12-02T22:57:28.650Z" + "updatedAt": "2025-12-04T20:16:57.385Z", + "postProcessHash": "63e13c28e42e65822a8013564a6604ca3e11af7cba6056e3b9b8147f3a225812" } } }, "1ce6daa0ad295dac3a93f320fa28494beb73c39ee95608595b498a15a3e40ffa": { "85d971b7567c96e52bcd05d9d21b9c8edef12dd133c8c50e8b309d2d5aa75dc9": { "jp": { - "updatedAt": "2025-12-02T22:57:13.039Z" + "updatedAt": "2025-12-04T20:16:57.400Z", + "postProcessHash": "ba99bbdadfc8d183755ec8bbda4e391a53bf253de6080df3a3e36f0c962a55f4" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.041Z" + "updatedAt": "2025-12-04T20:16:57.401Z", + "postProcessHash": "415ad3a1c1fcb042b8e430b6269471caf8095f66e71cafbdf9747c5365f935d3" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.039Z" + "updatedAt": "2025-12-04T20:16:57.399Z", + "postProcessHash": "c546b61ad45f80fbce914c996be6af73fe47838a93d213ff6fea3d5a3bf9c9b7" } } }, "232c5ecb0f7a4603625517e022985cf3f01e1ead564c3eb970640640aaae8e12": { "3cf3a4419ef85aa0f20d163b55039c8180a0e1cb6acaf80999e00570756a5e6b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.651Z" + "updatedAt": "2025-12-04T20:16:57.386Z", + "postProcessHash": "201e173f6fd7052267b75604fb630955f45f55f6557e26c1256711f02ebce5e1" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.644Z" + "updatedAt": "2025-12-04T20:16:57.348Z", + "postProcessHash": "37d4fa4ba86d1d6528065ef494d0abb4ef1025c7da03500b15ee8ab8cca09417" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.653Z" + "updatedAt": "2025-12-04T20:16:57.387Z", + "postProcessHash": "0e0f0001707884d781d7435d986af820beef9f06c5b971182daff29592b9540a" } } }, "6731708fce75a234e44f1b0c956e538194421e979b065c7885b48212f94f5cf7": { "90b07a9d7262bc7ef0366d310777b0b0370a7a92ba12a4e6abc92359b2507413": { "jp": { - "updatedAt": "2025-12-02T22:57:28.656Z" + "updatedAt": "2025-12-04T20:16:57.388Z", + "postProcessHash": "ee6c26bdb7d43e2def6a7c60818f344457dfaaca6aa328c1e3502a36de5b93e8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.647Z" + "updatedAt": "2025-12-04T20:16:57.384Z", + 
"postProcessHash": "2e5b5e0e9daca13f941008fc0dd5b90b53c7f18695ebd1d220f9b0f55bc8aba5" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.653Z" + "updatedAt": "2025-12-04T20:16:57.387Z", + "postProcessHash": "3f88e20c5f0c08c238c2096ea51bd3910f2bb0548038b53bd0efd5fa7f3db4da" } }, "86f5cb11e91525fe84a6d7ac02cc19c807d8d6cce44b69717c9bbcefc375cc31": { @@ -14282,26 +17536,32 @@ "70cf97c8fc949e8db59f1ad657a9a53e576e424eaa88498f6a60d5b2e6729885": { "338d9d04b8e82dfebeacc09a54a398e5b4290b074e597a101394bc9922a1ee1c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.021Z" + "updatedAt": "2025-12-04T20:16:57.353Z", + "postProcessHash": "d5bf0201aeb67cd67ab8508bee6416bb164da6c2dae316369f6c5e3696e9255c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.019Z" + "updatedAt": "2025-12-04T20:16:57.352Z", + "postProcessHash": "b635543b4549e5c0cf1f798285d916bc8467bb7db5c36ae5fdad834309e4af40" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.019Z" + "updatedAt": "2025-12-04T20:16:57.352Z", + "postProcessHash": "91ca42f9743a574b29b90785beb13a1033e24f63b96911b7da66fac8e43d09b3" } } }, "7fa7f13fb2961efc9814d134b779cc0fe6672bcbaea9bf48e814ee1d05addd82": { "b111ecb2cd1900f390f7f5548b4fb6a727aefd661dda094d15b6c7351b124265": { "jp": { - "updatedAt": "2025-12-02T22:57:13.029Z" + "updatedAt": "2025-12-04T20:16:57.370Z", + "postProcessHash": "472278d625be9d145755b0812c884f078f24f0213910591e51b539583e70fa4c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.019Z" + "updatedAt": "2025-12-04T20:16:57.352Z", + "postProcessHash": "530bc476f5f975dabd591f565893c384ff6fd5c3ffe7da284560595b0503661e" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.026Z" + "updatedAt": "2025-12-04T20:16:57.361Z", + "postProcessHash": "a5907f132642c66757586c83dcd3680c20c2bdcb190b537a0be070f1bbd8fe1b" } }, "b37dc6bf582ff09f935ab13559b16785003bbc859edbc25cb5250cf6ed36730e": { @@ -14338,18 +17598,35 @@ "jp": { "updatedAt": "2025-12-02T22:57:13.017Z" } + }, + "f95ba4c4300ed79512639406575a2a02d4370daf2f9f94b0e2fa01cf5198630c": { + "jp": { + 
"updatedAt": "2025-12-04T20:16:57.393Z", + "postProcessHash": "5be24af8c64d51e0e0f51836a08dcc65811a43330fac053b54c7688abeb538b0" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:57.393Z", + "postProcessHash": "2d2a0f5e389ece7e4e0109285f5fffc2c1ffac091910047901989785e0086140" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.393Z", + "postProcessHash": "a4baa250b2d07d27cbe38ece3f987311155144f021e89e5f470bd2c1c1879ec0" + } } }, "998f0f4e3468e7524c434f473537634e659c94f841a46f5563bb39a4ef82c64a": { "cc945ba5712ca0c208a5b68dd98646b082cd93aed560249a34be7216e338325a": { "jp": { - "updatedAt": "2025-12-02T22:57:13.030Z" + "updatedAt": "2025-12-04T20:16:57.372Z", + "postProcessHash": "94c1d1afe742dbfcf44aa63a59995e7c10537cc85e34c8212c5c6a13082d28c8" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.028Z" + "updatedAt": "2025-12-04T20:16:57.366Z", + "postProcessHash": "398a5f07a7ef09c2c778bd543e10e0b604c42476642e7b9c15148491875eb151" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.031Z" + "updatedAt": "2025-12-04T20:16:57.372Z", + "postProcessHash": "fd52803462376e894ed8c58720cf1cdcecd556be9404efb856fc9da401f4bd32" } }, "8da324568d7b8b5274356df80d91630b941a46f186301011fca9d984a25b20f1": { @@ -14367,13 +17644,16 @@ "9b57ca46e862eddb44a226a1ea028a1678344782bb5bedd683e47de11237eb37": { "3677a4ab1311e72873d3ea0a7b9ee486d601653054d95434f100acf8fb1a4084": { "jp": { - "updatedAt": "2025-12-02T22:57:28.650Z" + "updatedAt": "2025-12-04T20:16:57.385Z", + "postProcessHash": "4dd89d10ea24b0190f75c91276f64de87a17a13cacfe048a1475d32c5dbb46c3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.657Z" + "updatedAt": "2025-12-04T20:16:57.388Z", + "postProcessHash": "ee3ba3fe57f50faf2a43555d44a6cceecf5e9aaa61bdbeb4adf40ffedc83c519" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.652Z" + "updatedAt": "2025-12-04T20:16:57.386Z", + "postProcessHash": "9ab01c3a14779d371ad53ac3f4c70e407b2ba51ad0de4980275cf5a549d5307b" } }, "68a496a4ec4c57830ab397ac2a3d04f92f17c400130928c2a18b7b319b353710": { @@ -14391,13 
+17671,16 @@ "a725d7aefcb81ca44df79432f1da90c48ccc1821c943c4aea64ec662f97fc340": { "11463482ef7448e6a135decdd6e0c2680b565473c869d4016976dbdd9ebd3fd0": { "jp": { - "updatedAt": "2025-12-02T22:57:13.021Z" + "updatedAt": "2025-12-04T20:16:57.353Z", + "postProcessHash": "cc2e3ee543f615b66e8602dd4506bb4aa24794f6176949f6abc04ade3f2a8397" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.023Z" + "updatedAt": "2025-12-04T20:16:57.354Z", + "postProcessHash": "d2b20ff03ac89713fda5aba8d4db4deae2959e9c508e0ddc5d67d928d5749bf2" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.027Z" + "updatedAt": "2025-12-04T20:16:57.365Z", + "postProcessHash": "9624773d77b2f92736700669fde9526bca854fcad18527806f52d0b2c75cca24" } }, "1828935083cf996cff81aea1667dc9a5bb681eabb1001a486dc7de9c3d17be77": { @@ -14415,13 +17698,16 @@ "aff518be70e64a7690e4ccddb5d480073f10c95e3ea3c17ad5f290330ba897bf": { "6a9dc9ad629f0fdf6c6df2ec6b40090ded099d6d2e5c42feac66429630d6b1dc": { "jp": { - "updatedAt": "2025-12-02T22:57:28.661Z" + "updatedAt": "2025-12-04T20:16:57.390Z", + "postProcessHash": "d8c67fe085db62e3734bdc90483860940541202a0206fd04b4dcdf6838a1b1ea" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.661Z" + "updatedAt": "2025-12-04T20:16:57.390Z", + "postProcessHash": "cc62272c3b75b2d2b12c723881ad20363e8e59daf90463a1b3d758fa58b91c35" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.660Z" + "updatedAt": "2025-12-04T20:16:57.389Z", + "postProcessHash": "ed51ad14bb5ea7d5be99e9d26313c10f2f45cbe2494df73d4090543e1470bc52" } }, "afced390065bbace06e3ad3e7d262d22da585d3d52fc35e6d2659ce9d6f1de55": { @@ -14439,13 +17725,16 @@ "b02ce70d6dcff3632894b67e171d3cc1146833fe54d4b06011bbaa8c85a0884d": { "09c5316a4d219ae648cc28e8db671f96b825f4184cd4e8e4a511bae5beff8109": { "zh": { - "updatedAt": "2025-12-02T22:57:28.661Z" + "updatedAt": "2025-12-04T20:16:57.390Z", + "postProcessHash": "3eb8b67aa6178ac247ce3e3bd46943b9d3e9dcf3738021a1e75c75bee161b04f" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.662Z" + "updatedAt": 
"2025-12-04T20:16:57.390Z", + "postProcessHash": "f7078e8e29b0338931e680318a6a051d3cc06b9ede900aacc289c57b179c0772" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.662Z" + "updatedAt": "2025-12-04T20:16:57.391Z", + "postProcessHash": "99d63daf2fa85550034e3afa2098b113067f8e0defa473a27c9326458903760d" } }, "16d290c771287aeaa511447cff48ba3f26790d6964ff9ef4e8d1df0086f94e4c": { @@ -14463,26 +17752,32 @@ "b0eb0aa22feb0a0e87aa525beba888ab6c811439fb42a8629b3439c190197487": { "f0d582626df8dfbb7340a088943ebaa56822080b63fa866b42e18086e898b317": { "jp": { - "updatedAt": "2025-12-02T22:57:28.644Z" + "updatedAt": "2025-12-04T20:16:57.349Z", + "postProcessHash": "514b935a6e2cd03fc1a3aae435a509cda24bbacf7afa776b9baab3600fcd1196" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.646Z" + "updatedAt": "2025-12-04T20:16:57.384Z", + "postProcessHash": "7f4618656707cbd5e9ba72090aa93e71e28737bee49973783d0d8b1efbf5df6c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.653Z" + "updatedAt": "2025-12-04T20:16:57.387Z", + "postProcessHash": "e1864a3d123593c5fcbb8d55585c1dd4e11bfb70890fc08f7034064a4dcdd6d0" } } }, "c5321e7f9f222609b774837230342fd88d3987594612b1a1a987cb8500748344": { "6b17248bc93c579b8212f6684732b598b2781d46fbc0ff7686c6c9716116d43d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.658Z" + "updatedAt": "2025-12-04T20:16:57.388Z", + "postProcessHash": "3cc1fd3998316059ffefd5cee65d9f184b5e7522b23df7fe3f28f8b337c36d2e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.658Z" + "updatedAt": "2025-12-04T20:16:57.389Z", + "postProcessHash": "0675f49d6b231b94c7e0f15ebae3ca85041776a63f964a186879dee2bf97ebe6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.652Z" + "updatedAt": "2025-12-04T20:16:57.386Z", + "postProcessHash": "ebf19f9bc1d7b8773ca3afd561117694e31e9e978347b5bf7ab14ebe8b249fd4" } }, "9643b5165e89def525dc375e71c872c60a8f7dd711c239056b81159bc679dcbe": { @@ -14500,13 +17795,16 @@ "d865d8906bab480f2412b8134877a2a96913a3533480602839cb1425678255d8": { 
"40e74dd63d675033a626615c8dcb1dc2d2ab2290058dacdabdc77986ec16b1dd": { "jp": { - "updatedAt": "2025-12-02T22:57:13.029Z" + "updatedAt": "2025-12-04T20:16:57.366Z", + "postProcessHash": "80f4c833cb40e0e67035367514b25c5394bfacc8398c0d88cf12147e7984f527" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.001Z" + "updatedAt": "2025-12-04T20:16:57.325Z", + "postProcessHash": "68b64f9a4e5b3045e0a798c64a1ed87de939871733175279305914a520428705" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.023Z" + "updatedAt": "2025-12-04T20:16:57.355Z", + "postProcessHash": "0d71f89e4582e999d37a21225a73f8e39009b89767629fc85d00b791de198422" } }, "9d65695564bebf9c14e110dfe0a95ba918be4e978cdc05a279d1f5d41bd4ee32": { @@ -14524,78 +17822,96 @@ "db1f6b413c1b5c95a7fe86857804d32fa0bf64bd126d0d1bb0a19d36642d1ff9": { "2a09e7a09ae046fb1bc7a86b262a2891822048befffff23b62cc68c9e7e58324": { "jp": { - "updatedAt": "2025-12-02T22:57:13.026Z" + "updatedAt": "2025-12-04T20:16:57.362Z", + "postProcessHash": "f718d1ddad686375fe27dcf1068f16bedacc711f2d6696092c49daea52ff37bb" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.006Z" + "updatedAt": "2025-12-04T20:16:57.351Z", + "postProcessHash": "65222bc2fca56d7e5e8a8b1fcf92384e8d2243542464ed21d7dfa93dd7c14342" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.020Z" + "updatedAt": "2025-12-04T20:16:57.353Z", + "postProcessHash": "f1d95524deef78ce639ce70d7e74cd524286003ff0fdef008be10925c3ec1c2d" } } }, "deaf9da7af41c9dbd196870e7d946c2d92a2b4098eacc1d9d67ca6e552d438a5": { "fdf52ca20d97fc34fd94ada024eedfd00d77d9abbb0aed5df8411acf741dbddf": { "jp": { - "updatedAt": "2025-12-02T22:57:28.639Z" + "updatedAt": "2025-12-04T20:16:57.391Z", + "postProcessHash": "a7cac9121ef2488d4638808bb191604310f5e752aebdf6718e5374ebe1cf3184" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.641Z" + "updatedAt": "2025-12-04T20:16:57.392Z", + "postProcessHash": "ad6b109dec4fae12f3941ed2af9bbaad87ce50976e7850ef9155d6bd1c932878" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.640Z" + "updatedAt": 
"2025-12-04T20:16:57.391Z", + "postProcessHash": "d1a54dc88951113ac69ec7ea74c8626ca15d9bbb7bb418c8284ded093fc9647b" } } }, "ed51dd17995f6639353bb7c4089fa97d4f8dc7203bca3e26312cb31005fd949d": { "a382bedb279fccc3ac9fd5b4fe0ce9a876319b2d0652651cf74622f32f475762": { "jp": { - "updatedAt": "2025-12-02T22:57:13.025Z" + "updatedAt": "2025-12-04T20:16:57.359Z", + "postProcessHash": "ad02e08a928d952df9c71207831c8fd8124f79a83619dfd14a6b2bc941a034c0" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.022Z" + "updatedAt": "2025-12-04T20:16:57.354Z", + "postProcessHash": "15e2c08351e5c93614b78f3808171aafc430aaaae0531b9b1f280a819c9bdb9d" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.025Z" + "updatedAt": "2025-12-04T20:16:57.360Z", + "postProcessHash": "7efaecd79f9795c398e3a89dd50c742aec07d33b6673621c11bf9ae975fa7aef" } } }, "ef55ad557299e30ca7d8ccbe3f701f3efcfb9407e677358fda64040c88c2a0e3": { "b7534a46cfb2aba578904a3ead55b3a917dd6ea809c434df147c1f98e5defeeb": { "jp": { - "updatedAt": "2025-12-02T22:57:28.655Z" + "updatedAt": "2025-12-04T20:16:57.388Z", + "postProcessHash": "047fbe8f9e520a9c7cefc69944a99b2b69cc7b8e75ec1a3fbdd5f1f245b522a3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.656Z" + "updatedAt": "2025-12-04T20:16:57.388Z", + "postProcessHash": "6e3d1c642ec7f5d547f3bb89d239fe9bec196004d1a6f550cbf5ba4300e6f9f6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.647Z" + "updatedAt": "2025-12-04T20:16:57.384Z", + "postProcessHash": "4dc62aba9d9299b6e444f9f64dd126b77e8a64248fcc4e3c09e64d3063b36c76" } } }, "f4e514c65ad19dadd6e36981ced2004e96119143057123e6f8343003c976414b": { "f9be206d9401669361ef8b3907f74e41604e01c3da770a270a3b262d0cf9e0b7": { "jp": { - "updatedAt": "2025-12-02T22:57:13.018Z" + "updatedAt": "2025-12-04T20:16:57.347Z", + "postProcessHash": "82991c7736ab988f28d5bbcbc37d67dc082ed9675d2c9021e05977e4fdafe973" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.645Z" + "updatedAt": "2025-12-04T20:16:57.350Z", + "postProcessHash": 
"7b3f7f91a75ebed362e6005e9e4b8186dc59a7ad132ce22a402bf913632510ed" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.645Z" + "updatedAt": "2025-12-04T20:16:57.350Z", + "postProcessHash": "cd417b9d1435c32e854a59b730655725ed51c078890a57a79dadd1760b342db5" } } }, "fde1eb9f476467503483925174bc3e07ff1b6152ca9eca4cbea2189875928f86": { "5b437c2637bdf116e6ae9dd7273b3db889d01e6006e1510d494e0c27992a8abe": { "jp": { - "updatedAt": "2025-12-02T22:57:28.638Z" + "updatedAt": "2025-12-04T20:16:57.390Z", + "postProcessHash": "288bef79b82e46c2514f2604af9e2aff31e8c72df2fce2e4ffe5170582e704d8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.637Z" + "updatedAt": "2025-12-04T20:16:57.390Z", + "postProcessHash": "2338f227a44daf7e23429498b59ed22d9c515b0e7106ea6eb0b41e1b8c6d4ce4" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.640Z" + "updatedAt": "2025-12-04T20:16:57.391Z", + "postProcessHash": "f97d11fed715174d952fe8f6f36dc631e84d93e7c6794508450f2796098bd203" } }, "0325309625a326b35142132e72492d62272031ba36d1a42a0998e56b1719cc40": { @@ -14613,91 +17929,112 @@ "025fd49fff3f320d5bf6441808dc379cdaa73f78cddd66059a1f1d989a1102a9": { "5cb5606bdf1fcec7d40bb07c9211307f195d39d691aa2cabd78b397dd79771c5": { "jp": { - "updatedAt": "2025-12-02T22:57:28.664Z" + "updatedAt": "2025-12-04T20:16:57.378Z", + "postProcessHash": "dfe7f0af643b5073a1f750c2b1275601db819a15a8ae1433959a2c324749852f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.059Z" + "updatedAt": "2025-12-04T20:16:57.399Z", + "postProcessHash": "5d0afca2f2ec88137b91fba0b4fe5f13fece7614556104790197dbb5325bde73" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.053Z" + "updatedAt": "2025-12-04T20:16:57.395Z", + "postProcessHash": "bbef0504f3ae7c8427e93e91bcd51387a9d34e0b621d9c3a053853a17082fdf6" } } }, "1e4b57e276f3147467bca9c9b34ef7237444bbb31a33e9319c88df9db588b8ef": { "781ade8017e15eb182d04e5802e03ea4655dd91aa963a8d3d6d5e111348f2ef9": { "jp": { - "updatedAt": "2025-12-02T22:57:13.045Z" + "updatedAt": "2025-12-04T20:16:57.406Z", + "postProcessHash": 
"46fcb2a203a8ee8300b96eb88d9c150937940b2c2cad4d5ac580f4c7720daaac" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.041Z" + "updatedAt": "2025-12-04T20:16:57.402Z", + "postProcessHash": "390868cb580fbd5b17cc193b27296464645eb7a5ba7f593858382ee288ef3fb7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.038Z" + "updatedAt": "2025-12-04T20:16:57.398Z", + "postProcessHash": "c0a2822cf8d4e985f58d014d5e3b2ef28f544c12d00a5ffeffbc0aa69971ad2c" } } }, "243d4d43037034f08f1c4e2b8b9dad2508192f28c9087e19fdb7e02cb828ad52": { "8945c696900efad4645c2f95b6f862201f4275bbed3998caa867b1ac37deb350": { "jp": { - "updatedAt": "2025-12-02T22:57:13.062Z" + "updatedAt": "2025-12-04T20:16:57.402Z", + "postProcessHash": "5ece8aace7de6152433126b770180670fce9d54f7cbdd2a12c3ca3506a93996e" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.059Z" + "updatedAt": "2025-12-04T20:16:57.399Z", + "postProcessHash": "3cf499b5637900e639ba10877b190ec477033402401f9ee8a3ff72b2a59a7340" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.057Z" + "updatedAt": "2025-12-04T20:16:57.398Z", + "postProcessHash": "e5c507740645ee74164e2c5d26169b83d10cbdbc09415d15c4f64a5acfd032a0" } } }, "2d5ce469cb4fcd9ac57756723325805176514ce512b8039ab05e3fde56bb12a1": { "37840663d4e6d0f5bd1b9b294c2b0feff352bd6bdd003b973cd9e9e03ef04b2a": { "jp": { - "updatedAt": "2025-12-02T22:57:13.060Z" + "updatedAt": "2025-12-04T20:16:57.400Z", + "postProcessHash": "abdf2ad8154e2f0c04e371069e0ce812b28490dfcca52c636a48bc31a69893ef" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.061Z" + "updatedAt": "2025-12-04T20:16:57.401Z", + "postProcessHash": "93e24a1a82f8cf19d087d9dfbb510e0d61163d754e395e43c2b049e13141b7e6" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.061Z" + "updatedAt": "2025-12-04T20:16:57.401Z", + "postProcessHash": "2b269feca9802ea9c32e2ffbe85af2f2e35cfb7f5d718e3ea0e344b2af6d264a" } } }, "344aa60f54b872aa215951fce76265aad2f3f1d6ff8bacd50188b941ce5098c8": { "7a8f03b82b278bf1a01cbbd7ff1923941fcfc7239248c640ae1b2eec075f2bd0": { "jp": { - "updatedAt": 
"2025-12-02T22:57:13.046Z" + "updatedAt": "2025-12-04T20:16:57.406Z", + "postProcessHash": "1b8c8e958bf4522e57e12a8657b691926df8adee84331b485f15697d798c8d29" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.042Z" + "updatedAt": "2025-12-04T20:16:57.404Z", + "postProcessHash": "578868b4aee5372b1b1421f08f28fb0f83945cd91845cd9b967290e893441616" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.045Z" + "updatedAt": "2025-12-04T20:16:57.406Z", + "postProcessHash": "7b09b35e7b670af0ed2ab4071a58fcb8697c1b2f6b7f633a880c78801a8a5897" } } }, "53d65ec30475ca0007e7da32916549bd02696879f561f268e8e3a58c0dfe9de5": { "e1d20246377ea7703705aeea779bd04141833d80b87084862959aeb3e9a08c2e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.649Z" + "updatedAt": "2025-12-04T20:16:57.385Z", + "postProcessHash": "e37b88880dc4a3151b67d534c113cf3ffd14608e92a0a9544fa46f53c22183fc" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.654Z" + "updatedAt": "2025-12-04T20:16:57.387Z", + "postProcessHash": "ff16f55530f46df527f6a7a3f6c3382af2e005108dc14525779e9c067d131d00" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.646Z" + "updatedAt": "2025-12-04T20:16:57.383Z", + "postProcessHash": "88ced1772e9dbc83392b462a37534eadfb741a2a77fcccd25442653d50788ffe" } } }, "5b9f7f3fc45524898a5a0adff01cb46775280fd54d8fbbd51c0cb3cfb2b7e78e": { "1f25423f1068a3269325eb00ebe91d4fe71d603537f5b9e8c9b81de44ef31aaf": { "jp": { - "updatedAt": "2025-12-02T22:57:28.648Z" + "updatedAt": "2025-12-04T20:16:57.385Z", + "postProcessHash": "0cf6908dc3a389ebd24440319e7efdfee13ca6d1297cd88b5d589015fe05c90d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.647Z" + "updatedAt": "2025-12-04T20:16:57.384Z", + "postProcessHash": "cfe95609848b89e6a7cb753b2aa491890dc1a119ca76e262d189c687118d2515" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.654Z" + "updatedAt": "2025-12-04T20:16:57.387Z", + "postProcessHash": "57d2a8ec849ebb8b9304e3a62f5d2d8c5c661932c071cc3d810083454792daf7" } }, "aea269c29122c965661a61ed9e0de26050201cfa241ccd1b34c694f29cebaf67": { @@ 
-14715,104 +18052,128 @@ "5c4dcedff3da1da80fb63b9461c1c89223beee53c37a3b5a538edc528453f0b2": { "620bb0c22df1a23b2a8df3eb395373d44296904b0332797c29514f90a31606b2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.650Z" + "updatedAt": "2025-12-04T20:16:57.398Z", + "postProcessHash": "54ebf8cfc447e67dd29ff4030effbe51d137e469e7fbbc854feaad9c45d6ad47" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.648Z" + "updatedAt": "2025-12-04T20:16:57.397Z", + "postProcessHash": "3f332c9b05e0c3fb08139ac33e9ae6a72575ca1b7e536f39fe31a7a3058d3055" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.659Z" + "updatedAt": "2025-12-04T20:16:57.389Z", + "postProcessHash": "6a313a38a95aa9d5a1d8c0baf48a0ea1e91f3eb653e3b52a2b943e0b72df073a" } } }, "719a6d655a54f957cec2c65e95d6651040b93a639ad6aa44861b85ae09c1c5c5": { "fafe4a083f40e8f75644ffb779bcedb7065ad373f06a042ecf2238313aeef393": { "jp": { - "updatedAt": "2025-12-02T22:57:13.018Z" + "updatedAt": "2025-12-04T20:16:57.348Z", + "postProcessHash": "7754d3225bdbdbc5126e6a8d9824f7195e66e24feb845fa50b9c90cc79eda924" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.655Z" + "updatedAt": "2025-12-04T20:16:57.388Z", + "postProcessHash": "b26c7d2427fc55b67142bd31ef93dd8da48c4670d3e0733d3189114d217b8839" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.659Z" + "updatedAt": "2025-12-04T20:16:57.389Z", + "postProcessHash": "593ef13ce1953fa1a89269ead27c2e8e2bdcb5acfdb16a2ebcd5789d16cf580b" } } }, "82b281d3017bb8cc4db38036df8fbbba3430846e468a784c1b2e6d4d8e43b6d7": { "617961c999f1bf6eb48c03b5f56f99b3a7309dba7bcdb74914b6a76f36a56413": { "jp": { - "updatedAt": "2025-12-02T22:57:13.018Z" + "updatedAt": "2025-12-04T20:16:57.348Z", + "postProcessHash": "690d09dd69536a3942cdfb603f003b3f6b24f86f206ef4964af60531881140b6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.663Z" + "updatedAt": "2025-12-04T20:16:57.392Z", + "postProcessHash": "43028605a4a849b8a6d6b55f2f8bf39ab321147fd161ddaa3f7b81455f7d410b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.663Z" + "updatedAt": 
"2025-12-04T20:16:57.391Z", + "postProcessHash": "15324b3465fe8d4a35e4424534472fc67a206dfc723a47dd05fff079e205d2f8" } } }, "8cbea57ac40a6d6358183da1d28c1a09304c1b4a5edf96e2c4a808dc6773ba41": { "39a62a98184d3c0536249ba36e562c954047436e58e929927516fea5318e895b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.660Z" + "updatedAt": "2025-12-04T20:16:57.389Z", + "postProcessHash": "ab289d102894c33b531c895a6e16f87a5f70863409d8af8032b8efc3e485b674" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.657Z" + "updatedAt": "2025-12-04T20:16:57.388Z", + "postProcessHash": "9768561d2c726cfd133b78ccf7a8e3028419dcfea8daa392ebed159bbb676fa6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.658Z" + "updatedAt": "2025-12-04T20:16:57.389Z", + "postProcessHash": "f2500e62a0963b2a92fa3cc9a3f3982c02caef8102f35cdbb74891eb594d3358" } } }, "940796a1aae864d0eda15bb34a302626f3ad6a2c1d3af60ba921316d95e81a13": { "301a0a16ec26f11dd9fb52328307087f8c2528fea166cdea553309d6e58106d4": { "jp": { - "updatedAt": "2025-12-02T22:57:28.652Z" + "updatedAt": "2025-12-04T20:16:57.386Z", + "postProcessHash": "bc81c817b983cbde9e28c9fee06053dfcf6c2b37a61eb8179d3b1da6aea24560" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.651Z" + "updatedAt": "2025-12-04T20:16:57.386Z", + "postProcessHash": "735d04dd6ba2713ac0fe68459df4d8866083b24b327bca438f5cb6dac618083d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.643Z" + "updatedAt": "2025-12-04T20:16:57.348Z", + "postProcessHash": "9deaac027d919a62e7a38a1227f0699396a165a8e6c57fd653190def0c95c065" } } }, "ab91d27df4d8b8148381ccfd51e2bc9b99a1625ef08e73f1d9a0eb197e5397a2": { "a1465aea8fd40bd2a71567dcd05c6ce53e13c60e2ac21919e271ebe1b6782f74": { "jp": { - "updatedAt": "2025-12-02T22:57:13.046Z" + "updatedAt": "2025-12-04T20:16:57.407Z", + "postProcessHash": "efb101d14d65b4516a7ea2c183ba5f827368e14c277c611317fa573d984c2031" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.036Z" + "updatedAt": "2025-12-04T20:16:57.397Z", + "postProcessHash": 
"37ed044e7ee372a4dfa8deba40a068b332d419ffe9a2e572a4808f4f628eee65" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.041Z" + "updatedAt": "2025-12-04T20:16:57.404Z", + "postProcessHash": "2f1683772e9929148ad2d3d7c0131669cf6d6b796873b950bff22aed365c948f" } } }, "b7c59a245d47fd54f7c7477cbd498ba2937399586e98674be51c6a7c40b2ae70": { "410fd44fe625de2b185ba9098597ace5e062b1884403c90912660d14d188d9bc": { "jp": { - "updatedAt": "2025-12-02T22:57:13.044Z" + "updatedAt": "2025-12-04T20:16:57.405Z", + "postProcessHash": "d3105a83ceadcfa66f4442fd6b238d9d027e4682e5602c3bc03c8e2067b56c38" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.044Z" + "updatedAt": "2025-12-04T20:16:57.405Z", + "postProcessHash": "7d19c100024c49f98cda709473bc6002bd89a9fe4fdbe340df6129a6439066c8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.662Z" + "updatedAt": "2025-12-04T20:16:57.410Z", + "postProcessHash": "07aaf1a744512f19fd0a96ec3e407e9c976ff4cbe23d2c31cf8c23ed0e2effb0" } } }, "d03338e91e1f725469cbc573d2b5a49c055fe39e67ab09e92b408e3e6dce3361": { "fee22f53b36f6d80c05058f7c0b07e16a2dbb531dbf640d90efae0a82972bd4c": { "ru": { - "updatedAt": "2025-12-02T22:57:13.044Z" + "updatedAt": "2025-12-04T20:16:57.405Z", + "postProcessHash": "a1ada88c105f9c280c3a1c745b91c95ac523c744babf15c616a9eaa9b1868e45" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.046Z" + "updatedAt": "2025-12-04T20:16:57.406Z", + "postProcessHash": "e2e1106ee34b46c5fc8de1fef3854bab8037cc1c41b1dc4c7777746ef2459083" }, "jp": { - "updatedAt": "2025-12-02T22:57:13.042Z" + "updatedAt": "2025-12-04T20:16:57.404Z", + "postProcessHash": "9dfd6a62fce597b57e64e67532370aa6103b57f7fb909a9037204c63534da4e1" } } }, @@ -14827,31 +18188,51 @@ "zh": { "updatedAt": "2025-12-02T22:57:28.654Z" } + }, + "8c32d9ed79179c36d91c216797451fa357aeb28c845216e5bab97a4007a3b6e7": { + "jp": { + "updatedAt": "2025-12-04T20:16:57.392Z", + "postProcessHash": "98b10da2453259ba71892fb92e0678038bd855e28bdb3c1d824a70764e676637" + }, + "ru": { + "updatedAt": 
"2025-12-04T20:16:57.392Z", + "postProcessHash": "a919d4d8e187ee2ceae4f5a543fea28b28807868a83c383be3b356498a8d9211" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.392Z", + "postProcessHash": "bd79ec34f07d72d4ca7f5be7416583f10b5766d3c92a63dc97fdef74c6509e4b" + } } }, "07567d62aae7f94a29e9f4d850ede3f6eec697596681ec8f0be305090388b473": { "781c617b76b44e877e7e119770ca6ecc45863cb3bae1a444fe8807d6ebada97d": { "jp": { - "updatedAt": "2025-12-02T22:57:13.062Z" + "updatedAt": "2025-12-04T20:16:57.402Z", + "postProcessHash": "7e406526b77751ae66e2c472de3a2a1c6189a1cb0dd3ae96467c9ba91841bb99" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.056Z" + "updatedAt": "2025-12-04T20:16:57.397Z", + "postProcessHash": "8ecc7da56d2ef34ecbf7607d285fcaede2b8917cc1ca3850bb1c646e8b067efa" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.064Z" + "updatedAt": "2025-12-04T20:16:57.403Z", + "postProcessHash": "efe6c85bba5eda6bca7bd2d97cbd0010994959369d0b3def5cb1ab93081e5f61" } } }, "0fb41b547356b6f436429d0cf855f257dafb010e05f7a885625fcb0c5c9d75bd": { "af639a70dc49d1e6f2c4983d593aa1ebfbb63d8f5308bb046693e20ebd771ba0": { "jp": { - "updatedAt": "2025-12-02T22:57:44.887Z" + "updatedAt": "2025-12-04T20:16:57.572Z", + "postProcessHash": "369189e798539275d41318978bb762f29a934b5d1ef151f677d0971a48b7bde9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.887Z" + "updatedAt": "2025-12-04T20:16:57.572Z", + "postProcessHash": "8f93afae3a6edb9c75c57ed54ef4348f1119007f686c7d88606b8e29363bc82f" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.889Z" + "updatedAt": "2025-12-04T20:16:57.573Z", + "postProcessHash": "c0da7d7a2fd027eb15e4486a21eb7a581cfd29a6471e430f0803bf55885d53c2" } }, "3a8f100e338d722f8c4dbb2e623e5c4dc5a4d6f3bb6f2d5ba48737830cec8fbf": { @@ -14869,13 +18250,16 @@ "153ff0c08aecf20c420ae5dfa80993225532cf87b7d9c41e419a23934521c9a0": { "210b6c2518d283f504510edd4cc66909ab1c5db80d2fefc077ca8492e8201477": { "jp": { - "updatedAt": "2025-12-02T22:57:44.883Z" + "updatedAt": "2025-12-04T20:16:57.570Z", + 
"postProcessHash": "0c39f4175da598f24530bfc31366553333540c883024475c01ba8733f19c1ef6" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.881Z" + "updatedAt": "2025-12-04T20:16:57.569Z", + "postProcessHash": "2afd645119b5d65458f1c46332519bf04bd1149c11fdf9103778547ccf4e5cd7" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.881Z" + "updatedAt": "2025-12-04T20:16:57.568Z", + "postProcessHash": "3501a1964075465af3a3ce38309acba422dfd0503fced0fa08c7194ded3842ec" } }, "9d361fb3775a562cb507335e776089308bf758ae2747ae0957bd649d98faedc0": { @@ -14893,26 +18277,32 @@ "24d0c9c911ed73221e135198269c3368d046b7994b57b0fb624351b888e71a8d": { "547964d07a357f1d9316aadc7016d3943cece91207d0037cea7d08bb8914f5fd": { "jp": { - "updatedAt": "2025-12-02T22:57:13.060Z" + "updatedAt": "2025-12-04T20:16:57.400Z", + "postProcessHash": "874623a59a21c31f1601916be566543f8255dd5fb28d2d9dfe7efd390e9811e0" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.060Z" + "updatedAt": "2025-12-04T20:16:57.400Z", + "postProcessHash": "a75f16dbe71b93bdde52969a24ca61157c2988435cc4189c7dbda254610266cc" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.665Z" + "updatedAt": "2025-12-04T20:16:57.379Z", + "postProcessHash": "cd3e44fe4e54ab3e3fc09f809074abd3bfec830baf00a1cbadd46722612261c1" } } }, "32982205f1155c2c2e05fe89e04c9cd20828fb0a653c7c72c7da8d61c3253607": { "641d2a22f3cbbdbb5877f4694e0f7a70c2d4d0ea47aafe7ac478509d2f4bda90": { "jp": { - "updatedAt": "2025-12-02T22:57:13.070Z" + "updatedAt": "2025-12-04T20:16:57.410Z", + "postProcessHash": "71fb8616c55b05c4cb6bd3602d2bb61ba509141650c8b6b831ae7668a8d66d52" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.862Z" + "updatedAt": "2025-12-04T20:16:57.573Z", + "postProcessHash": "d4b74c7199cf29ee43de924ace6ef983fbb9870219fe538844a597003e3492ff" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.861Z" + "updatedAt": "2025-12-04T20:16:57.573Z", + "postProcessHash": "011cada3c160378a9aa776af60a5a6318e62fa8498ce2028421fb494ec42827a" } } }, @@ -14930,91 +18320,112 @@ }, 
"86296003488064b48670c7fa1dea340b94da850eefa6ecaf62711f1d83875b93": { "zh": { - "updatedAt": "2025-12-02T22:57:28.643Z" + "updatedAt": "2025-12-04T20:16:57.413Z", + "postProcessHash": "9c908d9a3bafa8e5a2a2e43478ba61ee83a3db924186828ab6cc873f50897d4d" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.666Z" + "updatedAt": "2025-12-04T20:16:57.380Z", + "postProcessHash": "1264a6fa75b0f245c8357b3d5f42ebf4ea1a5f8351256b4b958353ea1956c4e2" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.666Z" + "updatedAt": "2025-12-04T20:16:57.380Z", + "postProcessHash": "1f0bdbf46736308c3fab48a073b2bf712c7d759efb8b2d3474808bd7792b0b06" } } }, "38b350a818493921c30933efc9a00f13c8de2b1d444f825141d01c27a7c0dd78": { "5c8a7b7c41cedb9f12aa1dfb4a692603fdc40391fd020d73e7415f0890b583d6": { "jp": { - "updatedAt": "2025-12-02T22:57:13.063Z" + "updatedAt": "2025-12-04T20:16:57.403Z", + "postProcessHash": "d4a917e863f5d4057c14534037a93e2f33fc2e40a3a8ab3101e8463e74f38995" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.055Z" + "updatedAt": "2025-12-04T20:16:57.396Z", + "postProcessHash": "6a52c714156422329100a2e0671df9f2a32e36bb12779c05b8c2d5daf057f8c5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.058Z" + "updatedAt": "2025-12-04T20:16:57.398Z", + "postProcessHash": "019026495245179df5194e1b2853ab8112b4f8de9bab2a6d234e9e4e34e2775b" } } }, "769f4a7a3d111208fa74381508655c4dc5d7dcae5fe2808879e68d3cdc7b3382": { "489e0fb1db1004ec357920c6836eb4613ef37b11126cdd9c08bcfd3ba4aff449": { "jp": { - "updatedAt": "2025-12-02T22:57:13.064Z" + "updatedAt": "2025-12-04T20:16:57.403Z", + "postProcessHash": "3df18d50aa6a31a16fa3524f3d7361d0ddde3c9456e4a4cf7fcae4554e64cf97" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.043Z" + "updatedAt": "2025-12-04T20:16:57.405Z", + "postProcessHash": "7d75da1980b5268c62efbd5941e9cfea0ac04e598e157868805e262a4753bb03" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.642Z" + "updatedAt": "2025-12-04T20:16:57.379Z", + "postProcessHash": 
"f55e0a46d5928629669d87c9ac1df99257979526cb8deadcdb1267588695cf3b" } } }, "79e713eaf2edf1bc512ae5d02a7d5d250a9659ca697b83603287e03063cf76ed": { "4ae0bd2c9234eb6b17182e97f10042bb3a03df6b39a2c2156858ba7f8c5537c8": { "jp": { - "updatedAt": "2025-12-02T22:57:13.040Z" + "updatedAt": "2025-12-04T20:16:57.401Z", + "postProcessHash": "f766e41611afbb3be864c8036141b6e8336ffe6448fd4852005b65216ba622ab" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.043Z" + "updatedAt": "2025-12-04T20:16:57.404Z", + "postProcessHash": "cfe26221090e4be2a3ea139be0a00cfc4f60ad97b57ce8e160128f43c391a4b5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.039Z" + "updatedAt": "2025-12-04T20:16:57.400Z", + "postProcessHash": "30f130b04f174530775eb6e40f3092da9e179ccc33221551c28fe10fca181589" } } }, "85ba3aaa892ebfeca0dd4e9f91647080ae86a451c4f6a00a9725e2f2d8687ecd": { "b2beddd5e719b038a7b64dcbb0affae7ddf832501e2aa7fafd227bbe1cb45855": { "jp": { - "updatedAt": "2025-12-02T22:57:13.047Z" + "updatedAt": "2025-12-04T20:16:57.407Z", + "postProcessHash": "f36828ed4132957545e8ca92588d9104496ffa952a6b793b7ffd13d49038307a" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.048Z" + "updatedAt": "2025-12-04T20:16:57.411Z", + "postProcessHash": "22f557405c85073bad2dd8279af0b23fb83fea31599ca36f47a848ec8e525745" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.047Z" + "updatedAt": "2025-12-04T20:16:57.408Z", + "postProcessHash": "c4e9b83d1f02730b9a2f3695376e49e4a64c67f3bba28bbcfd643c87a23faf17" } } }, "8f1cbe44d3d43c4cea34fea884586e29908abcb748f98fa025ccc41b62e45d3e": { "8e89cf7d6f4105f746591f40378eb84bf4bf9932ed4187023e334efc47a4b281": { "jp": { - "updatedAt": "2025-12-02T22:57:13.047Z" + "updatedAt": "2025-12-04T20:16:57.407Z", + "postProcessHash": "742477bd5246fead2399453b9f43a8df71492742fc2ae2ad394e9a8469d4745c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.038Z" + "updatedAt": "2025-12-04T20:16:57.399Z", + "postProcessHash": "35b28f698d25ef5eea150c389834221d829e4f600364085af9f1f39e90499e38" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:13.043Z" + "updatedAt": "2025-12-04T20:16:57.405Z", + "postProcessHash": "72c7adc4ff3a440bde11d7f132668696c22a4a789bc9f7a23324b07d0fa2902a" } } }, "a094ce3a28e694708179862da79fbac7d2795b1716246328a6d1d45989e4d89f": { "01511979759628779536c4426b3446323cd0ba908ba9e69ed46eef6c4e519583": { "jp": { - "updatedAt": "2025-12-02T22:57:13.063Z" + "updatedAt": "2025-12-04T20:16:57.402Z", + "postProcessHash": "9f0f1f7fe8cf38b788f563958b7ec63e98cee9d9bf461720e6af376290aec078" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.062Z" + "updatedAt": "2025-12-04T20:16:57.401Z", + "postProcessHash": "b83a8b159481b28677784da672c7db798a05a5b74f3e6831e8dda5b28ea61279" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.057Z" + "updatedAt": "2025-12-04T20:16:57.398Z", + "postProcessHash": "4746f5ac12eaeb708bb1a7b5d3273c7a622c248f1e62a7bf3ecd76e9ac9c2656" } } }, @@ -15032,130 +18443,160 @@ }, "52272796a3ff10b33a617542859f14d9522e98d92a2f558892a1b3822e8ba86e": { "zh": { - "updatedAt": "2025-12-02T22:57:28.642Z" + "updatedAt": "2025-12-04T20:16:57.379Z", + "postProcessHash": "73a0c49402e7ff6d7d16caf3c848de6f1b3b6d4be12b7afa4bc928d651ad5ef5" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.643Z" + "updatedAt": "2025-12-04T20:16:57.412Z", + "postProcessHash": "9f3d10460bc50db0e6525897c9d5c746895a665697cea3debaa4bf1ec3e9b901" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.665Z" + "updatedAt": "2025-12-04T20:16:57.412Z", + "postProcessHash": "16f3545071006255d0c7d618d3a3f0c04e9c1e00dda8dcd6882b4b93a275dea9" } } }, "b28fb4d49a614d643a46b4d31f46daf5e9fe6cda08176cd2f5e078a055407bab": { "4108560a1744ad0710588b9cd75e007435917814d8b73b2316426c9d931d44c6": { "jp": { - "updatedAt": "2025-12-02T22:57:13.054Z" + "updatedAt": "2025-12-04T20:16:57.396Z", + "postProcessHash": "c3436d937689b399827f3d32c2d0e1b2058e0604938f633d2f72824350c56585" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.055Z" + "updatedAt": "2025-12-04T20:16:57.396Z", + "postProcessHash": 
"98d552b7ff0d32d1bcd3bec1f62324033aae398ad57b15ecd2e643f66e9a3130" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.055Z" + "updatedAt": "2025-12-04T20:16:57.396Z", + "postProcessHash": "99e0c98707eb9dc6b98d07ec720a227b66e24ec17295ab654c2973f3eda69fba" } } }, "b626ba6a5d5d3ea8fc4f8b1fbab4067c3c422e1f441d82656ea4e1576b013f77": { "d39e1a92c96f946e67f7b31e6fa41e119a9a923698dbf319033ccb86b70446c3": { "jp": { - "updatedAt": "2025-12-02T22:57:28.641Z" + "updatedAt": "2025-12-04T20:16:57.377Z", + "postProcessHash": "0943705af4405ac9b73650bd27e524f18600f4c44fb9196fc5644bd3ba24ca29" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.035Z" + "updatedAt": "2025-12-04T20:16:57.384Z", + "postProcessHash": "95ec8ebfb49d15895d88ab953c2986e087e916d295909740cb6acc218655d1e7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.034Z" + "updatedAt": "2025-12-04T20:16:57.383Z", + "postProcessHash": "afadf5a101619634076f4230fafb7f4b88e424fce8a08d904b971de0a77a2a26" } } }, "bfdad58f0ce19b8378572771619d14adf32b34da41695f420ad03ed4496197bf": { "c5d8b4488de9c51f7fa4c711f9885ca220f45c37ba8c7062bb02813316daa7be": { "jp": { - "updatedAt": "2025-12-02T22:57:13.036Z" + "updatedAt": "2025-12-04T20:16:57.397Z", + "postProcessHash": "8b96bbcdd6e747a2294cd6045f5d96401e513bc666662fb222e04772d0ec12c4" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.037Z" + "updatedAt": "2025-12-04T20:16:57.397Z", + "postProcessHash": "2b06ef19b4159bde81908b17aa722108e7646f37d8cddd1e5b2b114bd91033c5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.035Z" + "updatedAt": "2025-12-04T20:16:57.383Z", + "postProcessHash": "0f829ac3fdacff2e78794ba36f28cc0d142a527082ee16154973b29242492f20" } } }, "cb1ba7289dde002c321160e758dcebe6637312272f6a21430a36ca8d2bd0457e": { "6d2f41b7dfc6a91c7ad657ff5eb668944436fee3888a6396625bc67d1726719c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.065Z" + "updatedAt": "2025-12-04T20:16:57.403Z", + "postProcessHash": "e9fc5aef9363c5b7546b8020f4293ab27f1e36a6775a1bf181ee3e46905a8740" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.061Z" + "updatedAt": "2025-12-04T20:16:57.401Z", + "postProcessHash": "92737d28092a02dd24f974ae7b4e82ecff09c5c9b9eb26c7b7be1cfebbd7c0c5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.056Z" + "updatedAt": "2025-12-04T20:16:57.397Z", + "postProcessHash": "e79d6c877d19f28ef3c568187ecb7ca17ef62d07dd328e2217f0fc495a87db0b" } } }, "cdbd4e3a0fcbd1a9915e133e9c7749b9e313633614596b23aedac6d6da31105d": { "184622e2d0685a2859808cd7eb92c85650ed8abc39d7a38af056d81ff2c94654": { "jp": { - "updatedAt": "2025-12-02T22:57:28.642Z" + "updatedAt": "2025-12-04T20:16:57.378Z", + "postProcessHash": "3becb1781eb00c2880b54f7aadd2b27a80f1b7fc57719d233b25f8f8195040d8" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.040Z" + "updatedAt": "2025-12-04T20:16:57.401Z", + "postProcessHash": "d9c588e44cb76a0ae70eb5351eb7f277e885c089bc74ee92a1278af3662495de" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.036Z" + "updatedAt": "2025-12-04T20:16:57.396Z", + "postProcessHash": "ddb61b582e6f10aa414c6df2b05302bb9e61332fe1827f701f7d08e054d0cdbc" } } }, "dedecc80a24539ab5ef48968c83b54eb08fdd06c15720daadff55822ec0b257c": { "5da52f81a0a0c35a9810a8ba27a1945c10ef4931f047eff638a1e08016f6bd12": { "jp": { - "updatedAt": "2025-12-02T22:57:13.045Z" + "updatedAt": "2025-12-04T20:16:57.405Z", + "postProcessHash": "b14feecc8f1b852cb2c009dc40736587f574ec2e316ae56170fca2f3537b2e62" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.048Z" + "updatedAt": "2025-12-04T20:16:57.408Z", + "postProcessHash": "e1ceb6c93a543e9dc9bca2ee51af12b2d459aef50dd3f3e5d9a9c2aa3efe952e" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.037Z" + "updatedAt": "2025-12-04T20:16:57.397Z", + "postProcessHash": "51c3a5b9b846cbbb7931d251df08fed25c8f0178213340fb80f787747c315cda" } } }, "e7ff4d7fd0bd848202048d33c9e285c0b7eaa06b86b242461c43fe7e001d1b39": { "574ff1d32ed4fa6964c51389dc9f9d35f7a76cff9623137d2922ce0856a65215": { "jp": { - "updatedAt": "2025-12-02T22:57:13.066Z" + "updatedAt": "2025-12-04T20:16:57.405Z", + "postProcessHash": 
"cf8ad016e98437328b63f3697887fc8511011083ab23a2fdf301e438e0c935b4" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.063Z" + "updatedAt": "2025-12-04T20:16:57.402Z", + "postProcessHash": "fede103933d82c1350c464340b7df3a378caf82c4771ed30610b653fe1c5e3e5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.066Z" + "updatedAt": "2025-12-04T20:16:57.404Z", + "postProcessHash": "a5973f73f01078c0a40abef244723cf6137f92d5ee96a9466957a48673a37012" } } }, "e83fb55099e0c1e7efe462a3fc836fad5d3f3480534f4512599d1bb0307a952a": { "00125ab6f5435064f526a97e752f345080fe710b1445d06711d4011db26a78f3": { "jp": { - "updatedAt": "2025-12-02T22:57:13.070Z" + "updatedAt": "2025-12-04T20:16:57.409Z", + "postProcessHash": "97523aee0daeb12cdfa381323a91f24c3440f29bc77c9c1cb4673234e60c9b36" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.068Z" + "updatedAt": "2025-12-04T20:16:57.407Z", + "postProcessHash": "c1c41d01e957b646b66e4b398333e521c716d49adf23f912d6cc61036585740c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.068Z" + "updatedAt": "2025-12-04T20:16:57.407Z", + "postProcessHash": "8d477785bb4afcb4ee19228fababfa93c64b871ab5e5ecd5db3f57614f6441f7" } } }, "022408973e8d05cf5445cbdab59b64e8bcb0e39b91c3c51a44ce2b73d2115e4f": { "6976c8cc844f9c7b24905debc03f2230b626e14b487d48182c047ccf79713f1c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.921Z" + "updatedAt": "2025-12-04T20:16:57.588Z", + "postProcessHash": "34422740a064671f687eadfdb7e461d7e3dc026915ca2c8dee3f06b3868ca90e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.896Z" + "updatedAt": "2025-12-04T20:16:57.447Z", + "postProcessHash": "f5d9afcdfeb6c89320e9442e2794ca769d3e52ac2d40b4f7b298953227af191f" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.901Z" + "updatedAt": "2025-12-04T20:16:57.464Z", + "postProcessHash": "0b5c35fffb5153111ca9823ccc33ebe92a68df94f8853aca82fd365ec49aad1e" } }, "86d2b49dce63d0030956d9394380f458d82580fccf11182038c47ae25941e202": { @@ -15173,13 +18614,16 @@ "027f426455e0e6842638722daa037b778ebc144d4ad338fe61f0710ec20e99b4": { 
"2be41a032801dbfdd8155d04445f268ba662a583888a188a0a1208a4404eea38": { "jp": { - "updatedAt": "2025-12-02T22:57:44.872Z" + "updatedAt": "2025-12-04T20:16:57.456Z", + "postProcessHash": "0a0e678b5c91cface7204697e67efd9dbe2cc34ef6c0f073f7fa5d17babbd7ca" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.874Z" + "updatedAt": "2025-12-04T20:16:57.462Z", + "postProcessHash": "7a6367e99a87330326b19440316f5814362e4501bfba306f09e16296197c06e6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.872Z" + "updatedAt": "2025-12-04T20:16:57.454Z", + "postProcessHash": "01df08c3ef6979ba7d1708e5545b501fb0903f1947d44153253c9b561b538d2d" } }, "c1078383e79ac339256601f181a193ab1979b13400901d0b702e398f5163d3ca": { @@ -15197,13 +18641,16 @@ "0819d9360d80872f0e20752e84412951fa413fcd532b41e457c8b552f0613288": { "ee4054a86f5571bd3661ca00b9fc4bb113e20c94a5b9daf823b181aa1b238cc5": { "jp": { - "updatedAt": "2025-12-02T22:57:44.917Z" + "updatedAt": "2025-12-04T20:16:57.586Z", + "postProcessHash": "203a564b2f5686ff665c9d4f9a4f03ea676c4401e1d08bd7ac5589282b36c5e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.919Z" + "updatedAt": "2025-12-04T20:16:57.587Z", + "postProcessHash": "beef53655b28ba7315813fc08af61946537b31505c56f53438f507e9237a59c9" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.900Z" + "updatedAt": "2025-12-04T20:16:57.463Z", + "postProcessHash": "f4fd2b63b42681cf5d08eec5ffb746fb2b2aa921f782930a7110b94834a832d3" } }, "d83c42928fec524182b7980ee07946e52ffe8f524b8943b83d109bf0a5e6b9b4": { @@ -15243,39 +18690,48 @@ }, "8ca0b7e819a4aa2e29ffcd483e624332d0ed852d5c4bad277c62fbd16669a10c": { "zh": { - "updatedAt": "2025-12-02T22:57:44.910Z" + "updatedAt": "2025-12-04T20:16:57.599Z", + "postProcessHash": "6817c57bac2111c8f21bc7a54412e1c846b477c12f1aae830034da47801d0133" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.910Z" + "updatedAt": "2025-12-04T20:16:57.599Z", + "postProcessHash": "0c1108ae320e3bde1df8bdedbad10e6e49ef6d698ec1fdc9e65c96dfb14bfb3b" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:44.912Z" + "updatedAt": "2025-12-04T20:16:57.599Z", + "postProcessHash": "ed82fbdbee79a71b50353299b3e487e5c7c9b14371bf68d1a1a10fec384b57a9" } } }, "2e14d7ea42f23a61da8855e77c500092cd204a036888c976b84a9a6bf71b8eaf": { "1e988897ad46c538e51b835cd9cd1cf89a4e7059611c53ec91e71868db50124f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.067Z" + "updatedAt": "2025-12-04T20:16:57.406Z", + "postProcessHash": "5a70de3a21868d0f33685c9e1cbd3c20499c2da07488b23aee4cec0a361b66da" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.069Z" + "updatedAt": "2025-12-04T20:16:57.408Z", + "postProcessHash": "1f54048f933c2fa755446e80dd029a03d74d9958199f943bec438b6f81d8c939" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.066Z" + "updatedAt": "2025-12-04T20:16:57.404Z", + "postProcessHash": "36b0bca4a8d9f88a1feaae36ab4681033873b40e89a3aff55c41c51a66717e29" } } }, "4b67c6e7bd1376012629229475b5e3fc111803b0ff98dbdc8baacee9fbf6cf1f": { "910169fac7a6c3b7cf7b852a7373930f9083cfa0a5232508038d7982a0dbecc2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.885Z" + "updatedAt": "2025-12-04T20:16:57.571Z", + "postProcessHash": "f063eb821bf8c843ac6b68fc4d95dd45c4a7874262006fb98f27859f4ad3365e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.885Z" + "updatedAt": "2025-12-04T20:16:57.571Z", + "postProcessHash": "a656ef747d84e52eb1e11f38f12634183c5bd3dc7cc59c6d3a69ec845d87df0c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.887Z" + "updatedAt": "2025-12-04T20:16:57.572Z", + "postProcessHash": "1d0fac447da092abb1828a5b01c8d9a2584fb97d728cfef2e1a8acfad2f2794e" } }, "7557e28163d56959272cc839ee4219d9744ff724198372cda0479d4869f1c55b": { @@ -15293,13 +18749,16 @@ "52ccd94aa1e934784ca02ff91c16f3972d775ebf46b09dc38022536e439186ff": { "c395705e691a1be5ddcfb6d1b262f9a0dfd9470788de834bcb6fbc5d0d8f0c8c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.054Z" + "updatedAt": "2025-12-04T20:16:57.396Z", + "postProcessHash": "ff8d566f8fe7b95932f774b9283e06b20965571e31dd35b778c8f2a28d462996" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.663Z" + "updatedAt": "2025-12-04T20:16:57.378Z", + "postProcessHash": "bac63b94714351ba38cdcb5b8c5aabb3f64efc87eca8da9e556f34b9f3cbdf65" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.059Z" + "updatedAt": "2025-12-04T20:16:57.399Z", + "postProcessHash": "668c0cc72be01dce6da43d56f94c3909fccded62e786f90c614e9b97e834aae2" } } }, @@ -15328,39 +18787,48 @@ }, "a83b5c8e07cdb86fdb1b7ff5835889bf493ce7ad49ee2789df132e1cd12e4a80": { "ru": { - "updatedAt": "2025-12-02T22:57:44.874Z" + "updatedAt": "2025-12-04T20:16:57.561Z", + "postProcessHash": "3c15b3cd4bf03510072be7016b1f0f40791dfe267edf8c2f4d49b56125541598" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.875Z" + "updatedAt": "2025-12-04T20:16:57.562Z", + "postProcessHash": "f21401216bd5bdb254ba6b8fa336080e24ccb82843548b5e45963dcda78d3391" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.876Z" + "updatedAt": "2025-12-04T20:16:57.563Z", + "postProcessHash": "6955559411063c41c089f0634b8075eef88d0b1aced4ae0ea28189ae782da6e7" } } }, "592a7f7d3a8dbeda07da824c065c0da9b3e247906e6dbf77674f6a63df3136da": { "2293abaeae3fe16820f6c7c9a37b91841e60a17efff63af19cb7a8d4a0eb2456": { "jp": { - "updatedAt": "2025-12-02T22:57:13.053Z" + "updatedAt": "2025-12-04T20:16:57.379Z", + "postProcessHash": "f8d06c65276cb1d54886f8120cf389188fc42c5830d381b602feb17ca81c4ad6" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.067Z" + "updatedAt": "2025-12-04T20:16:57.406Z", + "postProcessHash": "548042c432476bff90faeaf4805396bc3998706df207f0fde594d8e3899d886c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.062Z" + "updatedAt": "2025-12-04T20:16:57.402Z", + "postProcessHash": "c1c62ecbba81b6dfe3356750a52a6d787bee84bb4083a8ffadd450e92e030048" } } }, "59e3664663d669e021fbd29e32b23a365ecc37fceaccac1e3c9e74f070873d03": { "664e682e3d269a460d26982803f72d705695f346f7f43cd3b62de24703236061": { "jp": { - "updatedAt": "2025-12-02T22:57:28.664Z" + "updatedAt": "2025-12-04T20:16:57.378Z", + "postProcessHash": 
"b00d2ab28ea7902dbdf64ac1da6ff529f3abd363f4b42d06a4d8eb3d7d3e6a51" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.065Z" + "updatedAt": "2025-12-04T20:16:57.403Z", + "postProcessHash": "9a2eb6548297d4d8b1bc0aced0dddb963c38a7a8daa4a9e7561ddaa2da828639" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.065Z" + "updatedAt": "2025-12-04T20:16:57.403Z", + "postProcessHash": "c11365b42ed045225827854c02f5659742ae2b67b40f8c1e5aeaf3985644df15" } } }, @@ -15389,91 +18857,112 @@ }, "1da4b33ab1fa4a2985d2ee4961ae406b960da1d28a670a0f8712f7085ef02acd": { "ru": { - "updatedAt": "2025-12-02T22:57:13.050Z" + "updatedAt": "2025-12-04T20:16:57.578Z", + "postProcessHash": "683367d05d2d8953b11740d77f9388afa1365d332ff2cb642bc20fde708f6ba7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.051Z" + "updatedAt": "2025-12-04T20:16:57.579Z", + "postProcessHash": "4adfdc63a6e43f87402e62874248f1a67b4bcb1e9b9061209e06b7bbe1bc78ca" }, "jp": { - "updatedAt": "2025-12-02T22:57:13.052Z" + "updatedAt": "2025-12-04T20:16:57.577Z", + "postProcessHash": "22bbcc7c9ab9d0963910588044e740ec0482e1b2bb3b69d3a1040ddf84cd792a" } } }, "650407ab32a2947c9874bd0fc813344a1675577ba430ba4ddefb9497ceec4df4": { "ad334487bb9276e08638e9be4af54b1205755e694d6c1911d00059d8415fae44": { "jp": { - "updatedAt": "2025-12-02T22:57:13.054Z" + "updatedAt": "2025-12-04T20:16:57.396Z", + "postProcessHash": "b78dd148720eaf7b8748a92085590aa1df613e77e2761506c18876b6e04f9c46" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.664Z" + "updatedAt": "2025-12-04T20:16:57.378Z", + "postProcessHash": "c1de43aff64acd60a3cb8a3272ac4ca62790b246a95d3ebc9bdf16b9bf094a0f" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.060Z" + "updatedAt": "2025-12-04T20:16:57.400Z", + "postProcessHash": "a3ef6174a251a61b2d3838e10802de4d2028146e428af27554becfc2388e203c" } } }, "77307f3a7d1b826bb6622b0f3ffa4c1f7706494839393590234d7206bbf2be8f": { "017f574127f909641a3e7c014420c6954edb618ef3d438854515fd0f5dd1e298": { "jp": { - "updatedAt": "2025-12-02T22:57:44.882Z" + "updatedAt": 
"2025-12-04T20:16:57.569Z", + "postProcessHash": "2ae8459a89f533f52077935ebf208d1e59a72687681ab09bbe8dc2f00cc7af9a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.880Z" + "updatedAt": "2025-12-04T20:16:57.567Z", + "postProcessHash": "612d2b6d43ea19dcb2247e71b21d902a459f81caed0160cd4aa27b58aba7a1f7" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.879Z" + "updatedAt": "2025-12-04T20:16:57.567Z", + "postProcessHash": "6c629899e6563a179a946a76d3080fc7591b76028d28c64908617e464d3fdbc3" } } }, "914120dcc6c64903cecac105d4df906767aa83b440456a677f5192431cc83d6e": { "4af035b51000a041cbfd0989fe3c52f7370aaeec63d4f8ae146a2776b899fae3": { "jp": { - "updatedAt": "2025-12-02T22:57:28.665Z" + "updatedAt": "2025-12-04T20:16:57.379Z", + "postProcessHash": "69443a9a6a608d838f32d7035b90e6c73879b885a022f65bed9b7ce61ce78103" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.054Z" + "updatedAt": "2025-12-04T20:16:57.395Z", + "postProcessHash": "0a1bb4458ac06d2f3f7845813994b3e6a9daccf82040a40df21b930093c7c107" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.058Z" + "updatedAt": "2025-12-04T20:16:57.399Z", + "postProcessHash": "5866376c46b5bc0b232944cfdf32492c122afc39d5105ee62f388d1f0f904a5e" } } }, "9c50ae2540822f01de38fd832846c44e0815140836bcf8df45e61a172e36831a": { "48e37702889833007771c8e75d0ebddc5a93b178a5f5ae6c2512d72beca89b15": { "jp": { - "updatedAt": "2025-12-02T22:57:44.888Z" + "updatedAt": "2025-12-04T20:16:57.572Z", + "postProcessHash": "9b6be6fb616a6b720c5ea8ff6f6e2c8b776b74044005fd71a94ae4aff5eaf6f0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.886Z" + "updatedAt": "2025-12-04T20:16:57.571Z", + "postProcessHash": "5e9864a6a4c45617fd4b9a0fd169e952e29111d9ea28db50f922bac8692fa476" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.886Z" + "updatedAt": "2025-12-04T20:16:57.572Z", + "postProcessHash": "109dbf4fed9f5a9a86bca75d86613ac58898f5f28ac36fa1dfe9414c932ed635" } } }, "a1a93279f18aea8b2a8afde127dc919f6b9381d84fdb78e820af9fa87a4f85d7": { 
"8ef32573cad40bd5922dd07f6e65cb11c503497f1996866bd36c8bd70fdbb4a4": { "jp": { - "updatedAt": "2025-12-02T22:57:13.053Z" + "updatedAt": "2025-12-04T20:16:57.379Z", + "postProcessHash": "1db96cf6f48fc6386fc34cad42e5ce60569e68b84082f3b7655e7afc189be0d0" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.064Z" + "updatedAt": "2025-12-04T20:16:57.403Z", + "postProcessHash": "477698bc6caf43414aaa8f930f561f33cc84dac8da89fd0ab9394328c432f647" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.058Z" + "updatedAt": "2025-12-04T20:16:57.398Z", + "postProcessHash": "4e718d357e8f431ded918065b2af5ad0ddbbeeb6775fb29f6b5d02ec1338132a" } } }, "acdfde8e008fec05434d98fecd72ad26585fb764bb199b4ab8e0e4acc9186b22": { "965e0c2eb0c14ec2e1498ef8a8c93d171612158fad780e6309dd536f107ee840": { "jp": { - "updatedAt": "2025-12-02T22:57:44.894Z" + "updatedAt": "2025-12-04T20:16:57.575Z", + "postProcessHash": "e0fc94b1fb704891c315b7527d53e5d41fde3988fb4a161ef8ea849658287529" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.895Z" + "updatedAt": "2025-12-04T20:16:57.576Z", + "postProcessHash": "443a51279ac8cafddce1f9ebdda751d5f8494b91e75fff304992c20c6fcf67ac" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.890Z" + "updatedAt": "2025-12-04T20:16:57.574Z", + "postProcessHash": "89c80cef1e03ca7353483486a483c25dd322e3a8ecb5cbc9a9f0549e177e3a88" } }, "ea316ef388947a1bd8b8ce3b019ab9377bd0b52bbf557f4ee29826ea0406c8d6": { @@ -15491,13 +18980,16 @@ "b1eb514e8efc1da765f03844ec981e8df30e9e90bffe8f559550b33fcb148386": { "dc959ee4adf1fa9bcf35767f2bd90be07a3522cafad54234ab37ea828c51a0d3": { "jp": { - "updatedAt": "2025-12-02T22:57:44.884Z" + "updatedAt": "2025-12-04T20:16:57.570Z", + "postProcessHash": "4e252c961d7fbeb0c8cdfca4c7e78ed3d9a47930a490ec0fcff9ba2259ff842a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.880Z" + "updatedAt": "2025-12-04T20:16:57.568Z", + "postProcessHash": "21693b17da6c0672bdc03f490c034f47e0b6c622b12386d8cd5dc15469e81df2" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.879Z" + "updatedAt": 
"2025-12-04T20:16:57.567Z", + "postProcessHash": "8e459ea4edcd0d1973e5c7304439f49adc8b83e453c921767efe5c957a7a30a1" } }, "fe5653fd0a01cc377763c0dd39db11ab651632c5116e8e68e5b26336f447b84b": { @@ -15515,26 +19007,32 @@ "c35229fb2bf6081a5aa25c5273a6bc76f7fb1f8586da22277f9b09cdfe9c161e": { "96b4bbf5cd710c7028d1dcff43630fc1346305b9fc31fd06b6feaa5771a11a01": { "jp": { - "updatedAt": "2025-12-02T22:57:44.884Z" + "updatedAt": "2025-12-04T20:16:57.570Z", + "postProcessHash": "49c7a807f11cf5e4a39deec6f00abd954857e60fb873da829a05ab6fe812f91f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.882Z" + "updatedAt": "2025-12-04T20:16:57.569Z", + "postProcessHash": "1f45a8f2e27cd1638a125fbf219fe0c1e44ff4505c3940d54948d7f42bbe717e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.883Z" + "updatedAt": "2025-12-04T20:16:57.570Z", + "postProcessHash": "6fcb5f7c2f71607a966736e4fce375f71dc05ec6d9c58f6b658e71d6973ea6c7" } } }, "c4f683be9ee704aea456e00a9bdadd74b41b2c81a61a807e96857c1248b0f095": { "696664a1a5eef3359a3250235ce385270ef743cd6d7bc089cff42a85307e3ff8": { "jp": { - "updatedAt": "2025-12-02T22:57:44.885Z" + "updatedAt": "2025-12-04T20:16:57.571Z", + "postProcessHash": "e655fd62b4e798de954eb13fac8d265d2126b74f5e7b6c2525428141ae8ac016" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.876Z" + "updatedAt": "2025-12-04T20:16:57.563Z", + "postProcessHash": "8f8cfbc5f501bb063d9432a89723c4da07e154c6b2337840503024dc92d0a14e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.880Z" + "updatedAt": "2025-12-04T20:16:57.568Z", + "postProcessHash": "e6f5396464416b361ac3e9d6ddbbd3fb8d50cc6fb9c08499b38e8a91be49a8bc" } }, "624184c131264cdb4084e3b3202e40b83320cab7475a7b58e74d2b6244ec0c40": { @@ -15552,39 +19050,48 @@ "d88cb52cd1ee657459479ad84c5c952fbde653226d9799e31239473fa8b0fd23": { "fb9e79efbf3a2d62721e7f715f0699a0dc1f1dbc6e75db72c520ba3026346f5b": { "jp": { - "updatedAt": "2025-12-02T22:57:13.069Z" + "updatedAt": "2025-12-04T20:16:57.408Z", + "postProcessHash": 
"e46cbc9f324265d037cd050a3ca1d9137e0957d7dbe63f1d6c0247d0a900d020" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.863Z" + "updatedAt": "2025-12-04T20:16:57.575Z", + "postProcessHash": "761985e147a796c708da1413bbad05642bdde93d4a227cbef461d74106cde30d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.862Z" + "updatedAt": "2025-12-04T20:16:57.574Z", + "postProcessHash": "2088615095a8fb52ad1d5cfec71d5b15ff47b07d9ad40f176ae1273ec13bb746" } } }, "fbb5789352a952225705586e3f21b0e7e42cd17127fe8ed8e8ca218112140a27": { "19f784e7b489f48a3d495a2e1c1d68856626b21b4cedf271ef931452b7add1ce": { "jp": { - "updatedAt": "2025-12-02T22:57:13.058Z" + "updatedAt": "2025-12-04T20:16:57.568Z", + "postProcessHash": "ef14a4409c770fdd95cb1d38ba996d05cbbf0fa46a0554d1aea0a2c6889086bd" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.056Z" + "updatedAt": "2025-12-04T20:16:57.567Z", + "postProcessHash": "d7510b4310c007e2c3fa4196ac12676a3536f6c79fe0ce6fc13766b97c0f66af" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.057Z" + "updatedAt": "2025-12-04T20:16:57.568Z", + "postProcessHash": "29e9835f7a1f4812561803ebb9a2aa64870cbd95d309966ee1193f31fbb7cd3c" } } }, "123aeaa56592e54f31fc778623c345f09749d4e0e65e902af7d1a93337a425bf": { "f2e0676875f34dd5520562d2cd21b217af1b44b68311b6c948988adef7f432a4": { "jp": { - "updatedAt": "2025-12-02T22:57:44.941Z" + "updatedAt": "2025-12-04T20:16:57.561Z", + "postProcessHash": "5c7d6b3cd3264e06d8d7812df031cf9903d7fb6806ae3e0c9c1635e715368d8e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.943Z" + "updatedAt": "2025-12-04T20:16:57.580Z", + "postProcessHash": "dfca4649f757befedb12c7efa1f2f5aa52881312c7f49b06571e46d85ec75438" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.950Z" + "updatedAt": "2025-12-04T20:16:57.583Z", + "postProcessHash": "f99cc87e485f33547973c03d3091f6441b2e51e37d4d74045ecb2fd73552b8bb" } } }, @@ -15613,26 +19120,32 @@ }, "ad92da9d2ad16e7a7be4a085a208f9676b179473914f697e479282b6f12daed9": { "ru": { - "updatedAt": "2025-12-02T22:57:44.864Z" + "updatedAt": 
"2025-12-04T20:16:57.414Z", + "postProcessHash": "475e89b58d3d798adbf05fee073e0696b66bba68f802f4be5ea3ec4698baa222" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.864Z" + "updatedAt": "2025-12-04T20:16:57.598Z", + "postProcessHash": "3a233120071962c816f3e71060ad4203490e4a5ba07384ad2f1e8ee52f0f85fe" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.865Z" + "updatedAt": "2025-12-04T20:16:57.598Z", + "postProcessHash": "169f01b2ba1324707096fc2ad5d0a30b11e262fcf5035d89f000a491fe145058" } } }, "1f24f51d58cccfdaab17312855078466a67ec6632bf8534638b69f8f5f3551c5": { "ac3de3782a6dcd627cb900e0e3c325463324737e43db6385a4a9edbf6ff7796b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.940Z" + "updatedAt": "2025-12-04T20:16:57.561Z", + "postProcessHash": "7edebb5b4455ee9e9c1089fc1ae7fd5c58c90de7e9b2131b29c3d5a333bb5413" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.954Z" + "updatedAt": "2025-12-04T20:16:57.586Z", + "postProcessHash": "d3d92aaead95731c99241f5a26b9c413958c08d2cb289c50e73d5d7b7da3f96d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.948Z" + "updatedAt": "2025-12-04T20:16:57.582Z", + "postProcessHash": "f748f662fb02e4ad6a394569991a95594d4a56980bf9aa1c82d296dc46583545" } } }, @@ -15661,26 +19174,32 @@ }, "815d4a3898271eab559a7f43ac7696375da4ce6aed7381829f6d915f3c27b4b1": { "zh": { - "updatedAt": "2025-12-02T22:57:13.048Z" + "updatedAt": "2025-12-04T20:16:57.577Z", + "postProcessHash": "17065de62ed7b44e4b1698bb40f7a8779eec4078664cb56e1a186d9fa38532e9" }, "jp": { - "updatedAt": "2025-12-02T22:57:13.049Z" + "updatedAt": "2025-12-04T20:16:57.578Z", + "postProcessHash": "47e9915b48ff1af285e05289639fdeeb245d586e0560581915c6df8adeec10a7" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.049Z" + "updatedAt": "2025-12-04T20:16:57.577Z", + "postProcessHash": "e6e78e1fe156ca834198b55ab4e07894a30fb985d6ff24d0acc95b01baf0a639" } } }, "3a83cb18dec6067fc17dcd4bf9d92d724df7894996965a2aa6ddadaa218d8377": { "afb31609bfccd3793dc26f3946f0dce0c3f4dbb5c1c5a18bef98f4e67be74fcb": { "jp": { - 
"updatedAt": "2025-12-02T22:57:44.904Z" + "updatedAt": "2025-12-04T20:16:57.560Z", + "postProcessHash": "cd1ed7b544e93ca41658dee9f5fd40edb44c3411c62fce4dbf2847db3b8e7b90" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.942Z" + "updatedAt": "2025-12-04T20:16:57.564Z", + "postProcessHash": "46436804b988a5f3ed185d8462f40bd61e86a293678165f0d5c07718c72dfdee" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.955Z" + "updatedAt": "2025-12-04T20:16:57.586Z", + "postProcessHash": "21ac8d5b9e5184b27a27b18df7e465f3ccc7708aa1d8326182d19f6956cade85" } }, "246e677fdb1037560ce0f99220806100065ce49a0a719ec84b0ef40a87caadcb": { @@ -15698,13 +19217,16 @@ "4db1e2e4946307003f6c8e7296d88d16ea1fa0a50642705d3f4a2f6130b44a03": { "1cd9bda536f7de9e39e1b25889cdf102824eed5e8c98879a779c5ff86145b27d": { "jp": { - "updatedAt": "2025-12-02T22:57:44.866Z" + "updatedAt": "2025-12-04T20:16:57.393Z", + "postProcessHash": "974e52b34d8585fc84fded7280ef420ade662eb89ae735facb188cc41eb4bd66" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.869Z" + "updatedAt": "2025-12-04T20:16:57.395Z", + "postProcessHash": "33f8ae6f975657782d5abf8032d00bba21ddf463fc4ee8a87e00f67374efa9c1" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.869Z" + "updatedAt": "2025-12-04T20:16:57.394Z", + "postProcessHash": "8d266e0a146bdf6facd693ed6fb03cfab00acbccfea70810ffd15cabc5091731" } }, "c65bebaf1409b6811c25b61ee9e1a29774f1a9a74497375c4b00bb9357be3fa7": { @@ -15722,65 +19244,80 @@ "573d715ca8095f0e4ca44d1cba02fd75a74bbc9c173567252833684110e7eed3": { "87c69d03f3d553568b16a72f5fe7243c7fbedec0f55aa5c55695e0895009d96f": { "jp": { - "updatedAt": "2025-12-02T22:57:44.896Z" + "updatedAt": "2025-12-04T20:16:57.449Z", + "postProcessHash": "1a1b645f85716ca56b4e7a21e38b923e5f9ee9556dd9228c9fb13decc1d6be90" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.867Z" + "updatedAt": "2025-12-04T20:16:57.416Z", + "postProcessHash": "e19a3bd005afc2a50da054b81abed593fe67d74a6e6f3726bf853a9ff030e8c1" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.873Z" + 
"updatedAt": "2025-12-04T20:16:57.458Z", + "postProcessHash": "7fd6d5a2c64e3f3d95277021480ab28791ccf58c6217c776d5c7ab62921aef70" } } }, "6127321ac3891bee9f802edc9f97eeefd28aa0d40a647d0fa4cda55abfce14ff": { "d3499050f8c6e7b0a1bd1cf5e8bb8e940304335d153d81d9717b6c21c16c2985": { "ru": { - "updatedAt": "2025-12-02T22:57:44.915Z" + "updatedAt": "2025-12-04T20:16:57.567Z", + "postProcessHash": "8ae46671d3b105490e167d3a87c01ee84d49ee596d8aead76c92f067bc37db6a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.907Z" + "updatedAt": "2025-12-04T20:16:57.564Z", + "postProcessHash": "117f3f05038b7f990697c7e5ac9c82064349ab5de59d1e7c1df99e016f0b4bce" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.897Z" + "updatedAt": "2025-12-04T20:16:57.451Z", + "postProcessHash": "0e72bd7ba5dff39be4c28e8d1e5a311b45f50efe576fb22425b0d6b91af96464" } } }, "650d9f2cc9a940fe5940498f6e144305c01bbf36d3ee2dc4bbd8968c9f8967c6": { "17de42c037b1a363aacffaae4c43b7e7c471839ed6cecff05326ffc1616e8599": { "jp": { - "updatedAt": "2025-12-02T22:57:44.868Z" + "updatedAt": "2025-12-04T20:16:57.394Z", + "postProcessHash": "683b3137fd6d5f8f2ad3a2279a693fe3916216d7fa262a7b9b0d55330bfd7f85" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.871Z" + "updatedAt": "2025-12-04T20:16:57.576Z", + "postProcessHash": "aca7860b85451932402e25fd342e73f9cc1e6eb90e58a2dc9526fd94efc757c8" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.868Z" + "updatedAt": "2025-12-04T20:16:57.394Z", + "postProcessHash": "a13b4669e710eb100e21d1d41b22517add88a7e4a67b8880d7eae0234fe2c6b4" } } }, "6813da4ad4c4af5afb1c7574805fe2dd8caa6c96f485a82e9c901ef475f08fee": { "b0517d0f55cd108acdbbe709883cd25fbda01a6703d9b51ff50bd2116dae6e4b": { "jp": { - "updatedAt": "2025-12-02T22:57:44.889Z" + "updatedAt": "2025-12-04T20:16:57.573Z", + "postProcessHash": "9a7953083c75ea58505ac2b189ed5a659df72ce0557a628340c2d6d8aebef006" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.888Z" + "updatedAt": "2025-12-04T20:16:57.573Z", + "postProcessHash": 
"7663c76f0b8df2131d956db72a0e2fa03156a59295711221b82957afd5b0e382" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.890Z" + "updatedAt": "2025-12-04T20:16:57.574Z", + "postProcessHash": "c8fa2bc97435a0d2801c93aab7d4f9ad62a15c6f727a43f977e586c56c94f5d3" } } }, "6e6ac7b2f451e9b54c142890ff01e2a80eb23b0ffd7b7bc15c34b94f9502bf82": { "29539529c90b3e9ad079683127ad89b005ba24b80d2ef9588cd0a3e6f7d4d74d": { "jp": { - "updatedAt": "2025-12-02T22:57:44.873Z" + "updatedAt": "2025-12-04T20:16:57.462Z", + "postProcessHash": "432b8cdc0f7f934b8e5a6c86ac707524f640bbffe8d341ca3bd71c1ec7950d0d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.871Z" + "updatedAt": "2025-12-04T20:16:57.395Z", + "postProcessHash": "88f899de044e299b047e413b118c8005a05fc5c6c250744447d61293392906cd" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.868Z" + "updatedAt": "2025-12-04T20:16:57.394Z", + "postProcessHash": "90032ce3f0f12af0f427695c74addf5a4f22262f8dfdcb2662a1ff3fd0ffa412" } }, "8f32e4aae111f92315bc93e0ccdf602c223cf64f5840140b6501f1f14e174cbb": { @@ -15798,13 +19335,16 @@ "6fb070f1b02c940c98234a8aaec25f6c6469691d330c72faa861b07763ae4725": { "15b7711f236d6afbc710de3e96b101bbdf132f753a46229174d84f106446a055": { "jp": { - "updatedAt": "2025-12-02T22:57:44.926Z" + "updatedAt": "2025-12-04T20:16:57.592Z", + "postProcessHash": "328e65509b3b525d59af7353a46e30210fcfd69f1856f6e010fb2562d8ec2416" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.925Z" + "updatedAt": "2025-12-04T20:16:57.591Z", + "postProcessHash": "f9b192dcd2780c161f52e17b680aee54e64d4fe73cf0a7a8a2f5bda8503b6b39" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.924Z" + "updatedAt": "2025-12-04T20:16:57.590Z", + "postProcessHash": "a92606040cc98821ffb8227b5d981abbe63ee1c16566bfb56ddf46cfd492728c" } }, "355de205d2e66f18b00c07141db16e3eae08111fa3207ff29e5d7e2db19cc526": { @@ -15822,13 +19362,16 @@ "9d8c420729f6dd40353fd0b37376eb59e28f1b3a71685df761a9e2ad46f35ca4": { "a0b2512b24b1839bef32291210747feaecb9e749d4b925cd5fa1e21347e7f49b": { "jp": { - 
"updatedAt": "2025-12-02T22:57:44.867Z" + "updatedAt": "2025-12-04T20:16:57.417Z", + "postProcessHash": "b4a82e239b778ef26fba270d85b9d8d3ebedc369b5a050b858193f5346f328d5" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.900Z" + "updatedAt": "2025-12-04T20:16:57.464Z", + "postProcessHash": "2341f430e8873ebae8f7c1ae8ed81cff33c71f82ef357a2ed2242552271384cf" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.926Z" + "updatedAt": "2025-12-04T20:16:57.592Z", + "postProcessHash": "58dbd634335f69ae5a82f14acdcf801d7719d672b51a939484625e6e66e29646" } }, "74b3411891901f287f4f0295c14cd3b703d6197988dcd91f4e53985964af404b": { @@ -15846,13 +19389,16 @@ "9fe9b6ce42a6ad2189bab2836ba94c9f99886df803b81bdc3dec38815dad7c26": { "2a6580470ab1e345d52a27c96f69c6e94d335299083f18b83f4f16b1913c6ee0": { "jp": { - "updatedAt": "2025-12-02T22:57:44.937Z" + "updatedAt": "2025-12-04T20:16:57.593Z", + "postProcessHash": "684cd30fef45b00527cf34276d31263d547c99d080bb411e047f484fc2ddf118" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.936Z" + "updatedAt": "2025-12-04T20:16:57.592Z", + "postProcessHash": "6b027add4d660f791cd652609515c4d4c5cb2dcaa61e2d84830c9a1bf265478d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.936Z" + "updatedAt": "2025-12-04T20:16:57.592Z", + "postProcessHash": "7717a3a43c708e3f13638d6bff8c0aa0f2be7c3ab0469e14fd47dd5a1ae83cb3" } } }, @@ -15892,26 +19438,32 @@ }, "07af586610511b700c76dae0ed1ab3334a12925f5399f3094ee65f2db97bfe4d": { "ru": { - "updatedAt": "2025-12-02T22:57:44.878Z" + "updatedAt": "2025-12-04T20:16:57.567Z", + "postProcessHash": "5da1407d3c90732baa956b1e01fe23433219dab183be642242e0c47e574aa23b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.878Z" + "updatedAt": "2025-12-04T20:16:57.576Z", + "postProcessHash": "149f19a2f10bf656408c2031fba812e2459c59ffff929dea4c35e440b6e36f50" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.877Z" + "updatedAt": "2025-12-04T20:16:57.566Z", + "postProcessHash": "1183a298d421db9b97e21f4f01d01257c56c0cef6b8974e5abbd6aabc6f292c0" } } }, 
"bca19f630581f8646ca04081842168a1d45e2ea5896cbdbab33c160594c627c3": { "aa51d28dbbcf7446d6500201eb8ea20726fcac5ea9bad09ff5db9afdf1821a3d": { "jp": { - "updatedAt": "2025-12-02T22:57:44.873Z" + "updatedAt": "2025-12-04T20:16:57.457Z", + "postProcessHash": "24b04f934633cd338abcd7c1d59a4da2241e50eb8f763e70a5f205127d231aca" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.870Z" + "updatedAt": "2025-12-04T20:16:57.395Z", + "postProcessHash": "1381a49b2370264fe860d4bef4d729b576e853672d6d5cdcfe01e15f4cb13440" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.866Z" + "updatedAt": "2025-12-04T20:16:57.394Z", + "postProcessHash": "5b983b5fe49b6803c209bdbefc96070ef0273e8491209688cde56567c145f99a" } }, "0e3c63c854cf8f55abf51be5d8395d72aed010f11ba09ea870f1dd42d4d16794": { @@ -15940,13 +19492,16 @@ }, "19dacaa66e2b0be54bc80666d5dfdcda3f46718eaa102d310792ce9e3c21f2f0": { "ru": { - "updatedAt": "2025-12-02T22:57:44.865Z" + "updatedAt": "2025-12-04T20:16:57.416Z", + "postProcessHash": "b65cbc40e656a1470b9ae06d6a61a7b5f6c4d659cae1cfefed139d7454780244" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.905Z" + "updatedAt": "2025-12-04T20:16:57.564Z", + "postProcessHash": "902d75e3f559482a8ff0ce2277e0ffa52401905b87e42266ed1358622058c9d7" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.907Z" + "updatedAt": "2025-12-04T20:16:57.565Z", + "postProcessHash": "69d83920c99d8af74182147219d69109d40215ce4b8767cad5df5e66ce84ed93" } }, "957fef1200e01d1d2a8bc6b685146a1418c4d936418ddfe9ecb18479516293d4": { @@ -15964,13 +19519,16 @@ "cf53b09fb0c34e1e63e41a10d6bc7a6922adc30f419e11b91aa28c4b6550ff94": { "3e1547b9c7d4792eac2ae0c79197b7137e4dcadce9e0b2767bdbb3facf5009e6": { "jp": { - "updatedAt": "2025-12-02T22:57:44.883Z" + "updatedAt": "2025-12-04T20:16:57.569Z", + "postProcessHash": "2ce61454744e23274a05d6b4815ac7942d6815c8261ace32862bef2bee12f1e0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.886Z" + "updatedAt": "2025-12-04T20:16:57.571Z", + "postProcessHash": 
"188a83c14108290b8534023237455ddfa401b4ef6e7ed80d416661d2ffbe50dd" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.879Z" + "updatedAt": "2025-12-04T20:16:57.576Z", + "postProcessHash": "fe1bdd270c33ed5d42691f074c8718ec1e3f45a302abd781c02f730de9e7b34a" } }, "b2680ed1949ad3d0db6340dcfd927b99beee508808edbd641c4f0f3589bb32ec": { @@ -15988,13 +19546,16 @@ "d85a58d074e13f650fae5bc844462e82b569a15037cf4beb81c7fc31334227bd": { "50add6bcb155ef82dcd92ed736aa66054a116b26b9fb26de8e9ff13cfb7af34e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.897Z" + "updatedAt": "2025-12-04T20:16:57.450Z", + "postProcessHash": "31401254a90e9a4ba6b81c12a19957ff66b8efa4a23d8655d68f7fe7d96c1ff9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.923Z" + "updatedAt": "2025-12-04T20:16:57.590Z", + "postProcessHash": "fb5aba312333cf2c16cff49fe32d651beb65dce5ac661fdf73dc3a9b2098fbbe" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.922Z" + "updatedAt": "2025-12-04T20:16:57.589Z", + "postProcessHash": "3096f5428c08683e4688608a0630057893887db5d08a4cccd3cba50efec1f19b" } }, "b6d6c294393317eabba321f888c00ac842ed017623140b48620c5b77ecf9538f": { @@ -16012,13 +19573,16 @@ "e014a958a8137fc765da9797a531683aae1075024018fdd2793c345a9ea2837d": { "a3692c0caea63dccb572f30b9f84021d898cc0b99e942bba8475e5cddd746e9c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.870Z" + "updatedAt": "2025-12-04T20:16:57.447Z", + "postProcessHash": "f2030f357dc5d21928ab4c442ed0cc7aac717f6678928f297a995e4e5912a4ff" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.877Z" + "updatedAt": "2025-12-04T20:16:57.564Z", + "postProcessHash": "cc74e5cec84e4d9a2a71f19472d47247e366bbc10d224789d2529175e1781283" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.881Z" + "updatedAt": "2025-12-04T20:16:57.569Z", + "postProcessHash": "964a134b825ff1b664c6ac0caff8494f2f74b2755a8c77ee0978ce76e075120d" } } }, @@ -16047,52 +19611,64 @@ }, "18448ec82189a3346ac5ecc5d92a8a578107f606339f0aba3af2bc08a257d424": { "ru": { - "updatedAt": "2025-12-02T22:57:44.912Z" + 
"updatedAt": "2025-12-04T20:16:57.566Z", + "postProcessHash": "1003dde3ef519318fe17ff3a5f9d7dc0831e4bce3cb6d0adeb302dce50c603e3" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.913Z" + "updatedAt": "2025-12-04T20:16:57.599Z", + "postProcessHash": "9a6e74d157e3ef2377720bef046153d7a8c82d723edd72a65fb039394fefb67e" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.914Z" + "updatedAt": "2025-12-04T20:16:57.598Z", + "postProcessHash": "e35f79ec6e9e4aac6d91cb8fa0fff98981ac260e6bf44da146f71550edb7bdbf" } } }, "02fec6942d40034d750c654d9c675a575f12b3a87ec90a6e3786281d265a9b29": { "f8983bc303673b5b9632c8a2f95602dd3f90803ac3e493ee4ff7244ea4b98790": { "jp": { - "updatedAt": "2025-12-02T22:57:44.952Z" + "updatedAt": "2025-12-04T20:16:57.584Z", + "postProcessHash": "3e60d91065001f6e17a773783fa77ef356ed382d29f6d5e34a40edf4972f28bc" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.953Z" + "updatedAt": "2025-12-04T20:16:57.584Z", + "postProcessHash": "e2870d4616cc270d794bd62520f7e08ed3a170667b50110411a2b00528730e4e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.952Z" + "updatedAt": "2025-12-04T20:16:57.584Z", + "postProcessHash": "7824ea90fbaf03f67e14c4cbe1b6e44286943883cc5c866377e0a0c069ff0d6d" } } }, "0393512198efa57d46b32a113a35375ccd26518fa34d3bbabef4214d4fb8b53a": { "8103e61160aa52995bd2806ebc1f5871330feb5a4b2c8de0e9221fa8a70d1ac3": { "jp": { - "updatedAt": "2025-12-02T22:57:44.969Z" + "updatedAt": "2025-12-04T20:16:57.602Z", + "postProcessHash": "2709dc57a05ea43c2290d862b72d1372b6bbc798278868ef8d98eec7f783e352" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.974Z" + "updatedAt": "2025-12-04T20:16:57.652Z", + "postProcessHash": "8e846e7c412d4a1949fd30c043582de544a54c597a24c852d44c1990bb846928" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.968Z" + "updatedAt": "2025-12-04T20:16:57.601Z", + "postProcessHash": "6574bc502e943aa9a1560f72ac5b888a9c829d19b23d5aa7a04d8079340b8f99" } } }, "0c5a65f577c71fbc834405efc189e3c50da0f84a64b7f1b1ba76d9fa8e7a3e9c": { 
"2d31634c588cb2805bebfc13a4cefde978ae8d078f32a88954c1ee076a081d1e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.954Z" + "updatedAt": "2025-12-04T20:16:57.585Z", + "postProcessHash": "b43498141ea471dfd51aefc5c5f31c91ded72964674e91dcaf2d160d7f483fa1" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.949Z" + "updatedAt": "2025-12-04T20:16:57.583Z", + "postProcessHash": "4ae9c38bfff868de076cd1b81a7d76fae7ea7599eee6c88276a9d1fd6b52cf72" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.954Z" + "updatedAt": "2025-12-04T20:16:57.585Z", + "postProcessHash": "6347eba96c167f12ec493abd68b3b4cd4e1dbd24405f54daf5b42f156410cead" } } }, @@ -16110,39 +19686,48 @@ }, "734cd9155fefd740d8d08b0c076d7ff45e66e547c017603481aa8c2733e38771": { "zh": { - "updatedAt": "2025-12-02T22:57:44.941Z" + "updatedAt": "2025-12-04T20:16:57.579Z", + "postProcessHash": "a25e2d85dfb298991468ab8b7de56d743bce6d5d0af3a06713f22317ae5140f1" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.942Z" + "updatedAt": "2025-12-04T20:16:57.580Z", + "postProcessHash": "d490510cf4311532313f89d50c42ea55246f32ae1b3b8f7f594842387d13e8cd" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.965Z" + "updatedAt": "2025-12-04T20:16:57.580Z", + "postProcessHash": "9b712568d10f607f8f856ed264ff9cc47a645868d117f9b45f6179d21aa08da0" } } }, "16ea5fa75d5d08e032a72f3d2f70dfde100b84192a3a87d58596c7a636e73d4a": { "08b83c6534ed2ed43f2e271298926bbac6bd7c4e552372271ab8f870588ce545": { "jp": { - "updatedAt": "2025-12-02T22:57:44.953Z" + "updatedAt": "2025-12-04T20:16:57.585Z", + "postProcessHash": "637b043ea75b061ec56f2e0c6ffec2ccc66175cdbe5eba33ad7fea95b7429cec" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.949Z" + "updatedAt": "2025-12-04T20:16:57.582Z", + "postProcessHash": "45d4fe7ad00e9c2042bc777467be0483492c8e69be5490be29e8c3187cb7fb74" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.955Z" + "updatedAt": "2025-12-04T20:16:57.587Z", + "postProcessHash": "51c268d231a10015a509c2e87ac5f3443c767a72fac17a57ce1ae3cf81ec7915" } } }, 
"3db136efc6f866a03d8de23b61d3a9177ba22c082cf115237fe396e7ac9e917c": { "f41c454a9d7c667418cb2226d0dba5f50dc0d9a0d8d91fa03d85e6fb89852843": { "jp": { - "updatedAt": "2025-12-02T22:57:44.908Z" + "updatedAt": "2025-12-04T20:16:57.565Z", + "postProcessHash": "eb4800db01f105d4702bb04a8aacde77c099ca5af2e30a61d219a4e7f8b4e57f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.902Z" + "updatedAt": "2025-12-04T20:16:57.546Z", + "postProcessHash": "795c5e6265166ca294a9cbc9f7e98445bc11f9b1eab55b129428202cc70ac784" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.898Z" + "updatedAt": "2025-12-04T20:16:57.459Z", + "postProcessHash": "53eec29eeced600ab198b2fce0d200b18118c83a047d4dcc98ed76af35c25fee" } }, "bc07f19137cb6dabf76e068e43903d8f0c0d4a5fd3ef5e4c48ca713d51eae844": { @@ -16160,26 +19745,32 @@ "3e1a6a2d9604853fec0f6b9c21e1534bc36ba5880d4042f71f1d9a03ff9e0c74": { "50a43ff5465e5ed3b333a2938abb5b5a0fe5d616b29d9f1176535339c755b45f": { "jp": { - "updatedAt": "2025-12-02T22:57:44.943Z" + "updatedAt": "2025-12-04T20:16:57.580Z", + "postProcessHash": "5a0705618a01eb3b4a560325668cb43ca04563087260896a31eeb025ca6f02d6" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.942Z" + "updatedAt": "2025-12-04T20:16:57.563Z", + "postProcessHash": "0b026ba47a86d82886a082357463377404f5e92f5eea6770e50622990f745dd0" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.944Z" + "updatedAt": "2025-12-04T20:16:57.581Z", + "postProcessHash": "89e9f6a2cdc98325fba5f0443f7f556668c96c5584fe2ad96f3a6d182b2a3322" } } }, "3fac7c056527b2656c82dc06a3baca18af73cc6545b9cbc229c03280f867b18b": { "a946c1d0dfa7b8e0e8021716ca512f6f92c6c72a0617c7843fc9f56182195582": { "jp": { - "updatedAt": "2025-12-02T22:57:44.904Z" + "updatedAt": "2025-12-04T20:16:57.560Z", + "postProcessHash": "971dc060decc336bdca43a61598211065f5020d06c67faeb55effb8215883783" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.920Z" + "updatedAt": "2025-12-04T20:16:57.588Z", + "postProcessHash": "c3e98a59712656d8ba2955f90171272a5c02cd6e19efbbd3aebb26205829c40e" }, "zh": 
{ - "updatedAt": "2025-12-02T22:57:44.921Z" + "updatedAt": "2025-12-04T20:16:57.588Z", + "postProcessHash": "35a2b62726e14991f5c58824534b1f55af3f55295d0fc1ae0f67ed9bfc8dbace" } }, "955b894f3bdf01356a56ddde8e74375c28f36a9ddacf9f8cca1d929b82dc3c8a": { @@ -16197,13 +19788,16 @@ "5be58ce97a5c915ff2d4f6bb0a603580ec8a37cc97e4e9b54ce41df65adbfd1a": { "e6df66dfd56c3a5f3a55edad9124b2138f3c2c2e0aeb7207d62e987015a6ea34": { "jp": { - "updatedAt": "2025-12-02T22:57:44.938Z" + "updatedAt": "2025-12-04T20:16:57.594Z", + "postProcessHash": "f7fb02df5c0da10a8686c9777e604e79d092494124cf3bae3a1f931a28ff4b3f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.938Z" + "updatedAt": "2025-12-04T20:16:57.594Z", + "postProcessHash": "7ea2f561365da54c629c39a5bfedd424e50c5631f96abf7992d79d763486c786" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.904Z" + "updatedAt": "2025-12-04T20:16:57.560Z", + "postProcessHash": "ce3a40af1be83d79bd86d75fd2f91b8babb223a386c9f67bcc0a9b551f96440d" } }, "8c7e75be7714da47b7c687d7067a073e9ba05d82b6595b598a376227cab0ee4c": { @@ -16221,13 +19815,16 @@ "5eb08e96fd1bc79722d094e6a779abcf8a842d610d831653012ca3687bc9f9d7": { "abb8bce9e0621c2ecf30f9505bbef2d716584c9ef4ba432a4e47ade831354eea": { "jp": { - "updatedAt": "2025-12-02T22:57:44.958Z" + "updatedAt": "2025-12-04T20:16:57.590Z", + "postProcessHash": "588890dae19b3dc964f3d11131386b9d7fb0cc599d1cec1214feebf071950a5f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.957Z" + "updatedAt": "2025-12-04T20:16:57.589Z", + "postProcessHash": "fbd9c968c151f7936b86c264bc84dc75f2a64498b3bba499fc03e762e8dc1c11" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.956Z" + "updatedAt": "2025-12-04T20:16:57.587Z", + "postProcessHash": "9dcced3e8753b1fc29339eb17be589baa787bb747ac9cba6da4457f86521448e" } }, "a7faed462f96219d02aa7307ac7bc7935bca6700485c60f90a7438b05da3f66e": { @@ -16267,13 +19864,16 @@ }, "4daeb6b23031febdfb1f2258c7c2480bea8af82a83a813aaa85011a7c02617ee": { "jp": { - "updatedAt": "2025-12-02T22:57:44.947Z" + "updatedAt": 
"2025-12-04T20:16:57.597Z", + "postProcessHash": "94fbca2a335294316cd92d2bb4df8492711e02b4b4ff1b47a8372fe27bf1f3b8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.947Z" + "updatedAt": "2025-12-04T20:16:57.595Z", + "postProcessHash": "b6e65c5c53c1be3ff4204dd51388094bd8fdd108dacb6523c9a51c8c5cbbc3ef" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.948Z" + "updatedAt": "2025-12-04T20:16:57.597Z", + "postProcessHash": "f57fb22c18eebeb60276974bd48d570560679875944c86f6caceca399649131b" } } }, @@ -16288,18 +19888,35 @@ "zh": { "updatedAt": "2025-12-02T22:57:44.935Z" } + }, + "6baf22b6178fca69ab2233a6185e1332af3dc95190818076a2f12b4b73a673f2": { + "zh": { + "updatedAt": "2025-12-04T20:16:57.600Z", + "postProcessHash": "c4d182944099cfa257e1b08feead9ffc3da7835e6bd603b6d6d6082d4405ae2e" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:57.600Z", + "postProcessHash": "a66e5258c11a392db59a7afd9f75f6e99b7fe6e4706d905a04c54736f906cd80" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:57.600Z", + "postProcessHash": "46610f4d9e525108e0657f77687e25efef1484cbff8256fbf3a9838cd8031cd6" + } } }, "98a2ade41bd4731a099f797edd596fa78793203056404b35f51d527b8f16a36d": { "ab4ab861bb9e5302cd16890ba55359555f2b962726d92b750a912cb146221d73": { "jp": { - "updatedAt": "2025-12-02T22:57:44.924Z" + "updatedAt": "2025-12-04T20:16:57.591Z", + "postProcessHash": "9c36633e4281107e9db95736c6e1dd8cace26ee02c15f336009b2f5310855e9b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.898Z" + "updatedAt": "2025-12-04T20:16:57.460Z", + "postProcessHash": "9f5c7a03c93953d2fec624c45b14c3ad86d1cd83d14dd7666316874df60875cc" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.917Z" + "updatedAt": "2025-12-04T20:16:57.586Z", + "postProcessHash": "b6bfe7e3cf4e93efb7143387c7ee34be6a0320586781e4d40a6c4ee4dd65a066" } }, "c6165f3ca7b3425b951b4a1571401725d547adf52dc6626c445215fb9218c8f1": { @@ -16317,26 +19934,32 @@ "98c18fb7bc391069017a8197ca9b4b5c5a8218b2cc79f1210a3aba08ce470c6c": { 
"81814115cad79ea901cacf1a4876697b9b219a7ce07476d4edac8f5cfb5017fe": { "jp": { - "updatedAt": "2025-12-02T22:57:44.963Z" + "updatedAt": "2025-12-04T20:16:57.595Z", + "postProcessHash": "9c9d2c22e8f96283914a1aa3c9e3b962711f10497ce1f5e6debf4c3562ece170" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.962Z" + "updatedAt": "2025-12-04T20:16:57.596Z", + "postProcessHash": "a5cf88c06c16151ea880166ba833d84d23d4c171273266cd5fdb7e2ef98eae3c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.903Z" + "updatedAt": "2025-12-04T20:16:57.559Z", + "postProcessHash": "c9a76014fef5ffccb1cddd49e6ad55227871e79c065d22a4eb035f889ce9dda8" } } }, "a005818ddc766e865af1ed1831655cacf269df57fe20666970c25aa1d089714e": { "a21353177e0617a8a2423d2339da6a2715dd92734a60da91f44354edb7bb6df3": { "jp": { - "updatedAt": "2025-12-02T22:57:44.925Z" + "updatedAt": "2025-12-04T20:16:57.591Z", + "postProcessHash": "7a0b812c32d2b502a1a9814574908f4967f6f73547ca97f55bfd15788c70819e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.909Z" + "updatedAt": "2025-12-04T20:16:57.565Z", + "postProcessHash": "680b8b14cca1d1e74875907615016a1a70d845b6f9ac965f463746d2d1256d8a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.935Z" + "updatedAt": "2025-12-04T20:16:57.592Z", + "postProcessHash": "0c5069d6624ba2441d3756c401903b94b4f0adc1f6b9c33a4cbbd55a940c0a5c" } }, "6b17506439ccc3610f6d989dbfd30e8ceef573be8f80fb3230ad3a6b4a276542": { @@ -16354,13 +19977,16 @@ "a5f04cc970babcbd17a73219fd4d3f1d299602d839f96c355b2d5ca53d5cee5b": { "23193e28103b0087a2e3c305ef54301cdc3541ea5b77ce29af8eb04a17fa6f4e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.897Z" + "updatedAt": "2025-12-04T20:16:57.452Z", + "postProcessHash": "fa4a09c9cd643edd4450462b32be7b5ab038ed1230e1cb8d0ff68c40edb641e9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.903Z" + "updatedAt": "2025-12-04T20:16:57.559Z", + "postProcessHash": "84d2b1b9e4f778b2bba2dbd55afdbcde6a5a407f8b9969b949ba4be776b761a7" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.920Z" + "updatedAt": 
"2025-12-04T20:16:57.588Z", + "postProcessHash": "be76dede8b617ccbbe433ec0f13b2733f9a35b6e11ec2027816e492096897469" } }, "e965cf783f03102b23d52203220a90ea4ad4eeda8ea356dc2888850e3a1ee83c": { @@ -16389,13 +20015,16 @@ }, "b5127f46fc8d65c84a16ddba167f617184476bc3c5db98f881b3968229929e19": { "ru": { - "updatedAt": "2025-12-02T22:57:44.905Z" + "updatedAt": "2025-12-04T20:16:57.564Z", + "postProcessHash": "1f6592afa5325d047a92d4005b0e8954eea03dfd0bbb1e854273552aa3519b79" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.908Z" + "updatedAt": "2025-12-04T20:16:57.565Z", + "postProcessHash": "80742ca8aac8df33e9190d562858c19349d861d37a78462cccbe9c3e77089ede" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.909Z" + "updatedAt": "2025-12-04T20:16:57.597Z", + "postProcessHash": "df9346336aeba936833c6ef50938380f2563adbf96e760e1669caa2c5e57179a" } }, "8b70f77a580ae511ac0bf35f88454f6df63aca38b1be27503cffe5bd9b3b0d0f": { @@ -16413,26 +20042,32 @@ "c43792a75d02793708f0f9c298dd1e81a2db715e26bb86c9a3a5e14f34e785c4": { "76526beb43a3126f9cd6e8837bdfd7a2b5b294aba899560796a163b8963fb64c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.960Z" + "updatedAt": "2025-12-04T20:16:57.593Z", + "postProcessHash": "cafcad036089c03679ccb094517a20ba63b22b4276a4fd5c4ac88148e22af841" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.960Z" + "updatedAt": "2025-12-04T20:16:57.593Z", + "postProcessHash": "5cd145a6775e7b9ae1d044606d92ed13830db6f9f0b743c0697f9b9e07111c00" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.961Z" + "updatedAt": "2025-12-04T20:16:57.593Z", + "postProcessHash": "75165481bb165e4bdcda4a00a71ee5986e4c55ef64a22e01ed0d279f8b381fcc" } } }, "d34da7fe047d51a78fb46b67d8b9e61beb3565db263ce630e02ee4fd1c3a415f": { "de2fd7fc7b86f2b80b1c08bd8bb73d8dce37227d7b14b5fc21508b921ca7af10": { "jp": { - "updatedAt": "2025-12-02T22:57:44.919Z" + "updatedAt": "2025-12-04T20:16:57.588Z", + "postProcessHash": "5acdab8d074b694e772e4cd5d8eb974d4b3acf50b77763fdc8c77567ca8b63d3" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:44.922Z" + "updatedAt": "2025-12-04T20:16:57.589Z", + "postProcessHash": "29f2e9fa77e2de07f4d5d4522e576af9ee7cef1d42e12671762ccd317c943d4b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.918Z" + "updatedAt": "2025-12-04T20:16:57.587Z", + "postProcessHash": "72fd06feea9218b99d6701cf7385f2b2e5ca4210a36ac8efcc028281cf80290c" } }, "9b5a33767927dbd5b8d2768daa909d9f65bb2a2f716af808a8f3eb55f623603a": { @@ -16472,78 +20107,96 @@ }, "15560cb3fa6f0a4e7ae1fd51eff8646af3ae08a98e760dfc0c6787d702072f0f": { "zh": { - "updatedAt": "2025-12-02T22:57:44.950Z" + "updatedAt": "2025-12-04T20:16:57.583Z", + "postProcessHash": "51c4c210de3569e62af44e4ff7e2611be9137661c3b75a6d6af2f519add5d372" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.951Z" + "updatedAt": "2025-12-04T20:16:57.583Z", + "postProcessHash": "cc853f29620b9879c9bd1555c249189a52e70b02f5b3b01003b8e020bf81b42d" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.950Z" + "updatedAt": "2025-12-04T20:16:57.583Z", + "postProcessHash": "c41f48194a047e16653c0b18c7acaf83f43af9f3967548629cbf4cb39546a388" } } }, "ec512023c5c9e376d6b5a73b27c72543c329f1031a88df1142e318e852a6f5e1": { "e3ddcb697170bf6b9accf7bd9484665c071ebdf44c1609b8c1f4a6ae733dd1c5": { "jp": { - "updatedAt": "2025-12-02T22:57:44.961Z" + "updatedAt": "2025-12-04T20:16:57.594Z", + "postProcessHash": "67ba59176cdbd1bb4f9d12293d544c7a5a5377e00b65b68d126c53cc53ea94c6" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.960Z" + "updatedAt": "2025-12-04T20:16:57.593Z", + "postProcessHash": "955f7d4eadb160c93d9451181919f2145c7a76c46d6f5b697a0ff93b2d594dc3" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.958Z" + "updatedAt": "2025-12-04T20:16:57.590Z", + "postProcessHash": "849c0eb47bf6384d9102f769e0be3a20534c3a99219d1350ad57e1d77e26bc1a" } } }, "f411f73869f1597bddd3a69a70dcdf627b2f48802b68eb1297c49cf998a1d590": { "6c152f17b58caad6637a04e4d427aba059026b111c90e5aa764f040e05e669bb": { "jp": { - "updatedAt": "2025-12-02T22:57:44.916Z" + "updatedAt": 
"2025-12-04T20:16:57.570Z", + "postProcessHash": "331c38927c19c9407728a6943bc5c1100800089fbcec620f2ee27b2c0dbaeaaa" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.916Z" + "updatedAt": "2025-12-04T20:16:57.585Z", + "postProcessHash": "9bed1592f00e7bf18dae387703cc34eac541819b3a8c588ad993153015f9f8ee" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.901Z" + "updatedAt": "2025-12-04T20:16:57.465Z", + "postProcessHash": "d82b0bd7ba21ffebae0abe4edac136a9d8ceae2ef8fc706a7d7c3c2019c4bfa5" } } }, "0bba267be6ffcbb62a544c365f5d2cd85d6371c78dc289e5697b0225352a76ea": { "95f85b7c7a43494a5f08ae259de69c8952afb7851b1d9a887ad3107d5e6cbc01": { "jp": { - "updatedAt": "2025-12-02T22:57:44.994Z" + "updatedAt": "2025-12-04T20:16:57.656Z", + "postProcessHash": "5678792d3c8237068a96083cdfda49285f99688cc50c0db739e579a0640c30fb" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.991Z" + "updatedAt": "2025-12-04T20:16:57.653Z", + "postProcessHash": "ee7eff3af93a21d39396b021afd96e657d0c5cb774c6fd4adcde1ca1d4073940" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.998Z" + "updatedAt": "2025-12-04T20:16:57.660Z", + "postProcessHash": "04bc6b4e2abc3728a804964a2237a67bda8a38309da09809b34eb9b8959767b1" } } }, "12f796f4ae9f25130a8cfc11aff488171e7376f25404278d4e5c173c8bf9ed02": { "55069f671a99d799cfd16eda4312b66b5a321376cc69b52c58ba054f313fa404": { "jp": { - "updatedAt": "2025-12-02T22:57:44.971Z" + "updatedAt": "2025-12-04T20:16:57.646Z", + "postProcessHash": "b074d55b8da5114626369121cc74820fdb214ebd596eae0dbb3f06c730ac5a2b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.973Z" + "updatedAt": "2025-12-04T20:16:57.650Z", + "postProcessHash": "5a746eb1b76fff9bbed5d64080534b643b7b11c2dc04a7ee31203a4738cab598" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.971Z" + "updatedAt": "2025-12-04T20:16:57.603Z", + "postProcessHash": "61553ae737c53f26871eabf42e1fb78d9b7c3529cdd25940d9b4d7be23de76c1" } } }, "16c87bceec945d0aeefa7e76d421913b507e3b04716834b3894e9fd3174d2613": { 
"b43921e7c1caab150d19b0823696bd909b5e9b9dd41fe7847acfc9dabaec0942": { "ru": { - "updatedAt": "2025-12-02T22:57:45.005Z" + "updatedAt": "2025-12-04T20:16:57.692Z", + "postProcessHash": "8085e509478f7334aa7227e7769468e5d1ba68f27b4b3e6b040d2913de5732b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.005Z" + "updatedAt": "2025-12-04T20:16:57.691Z", + "postProcessHash": "d6150b5f43b94cb084e8278e06756702bf513b2ff5723bd6c90e3e02d39eab32" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.004Z" + "updatedAt": "2025-12-04T20:16:57.691Z", + "postProcessHash": "8d30a943dd7049ba0ccf50151cc90c3b2cdafdc6cd8c25bdbeaadb833b1914d6" } } }, @@ -16561,26 +20214,32 @@ }, "7ac8d25006b0218725310bb4f50d2afa2fa76b42500a9587fca779027db7c47f": { "ru": { - "updatedAt": "2025-12-02T22:57:44.945Z" + "updatedAt": "2025-12-04T20:16:57.581Z", + "postProcessHash": "55822944d0ca3d818e7662491756b4ebc2b83ad6fe9ff74ba3aa6e3bda5b2f79" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.946Z" + "updatedAt": "2025-12-04T20:16:57.581Z", + "postProcessHash": "612cb0291aa25af65af9656d1d18506ee966100817edef81c1dfadde2c965110" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.946Z" + "updatedAt": "2025-12-04T20:16:57.582Z", + "postProcessHash": "f71b29640bf00a366ea60eef418ae0caa20005e98396a651b7c2d3bdd85f5156" } } }, "2644c145de6d61cff7556d3efdff355e849b2f38b5c7912fbc2eb07360771f61": { "0e301628684a655bb2d5641c57775c3259b037ac338372d82808d6c91cacbd8c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.895Z" + "updatedAt": "2025-12-04T20:16:57.415Z", + "postProcessHash": "43f5173f581b4acf5a4e070befc3ea31210b152647821901d80eb06023eeef1a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.944Z" + "updatedAt": "2025-12-04T20:16:57.580Z", + "postProcessHash": "c43a0d811d7beeedae7799e5e8227e4997a1f35927a0cc7d7be4153897f0d1b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.943Z" + "updatedAt": "2025-12-04T20:16:57.580Z", + "postProcessHash": "dc513c2b487d9c80c1790111069c8b4d9d0e725be3b3223eda84db3cf6664344" } } }, @@ -16609,52 +20268,64 
@@ }, "e28902f12acecd10b62cf9cafa9cf0227aea3ec77ca8326732aa46b01a0d4b12": { "zh": { - "updatedAt": "2025-12-02T22:57:44.969Z" + "updatedAt": "2025-12-04T20:16:57.602Z", + "postProcessHash": "fc0fdf9d5c9d6811268d6026c31e5f95cbb506fcea4a8efb04aafb9226d0ba90" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.970Z" + "updatedAt": "2025-12-04T20:16:57.602Z", + "postProcessHash": "db434e2534ae8226bd3a619adf6b42ca3045e9ca884388eadfb596ccc9d09c4a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.970Z" + "updatedAt": "2025-12-04T20:16:57.603Z", + "postProcessHash": "58061eca5b8c9473c60118eedae63fb225c50aba6b05ff5a2d83447f1b328430" } } }, "337fa5ffda5b1ce15febb15e28d78f509b83dd0442c0eecb4e5fd5ad01cee570": { "8ad0cc19f45e168f3328286b8c922f25ddb3753ff16efc3a1795161778bbea66": { "jp": { - "updatedAt": "2025-12-02T22:57:44.979Z" + "updatedAt": "2025-12-04T20:16:57.687Z", + "postProcessHash": "823a60b3ace40c2800fc852e592988f75c15f8e313fa216d6aa3be8f29aaa8b4" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.983Z" + "updatedAt": "2025-12-04T20:16:57.691Z", + "postProcessHash": "dc0e2a44e5d9c51aeb0571d7216cb03a68dae0ebba3bada7c51146ca7cb57a1e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.941Z" + "updatedAt": "2025-12-04T20:16:57.579Z", + "postProcessHash": "83dcbc9085d4dec66330d0d0ba2833465b52fc629dc1f333709fe07edd70e758" } } }, "3a39c3cb40c4a84e5848358c7bcda5a305e64fba4846580eecea963760143cbd": { "1a63ea8e13a6c3989444c8189eb5c95920d36ded548a2cbb106db39f91e17f56": { "jp": { - "updatedAt": "2025-12-02T22:57:44.976Z" + "updatedAt": "2025-12-04T20:16:57.655Z", + "postProcessHash": "27e8ec3a420a04a66d60fed025534b09a101288941296a8b96033153e442282c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.977Z" + "updatedAt": "2025-12-04T20:16:57.657Z", + "postProcessHash": "4573361ff48146216a20fdca51ba2f638d4ee6b572ae0ae4f5fb624f1b17e978" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.967Z" + "updatedAt": "2025-12-04T20:16:57.582Z", + "postProcessHash": 
"01534d33f7b61ecb12f83039d48e18394593ca38993aa3b05d91213dc48871e6" } } }, "3b2a0db3103ecc795ff82061e46875995689dee845c28a19697c2e8b7d78fb8f": { "84bf17e2315c270d4f26795807428c5ef311a937dd6e53a4b6f3a8e26bf5e771": { "jp": { - "updatedAt": "2025-12-02T22:57:44.973Z" + "updatedAt": "2025-12-04T20:16:57.649Z", + "postProcessHash": "effb6f58c64072592c934d47c27ff8ac63b1375e2b9fec9654852bfe6bfbee4d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.971Z" + "updatedAt": "2025-12-04T20:16:57.647Z", + "postProcessHash": "16bb312a94c73f8bdf7fcb1b1e5fca2dc77ff3bc920fac25d0e3f4f39c713ca3" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.972Z" + "updatedAt": "2025-12-04T20:16:57.648Z", + "postProcessHash": "6046957fbe55f85753686a109f9b0332ea3faf14b2b00ae43adc07ce9983bc1e" } } }, @@ -16680,226 +20351,291 @@ "zh": { "updatedAt": "2025-12-02T22:57:44.946Z" } + }, + "2a954aa5b3c6ae320f0f704cc2dbd8490f0b40f5d2552a4ea0cc0a372ce7f660": { + "jp": { + "updatedAt": "2025-12-04T20:16:57.670Z", + "postProcessHash": "2980acea06f3537995f6e0c8861d13a6193eb0e742161df85f41ac4564876f2c" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.670Z", + "postProcessHash": "ac15b162913a27353a25090a993ba54a8eb99bf06561897c2c2f7a332a04dfdf" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:57.670Z", + "postProcessHash": "8242057c08e9b60c33c698b4afdb1f138ee4a5572af0b83e8f1eadaa1e17c34c" + } } }, "548882c1623ad246688470b47967ff13ad16868ecad4f09349b0182efc755985": { "76fc9813a272dfbb6dda3bb0096c7a2eeb3bf0a58d344e26c115c075a8cdf7d0": { "jp": { - "updatedAt": "2025-12-02T22:57:44.982Z" + "updatedAt": "2025-12-04T20:16:57.665Z", + "postProcessHash": "8cce58c225ae7057600cf7811ba5039561dc3dba9003ad8e1557d007fb9d1788" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.980Z" + "updatedAt": "2025-12-04T20:16:57.664Z", + "postProcessHash": "359450fe5159865ea64944f5be4fe90ec28e8e166b0329e46444f60f07b82988" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.979Z" + "updatedAt": "2025-12-04T20:16:57.663Z", + "postProcessHash": 
"30cb2b636cea84225dfcb326501229b458b95d9a02cd03ccdd80776af413c94e" } } }, "7d0d5bb4af482b39457517f498efb3ea46e33dd5a7cfdef6d18b2beb02e7dc2f": { "a7f9fe4cf3ef806d2de5eb8e1f83c674deb8224319845c32faa2c509b9f0d89a": { "jp": { - "updatedAt": "2025-12-02T22:57:44.978Z" + "updatedAt": "2025-12-04T20:16:57.662Z", + "postProcessHash": "1bd505b41826ca42c331d80a284b9d07e1c02cf19b9067da463bd6c319ffeccc" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.981Z" + "updatedAt": "2025-12-04T20:16:57.664Z", + "postProcessHash": "adffa03c1dae0e693402b1cc267b4d76eef7d004064838606a4ff7edbb68465b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.980Z" + "updatedAt": "2025-12-04T20:16:57.663Z", + "postProcessHash": "2a235fab93efe01442f4ab5896d3e7da4bea146ae3ef1884ec31f13fa0fc63df" } } }, "819fce8b1343a94dee6cf3e428f8d46ff343c43b0d83b49efe18129ccf292430": { "af1d949b76a7c871e4cdce3092a3b2e2b1ea6afca4c4788054f8ff3eddde3ea5": { "jp": { - "updatedAt": "2025-12-02T22:57:44.968Z" + "updatedAt": "2025-12-04T20:16:57.582Z", + "postProcessHash": "2111238a4c2eac9d3710a50dc755c6712c30b4cc92349904670129bfec67cdf2" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.973Z" + "updatedAt": "2025-12-04T20:16:57.650Z", + "postProcessHash": "ffa97f7bfb7595960abeed29a597ee8910dc31a345726a97b75f4c4dbfb1f34e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.968Z" + "updatedAt": "2025-12-04T20:16:57.582Z", + "postProcessHash": "355fb666a6c6405d55de2e318eb8a04edbd84ea2f10e6b38e0b9f814cbc3ebcd" } } }, "8756460c34802f52ffc72c46fd775666b61d2134d4e3d1de0bf4111a5a049571": { "483cc85982240fd19d9aaf9161c58f6f4b1f2cdf226fb60169450e02caea8384": { "jp": { - "updatedAt": "2025-12-02T22:57:44.977Z" + "updatedAt": "2025-12-04T20:16:57.658Z", + "postProcessHash": "7f8e52ff1a3dcf6fc66cce399a07e85b4bdead3cd2e148abe4f0597e1ed2f33e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.969Z" + "updatedAt": "2025-12-04T20:16:57.602Z", + "postProcessHash": "491a5d945987c764f0fb168d1e21a4c81ca3feac1fc95ec11a46924a097d7948" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.976Z" + "updatedAt": "2025-12-04T20:16:57.654Z", + "postProcessHash": "802fa188bb9f3cc9b7dc3bb4a776f930be6a6f6d2c4ff2c2f3d222d50f38f6c1" } } }, "99effff387a3391b66ab69348b19106aa7ae02149e5cdda15d9bd9397ddf4c41": { "635055619056b153a2e20b6a09345d76348336b24340ba32f815de9c85a7f2b0": { "jp": { - "updatedAt": "2025-12-02T22:57:44.899Z" + "updatedAt": "2025-12-04T20:16:57.596Z", + "postProcessHash": "155d02e31aa699a4bf250a7fa2f462f31f42ca0820d312441625b2cfc79ab72c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.957Z" + "updatedAt": "2025-12-04T20:16:57.596Z", + "postProcessHash": "8928fbe9cc2ef1d8bf4295f54bdbef37b404af0d5332f5a006975836054ec49a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.959Z" + "updatedAt": "2025-12-04T20:16:57.591Z", + "postProcessHash": "aff2ee71adabeb9f84d468e926f6611825cd55ded277310e804875b9f697aeec" } } }, "a0698fed4f549f79afea06d1dac666b108eb6f5dc2fd3c91aff7c13f9d635593": { "7bafd49c863eb3620b55c3516c62d1d11ad2c81b1333e7396261c18b3d55cf9f": { "jp": { - "updatedAt": "2025-12-02T22:57:44.959Z" + "updatedAt": "2025-12-04T20:16:57.590Z", + "postProcessHash": "6ab413cc5056660213ec38a9c0436d51700f7e1c81fe111f5a6e95f06a7185b5" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.899Z" + "updatedAt": "2025-12-04T20:16:57.462Z", + "postProcessHash": "a6bc3718d40d1495077fc414d0af57917737294004d0d13750b0dd7f1142317d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.957Z" + "updatedAt": "2025-12-04T20:16:57.589Z", + "postProcessHash": "ec1f0e9dddf6d57b44fb57adbe015332c36fea7ef97e5b0cee80a85cec615106" } } }, "ad1402ffed17fc7c6fda3f600f70cf8e3bbe5384d766081c16c2c90b4a775b7f": { "623f2f8c2f6006597fa106e18afad1304117a0a599684c3050b5f92f433dadf9": { "jp": { - "updatedAt": "2025-12-02T22:57:44.977Z" + "updatedAt": "2025-12-04T20:16:57.656Z", + "postProcessHash": "a8099cbe05a556cee64ca9cfbfcb2a769035b393cbaea0cd8a1df676563bcf30" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.975Z" + "updatedAt": "2025-12-04T20:16:57.653Z", + "postProcessHash": 
"dd5f9a77ec575837021212f3420e35f4c7e8932221b6a4699858395f5a9f0c5d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.965Z" + "updatedAt": "2025-12-04T20:16:57.580Z", + "postProcessHash": "13832ce732ba5ec83691768de15e7b247a8e1a8102a1fcc0d5af5938156c7365" } } }, "d6127c27c939a8143d6bd93d737c445238b16aea350cd52caa535082aaed407a": { "af21361ca18f3026c0fcb3b223ce74e7a213c2e9016d2f7596b5103f9f243027": { "jp": { - "updatedAt": "2025-12-02T22:57:44.975Z" + "updatedAt": "2025-12-04T20:16:57.653Z", + "postProcessHash": "75e1f3c22ab847c7e3575736f7c1868eac241fc599624409e3fb4e5984abc3dc" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.979Z" + "updatedAt": "2025-12-04T20:16:57.663Z", + "postProcessHash": "8280e955de9114057e63a8a6e0f35e0d287b8d26ae811127409c306545d0ef30" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.982Z" + "updatedAt": "2025-12-04T20:16:57.665Z", + "postProcessHash": "fae1838129385f45bdc786961ff08dec1bcfd358cab84e352d6e92eba0fc88ee" } } }, "d679b331b013310d0726e18cff38795d35a48a549ce862414366ed5d37b17a5a": { "6884d15ae61a9e31fa06e9f6cb793ec44513338525d28650cffaeecfdfd55f59": { "jp": { - "updatedAt": "2025-12-02T22:57:44.899Z" + "updatedAt": "2025-12-04T20:16:57.461Z", + "postProcessHash": "6fad68bfb31a3c2ca29f1e025448d23d9e9154531047abb5c103e42cdc589e07" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.956Z" + "updatedAt": "2025-12-04T20:16:57.588Z", + "postProcessHash": "43edba1e1f956f00ee97d6deab1b145f1c29b51ab928f6a7141ed7a09f9f87c0" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.956Z" + "updatedAt": "2025-12-04T20:16:57.587Z", + "postProcessHash": "70ae0a9bc13e40156ab0b8c60a1c033b1092a6574b84c4fb7aee1ff34653d910" } } }, "f23d1fdbec8862f67bc4eb334787e78bade64fa097b14c338abf676e73a1ca62": { "0206d00d56a0c7be7a356c6499d1bc4c3b24602fe48380f49f1a8be277e30ae9": { "jp": { - "updatedAt": "2025-12-02T22:57:44.953Z" + "updatedAt": "2025-12-04T20:16:57.585Z", + "postProcessHash": "a19ccf16a8eb432296f4f6b529b37701079feccb4c928f19b9bd86592276f006" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:44.949Z" + "updatedAt": "2025-12-04T20:16:57.583Z", + "postProcessHash": "60aae17087c8fac0d6d4dbfbd88aec68b34510677141ec3770aacb3c7882847b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.955Z" + "updatedAt": "2025-12-04T20:16:57.586Z", + "postProcessHash": "94a1fe079f2fe3282fc342bb4eb96117cd82b95ce260bd0a486f67e1545917d4" } } }, "f5d22ca5e2a60035bc7b1c39046c553ef2238bbf8c611bd22963a3cf3fe67663": { "9a33263baf26f23ddc1d61444b9f0bc17fe15f0d44c6aa520661947f7bc28d34": { "jp": { - "updatedAt": "2025-12-02T22:57:44.953Z" + "updatedAt": "2025-12-04T20:16:57.585Z", + "postProcessHash": "1dce294b31e5ac04b1fa6229f4cb78612a5af6accd91c4d81667343df2aa5f66" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.951Z" + "updatedAt": "2025-12-04T20:16:57.584Z", + "postProcessHash": "12a7b6c433b9a2774024b007c4971c28c0a48ceaa69433d48c1f79b5f55680c7" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.951Z" + "updatedAt": "2025-12-04T20:16:57.584Z", + "postProcessHash": "842264c8e329a99f215715cd845aa0c3147eb003a3db2f684ea4ab4389daa0dc" } } }, "0d3a0a09b86406c2c362ede819ee030f9d92d058939579cd1229e361973022f8": { "9fc104791c743a764dffa282d540ca4365e02a6a6590d6c336de81ff7f63da24": { "jp": { - "updatedAt": "2025-12-02T22:57:44.964Z" + "updatedAt": "2025-12-04T20:16:57.579Z", + "postProcessHash": "fcb325802f33cb167fb58f7a2f88d3589f1f273684080eb0f8fcb8d94db852de" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.000Z" + "updatedAt": "2025-12-04T20:16:57.684Z", + "postProcessHash": "d500b36ee678669ecfdef7e942677cc87111e09eedcadb2e52aa4f4e2e28010e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.001Z" + "updatedAt": "2025-12-04T20:16:57.686Z", + "postProcessHash": "3958b45d33bba163ae8a2c8b7bce67fdc96478e6d1caca94e6b0208f8aee0b66" } } }, "14364235369dc388419efc9e290886ddaa202d5023e8adc55d75a61c89fc336a": { "328695ec26f7fc60b0c8aec17edefe2b5cd222a635c116a01ed4259436be44ae": { "jp": { - "updatedAt": "2025-12-02T22:57:45.014Z" + "updatedAt": "2025-12-04T20:16:57.679Z", + "postProcessHash": 
"33928766b7939bb2ef4cf1f8f8c73ed086e743826edbd7a925e686b3c9a57c59" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.020Z" + "updatedAt": "2025-12-04T20:16:57.683Z", + "postProcessHash": "2e364faee5b46c247e85daaef3d2b437f56c616ff10522eeea43cda911806d20" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.021Z" + "updatedAt": "2025-12-04T20:16:57.684Z", + "postProcessHash": "1563a8e89e250ee1c35c865fa941b64adb9af53b11e29075973a1eb60c057193" } } }, "14a65362c725c7a0fae1767f0bdaecab08516f4549961fb82c9b0d3889476e2e": { "4b5208315e755dbc3f295c8a58958e452a782c2f41e4965b7aaafc2ecdf93523": { "jp": { - "updatedAt": "2025-12-02T22:57:45.003Z" + "updatedAt": "2025-12-04T20:16:57.690Z", + "postProcessHash": "35d7d5dc6937334f3b23d3b085c18e145f11b78d456c46feaa074f4650635252" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.003Z" + "updatedAt": "2025-12-04T20:16:57.688Z", + "postProcessHash": "7e839c964f9083c5aae1272de98bc132e623777c729d3e2c2ca923a799d05945" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.004Z" + "updatedAt": "2025-12-04T20:16:57.691Z", + "postProcessHash": "c9aee2c36ff574ed70f64b86e45c018171aac2e09dabfe25778397470719d5af" } } }, "181aa5509e2dd7724e3095fd6c0f17cf6fedab2635b9af1d57fe9d1e2801ec31": { "bf2760368d2fc3a4c455358f8872f13eb6f6e7b8ccd6d529c68dfa016882d216": { "jp": { - "updatedAt": "2025-12-02T22:57:45.019Z" + "updatedAt": "2025-12-04T20:16:57.682Z", + "postProcessHash": "10d9785badcd0cc24e46ec8b1e017b4d62c8fb370ea39ddbde77864a63f3ae82" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.020Z" + "updatedAt": "2025-12-04T20:16:57.683Z", + "postProcessHash": "9ed1f590fe2efc90a6477f83bf465c9da7e2f940a000f452d9400219d32cc77c" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.016Z" + "updatedAt": "2025-12-04T20:16:57.680Z", + "postProcessHash": "6669451d98f9cd189d4c414cd13efbccf2bdb186ea5d7412928228dc2ebe8514" } } }, "5181bec59897499f787e1b509cc19c69de2efe0e1437cc2001f2c7dbe8022440": { "54af2191cc8de0b1a73c6bfeceff12420569139b7347df0f18a111a00cfa0d1f": { "jp": { - "updatedAt": 
"2025-12-02T22:57:44.989Z" + "updatedAt": "2025-12-04T20:16:57.648Z", + "postProcessHash": "685bfe4bfa067570b280e30de9ef63ca048946fb37cf9b0e14a45a4292726eb0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.992Z" + "updatedAt": "2025-12-04T20:16:57.654Z", + "postProcessHash": "1a302a4609052af4658efb236bdf661ac362db7fb97257a6456d3e76c84782f6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.990Z" + "updatedAt": "2025-12-04T20:16:57.651Z", + "postProcessHash": "f35c1534a6cb2cd5f47a2130af68292d83181e517bdf83af3fbb4d42153c6b84" } } }, "59475ce3a8014935df370b01b5266883e7814a8041f963545d8edaf3119557f2": { "53cc67b8b0dbfb95e446a4d98d10dfc35a026203fad1b1a20cb6bf733faba4e2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.981Z" + "updatedAt": "2025-12-04T20:16:57.664Z", + "postProcessHash": "1c57804c6871c7fcbe5dcfc958f3c623b4d5540b2cd404928f54aa10bcfce307" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.985Z" + "updatedAt": "2025-12-04T20:16:57.668Z", + "postProcessHash": "be99934ed27ac4c0ccece20e80c8e3db0f326db99e826c9e50653e7513f0cf0c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.985Z" + "updatedAt": "2025-12-04T20:16:57.667Z", + "postProcessHash": "55383c602b43993e3cc0b0bdbd37509a15a6fb04ba58723e5470b8587f2d3afb" } } }, @@ -16936,369 +20672,467 @@ "jp": { "updatedAt": "2025-12-02T22:57:44.940Z" } + }, + "c2ebb0c5f7fbe91be83a9dd3c9def71a40958aa5b6484433272d09dbefb6d6ac": { + "ru": { + "updatedAt": "2025-12-04T20:16:57.668Z", + "postProcessHash": "14a404fc94c7a30c73b6e7280ed9a970a8cbb85c85b928d84f9bf227e9b652e2" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:57.669Z", + "postProcessHash": "93e0b32d915254ae12d21f4329de071192ec084d52a156a66e924b272faacf38" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.669Z", + "postProcessHash": "a54e6d2734366fb18422a671ad903a555349d2f855589bcc3f8ee55a7532c0f0" + } } }, "6d2c1d43528de97c8dcb1e3618555c13b1ee6ca0cec9035a38fdc267403c6c3a": { "a09b919bea9302ab3ba5a119614ec1de086c78f2af22957d06a895b8b1504bc9": { "jp": { - "updatedAt": 
"2025-12-02T22:57:44.972Z" + "updatedAt": "2025-12-04T20:16:57.647Z", + "postProcessHash": "1986919dd165b0ad34257e2198b4efd79d89389d0f3984e288d24547b84cdc6f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.974Z" + "updatedAt": "2025-12-04T20:16:57.651Z", + "postProcessHash": "37c1be222052727e0614815b249e72b4f2dd897b10f3599de386d0344b33c083" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.974Z" + "updatedAt": "2025-12-04T20:16:57.650Z", + "postProcessHash": "1c80f3cc245682a22eb150181382b07359e496bb52d7795c82082fc3003f901a" } } }, "84d27978ad24cbc0448dc0661dc1cf62312406d39568cc877e9bee6c04e93677": { "4120b13b5f03f7c2fd4dd243edcbc718d6bd291d7358050064f6599242eeca09": { "jp": { - "updatedAt": "2025-12-02T22:57:44.973Z" + "updatedAt": "2025-12-04T20:16:57.650Z", + "postProcessHash": "44bef36cff7d45a14389fe979190bf2e6bc0c1048290d86c5bd89064de58c34f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.969Z" + "updatedAt": "2025-12-04T20:16:57.602Z", + "postProcessHash": "509b660d3b5a053e3c26111c46772204187a01ef33432b39f0a8e019b21e2777" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.967Z" + "updatedAt": "2025-12-04T20:16:57.582Z", + "postProcessHash": "72a3afd4b9e9019096ee63f9dd474c59fae790a15169c872e6f0b7f78101fbdc" } } }, "993eb4cbf451025e383f5baa954ba930c6f9ae51ff01592c72b8d36662548817": { "6397e782e35c68ed2849d7a8210eb150a2820241365b2424b92b3ac99815d60d": { "jp": { - "updatedAt": "2025-12-02T22:57:44.996Z" + "updatedAt": "2025-12-04T20:16:57.659Z", + "postProcessHash": "00b05501b5e3f5a8a5004e4283210272464b1e5bdb201cb1c1e7eb2932ac4ee7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.993Z" + "updatedAt": "2025-12-04T20:16:57.655Z", + "postProcessHash": "401fc08667076aff8967c4d9f19bd32c37bc55abc1ff3861340223fc53dbe84e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.995Z" + "updatedAt": "2025-12-04T20:16:57.658Z", + "postProcessHash": "b977d2ac0f3b085ffd87d073d3b8cb04a3c94bf95d7767f661f6270834c274aa" } } }, "bc95ac30c6163794df098cb1c5b0c612d68e460c1fee0982a9fde6ad2158ac24": { 
"d710ab3ea85690006a2ba44bbff81541eaffd450228382acc7544df0e34c7468": { "jp": { - "updatedAt": "2025-12-02T22:57:44.976Z" + "updatedAt": "2025-12-04T20:16:57.655Z", + "postProcessHash": "03b8f73755647a6f9a73d7ea26a28a8599953a3b3f67ccf5f33ccfa22b39e1b2" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.978Z" + "updatedAt": "2025-12-04T20:16:57.662Z", + "postProcessHash": "6a667df5280163acf1c0347e2f7025726fb2f853d2949b9c744bf29856b44930" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.981Z" + "updatedAt": "2025-12-04T20:16:57.665Z", + "postProcessHash": "2c7a3bf72c18949af3f0da6c769cf271b9465c65071a228445a402a66caa3ffa" } } }, "bdea2c6c34b1129be3efdd889576a52c92a915a41e1639ec5331bfe00948aa9e": { "d5c5bd7080a73f05e45d4b278cac9e1b97c489d95a7c80a8edeeccfbc35abb0e": { "jp": { - "updatedAt": "2025-12-02T22:57:44.997Z" + "updatedAt": "2025-12-04T20:16:57.659Z", + "postProcessHash": "d990c930fd772790529caf4c8b40ed0c38e14d55bbc980f2ca2439917a59d705" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.989Z" + "updatedAt": "2025-12-04T20:16:57.649Z", + "postProcessHash": "6135359e44350e325ae69b48eba9659309011bcc392a236c3780122003cc3c93" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.977Z" + "updatedAt": "2025-12-04T20:16:57.660Z", + "postProcessHash": "89313a4ffc0d8da2df46a3d1083df8adf0a7a77dcbd7804bacf19fbea65da1a4" } } }, "bef9b0e0b7b38c7969e61c98c564c4f45f4514c4992c99602befb825815d3fe5": { "ecbe5e563d38c0a661a9495fe8b3be6dea6041fe9fe0a6e674428f8d203f2c76": { "jp": { - "updatedAt": "2025-12-02T22:57:44.998Z" + "updatedAt": "2025-12-04T20:16:57.660Z", + "postProcessHash": "aefd8ed7e23196fafabc7b262178dc0f913ffd552d5c0fef5ecf94ef94e2928f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.991Z" + "updatedAt": "2025-12-04T20:16:57.652Z", + "postProcessHash": "fb6e568dd534ebc16ac4d8b7d63e98564d71657a2e780c6f68b5088cd4ad98ad" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.992Z" + "updatedAt": "2025-12-04T20:16:57.653Z", + "postProcessHash": 
"86e2c17cfde5ba27ea5111089b307d64faf9edd91ea3922d9b5005fe6a2732bd" } } }, "c86c73e2e1466ca9839d03145d28d089d50433e69d452f195d963042ce89ac2f": { "f65f3977310bfcdd03981a63ac5b1d00c85b04cbbc5ef4d29c352006d88c1be0": { "jp": { - "updatedAt": "2025-12-02T22:57:45.004Z" + "updatedAt": "2025-12-04T20:16:57.690Z", + "postProcessHash": "3affb249b4704de25617e452b79e654d495c5789ce89020a0b5e30f4d6861fa3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.000Z" + "updatedAt": "2025-12-04T20:16:57.684Z", + "postProcessHash": "ae0a2067c28c1196a43e19e849f41085943ba9a4b4a937dc1f7b00d796b15940" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.000Z" + "updatedAt": "2025-12-04T20:16:57.662Z", + "postProcessHash": "e07c7a41f5012428d15834b680f86e3d51397170594328b6b71c4570181f7f22" } } }, "cb9c09aa37313bf52611e34b607eaa3775f6ebfd79387f2120b6b2b2ed4b46e5": { "b033c9754be40272847cfcdbce3fd43701961388f8efc8698510876cb0c0fb40": { "jp": { - "updatedAt": "2025-12-02T22:57:44.994Z" + "updatedAt": "2025-12-04T20:16:57.657Z", + "postProcessHash": "5f202645e772cfa9b66ce42db7caf7cd9c28649370139b45f796af57beada41a" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.993Z" + "updatedAt": "2025-12-04T20:16:57.656Z", + "postProcessHash": "730476a1d6177985b73e2ce9e31babb9b5812e0263cc712f052ff4f6c5ee9ed5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.986Z" + "updatedAt": "2025-12-04T20:16:57.601Z", + "postProcessHash": "9a9ecf73cdeae1f584fb18ff22d40f31ae6876a3ee16f72f7b3e367557ae3af2" } } }, "d7bfcfa62fea0cd11e8181ebab38199db1c954694d8230c3cb8be3a89f91c476": { "c1ce68737a5260a794d17040e187ca291588ef715aeba34369597a7058dc2af4": { "jp": { - "updatedAt": "2025-12-02T22:57:44.993Z" + "updatedAt": "2025-12-04T20:16:57.654Z", + "postProcessHash": "fdf1dbaa7c682b6ae678be9a2af947848b3f27c2a9322fb7cf779df35dca1663" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.002Z" + "updatedAt": "2025-12-04T20:16:57.688Z", + "postProcessHash": "6e96817fd230b5af601195a3a5fd53abd1c083a986a2210e153bcc3d1be76152" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.002Z" + "updatedAt": "2025-12-04T20:16:57.687Z", + "postProcessHash": "c0dabcbe67d17b4e3acae426cc27dbf1690291ea46124ae529c71470a4516a74" } } }, "eb4daa639a63e99d988bfe1cd009befb853ba7171f88047823ca4d63e119f46c": { "db3dcac7ca205ca613bb9129a98b90f70d1edd49164206d1bacd86ccbb885f5f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.003Z" + "updatedAt": "2025-12-04T20:16:57.688Z", + "postProcessHash": "01922858373142ddbd8ea6103ccea8940d13afd51dc2cb38f459d478218b5d92" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.999Z" + "updatedAt": "2025-12-04T20:16:57.661Z", + "postProcessHash": "eda9dd7b2161cdb6e4d96aea79a31455e54a48818da004b118383fb95964185c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.997Z" + "updatedAt": "2025-12-04T20:16:57.659Z", + "postProcessHash": "30f4d2b9d7a03fead1282034fb7c93387bfaad7d147fa32254e0c0b2fe83a0d3" } } }, "ece18ee5cf148911a064ac3aabde31461f3fa90405c4631fe64e67bf35b3df8c": { "babc66efa89a5cb73d9a68a0dceb5ae1559780502d074014931e6370f64030af": { "jp": { - "updatedAt": "2025-12-02T22:57:44.984Z" + "updatedAt": "2025-12-04T20:16:57.666Z", + "postProcessHash": "e8489df5225c42aeeb337afc284f42a6932fc415547d9d165c5fcd77fa3585cc" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.985Z" + "updatedAt": "2025-12-04T20:16:57.667Z", + "postProcessHash": "a7318e854110db3c0cacb1a30bcef6280593857c7ab77e59fbdcfc67f46a9c8b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.983Z" + "updatedAt": "2025-12-04T20:16:57.666Z", + "postProcessHash": "9747aec9f29a3edcd1d969f85304cc67fdd86e8180ef92693560a034845812e9" } } }, "eec5db41f767e87040d1a1e1a235ad804968c2645819039af5e1306f75ee2ba6": { "3294839c4121817eb15af16f39ea52c308ef56de049782978aa71dcc4c38777a": { "jp": { - "updatedAt": "2025-12-02T22:57:44.993Z" + "updatedAt": "2025-12-04T20:16:57.655Z", + "postProcessHash": "af71c82b603bd678e62c1e75791c5c025daf1f69a9da736e1bef96a96ba7cbff" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.990Z" + "updatedAt": "2025-12-04T20:16:57.651Z", + "postProcessHash": 
"1b3896f8cecc45415504473caa2f7494ac024c2f823c742ebe3b221239e4acd9" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.990Z" + "updatedAt": "2025-12-04T20:16:57.651Z", + "postProcessHash": "3403cd2bc7296ff2c748cfaddbfb66d82056fce9bab2080b84348a7d67db365c" } } }, "fcac219896966a54530a8593af31aa0dd688a431b44e0f3c677722d49352eb30": { "764c0b5706ee7c8505c4e4a557bdfcf617fad088da12e5302081d2d0510f71a1": { "jp": { - "updatedAt": "2025-12-02T22:57:44.975Z" + "updatedAt": "2025-12-04T20:16:57.652Z", + "postProcessHash": "cba88151552169d835c6199f0ba4fe0b0d49bc433e9be193b75e2d67c89aad33" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.966Z" + "updatedAt": "2025-12-04T20:16:57.669Z", + "postProcessHash": "8d48d69356275cc1f3089721b2a4f0ce1ba01993f594debe48798baf14306fcc" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.972Z" + "updatedAt": "2025-12-04T20:16:57.649Z", + "postProcessHash": "5adf6e9c0bf2f093f200f99caff780ceac2e0ffd4d5256b99906f4f0bb3b13a6" } } }, "febea1a8af326ccd97db3bc68c3ffe9b9d02860dfb6225e2ad85613d0fd14f7a": { "96025027a22efdcf22fae68b1f8666c6d43d7197ab56d27461b40b4566ccacf3": { "jp": { - "updatedAt": "2025-12-02T22:57:44.984Z" + "updatedAt": "2025-12-04T20:16:57.667Z", + "postProcessHash": "386a79a1fa01867803d452152df41682f4283e1d1fe641ebcdbe34044f49eaff" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.982Z" + "updatedAt": "2025-12-04T20:16:57.666Z", + "postProcessHash": "57bad336244356a395113002a731092855e47166de5347800c340ef3352dcb43" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.984Z" + "updatedAt": "2025-12-04T20:16:57.667Z", + "postProcessHash": "4f8c6db81923f2e69451c4dfb51b272d1637d720d15d9a8527f5bb5252510bcc" } } }, "07413031937c2e03067e44df8e3cbca1210ee434cba21c6bfa6e51fe5d2f01e5": { "1e96680d8322f2acc44b5d97b8bff6f35462189f2158321fb5f3892804e98d6a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.022Z" + "updatedAt": "2025-12-04T20:16:57.685Z", + "postProcessHash": "cef6f2b8fec21b8c5b60fdd9873ad8d1d2c74a7c3f5058bc3380e42fd5a927be" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.024Z" + "updatedAt": "2025-12-04T20:16:57.689Z", + "postProcessHash": "a3ddeadc4e1879b381bfadbb2031d843f8478bc5446d5def0e928992ce80f058" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.023Z" + "updatedAt": "2025-12-04T20:16:57.687Z", + "postProcessHash": "f7ac3c594a2eed9b400fd16f2f343dc74d03423b565b1f90b22a0e6113571560" } } }, "08048e81a0b10f6fc876c8e10e896bba823ef23c25b37974243d3ce6241e95be": { "fa7004278db4a71dffabfc42db57fec5a575fb3dbd7222d4b9792bf19848b5d0": { "jp": { - "updatedAt": "2025-12-02T22:57:45.038Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "c047f92c4d8b3e333bfc3b2d9d815cabf0f7febb6cafaf2cbf0ebe6ae355231f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.042Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": "6bb0df5d2bad0f2d93abac68cc948086755c6fbc8b0119e3eed93bc17b873a3e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.035Z" + "updatedAt": "2025-12-04T20:16:57.716Z", + "postProcessHash": "7d2aa2d03bd18e28b40299d5fc2eff1e9953631c931824a01649d384eee38bf9" } } }, "184cb7accedb381551a80c780323d8467fa7bd7b87d916cb1c6e2e1927c800cd": { "20fbfd2eb1f5b24eda2f90fd903779fd0847f0d888d3b04f4c7e56590eff1492": { "jp": { - "updatedAt": "2025-12-02T22:57:45.025Z" + "updatedAt": "2025-12-04T20:16:57.690Z", + "postProcessHash": "d798b8bd51b384227814e49317313fefa2d4d3e214e6d37172b77e5d73674778" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.026Z" + "updatedAt": "2025-12-04T20:16:57.692Z", + "postProcessHash": "6970d3c5da5376752853b712806df5bdb71805fdbb956653b6b242c132d4d848" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.024Z" + "updatedAt": "2025-12-04T20:16:57.689Z", + "postProcessHash": "e3714499da18b34b86fb3915796d4c9daf2c6e3fede7f6a740e9a957a9ca1576" } } }, "1f9e1a47c221609e49eb77fb61cad9a84a56bdb680185de6655f77145049570f": { "d2bac435d9afc706018821f07927cba0b34f6719b4e95a2c242a869d2c00be3d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.018Z" + "updatedAt": "2025-12-04T20:16:57.681Z", + "postProcessHash": 
"b36130e4d35207f3d11a01e934cb87f657ab1e620e200e3161aec95f3d8cfeda" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.987Z" + "updatedAt": "2025-12-04T20:16:57.601Z", + "postProcessHash": "b256d16340c126bb4a02ecbbb1dbf9193a06fa7c19d4afdcf4b71fbede2cf852" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.009Z" + "updatedAt": "2025-12-04T20:16:57.647Z", + "postProcessHash": "50c1a161f97c9052a7fef13f33e69bf0f868216a66f4a66d6d37f38c3c005c88" } } }, "1fd11512dba8b586ce114f0a09f748f438a3592e967e6b26fdb70d49b49b5b34": { "528c254f1d39fc4b566d364735917ebd190685375530f8192104891def887095": { "jp": { - "updatedAt": "2025-12-02T22:57:45.012Z" + "updatedAt": "2025-12-04T20:16:57.719Z", + "postProcessHash": "9c7d433b3ad4d11286fde2c3b7f2273fbbcd97d4ea9e6e7304c9db80b170e286" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.019Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": "78c4987294a9a6795316e727c820938ce9c4bc63511b38e1eb3b342369057758" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.987Z" + "updatedAt": "2025-12-04T20:16:57.674Z", + "postProcessHash": "784b61382b058c261eee34267dfd0ef43defabf1d0312196b774ceb8571b91cd" } } }, "22dec589b8fb9f267b747bb6c4caa91619a82b138da7ac22fafdf2a4d36dbe70": { "540a7500cbfe21ee07e22edbd55ff6af883a067d691b6301d93bbec754f9da7f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.010Z" + "updatedAt": "2025-12-04T20:16:57.676Z", + "postProcessHash": "a71edf9718a2ec3991ff44c721cd5145642c310e7a4e00cdc2717e738a306a89" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.989Z" + "updatedAt": "2025-12-04T20:16:57.648Z", + "postProcessHash": "b0d9a4063dd2a167fcd1b6fe62bff54ad896d089bea7c2c9ce2a150da752f0af" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.992Z" + "updatedAt": "2025-12-04T20:16:57.679Z", + "postProcessHash": "7414336ba3770185961c9109ef11217c8a4f3031c9e0644e9c25f6440155a29e" } } }, "25a566b63d1b51f62e85f3301907bb9851c8e295092c6c0cbb274855aaf2075a": { "b194d71f6380d7cf9309e9c89f192bff2723d4c46d48e2aa2b48e736c874804d": { "jp": { - "updatedAt": 
"2025-12-02T22:57:44.997Z" + "updatedAt": "2025-12-04T20:16:57.683Z", + "postProcessHash": "0a8632905a12a13164bbe1617093507595fb220b1d902678369a1dfa17870398" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.965Z" + "updatedAt": "2025-12-04T20:16:57.579Z", + "postProcessHash": "7e31fdb7eec4faadeff56b78034bcd236ff43f9622f8dfa087cb3d3497e86b2e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.998Z" + "updatedAt": "2025-12-04T20:16:57.660Z", + "postProcessHash": "dda149a9fb8c99bcd9130357ab0ee3d6c68ba59e01112e4cad288e6fa9421e34" } } }, "2dc1b2de19552e6b04e43bcf12a339877b5cd1caa1251210fa995f871b2381a2": { "8023c7e209034ddbc60f9efb8a61b992915d01ab6dcdc5f5a0b08c1c7a1cf28a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.022Z" + "updatedAt": "2025-12-04T20:16:57.685Z", + "postProcessHash": "d3896f53a0358dee8bc47f3a0b7fa8486f71ec8dc698950cd375ccc21fac9a8a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.015Z" + "updatedAt": "2025-12-04T20:16:57.679Z", + "postProcessHash": "6a56cd65043e01b60bc84fcb43f7e4616e87af81f363fda82c2b03d945098cb5" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.023Z" + "updatedAt": "2025-12-04T20:16:57.686Z", + "postProcessHash": "8486168186dff5c750e54b7c4fbc627d90ae92e45195e5d021071c62593683c9" } } }, "356f390220f614f7e13052b738d6bac3386bcb14e99297bc57a7c7bf37c10fd1": { "eed67b4d5e2a37a8d51c1aaf6d8810650b97bd70d00122a88ebb97c212da9ee2": { "jp": { - "updatedAt": "2025-12-02T22:57:44.988Z" + "updatedAt": "2025-12-04T20:16:57.602Z", + "postProcessHash": "4bdff42f72d48b7771d7d33b7827030aa266f9938c02efd3c6a94e5878250006" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.016Z" + "updatedAt": "2025-12-04T20:16:57.680Z", + "postProcessHash": "9db3373e734361d21273cc5a0729d6c5443a7c2ac272d7858c0f57f3e9464849" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.016Z" + "updatedAt": "2025-12-04T20:16:57.680Z", + "postProcessHash": "561356a7cf20164d944d4b9f15656c7d9d466ad586d1a4e767c01c3683fdc995" } } }, "35c7bfba55131ad9d6116db29b6547a45eabafbca7d547b5501ea16d51eede3f": { 
"6a8e1ca55281999c6130ae572325abcb150b29cfd12ebe451133060b6c502a1a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.018Z" + "updatedAt": "2025-12-04T20:16:57.682Z", + "postProcessHash": "18172b0f262c8cd198d83f2ea882c4d79e65e98aefb17bb9e082057d63c4d386" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.019Z" + "updatedAt": "2025-12-04T20:16:57.682Z", + "postProcessHash": "b875b67b3b36e49e22a6cfa4050c910d0b9b10e5862cdd6b992631391fe6b441" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.017Z" + "updatedAt": "2025-12-04T20:16:57.681Z", + "postProcessHash": "ca5eedc366b32edf7a57ea49f9ce3cc24398b90b60bbb08c76e63091f29631ac" } } }, "43b396ef0d459a925fcad74ebe7dbd673c6bb8eab1d24fb377b596b6d6850d5b": { "83184a4d72d70281ddce4c2b92b731c5b7e8f98d6d6bebeedbdc0a053adf947c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.023Z" + "updatedAt": "2025-12-04T20:16:57.685Z", + "postProcessHash": "ff55e43a8444c65434b2c21fb0252d5dcd227fa39da2be4890424304de5d1f9e" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.015Z" + "updatedAt": "2025-12-04T20:16:57.679Z", + "postProcessHash": "f0e11733381900845b54cc42b91065eabd68519515f7e7f34f3801db1e459891" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.015Z" + "updatedAt": "2025-12-04T20:16:57.679Z", + "postProcessHash": "4f41facd9bfeae500c4fb53d8454d82b3d8e6ab90736fdc3f18a198118cb3324" } } }, "4d4c6c8d13e7ac14a5f189e798e199562f2150ad644328ef3e5b7e6d397aacb0": { "7c2190f84db7a1c33916eca37c2632206233059ad999d42ac355995a785c5d81": { "jp": { - "updatedAt": "2025-12-02T22:57:45.001Z" + "updatedAt": "2025-12-04T20:16:57.685Z", + "postProcessHash": "7dbfce849c5fa0eb1de39ce86f03bc15418d04712365fb3664d48cf83dd3f7e8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.999Z" + "updatedAt": "2025-12-04T20:16:57.661Z", + "postProcessHash": "5ee70d58e0317af1c01dd29cddd4bfc73d674394fd283dd48fdaa8eda7079ba5" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.986Z" + "updatedAt": "2025-12-04T20:16:57.601Z", + "postProcessHash": 
"64dd723b3025f832c67da4cc88a40064dad3fabaf2ebd6305fbc82ac729c67fd" } } }, "5f7094d809fbf8e07ca4e02020e14a570a112a588701724679f8375a2bfbecb1": { "d84676e935f15fc8eda0f1c0db79ad9cef52b93ee23e53f9891fe1aaaa1180ba": { "jp": { - "updatedAt": "2025-12-02T22:57:44.986Z" + "updatedAt": "2025-12-04T20:16:57.601Z", + "postProcessHash": "90379d43aa54894dba1fec53be8637bd35633cc0bec8540fddc51f1daa52b2a1" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.010Z" + "updatedAt": "2025-12-04T20:16:57.676Z", + "postProcessHash": "ee6762e6048445354132eca8b65eddd65abfa0f9461214aa7c89477a47a0c974" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.012Z" + "updatedAt": "2025-12-04T20:16:57.677Z", + "postProcessHash": "8253ea2f2633d33ab11318a2fcede0a67a8fe34f26c262b5f50de825f867d592" } } }, "7c4de22baba4a87ac92a5d821ddef4976b0c230d25c52c53dfeac09fad83b108": { "6f7f34ba690c91122f3ae8820b83f342061fa594ff253407eb57463d3c34c326": { "jp": { - "updatedAt": "2025-12-02T22:57:44.994Z" + "updatedAt": "2025-12-04T20:16:57.656Z", + "postProcessHash": "992d6416bf09f5ac068043bf9553de48b82483151b9c3d98fac7373367149948" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.996Z" + "updatedAt": "2025-12-04T20:16:57.658Z", + "postProcessHash": "09bbf36dd3bf7adb64f34a9a76b65da2a26e3dff1f786cdc88e410c52595d627" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.991Z" + "updatedAt": "2025-12-04T20:16:57.652Z", + "postProcessHash": "f5ea5cf25c9f603bc7797e15789771284cf5c2fa7255b6b3b1ced6be16a6e5d6" } } }, @@ -17313,577 +21147,723 @@ "zh": { "updatedAt": "2025-12-02T22:57:45.006Z" } + }, + "f45cf9b56a6023b4ace03e8b17976a39ecde2e62ebe59dd94428f3f5fb17bba5": { + "jp": { + "updatedAt": "2025-12-04T20:16:57.694Z", + "postProcessHash": "e2e95a9a7c6c698acb85fa81ca66e8a03fb5f557c757b0eab272e09fd28dacad" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.694Z", + "postProcessHash": "fffa39792f54c18653bcc34197a5d39715736ecc47301fe1370b72b84bb3075a" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:57.695Z", + "postProcessHash": 
"10286e09aaad1f999d519bc4c70af495aec3c1d4f47230b12b8adac86cd8625d" + } } }, "9d97fe2dc29c4e251fff04450d9d510ccf9c2e831a0489dda3f038dcc1a6a2f3": { "e5572fcb5876d8a6b1d1de543d82369a494fcc0715dd6012be5bbf426e7ac03a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.025Z" + "updatedAt": "2025-12-04T20:16:57.690Z", + "postProcessHash": "bb7a5ff30260fd4862ec6233ab1b2ad492c081179d1aa54b6e5d621b637ffcee" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.023Z" + "updatedAt": "2025-12-04T20:16:57.688Z", + "postProcessHash": "3721e193b7f88254b53c4d0b81aaa2654fe27bf88de6e5803713592fefa53209" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.024Z" + "updatedAt": "2025-12-04T20:16:57.689Z", + "postProcessHash": "b5229b1b64df0e86c3b5f08bc7ae178708965a73fac68f8f1ba20a6fd1a6db66" } } }, "9e660b008ccbb63b66a28b42d2ca373909f19186af16b9c41ba664f7930e07fe": { "41b27ab4937c7b922d42316551438b4ad659c0ecc6b4fa06f15edf97230d1798": { "jp": { - "updatedAt": "2025-12-02T22:57:44.988Z" + "updatedAt": "2025-12-04T20:16:57.648Z", + "postProcessHash": "a457a1295ba746250ecc29b58330d2a08e60b50d30f16bf5f6bb48f5627c9ec0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.995Z" + "updatedAt": "2025-12-04T20:16:57.694Z", + "postProcessHash": "6f4a1979eb4cd2406e954f71b3c1edf1a2d04bd3cb7f0d7659cb18c73fefd2bf" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.996Z" + "updatedAt": "2025-12-04T20:16:57.658Z", + "postProcessHash": "0de3b0e189441e4521d0406c60c2d875a99b6b804a86c55865e4ace2f158a729" } } }, "ac6b549d962e823e381f2519f7e5e9ff23ec0d86da8d61b9555feb375c459654": { "0f0b86bed0cbb0312f32be51c009ca122e78f92ff738c6606ff98754fca7f43c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.002Z" + "updatedAt": "2025-12-04T20:16:57.687Z", + "postProcessHash": "599f0a95b7c7c07489b904187f89743220e46a48395ddc9cc7a2e62428bd9cce" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.995Z" + "updatedAt": "2025-12-04T20:16:57.657Z", + "postProcessHash": "51ca72913be952490b7a55faa0605eaf64592f45b74c32630bcfbd8c7929a6af" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.002Z" + "updatedAt": "2025-12-04T20:16:57.686Z", + "postProcessHash": "cb161701ea0fd2bdd8bdf5745d73b212351371915440c446ab8650baf9ced48f" } } }, "af7eb4d69ab4cdae0eb49d2ecd090def503798009a9d8e43c2370f01f9a1219d": { "048d3372a598f7a300c38f0ddefba7da299bf7d8ba7fe1a30bbf53fd7ec3546f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.014Z" + "updatedAt": "2025-12-04T20:16:57.678Z", + "postProcessHash": "e05bcf79c6b2fd053cf849ebffd2622904241d74275505a6e16c3827b1f00c9c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.011Z" + "updatedAt": "2025-12-04T20:16:57.676Z", + "postProcessHash": "09277f236f0554bb589288be86de2b7671501ba928be5cd2ecc8818417d718cc" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.987Z" + "updatedAt": "2025-12-04T20:16:57.601Z", + "postProcessHash": "43a473fda8a70a3e1605fdfa5667ef142a3828e69ad8c465455f439e1c512883" } } }, "d613460c9b562b02355db0de5e4d5e795d93c8356530d72c4e6943e450e0cd79": { "21c14d0cc95de05e66c6024e0bc731b06c4934474cc10eeacdc8bce66de73868": { "jp": { - "updatedAt": "2025-12-02T22:57:44.964Z" + "updatedAt": "2025-12-04T20:16:57.694Z", + "postProcessHash": "6e904c99ddf1c0141552fdb17a1378553537c3a08e8394191fb9534e68634a98" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.001Z" + "updatedAt": "2025-12-04T20:16:57.686Z", + "postProcessHash": "f297a08b268e50493e92f6acc7aa6e1a02c90e220e3cf13c87fe4104f73b66c2" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.999Z" + "updatedAt": "2025-12-04T20:16:57.661Z", + "postProcessHash": "61517ce520d7f8c088e8bd21582891499d94771262bbeab79611584245ab8b3e" } } }, "d79d5c1626358051641a02a5df10627db3ec1f8bfe82c284ecff6fc5d29ba24d": { "4b36bae2acf0c20fa2db7f654f8bc8ca933e4db7d7940a5c9c9a26463fe1a7cd": { "jp": { - "updatedAt": "2025-12-02T22:57:45.011Z" + "updatedAt": "2025-12-04T20:16:57.677Z", + "postProcessHash": "c87edf55940c5e9238e47f976ee8e49cd7bcdd886782dea6e443d2873170a909" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.021Z" + "updatedAt": "2025-12-04T20:16:57.683Z", + "postProcessHash": 
"8b49c499c1cc34785226b25629a0fff667e8f01f9c2dc1351e226ee4732ee893" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.021Z" + "updatedAt": "2025-12-04T20:16:57.684Z", + "postProcessHash": "4a35adab9c0955de8c03c3133e22c61afabfbabe436ec66820708a23e0a9cd29" } } }, "09892c5c8c7770850dc4f12c85271ef2eb4054c5c9c132e0c016cfae2c946ba7": { "dc7fead9cdbb478c71bec3f2d3de2e7f32d848c704aedac7d98e3ecb52061139": { "jp": { - "updatedAt": "2025-12-02T22:57:45.037Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "556963cc6c47c10968756dde4c16e5d6d01a4f685fb29faccfa81fc58293afcf" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.034Z" + "updatedAt": "2025-12-04T20:16:57.716Z", + "postProcessHash": "a35976bf8820bc5acb71bb01126d3bb644d339f38f51893235f4813f63e20c30" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.039Z" + "updatedAt": "2025-12-04T20:16:57.718Z", + "postProcessHash": "c9632ab67c7a64c38548a8697922757e4dfac9999fcdc7790a1b68aefae29a14" } } }, "2fcab50b97bbc9aee5c0c03f5a35d374e8c3cdd3db10dc78091477c88a2c1277": { "0a0ef87ced393ab506690dadba9b95b3965777f4f3358eb4d004ea111fe10a51": { "jp": { - "updatedAt": "2025-12-02T22:57:45.039Z" + "updatedAt": "2025-12-04T20:16:57.718Z", + "postProcessHash": "c8bce9d3b60a035c22f901b545a48536df80bca154232c568c32029be8ed4abb" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.031Z" + "updatedAt": "2025-12-04T20:16:57.675Z", + "postProcessHash": "91509dc646d8f47b3b2af52bd1c127df30842d61bb6933a5e887df7361fdc765" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.040Z" + "updatedAt": "2025-12-04T20:16:57.718Z", + "postProcessHash": "2c8f68dd7783fb73f86d04cf2d6a4b1cb957850707144875eb04955b87333aac" } } }, "326c8895de68add9af3b55b704f3bfc1105c0f592e4c66fcf4716d6ad3d6bd4d": { "67ed218e943e01dfd5ac6127ae3673f4c5704dc7e706fa765d94c11dd7f80e59": { "jp": { - "updatedAt": "2025-12-02T22:57:45.043Z" + "updatedAt": "2025-12-04T20:16:57.754Z", + "postProcessHash": "6551a05316b5b1d68b8ca9907a28a78817d58bff04e8ba3622cfb7feeda532f9" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.044Z" + "updatedAt": "2025-12-04T20:16:57.754Z", + "postProcessHash": "7cc8ba766f645572c8fdab50e9ad37cd126b405af35e4612f47ce266f6e647cd" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.043Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": "95d0182fc452600dd4ba9d8e6947855afb49b43a55c7e352b719d4cf2ab0fb56" } } }, "3aef4f3512c85d4057c69557fd85794d38328c9e61205b126b37d4de45a963e9": { "06d1c97a15302255ab6d9a474e72aa8993ccc93d6749dfd1e5e94970da469d29": { "jp": { - "updatedAt": "2025-12-02T22:57:45.036Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "e36d7a5ca89ded46a32863a1ca52463f5a2e803cc1b3c9a11ca6ff80f77a84d8" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.034Z" + "updatedAt": "2025-12-04T20:16:57.716Z", + "postProcessHash": "6fdec180d980fede032cd966f6afaa594605ffc2adf87194bd798dab3943955f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.031Z" + "updatedAt": "2025-12-04T20:16:57.675Z", + "postProcessHash": "c1a3fc9d2ae5ff2748d9b9dac0ba54866d83aea30bc3f07b59ca24e61359ac5a" } } }, "467b72fd8dba8502decf3c42bc9358fa8c4d3014dfcfe6b42bb8f4dce198fd62": { "a67f1de09a8a84f9d6443a0df3a49146ce63494d30ed1c458b9929b32d5a4b7b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.040Z" + "updatedAt": "2025-12-04T20:16:57.718Z", + "postProcessHash": "e317c246af56757870b3907b3b7f8a3887dfdbc1dbdeecc490411008c777aee7" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.044Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": "354bb147f285ad5df7abc6040ab9c58baf523fedf577f95f8b46d4b93c541498" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.045Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": "843a7ec415b1eada2f9399ed5024e61aca639b81a31ad943e33756c1209b337d" } } }, "4a37cce1f00cda917ca47dd0a1a69934772f9e50b5150732050d2e9f70a019cd": { "f5d8080ef6746049caf9a9d8037b9090eeef2259b54e9f42ef3e6a135b796e6b": { "zh": { - "updatedAt": "2025-12-02T22:57:45.038Z" + "updatedAt": "2025-12-04T20:16:57.718Z", + "postProcessHash": 
"5283dd7ce378b5ae28d8bf51b9692e25e18b0e301023e08eaa95b52b7b985b9e" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.047Z" + "updatedAt": "2025-12-04T20:16:57.722Z", + "postProcessHash": "5176ab8b1a922840528e4c9f7ba6f3368cf2d588e0db2c26dc640ce225b9ed39" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.043Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": "0664b9c3f626b61dd01db9269e5a01397a85aad5e52d9e38e470c1e4d3ae4708" } } }, "4e436a71846d9aca6f15dc8c5445f526f911657bccffd77d51b5a4689a95bbf2": { "1ada5cacc80d636b19794a43afd3d71292a74c9e3f3fa93f182b39eb84ad7355": { "jp": { - "updatedAt": "2025-12-02T22:57:45.018Z" + "updatedAt": "2025-12-04T20:16:57.681Z", + "postProcessHash": "8afd1547f9d25eceea449fac9890137f96fd77df0fefa5a30121b7ead3259a3c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.017Z" + "updatedAt": "2025-12-04T20:16:57.681Z", + "postProcessHash": "6b88ce6dd658d514a2bd13058476be4fee5239952cc62595419bf6e6a5c92664" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.022Z" + "updatedAt": "2025-12-04T20:16:57.684Z", + "postProcessHash": "b8e4b09d08f7cae1ef94027e5dbc08d760a8da2ea1c7b48a0993131ac5b9cbc0" } } }, "625ac60abe1e4f7ce4df8ac9bffd1f30f906501c1b636c41e7dee039c1280348": { "eaab285929dea7d9ff8f319faad61a28e866d384a56d15e9eb7a2ea10d96b567": { "jp": { - "updatedAt": "2025-12-02T22:57:45.009Z" + "updatedAt": "2025-12-04T20:16:57.648Z", + "postProcessHash": "f79b0bac386dc848818a2cd99d3a1d918c7283fbd3f748a7ed40f88366a662c7" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.020Z" + "updatedAt": "2025-12-04T20:16:57.682Z", + "postProcessHash": "dc678b3e11b653ed5021e3dce5f39b394798c79c098a568c3b084dd8b4742cc9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.016Z" + "updatedAt": "2025-12-04T20:16:57.680Z", + "postProcessHash": "683f4b462137504ae32b9c1bd1a1b916ab907e5f5af112f64d0ddeaf3f12d8a3" } } }, "67c93fd175b134b8986f749e1afceefc6f06a4487d9ef161d2ea74e2be618233": { "5418ed61ccd90e17c44bbf1d4246b7b4344bcf595b331971dc74df17def6dcab": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.026Z" + "updatedAt": "2025-12-04T20:16:57.692Z", + "postProcessHash": "42914c43262bad40718d51d84a2873ebe94ee73501cb54ec0214dd7608e6d76d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.025Z" + "updatedAt": "2025-12-04T20:16:57.689Z", + "postProcessHash": "6756d2622b2f3bbeaf39edc7d69f0a4c802dac0a4721ea91f186ef7a42bc3b68" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.027Z" + "updatedAt": "2025-12-04T20:16:57.693Z", + "postProcessHash": "a7711015568ed800c0da8a66269ec4bc109296daba67641799fea0ee6190e774" } } }, "8719f0b66c142c6db2cf8773ebaddac3b3d310bd046ca6effa5bb15e73d1a00f": { "9c001ffc30fb8da63ebd6c0931ef3efb9ac209edc160ae449428bb65298622c3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.035Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "ed2fe64e33aa67130b940547c707f452382c35e9cb49982c68707ae877ff32ed" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.008Z" + "updatedAt": "2025-12-04T20:16:57.674Z", + "postProcessHash": "e7c5d94313bd62cc31120e850d2de2b55387423eb1b91e492b4ca418a48c8dc1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.039Z" + "updatedAt": "2025-12-04T20:16:57.718Z", + "postProcessHash": "809dce573583ed288d406bea0660493250aea46289ee5bfa1621c92601e53175" } } }, "89ea779c156a999fdf17c325da1e81dd07a635d696dfd5a115e631154d3dbb2a": { "ecc1acdcb21d77d65ebcdd760265565e99254e242903d6b4483da0a6b4a59482": { "jp": { - "updatedAt": "2025-12-02T22:57:45.041Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": "13c5d1741547b80439b429cfd28a76ea9665d40c8c11a6eb67ba0ec14d931add" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.048Z" + "updatedAt": "2025-12-04T20:16:57.722Z", + "postProcessHash": "1f470b07533db7b43a145b1a098e0cf992ff25fe96cca9b34c90bf367836292d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.007Z" + "updatedAt": "2025-12-04T20:16:57.671Z", + "postProcessHash": "7ea08959669f9243e9fce804bcc0f452afc93f28c1fc21aaec1d89714c1ed2f2" } } }, "9b137d113f115786a055cd8fbc160635ea3e53512ae73d845fd749380bc1f381": { 
"0e565f9a4b2a92384daeaab520393c6426e3c190a2625839b4ead735b7a693f3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.008Z" + "updatedAt": "2025-12-04T20:16:57.674Z", + "postProcessHash": "89675941b2fce4d38b001da357e5a9855e1e11add3611c4acaae54db2cbd9491" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.040Z" + "updatedAt": "2025-12-04T20:16:57.719Z", + "postProcessHash": "fe851b892d8dee1dfe5b2b5bc891ffb3c6a61cab1b556e61c74628e373684760" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.041Z" + "updatedAt": "2025-12-04T20:16:57.719Z", + "postProcessHash": "460ae3878353e63ec46084dc3a108adc9c1335432b3451b38a800becbc8ffc9d" } } }, "a524ef715c9704a1e92f4f92e0b9799ff822e7bf9860bf751ae2b1ff9edf0afe": { "e0f8014536b364d9d9409cff9471107e76279833faca677b2ccf2c077400b851": { "jp": { - "updatedAt": "2025-12-02T22:57:45.009Z" + "updatedAt": "2025-12-04T20:16:57.647Z", + "postProcessHash": "55141a2c62f6535fe32461c33cdeb4c66d4f71681d3a2ef29cb3cb9e1ba45741" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.013Z" + "updatedAt": "2025-12-04T20:16:57.678Z", + "postProcessHash": "18b5b965525cfb9ecd31cc564ccb311d006a1fdf899af8c54548659141e2e890" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.014Z" + "updatedAt": "2025-12-04T20:16:57.678Z", + "postProcessHash": "2b5fdf0ed802043a9fbfbd8ce27a530cdf591f26f18f17a5507f046d57eb96a6" } } }, "bc010b67445245013c815d8c8dd2a711a400f2ac89689de6a705df179ad8c706": { "58a5d26b93b4269bbcac95ceeeb1329954babd6a907538f5319432f3ac4e6b22": { "jp": { - "updatedAt": "2025-12-02T22:57:45.034Z" + "updatedAt": "2025-12-04T20:16:57.716Z", + "postProcessHash": "8568a27a74c15ccde501a0a9bce8b18275eb77e8e62931d5587d460837c55b79" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.042Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": "aaa883772acda6ef1493af2cd28fde6f5bcb58a84290d7e72278f3bc9b072488" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.032Z" + "updatedAt": "2025-12-04T20:16:57.675Z", + "postProcessHash": 
"80d06848ac2ed24c6a0f168d46a74098614e1678640aca1398cb4bada9db052a" } } }, "c0ac70d88c31f863cc7a3f822cfa525fe69266c4bf831f94c2029759cb9726db": { "b931df20b4f6c77ea8d226087a40d67fa3ecf5f9d09ed73922e7aa8f8f763fd7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.035Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "10b20af44be4b47f2291dec2bdf7abdea76a1da1952c744f17c9cf21e25e8a3c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.036Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "fd4296e6dd80576996af718b8abd30296dfa7357b42933d70ce7c3e7c810daf4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.045Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": "6f8191d0eb492ce40ef08240bbe0a25fda9e5289c1135dd1f032e34cfff8aaba" } } }, "c58c920060a64568fe6e344fe00a5ce4d720ac37a93628692770ada830c5325e": { "4a343784a2e6508b5e218dd32d01eb13fe7c9d806b2cb07a5c39a775f7b2383d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.028Z" + "updatedAt": "2025-12-04T20:16:57.693Z", + "postProcessHash": "1f5e347c69dfa473e5463f80ecef88870112df4f52934d23c9cf450ef03fdaaf" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.027Z" + "updatedAt": "2025-12-04T20:16:57.693Z", + "postProcessHash": "24e7e87ad18c3f5a48788333f91860eadd5a81701aff46fda68da0f2ea7b7f05" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.026Z" + "updatedAt": "2025-12-04T20:16:57.692Z", + "postProcessHash": "f33a47a01f0bc67b436f1b77a069457c2145f9bca02d74f64f36cef55eb835b4" } } }, "d61225a37fe0c4d963dda12e6171915748b61bb4ea252b20fee7017863e0f8cb": { "e22f186111d1f322fd63ea2a2ab6b8dabcc933c9f1a1d547efbcaa1d9f78faec": { "jp": { - "updatedAt": "2025-12-02T22:57:45.011Z" + "updatedAt": "2025-12-04T20:16:57.677Z", + "postProcessHash": "c771d8c6f55fbece9d9f3ff59de20bf74ab962c0aee2ec221895d2c7420c3544" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.988Z" + "updatedAt": "2025-12-04T20:16:57.647Z", + "postProcessHash": "6eb114cfe8666006b0758611c3aac91b31dc7065e94a6f160447d69baf610ed0" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.012Z" + "updatedAt": "2025-12-04T20:16:57.677Z", + "postProcessHash": "00c10b3a5d4860d68f8f35d76720caf52bde7db8eb7afb934208d6c6151e5d3b" } } }, "e59d25e659a24273c3eef05daa226fdbfb119134fc9c360fb8f10fa1eda0bc5d": { "cea9fed32032cdfb1fc07ee3fd025b189b279642029231324022cc8c275879fa": { "jp": { - "updatedAt": "2025-12-02T22:57:45.012Z" + "updatedAt": "2025-12-04T20:16:57.677Z", + "postProcessHash": "9cd758575ffbcc16c2e2f18bb0267fb929125516a61955c2d93c06d18cf4f322" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.017Z" + "updatedAt": "2025-12-04T20:16:57.681Z", + "postProcessHash": "1db63de25f0b2b675acfe2c8c46be9a36faf0e9fe5ed4dedb426914689a7c1d5" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.013Z" + "updatedAt": "2025-12-04T20:16:57.678Z", + "postProcessHash": "a510505546d1406937cc84cf51db0bb0dd6a48be8e72146a8549675d44726088" } } }, "ebcf5c14bcf3f123a8337f0e4d01711d0d5350b19f8fceb4989ba4967a454d71": { "fcbe8a223dbb47bb59f5c3e6880beb175753d21025800e5178cb322086eb6eb5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.033Z" + "updatedAt": "2025-12-04T20:16:57.716Z", + "postProcessHash": "922bf5cd2c07bbff286741ce257f7ad4c966b4a0cd32460a358ca9f3589e7e40" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.045Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": "175204d3a8de64b9282f19e41a22c03c64e649ca911eaa3975f52827caf8ab99" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.042Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": "e717307fce2a14a3f15c65632ce8d79b19629c74c109b38d6a16c3157adafe25" } } }, "f8131ef0252b8ff50e0e16a5c5a263d8c4c19d0d5eed0109ad5127d0b7f1e617": { "10eec051f15e6d2b7349c390f8baebb76014741ed3b8e31aa94bf797e786189b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.039Z" + "updatedAt": "2025-12-04T20:16:57.718Z", + "postProcessHash": "3d55f0daa191f0848a7dc079f86e24fbc6f67775766cd52dd1b59620df91f838" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.046Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": 
"dd093dfd6bfc43030edf0867dedb8c4214749d8ee2420027e03ebd679b4a0e6d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.043Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": "d454bac44478a0542583ca9d2ec19e218c1cb42b490a33db8214b26aeffe6648" } } }, "57ad9bdcde77c82a8b9abbf11d3820f549bfb779a29aa35c949fd4b27ff2f01f": { "1e38948feed7f1b2a7b35c47b430e56f07e2438c56f10e45d636ec500990a43d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.041Z" + "updatedAt": "2025-12-04T20:16:57.719Z", + "postProcessHash": "827cca89812935c948003ddafa5afbf9d887620be5cb71f0731b0c572e912125" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.037Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "390e78cc1d2e77d34ac7a7499bf164ef5c8cc514945401938e8f6e10490b31a2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.009Z" + "updatedAt": "2025-12-04T20:16:57.674Z", + "postProcessHash": "e69dd3d121a43bfb9d31b4883785c565dacd38ff55cd6c89bc2fdbfcf8d3981e" } } }, "7ceb6e3c9247297b0f7f32b5bcc5fdd805490fb8b1ac4cb176afdba619355e4d": { "ac6e6531f103ea9f5613e39ee61cfcddac7133be00040a3d2577c40543aa27fe": { "jp": { - "updatedAt": "2025-12-02T22:57:45.038Z" + "updatedAt": "2025-12-04T20:16:57.718Z", + "postProcessHash": "5222678502cdde74d1dd2ce20052944b25a572af0d3c9e3439678a9a1847ac55" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.035Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "d93021511a07dc2c6d271bb037929ffa9406310d6ed20a9e506bceb9aa4c4e87" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.044Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": "16614d2e48714f1d8ad04fa8fe115dc8a67718f510260a4930b8d6400b94cda5" } } }, "b623b9673e0f28805a4afdfc8013cc9c06d3af3bc31cc33238b2d1a449d4888f": { "141f6e9d777628dad68e29e4db62adc7411f17cbe61f3611de81835eed95ff15": { "jp": { - "updatedAt": "2025-12-02T22:57:45.033Z" + "updatedAt": "2025-12-04T20:16:57.676Z", + "postProcessHash": "94051e243799eaaa2c3428ec02216d9dc3382a4d5b18f4f09db69c8c05522153" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.032Z" + "updatedAt": "2025-12-04T20:16:57.675Z", + "postProcessHash": "ed3e2d870f2d43a79a0310efed804929eed317ee8201ca59b0527c18ea562b33" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.007Z" + "updatedAt": "2025-12-04T20:16:57.671Z", + "postProcessHash": "21a28c65c0e63dc2eb0b0939ae6878aef3025707057c8375f63f8a04f3b32556" } } }, "bd529fa629c3f10310f525658df96bc82a80d75ff52d1995cafe1e4b13e747cd": { "ff7e68ef737ee5b8e75daa40f60eb811c121bece05086608bbe25c6ac85d8715": { "jp": { - "updatedAt": "2025-12-02T22:57:45.049Z" + "updatedAt": "2025-12-04T20:16:57.722Z", + "postProcessHash": "afe779123d9b15b40cf2cda23e3cd1b92a0d650144b7fbbbbd4c64534d15546a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.048Z" + "updatedAt": "2025-12-04T20:16:57.722Z", + "postProcessHash": "44e4638157a3d7f941e1f117300d77a5bceda858e654fbda6097b886bedddbdc" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.049Z" + "updatedAt": "2025-12-04T20:16:57.722Z", + "postProcessHash": "6e72d4ad51896329bfc512721c2ff70cb181f1c7e870c8721daeabdd1baa3005" } } }, "cbd3fd46a4918ee9b9919e72d00bd7ce3d00418bb1705c843c677adb3e595a3a": { "0613ad7af0509f61658a0f7a5e17e617139bdf209f37e63f862416353f1241ef": { "jp": { - "updatedAt": "2025-12-02T22:57:45.046Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": "00f9c6492eeb0690fca939fe6a03c6aae3ddfc500479e6bdc3b6a135ba4c72b1" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.045Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": "35624049d95cdc289907942ffd43712e6df4564a9d681efa1c162dd6baddf48b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.037Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "8e3fe49c910a80d4c49d16cc28c8e037553f1c9b5427551c83e07cb2afee9c07" } } }, "e1167cae2cc6ec3ff98f99cc5fdc814c1613d183ffc5a294e5002a5c76629f89": { "bdc0fd08e9185e494c67e0405a76d6b5ff3f2a66fb66986f38ad9fb1486504d8": { "jp": { - "updatedAt": "2025-12-02T22:57:45.042Z" + "updatedAt": "2025-12-04T20:16:57.720Z", + "postProcessHash": 
"45d8a86f887f200617915b92c87c6fe4ff33cc71fa5884a8153b604460ea0882" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.046Z" + "updatedAt": "2025-12-04T20:16:57.721Z", + "postProcessHash": "9b99b08bd6b7bb865f42dc110c38f15b51ce0cb66e9eae1bb40c94786d953a4d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.032Z" + "updatedAt": "2025-12-04T20:16:57.676Z", + "postProcessHash": "72c6484d31b398349961752a1e0dde81145c01dade60d4fc41e3aa77e19c3f63" } } }, "fddeb9c1bb988ad91fa2ab2fd48f16446790394aee1f2ea892b74b4703663d8e": { "40a994cb1728118007e9bcec1d1e95be3ceda608e471c1a73b546b7c438f8ebe": { "jp": { - "updatedAt": "2025-12-02T22:57:45.036Z" + "updatedAt": "2025-12-04T20:16:57.717Z", + "postProcessHash": "b04322a982b1e4eb5fdd59bd9955e4f5cff6e46a4018bfef887a4382c4b666de" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.047Z" + "updatedAt": "2025-12-04T20:16:57.722Z", + "postProcessHash": "0a22e72f54139962952823f4821676a50bbc95f22970458aa4346bee40ab1ab8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.047Z" + "updatedAt": "2025-12-04T20:16:57.722Z", + "postProcessHash": "d84f2a24166609a68f1f46320cb564ce9b3397364ea601f0f2825550268e2c8d" } } }, "08bffe1dc74222857bd9847a9079d22910222fbbdc1dd9975d3beb50f284f3ee": { "6ff985dd3eb042cd0776c0981bb360df764da84db1d5f50ba4c7bc2fd4394a58": { "jp": { - "updatedAt": "2025-12-02T22:57:45.112Z" + "updatedAt": "2025-12-04T20:16:57.770Z", + "postProcessHash": "6bf45f39a61e8ace8225a623afc0c1c2c1ea015f283ace46c0ea4811a621f9cd" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.111Z" + "updatedAt": "2025-12-04T20:16:57.769Z", + "postProcessHash": "a5b60e3ad9fdb455d570127ceae8a0401996406e768f2b07dfea167e94203953" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.113Z" + "updatedAt": "2025-12-04T20:16:57.770Z", + "postProcessHash": "18272011fece902334999e53e7749ad33f75e5793a330ba3303ab143aa0656ef" } } }, "3a9bf422a9a85629cde7696a05524d19ff37ff8a14e26aa9d363708d50ca69ae": { "3106e22f04396e24e2bcfddd311b6bf015d441abff426e8f3e45320a55f20c46": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.114Z" + "updatedAt": "2025-12-04T20:16:57.771Z", + "postProcessHash": "72c7a34e959c0f0a84b2ef07b83b20ec931167cb9231eaa6aebd8748fc65be59" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.116Z" + "updatedAt": "2025-12-04T20:16:57.772Z", + "postProcessHash": "6b47b4caf1476b73be8fc2144b48e4ad3af39595186bf683d3a23767b025ac61" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.114Z" + "updatedAt": "2025-12-04T20:16:57.771Z", + "postProcessHash": "2a0aa1f4477e663ca9dc3cc18f139623ec7d97a456e7697b94413195796e020d" } } }, "0b2918c33c54636514f9106c16c6353f3c90189cb35c245462f264976b158e49": { "8b5e41f784e6af3f274d3cbab3bb71a982e9a0b2df5cd5050b3f76eca71460f7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.132Z" + "updatedAt": "2025-12-04T20:16:57.783Z", + "postProcessHash": "9535c591998d631b28eaf2dc8a9df7f06bd56871a1954408b81c92911f714d7e" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.105Z" + "updatedAt": "2025-12-04T20:16:57.779Z", + "postProcessHash": "be9db05c161912bfde11eac461aceafe3709a38c09081116c74c139609b6ecf4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.107Z" + "updatedAt": "2025-12-04T20:16:57.780Z", + "postProcessHash": "32a1b3b56863a10bb78cd6889c602c5353004aac0b704062096ff42b90415e8e" } } }, "190291cbeb8e03da636d545882454df1f5969a43233fa8547a340888416e0d7a": { "1e21922b278cc488c7ca6142a0b58330666f67ff429c778024409f871aeca347": { "jp": { - "updatedAt": "2025-12-02T22:57:45.097Z" + "updatedAt": "2025-12-04T20:16:57.759Z", + "postProcessHash": "09b7edcbe92b4cc6b41ea070ab832c2b7d70f31666c8990a79a1bae55d8d5965" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.124Z" + "updatedAt": "2025-12-04T20:16:57.767Z", + "postProcessHash": "404ae15d886d9f74114f0fb2bd6fddf5be90bd468d2104e1ed16405d00b15ad7" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.123Z" + "updatedAt": "2025-12-04T20:16:57.767Z", + "postProcessHash": "dc7ddd29affcbec086ea80681be99267f83665f820caeefcf3217abd17f5cf92" } } }, "1dbfde47d0ccaf8fabcd5ad6a4296155b1b096aae0b5f8d17a8c1b756b2695fb": { 
"665e7928e61709a3964eb76535bc335c1bee18c8bc09733558199e232956630c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.107Z" + "updatedAt": "2025-12-04T20:16:57.768Z", + "postProcessHash": "d6f927fa8271304c2a5e0db1055044b5e260bfd847c8116da151ac3566c88cd9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.109Z" + "updatedAt": "2025-12-04T20:16:57.769Z", + "postProcessHash": "a8f515005ac723939d4f5463ee743c504e0072a85291432d2c45af7e843068ad" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.106Z" + "updatedAt": "2025-12-04T20:16:57.768Z", + "postProcessHash": "0ef63c7a84dfd9b8ec6213171c2e743ac990523925946f141d4ef79647690968" } } }, "1e6d8899d944f96b533c9b1689dd0f3c45d1f4d88d4d1edd3d0cd126273c28ae": { "874433a820ac2a172772ed12a2a2e43d64d72b5fa3f8c9060c2ea70f9d9969b6": { "jp": { - "updatedAt": "2025-12-02T22:57:45.110Z" + "updatedAt": "2025-12-04T20:16:57.769Z", + "postProcessHash": "c995b5d0f47ce5f1b027f9390faf44db3b19020ade38918f11b31c20cc266bbb" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.111Z" + "updatedAt": "2025-12-04T20:16:57.770Z", + "postProcessHash": "ee0c8ac7e05f33ffeef8b5cde7efa779506cce9b9f988c57f867669e4136a233" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.109Z" + "updatedAt": "2025-12-04T20:16:57.769Z", + "postProcessHash": "b8a091652c96e14eb803fff7f6cf5feffabce8c04113f694b453a4c7ea73269a" } } }, "267616b5e710386f1e95782b057051b61f78cf2ab9ab90a87b76171e1110ba0f": { "526635ff55be813366ca95dd8408fe2713af702ad3c42ee3f6df159c36d7d754": { "jp": { - "updatedAt": "2025-12-02T22:57:45.129Z" + "updatedAt": "2025-12-04T20:16:57.781Z", + "postProcessHash": "ced14aedc77a7a31cea9926cec6c2faa73298334bc9f80ed102f1936c3780772" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.126Z" + "updatedAt": "2025-12-04T20:16:57.780Z", + "postProcessHash": "a40473617567341792692242fad3a712e9388d4c4479b5f8b1dfba340afb097c" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.129Z" + "updatedAt": "2025-12-04T20:16:57.781Z", + "postProcessHash": 
"453e26431bfc4b1ed1e267802d2996d3b172541c53f15cc95efa32a4dc511ea5" } } }, "3db2189c4ab253714a8026c40657f8d09c5b44880bacd30f5d37a00af55f0af9": { "2e5559b28181e920ab535b8433f1644911413cf5aad2b7f7f2077a2124cdb9a5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.113Z" + "updatedAt": "2025-12-04T20:16:57.770Z", + "postProcessHash": "0d05ed0f7213b35bd64142d742afe7e7ab6f8ac180245e4765d3b6b44f97aac4" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.108Z" + "updatedAt": "2025-12-04T20:16:57.768Z", + "postProcessHash": "ea9b6815a1f0ff21275d6d0046894e68495c000f5d47d4ef3b1f94f75ac84b06" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.109Z" + "updatedAt": "2025-12-04T20:16:57.768Z", + "postProcessHash": "45a0adf7cb18d3b2a0fea1ea5ccbbf27b34997e5c34cdc6f2365279e8d35a431" } } }, "4887a31d41443a8cec80c653b5cb1471ad7101392e2a0fd85344bf550b4479de": { "5d542d21d2aeff7420ac405c3efb0280de56bfcdabe3edfdeea55aee2ee0816f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.108Z" + "updatedAt": "2025-12-04T20:16:57.768Z", + "postProcessHash": "038e9e15c60993beb2c1dc3e65e409f744a260d616898ff4414b6ee59d134509" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.106Z" + "updatedAt": "2025-12-04T20:16:57.768Z", + "postProcessHash": "0f7cb1300cd66b806c98209903cf0b403b6cf1bd2375baf65c9e4c1fe8624a08" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.106Z" + "updatedAt": "2025-12-04T20:16:57.768Z", + "postProcessHash": "e5dac5bfa18675a022881dd39b4b4d296e5445e1106dd1032935be0d45bb09bb" } } }, "5e3e9bc17b90a0989880b5acd7291677843b0466fc3c36993015c0a7331f4c86": { "50e422154e7d9340b9ae3e608a02ad691373881011458d12ee9329b251e2ee21": { "jp": { - "updatedAt": "2025-12-02T22:57:28.746Z" + "updatedAt": "2025-12-04T20:16:57.723Z", + "postProcessHash": "eef30f662a4e5d88cdd4bdd6b6e36757a41b478ef67fb9e35f38cebbf526ed37" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.118Z" + "updatedAt": "2025-12-04T20:16:57.784Z", + "postProcessHash": "c141fd13cf527215adf7fc5970abd9a081e50e78c736adf93810ac1063ba29a0" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.117Z" + "updatedAt": "2025-12-04T20:16:57.784Z", + "postProcessHash": "b85580892d4e15c10f07fc37deecb4815af1fed528c16afdd3996e235cf4ca43" } } }, "6820315a7841bbc8c89a60ac5aa8c0fe4457e414cad746f3bed1650c3f297bc6": { "6d8963200cc850f442fe2995954f739d20436c4a7fb4b2ec7f8a636bc53779a7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.118Z" + "updatedAt": "2025-12-04T20:16:57.785Z", + "postProcessHash": "40c09f12d6185409e5de4b4c33ea667fdd56a5747c7ced7b93ae03c9376f0cb8" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.112Z" + "updatedAt": "2025-12-04T20:16:57.770Z", + "postProcessHash": "aa32fbcdef5b4916a6100a04a0104aad247ee4db20b0f4e265964b5d0a664d42" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.114Z" + "updatedAt": "2025-12-04T20:16:57.771Z", + "postProcessHash": "e43194a128eb65135b2fcf64664df0f9c1b169815cefe33b94e40e45fd98fa3d" } } }, @@ -17901,104 +21881,128 @@ }, "3b63277eca58a6be86961cdf77d03b10bf3995740802c612a1fe8d80ea7d20ea": { "jp": { - "updatedAt": "2025-12-02T22:57:28.749Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "450688e827f01fbecc42c79fc8875c5cde7abd85bdfa0317f2cff94fa1c3b657" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.749Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "b53bd706e510ee8c0bbf680000b4b5e9047e214b51474d3a9cfe376880e80b99" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.750Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "b5f8ee7516ce872e2445af0cf97ab7005c33e0538afecc40c64e4da4cc001036" } } }, "e0c7e0ffde8dc72698165f5f5a97336beb9082111bdd4a6c98f10c02ab69cd27": { "1bd7f94ef79ae4a259d5eb60f577fdcaa8d2926824240d88238ffb4e9d917715": { "ru": { - "updatedAt": "2025-12-02T22:57:45.110Z" + "updatedAt": "2025-12-04T20:16:57.769Z", + "postProcessHash": "95d42ab9fb023a0392dbc23bb3cee02df92da937b4582542ee22277531047c3e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.113Z" + "updatedAt": "2025-12-04T20:16:57.770Z", + "postProcessHash": 
"bf766e99ffddb00223031ad6bd9f493734cd89619ec2cdba1583826ab9b89609" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.113Z" + "updatedAt": "2025-12-04T20:16:57.770Z", + "postProcessHash": "f0a0db2fe79ec27bd3ffbc024b74e6c5810adce6c6e6d52eb91e39d126e14cd6" } } }, "09967fd0502ac05bc286aeb301c2cc87873b2a18ef14f3e2acde54345b2ce839": { "ced484d2a382f8655c9d000bcfd985aa94545bc671aae3824c264e06b17c1fb5": { "jp": { - "updatedAt": "2025-12-02T22:57:28.764Z" + "updatedAt": "2025-12-04T20:16:57.802Z", + "postProcessHash": "7e99f2d966b889054100caeeddee9c4ab7cc968c7892ada28b9a63e6a7ab99de" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.119Z" + "updatedAt": "2025-12-04T20:16:57.774Z", + "postProcessHash": "8e7e7117206187a13525e26823ce9d1ed21790ce6adfa7b099d727d68d1ca799" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.763Z" + "updatedAt": "2025-12-04T20:16:57.802Z", + "postProcessHash": "3a7b51f37f79da0219126373c991de922f715c1a1a0acddfa2f4a23b19a4444a" } } }, "181adac272e2abd83cc757fde65fb79cacfbbfdd22c49560ad9938dc95ca360f": { "6aca92cecd7097cb7ee90b10d02efba74d48a3de1843308bf7b14b842592c336": { "jp": { - "updatedAt": "2025-12-02T22:57:45.124Z" + "updatedAt": "2025-12-04T20:16:57.767Z", + "postProcessHash": "cdaad5869ea8e0ab4790d7d80dfc8b4c7e3e0a47c8ca3fc41412f19fc3656c0b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.123Z" + "updatedAt": "2025-12-04T20:16:57.767Z", + "postProcessHash": "9bb0ef636b32f2bddab2f6b0fc241f32c1fc2d8d18e77a2238b06eef583b43b0" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.129Z" + "updatedAt": "2025-12-04T20:16:57.781Z", + "postProcessHash": "206fb87f6adad4711c3a90ccd19e865c177bb9f96a6095545bae6e87afe0880b" } } }, "1e8da80bc94e12875fbc8b4285abd87a9ebc00408979ef39716bb53ce4293704": { "cca901fd78a63bb4eb045aec0ee20699b9ea63520630a96e5bc254085761c479": { "jp": { - "updatedAt": "2025-12-02T22:57:45.137Z" + "updatedAt": "2025-12-04T20:16:57.785Z", + "postProcessHash": "20cae0bbc2139994dd4324d84c4664c6c52669fa5f5a576ea8233ecd72f25fff" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.138Z" + "updatedAt": "2025-12-04T20:16:57.785Z", + "postProcessHash": "3f80cead9ee0f656ff8b919c7fcad6b97d2fca209330c75e1c0f25a730da673e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.136Z" + "updatedAt": "2025-12-04T20:16:57.785Z", + "postProcessHash": "d7be0b88bac51571e638b18e7c6087214493b564db8bdc33fe4c6d414539befa" } } }, "28af1868b1ea9cdd8d1446f03dc1a91a48ed271602879f18d0d3211752aa2e0d": { "38f892b234c9e0a9d0e3e6bf087768671979427a8bbaf831e2c0cd94e5730d2a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.139Z" + "updatedAt": "2025-12-04T20:16:57.786Z", + "postProcessHash": "949add8772f2b7bbb93e32b49cdc3917fd693f0ac2cd8792987ea649590e3802" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.142Z" + "updatedAt": "2025-12-04T20:16:57.788Z", + "postProcessHash": "29c343c732b399b47777abfe5937196506f1611f10324562bce719d10d8bb399" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.144Z" + "updatedAt": "2025-12-04T20:16:57.788Z", + "postProcessHash": "0666f73f2c6b375a7f59d5b14257d6a0b278badff898382f39158067afaf568d" } } }, "352b7210abed12f4834ce3966861f5819c1b015976a552f4d8f3417367d6519c": { "aa0583b1c517ae46447bcd58d7475ba0f4350a3b5974cd1a472f07e84ea2b12b": { "zh": { - "updatedAt": "2025-12-02T22:57:28.754Z" + "updatedAt": "2025-12-04T20:16:57.799Z", + "postProcessHash": "6ccc9ba3f7eac5762c1acaf4f499f227a54a6f9f8ab1da3115010df9b1d838be" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.120Z" + "updatedAt": "2025-12-04T20:16:57.774Z", + "postProcessHash": "bffc5eea03dd475d76c1fa17c9122fd9bc93b8d80606793f5a421349e6d7e8f1" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.759Z" + "updatedAt": "2025-12-04T20:16:57.801Z", + "postProcessHash": "57b5c6d035c29745590d7435d965ccc37b0de09dd96e2a4fdb208b04963c2138" } } }, "3e04e93b41ef14736c12d8caaaae2fd7c113b2b4ab71ad84553b87b688b2ce7c": { "44da72d1f89df587a02ef24e707acb1da8350d35e7f7a73fc92e5b863e479a62": { "jp": { - "updatedAt": "2025-12-02T22:57:45.144Z" + "updatedAt": "2025-12-04T20:16:57.788Z", + "postProcessHash": 
"08e7e92d7ef19dd6f4bc549f0c638d8fc44309ca085418bd914f7d28d4ae7b7c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.143Z" + "updatedAt": "2025-12-04T20:16:57.788Z", + "postProcessHash": "b22fb9d058bebff89b8e39f94942b1f0046453844a21f480ebd875efe56550ba" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.135Z" + "updatedAt": "2025-12-04T20:16:57.784Z", + "postProcessHash": "2f9c9abdf460ead7fbabd64cf066932c09f4f589c22fbf320aed13f2375d9d13" } } }, @@ -18016,13 +22020,16 @@ }, "3203f84f048ca009b9f90eec080070e282108d50d7ba6ea5bc93169f873eac70": { "ru": { - "updatedAt": "2025-12-02T22:57:45.101Z" + "updatedAt": "2025-12-04T20:16:57.726Z", + "postProcessHash": "81dadf65c9d12861a7cd623a55fd3c1b98b73dc6ff6c3897615241cb297b4d45" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.101Z" + "updatedAt": "2025-12-04T20:16:57.759Z", + "postProcessHash": "e50185733620fd94d36a547c3f15018b672d9dd32c003871e55eb22842bb2807" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.101Z" + "updatedAt": "2025-12-04T20:16:57.759Z", + "postProcessHash": "04080a10bdd0c4b69547897121f77c105442d449a1e6741332be8cfb5690cb9c" } } }, @@ -18040,78 +22047,96 @@ }, "f1f6c6ba727fcac4034e8e533a8a14914f296de5811f8ef69aaccc190ed52c04": { "jp": { - "updatedAt": "2025-12-02T22:57:45.098Z" + "updatedAt": "2025-12-04T20:16:57.775Z", + "postProcessHash": "0ebc0c693063e21fd55eb2ea129c40e96918cc4650453ecb108b1efef1586926" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.120Z" + "updatedAt": "2025-12-04T20:16:57.775Z", + "postProcessHash": "a045c0588f3f9deb004d4060cd28aa16421b7cd8966916a076fb892fa9967be6" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.121Z" + "updatedAt": "2025-12-04T20:16:57.803Z", + "postProcessHash": "86b3994dc812b0e8eee7d7163022f6a6037aa46c970bf84d68c012a4c23c8813" } } }, "56a2d0968dd32b192f6e6833bf129bd2a1a73e16d498c9f8a64c8e8cefcb7635": { "85317ab67c21185490c8ce6da9f40ae75c6aa792d046b52122da1555de6a0d7a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.126Z" + "updatedAt": "2025-12-04T20:16:57.779Z", + 
"postProcessHash": "1bd9d0382eda2a10de558b98139b7b0d6492fdee36cd8fc312849922e8a9520c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.130Z" + "updatedAt": "2025-12-04T20:16:57.782Z", + "postProcessHash": "2208fad3d32864b5bdae55e8927891e4041b7ace40899d02336f834bf31401a8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.127Z" + "updatedAt": "2025-12-04T20:16:57.780Z", + "postProcessHash": "af7f68d48c9d0fbc19b41b54966e28172bcf36b3d6533457af42e11ada2deab4" } } }, "57fb93819b163681fc7674df87acd51d16808daf3c9a80875363e714ab6b6f0d": { "589fc5521d34b691619a0775483550005c0339c397f9c5eb2ad84a68d38fc0c5": { "jp": { - "updatedAt": "2025-12-02T22:57:45.131Z" + "updatedAt": "2025-12-04T20:16:57.782Z", + "postProcessHash": "f166414083284477507e5c7dbfc55016e248d16e62801b9fa12180dc84660500" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.124Z" + "updatedAt": "2025-12-04T20:16:57.767Z", + "postProcessHash": "d6f1f1e65cb3757ee107d14f92216a63c14051edefe9fdccf426cee3058ea746" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.126Z" + "updatedAt": "2025-12-04T20:16:57.780Z", + "postProcessHash": "3b58326ca0ba0379c47938e33f11482372fedfa021ffae22b4fd6c77c89b56b7" } } }, "5f7acdc3b5ad3c4b70f2e0f6421eedcef49bbf5fe1541b93de796181d282e3f8": { "c3b3c36e1615ad52f46683413733ab6deb9809b9216880d962f14d2b316e6812": { "jp": { - "updatedAt": "2025-12-02T22:57:45.114Z" + "updatedAt": "2025-12-04T20:16:57.771Z", + "postProcessHash": "42ce9afc68d46521cca2c46a9c373ba7468877655ff9e4a2ef8a6c4f52385c38" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.116Z" + "updatedAt": "2025-12-04T20:16:57.772Z", + "postProcessHash": "46c91f9b4118b943d19409660a25ab3a4b3b25fbd53591e640d5e238cbf66499" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.117Z" + "updatedAt": "2025-12-04T20:16:57.784Z", + "postProcessHash": "12b0bb10349c7245a144e50b9367ac8a4ff9d016abbe1402991eadd690c0415a" } } }, "720286aedee663b0895eadfbb8c855cf28e8c889a5c1e959eba2cb56410fe0ea": { "8b424c806172df3664b5a02f66fa091e75d922eace7c6d17ab06a1cd4d48ded0": { "jp": 
{ - "updatedAt": "2025-12-02T22:57:45.125Z" + "updatedAt": "2025-12-04T20:16:57.779Z", + "postProcessHash": "f54ee2f588cc52d3291430b589cbf7c83cfc559bced651bb0e521d0e71f7f522" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.130Z" + "updatedAt": "2025-12-04T20:16:57.782Z", + "postProcessHash": "4a75bd2f32f043105ae263c777c7c189f54b5147cf3b18f4d764bd2ceb11927e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.125Z" + "updatedAt": "2025-12-04T20:16:57.779Z", + "postProcessHash": "277b227a45093d39d157c5372f84bcb6fec374a9bd5a3f9b46ff6a912fbbfd4e" } } }, "72359f73659f510486d116f7315a779a8e182fd9217ec98122618754e8e8e523": { "b7f70662c0d64e5760316e2f601553929e92b4cd5b7d382d9d395b743c0236de": { "jp": { - "updatedAt": "2025-12-02T22:57:45.143Z" + "updatedAt": "2025-12-04T20:16:57.788Z", + "postProcessHash": "c29075eba041e71fd575c11584e031e0c0e468c1eb26e424057c99e0b9d83c4d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.097Z" + "updatedAt": "2025-12-04T20:16:57.758Z", + "postProcessHash": "104cba0202daf96e954ee89abbcff75ca8ed95ed7a33cd466642f042c1ee3497" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.136Z" + "updatedAt": "2025-12-04T20:16:57.785Z", + "postProcessHash": "a46c0894b1adb1bab0abd7f0bd4f9acc9a0b122908f1c0a269c280aecaef895f" } } }, @@ -18129,260 +22154,320 @@ }, "13a75f2e1510d071925413f0a9794c0c5df227d3f3007ca6a25a865fbf3c7afb": { "jp": { - "updatedAt": "2025-12-02T22:57:45.098Z" + "updatedAt": "2025-12-04T20:16:57.759Z", + "postProcessHash": "fc834c977ce4cdcae12e1054f310c08acb031012c1907ce5186a90291109b8df" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.099Z" + "updatedAt": "2025-12-04T20:16:57.759Z", + "postProcessHash": "bdfe1a6c90a9b597d5ee6c507ac791dbd9474c79acc5e1de7e7c6e87b496ac23" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.099Z" + "updatedAt": "2025-12-04T20:16:57.789Z", + "postProcessHash": "73cbf752c31070a36dde6ff03d2adf238d92086ac82c4a67c738f5dce6803315" } } }, "a27f8d321849f13ef579bf79bd9fb504adce87fc32377cb34f1d87d0247b62fc": { 
"0af225620d1128bf2b7b6df1fd290b2f9272232c08e057bbcdddcb8da980d877": { "jp": { - "updatedAt": "2025-12-02T22:57:45.115Z" + "updatedAt": "2025-12-04T20:16:57.771Z", + "postProcessHash": "38665892ed7315638d4b85b0f2b104c0d6fffb0f8e4d35dfb9a0096afcc89b15" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.119Z" + "updatedAt": "2025-12-04T20:16:57.786Z", + "postProcessHash": "9240bce7da3ac06620ba2a49b24110ebb6bfc2642cd9be65cd6e9df1f9a3f389" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.119Z" + "updatedAt": "2025-12-04T20:16:57.787Z", + "postProcessHash": "967979a64434f3485bc29c8ed905f8ba50a3cf3d5ee8b8df2db381c7826878c2" } } }, "bf4aa8d8478e9cbccac2af56a2392959e788a6b441ae1d334d378fe41c813431": { "03be8e55e0b7b3239928d3c046bcafe55731c78e43aa66ee2a92c237cad32296": { "jp": { - "updatedAt": "2025-12-02T22:57:45.135Z" + "updatedAt": "2025-12-04T20:16:57.784Z", + "postProcessHash": "842d6869d4cf9cd88af6c4e60443b5c722baa1a445b72e4123e50b684902a6bf" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.132Z" + "updatedAt": "2025-12-04T20:16:57.783Z", + "postProcessHash": "eabf0445060f64126d7611d45f55a08d96ecf5681d38aa335df9b0766980ffd1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.134Z" + "updatedAt": "2025-12-04T20:16:57.784Z", + "postProcessHash": "2fbd27bc18f0dd632aad9c0bb0b49ede922f74b57e082b5f9346195615684269" } } }, "c6f8d4ed5ef7dc56f976117869cc7a69922f064662bcdd47f24b593a903bb511": { "66256e49527646d9c1360a5db02fe360c867281e0fbebf9751bf3d0a5e4e0116": { "jp": { - "updatedAt": "2025-12-02T22:57:45.128Z" + "updatedAt": "2025-12-04T20:16:57.781Z", + "postProcessHash": "6b2beafd2d6fad7ba7ec4a34508288e28f8d2fe9540e686dd56649d6be555c9b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.130Z" + "updatedAt": "2025-12-04T20:16:57.782Z", + "postProcessHash": "d7cdb71c2ae15150357af83116e07ef37d0fbd283f6afa49a66805792dcbfd2d" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.131Z" + "updatedAt": "2025-12-04T20:16:57.783Z", + "postProcessHash": 
"38bae75f613d2af938829b383820da55845ef2e82b5ed86366fa3f0d7db9b90d" } } }, "cf5cab052feab37e254b75324c3a852334a8eb3c58db22a1686c9494d09f443c": { "d809412f215411acf69b12810108cd424016766dd4d30a992351f8e69bf650e3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.111Z" + "updatedAt": "2025-12-04T20:16:57.782Z", + "postProcessHash": "1e38eec3b6a9658e04a4332a06c8c32f2af0479eb6ae465f9a2e6ef06bf93db1" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.117Z" + "updatedAt": "2025-12-04T20:16:57.772Z", + "postProcessHash": "cb395bb56f10ebf2ce60b0c93f0fa754c730ec9af5b57a76d324303f529480ad" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.747Z" + "updatedAt": "2025-12-04T20:16:57.723Z", + "postProcessHash": "0674e3ea03367fefcc1b9ab1ca2a9fa078d015129f0523cab1e9e422793d9f74" } } }, "d9f334133320c651967d1b5b665ba9cb709fe4d09178893258245d70b28c5b25": { "ab1cd75a382114032d421c93d59ddfaae337e9528e1ac6b02cc19764422a2124": { "jp": { - "updatedAt": "2025-12-02T22:57:45.140Z" + "updatedAt": "2025-12-04T20:16:57.786Z", + "postProcessHash": "83754470552434729b2648b7c1cbc4a42fa4728409db6d1e9ed69a568f9f4b8d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.142Z" + "updatedAt": "2025-12-04T20:16:57.787Z", + "postProcessHash": "62eb6d78cdfbdac45b9498c3334d819c334e22705ebb54fff3da2d5da1c88907" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.138Z" + "updatedAt": "2025-12-04T20:16:57.785Z", + "postProcessHash": "f2612f5ee490cc4efb0e0b634d471114f8f38febf93e331b2f9f42160d97750f" } } }, "da0fe2e9eb4d4168fde541e5a4aa216882f11f0fe02c65758804bc42306051b7": { "460c5141199908b2fb1f8ada87d50d25899e1061548dd77278916ae9f0194eb1": { "jp": { - "updatedAt": "2025-12-02T22:57:45.118Z" + "updatedAt": "2025-12-04T20:16:57.786Z", + "postProcessHash": "c7ea200bb85df4ff6d093a4320996d689c62acda72cb23bcd9bb69bacbc50603" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.115Z" + "updatedAt": "2025-12-04T20:16:57.771Z", + "postProcessHash": "830a4a6d9fb1f97734b2234e8a0bac72e4499132476d1367e16c649b77a7eec6" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.116Z" + "updatedAt": "2025-12-04T20:16:57.772Z", + "postProcessHash": "c969464212636d27de39495bf7090e6fd15176cb0ac1a5c6ca707a25b4c96a5f" } } }, "e1c1bce938fcd121a541dda56b44194cec991a3c399320d28f68662d4f2aa155": { "ab303b424478e35d2600d95fd49a29737cadb6308b63d04e43f98d10c38b5cd3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.139Z" + "updatedAt": "2025-12-04T20:16:57.786Z", + "postProcessHash": "1e92752a4940ba86fb4f413458efb1bfb27ff237e5af62e298b31271d98999c3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.141Z" + "updatedAt": "2025-12-04T20:16:57.787Z", + "postProcessHash": "669e8aae368268e20d7112f2ca66f87c270974e7f86cc02e75abe887b04fa1e6" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.138Z" + "updatedAt": "2025-12-04T20:16:57.786Z", + "postProcessHash": "caf4249d5b2a452a930506e72268578143d061ced427b17bb997d5d3a7431b06" } } }, "fd5ff75cec53563913c25d3a84cb92ca6b7f928115d7912cef78a22dfc907f29": { "ba4164cf48205f79abd50e8ce1180feb106ddcdda361d67fbf580922f1a8bf3d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.110Z" + "updatedAt": "2025-12-04T20:16:57.769Z", + "postProcessHash": "1e603a1a920d7f558eefc9e0de09a2b106e28ccc37230f439f029b7a7cad3327" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.106Z" + "updatedAt": "2025-12-04T20:16:57.768Z", + "postProcessHash": "90f592d49615a2c000c187171ac7c60bc16422614438fd3ba4ad96081259342b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.109Z" + "updatedAt": "2025-12-04T20:16:57.769Z", + "postProcessHash": "c95d31d3ecdcbe31471bd5ff8c70efe8481a397f75c8f2a8f3e6ff03fcdfc0ac" } } }, "176d0068a5182e14c24f7b86a941e2993dd5d5375dda5f359181472f50bb49a6": { "3c0a49ce0175e9ffb151adc18ac51e16f2d58c189a49b071eddff19741b2773b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.759Z" + "updatedAt": "2025-12-04T20:16:57.801Z", + "postProcessHash": "d264057c4048da5b061b4236c9332a656f36a65fe5ab5644b96aee307e30af55" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.758Z" + "updatedAt": "2025-12-04T20:16:57.800Z", + "postProcessHash": 
"d54c698e36d634a2aaaa1fbb6952b52f32d72662f281ef6f38de4f9a4d2a3941" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.762Z" + "updatedAt": "2025-12-04T20:16:57.802Z", + "postProcessHash": "bcacac99c3f0aa0f2690b95c9286fdcd3e2f05f748b90b73370615f1739cb435" } } }, "2fc9ece7b731c86425713493bf6fdb0053ccce96ffd9f63a70eea4019cdff660": { "547949490f707e9c4812b2f1acebb85c8f7858c6f4c8d030784a54ffa0f6764b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.789Z" + "updatedAt": "2025-12-04T20:16:57.799Z", + "postProcessHash": "dea6f7fec109abafdf25f96253fd53cc000004f2850dce3587d1f3ace5514075" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.789Z" + "updatedAt": "2025-12-04T20:16:57.798Z", + "postProcessHash": "7265f98eddf21a6bb177b1cb4386e321fa74648241b47eb6a70320fc82b3ec7f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.151Z" + "updatedAt": "2025-12-04T20:16:57.799Z", + "postProcessHash": "d51a1b5189fe5a8e68daf73908e127dd712986f83bbafde7d3ff7f7cc446e6bc" } } }, "356a5236e325bbd80f92c622b5549c7f59c011b169fdc94f7b59ad1948f64d59": { "32a464d65d3033a6f94c395c523bdf9d52473033f37bc7b58a4c7d5a3374d78c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.148Z" + "updatedAt": "2025-12-04T20:16:57.778Z", + "postProcessHash": "4acc2269ec5e5cfaa3dc7816bf7b9a49d914b45b4db83cfcbf944e10ab34a9bf" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.148Z" + "updatedAt": "2025-12-04T20:16:57.778Z", + "postProcessHash": "0e64919e0648e036233b83e4609bac2a7816ff8f1046697732bd46177e240aef" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.149Z" + "updatedAt": "2025-12-04T20:16:57.778Z", + "postProcessHash": "75895529652dd220dd1ea6b9408b8e9ad02f8758b571b7de68c67e8da1aa715f" } } }, "4dcf3a152974b0406b6bb68f5b1c541fe9249595ec4170e386cdf67f9e97d6c8": { "144e0319e32e38db32a1efd639ffc72bf732e5ea7b5d6a3d0883a97e4bec0cf7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.754Z" + "updatedAt": "2025-12-04T20:16:57.782Z", + "postProcessHash": "d93fb854809e96a5cba0ac5064670f43801f2a08715601ad54a0eda56275fffe" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.765Z" + "updatedAt": "2025-12-04T20:16:57.803Z", + "postProcessHash": "f4d85c1a005c15bb1845441c73ea846b1a482346b75a5f30919cb56f3f2feb27" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.761Z" + "updatedAt": "2025-12-04T20:16:57.801Z", + "postProcessHash": "52effaef55277754fa186e2b506fba8d8c7c1fd076df40b5a928a37aeb90cc58" } } }, "512bf2a261651531d1f44db97f0e2477f9009f4f748fece66e5ca2554439601d": { "f65ce8822ff0abf42d5c376dd8120812baee55885d0c7b7b65bd770ce9d25050": { "jp": { - "updatedAt": "2025-12-02T22:57:45.149Z" + "updatedAt": "2025-12-04T20:16:57.798Z", + "postProcessHash": "46c765d2f4be2fbdb711a9e02a09add5997c27529dea3e54692c154744b34396" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.153Z" + "updatedAt": "2025-12-04T20:16:57.799Z", + "postProcessHash": "306244b35167883241bd29a9f09b3f67154bc5ce06c1062d77e1cd86cd0a40c7" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.150Z" + "updatedAt": "2025-12-04T20:16:57.778Z", + "postProcessHash": "b4db0c2c9881232e323642dbf749878f67ca28668b388fa8a42d06fdbc35b27a" } } }, "65955c38f425b134d13cac38e2564b302a84b647113466a30fa84df4625f2aff": { "e5d27d0981cb097f6f8db2c3366ef654946ffdaba0ea5433e234e0200fed3d99": { "jp": { - "updatedAt": "2025-12-02T22:57:28.760Z" + "updatedAt": "2025-12-04T20:16:57.801Z", + "postProcessHash": "fa640bc5520f57015f1224bc5a796ebbc18f4a9160edc801a99cc620b4eff2b2" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.758Z" + "updatedAt": "2025-12-04T20:16:57.800Z", + "postProcessHash": "2a39c216a822c7fa68e61edc40f1bb0535d07371c5abccb4c674772b4d178ec2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.757Z" + "updatedAt": "2025-12-04T20:16:57.800Z", + "postProcessHash": "8ba96408a8f1e3c8f553ec73606a32a83b4e4988351465a4f65f22a7401347d7" } } }, "70760b9ea84a1492768f54f60022928ceed80c33ef8d2cbbe522324f7979123c": { "5172acba2103f95752ebbc8f74579f1012ec0e81bba84d6402deb3f9ab3b0bfa": { "jp": { - "updatedAt": "2025-12-02T22:57:28.766Z" + "updatedAt": "2025-12-04T20:16:57.803Z", + "postProcessHash": 
"a073455c5b55aa911175ecf3353c4beff76eda5b433f919fa1cd09284d861402" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.763Z" + "updatedAt": "2025-12-04T20:16:57.802Z", + "postProcessHash": "fcaeecd3a91f734107e2919c4a5a484386e314a2303a38678b01cade9afbb3ec" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.762Z" + "updatedAt": "2025-12-04T20:16:57.802Z", + "postProcessHash": "e1dd9442d9afbd2f8c04c01a1be26841d421f22d9270a4d2b0abb4e931b66cfb" } } }, "832f10a64dee00c5573ad8927271c0f08e6912344a6142b218901f374557d6d4": { "c00fec44d98d20ecff726432315131e9d6815d1bc6d528bba1cbde655c11121f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.136Z" + "updatedAt": "2025-12-04T20:16:57.784Z", + "postProcessHash": "bfc76f5bdd8e9593d593ab86851101a864572ed0fc659b63327c00116e812c1c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.134Z" + "updatedAt": "2025-12-04T20:16:57.783Z", + "postProcessHash": "cbc1bfb3102543ff7b000f4aef012159a27b1c2d5aa93d7071cdf7b449232727" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.133Z" + "updatedAt": "2025-12-04T20:16:57.783Z", + "postProcessHash": "9aac770c7439acb70a6f9b9a0b7c52fdf3c2081fbaed1412d53efefc17ee0601" } } }, "85aaa20028d2fe29973bbd19c0fe7f0bbf9b2028122048baf8aa80c366fa2134": { "3e3cfccfbfc2a9aaaa5c073111f54d43e1d4a01447a2fdcb70bbf2ad0fa40c15": { "jp": { - "updatedAt": "2025-12-02T22:57:28.759Z" + "updatedAt": "2025-12-04T20:16:57.801Z", + "postProcessHash": "59baf1674c6b7404b7453309dfb6b3bdd92f9332ed2aac4be6dd793268a22a2d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.756Z" + "updatedAt": "2025-12-04T20:16:57.800Z", + "postProcessHash": "1c2f51dfd9496562471d059fadef889709f152fb330e6772262cdd79bc185c6b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.760Z" + "updatedAt": "2025-12-04T20:16:57.801Z", + "postProcessHash": "bfd8b5a9c9b83bd19e925d38b1bfbcdcbd3cea2baaab97089ac1dacd1fc6729b" } } }, "8edf9e4f287ceba4ca2d82f14382e035360e320bcc403a4bd0ffc3569444e7f7": { "0210849faec51fc728046caa3f03b71304bb9c646dc07169ab1c6d9e340a0aec": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.134Z" + "updatedAt": "2025-12-04T20:16:57.783Z", + "postProcessHash": "8b8d69da71cff2fe1d6d3853a2c5ab5c96616797e78341be5939c6825fb67999" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.128Z" + "updatedAt": "2025-12-04T20:16:57.781Z", + "postProcessHash": "8bd136944775f734866af38f97e77f643084b37bb273155f4bd47d773ca6c90e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.131Z" + "updatedAt": "2025-12-04T20:16:57.782Z", + "postProcessHash": "2343bf56ee5cadbd6439b29a12e9bb4a47d8716e250f75f57167d247cb543b59" } } }, "9c07a7cf8bf10809ed5421b224c9702d1daf802a6511bc28a61380182a3cba5a": { "4e8ed6a1feb2aa52a5a2a4588b3ecb8b8ba68dec83a27b9280790c81f51a60e4": { "jp": { - "updatedAt": "2025-12-02T22:57:28.761Z" + "updatedAt": "2025-12-04T20:16:57.801Z", + "postProcessHash": "28fca144efa62b8497f8b505179d25bf4790a34ed4bc08b921f4c426fd008e8e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.766Z" + "updatedAt": "2025-12-04T20:16:57.803Z", + "postProcessHash": "999fb8b88739487517548e63d84cb951e2e1de4de05cffba44ec9fa4ffc841a2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.764Z" + "updatedAt": "2025-12-04T20:16:57.803Z", + "postProcessHash": "2d5fde14c9f42d119a43577f464081efdeac6d02c4fc4d29215e1d5cae8194aa" } } }, @@ -18400,39 +22485,48 @@ }, "2f0734e7c9a31840e186f5a334fbbbc73d1d52db49e8bbda9d6d1527b330a0f4": { "zh": { - "updatedAt": "2025-12-02T22:57:45.123Z" + "updatedAt": "2025-12-04T20:16:57.776Z", + "postProcessHash": "dd2cf67da3b00f05537a0997e5daa87d1d5c6c6f8686daa8e24cdcb49a2d6fff" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.146Z" + "updatedAt": "2025-12-04T20:16:57.776Z", + "postProcessHash": "73dcd12bd60b9a196041beb2e23a2641374c7b4868c76d67685a3df4693c0a9d" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.147Z" + "updatedAt": "2025-12-04T20:16:57.803Z", + "postProcessHash": "522ae35fc4a279d68c339284282408d304f3049dd05132638cec7068fd0b2a45" } } }, "b39b9077d3c9edfb0122eda19c18f981a977ba3d4b35e87ca4e9808c93c00357": { 
"c4806c1db71a5a0e8cfe750303156d37b0c67170fa9901e7e2fcd40bc40df990": { "jp": { - "updatedAt": "2025-12-02T22:57:45.128Z" + "updatedAt": "2025-12-04T20:16:57.781Z", + "postProcessHash": "85bb605bc37caa317fe6a70b467e855dae02234bb70228cfcd908f60b4f56e3b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.133Z" + "updatedAt": "2025-12-04T20:16:57.783Z", + "postProcessHash": "455cc629ea4453d86e910aa32b689dd5716668cb35e66f91313a79e20f867dc9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.127Z" + "updatedAt": "2025-12-04T20:16:57.780Z", + "postProcessHash": "7e6bc52fdeb64d9f2908f13c79ef6748872d40ba3ac66bd40c014cff21b68fdb" } } }, "b57ac847efe3da698e4e7e930e7c66f735f45e722a25a0fa39bc6f7bfcec60cf": { "9c431dd0d8265db20267a05a0e5cddc327c798c7acfd1be5071f066d5a7aee28": { "jp": { - "updatedAt": "2025-12-02T22:57:45.137Z" + "updatedAt": "2025-12-04T20:16:57.785Z", + "postProcessHash": "4dd770507d26130a0b1de186e20ebabb0cdc91b51b426da778e92033f3c4fb8c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.141Z" + "updatedAt": "2025-12-04T20:16:57.787Z", + "postProcessHash": "fa701c769b8fe9e5acef9111f6b62155cb8f4d6f74bf41371eb2f9484e825597" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.140Z" + "updatedAt": "2025-12-04T20:16:57.787Z", + "postProcessHash": "7fef35812803798402cd1d38d25c8f00a5691375dc71942d1a2720e9927310f7" } } }, @@ -18450,13 +22544,16 @@ }, "a9aaf3d0acf90c263febea571cd562058a89cc9ae231894d698d45f35f8a8089": { "zh": { - "updatedAt": "2025-12-02T22:57:45.121Z" + "updatedAt": "2025-12-04T20:16:57.775Z", + "postProcessHash": "8d44baf931bfefb631398d6720c8294216b9259d94e498451f4648460286bf4a" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.122Z" + "updatedAt": "2025-12-04T20:16:57.776Z", + "postProcessHash": "25f4619e67352ea62c0b94a6f3ecee87e15c935e67d3ecc8e96f015bcf5c654a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.122Z" + "updatedAt": "2025-12-04T20:16:57.776Z", + "postProcessHash": "5093f405fddd981852a28d79837821e638e27d11bab20e27ab956df40781e337" } } }, @@ -18474,949 
+22571,1168 @@ }, "ec5f228d8cbe81918d49a04cd8723d3de119fd01172ce2e2b1517fecf7a600f2": { "zh": { - "updatedAt": "2025-12-02T22:57:45.147Z" + "updatedAt": "2025-12-04T20:16:57.776Z", + "postProcessHash": "c8b039c22bac7fcf18198d32e89a4654d50fc61bd2d9d084e9632491390c0265" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.147Z" + "updatedAt": "2025-12-04T20:16:57.777Z", + "postProcessHash": "0a56b38d22764bd93ca0bc535cdc63148881704f4f0c5277920314aa0136a2e1" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.147Z" + "updatedAt": "2025-12-04T20:16:57.777Z", + "postProcessHash": "168bf98677f74e48cc878904c21e74389719cd7be775beef762cdeda2fb6148d" } } }, "e8326b6e3e229b53f7f7616dad224e62d5aabc8c99d1885fa0b294be36436442": { "e0c19959bdee8150958356d19999762296868f26f8c58d573bd31ee946774713": { "jp": { - "updatedAt": "2025-12-02T22:57:28.754Z" + "updatedAt": "2025-12-04T20:16:57.799Z", + "postProcessHash": "7825c903a68226800808b53b160bcf8da2f848ca4d42b42874c4b33f5ee767bb" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.755Z" + "updatedAt": "2025-12-04T20:16:57.799Z", + "postProcessHash": "5fd49c0d5c3005d4cad28debfc17ca50c16718efbf7ae1a3794dc0bf50bc38ac" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.152Z" + "updatedAt": "2025-12-04T20:16:57.780Z", + "postProcessHash": "cca33cfadd9df5ee9334e24c3dee3b0c11041bdcec13ba28d46533fe04acd9cc" } } }, "f6456b0e678701e28c6a4e322798fee754b4c6d0f806d50583a4b3bd2c244c77": { "b8b48f150dd2033fc11782fa83bfba12af99e2588c361eae29e969d7df966696": { "jp": { - "updatedAt": "2025-12-02T22:57:45.151Z" + "updatedAt": "2025-12-04T20:16:57.779Z", + "postProcessHash": "15ad4363c1e880ecd1a91b34436297ffd9469abe98b90995180d65ffd65df71c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.152Z" + "updatedAt": "2025-12-04T20:16:57.779Z", + "postProcessHash": "dde08503690f9559922ea573a2668e7361d7110abc1850853c2b1f2bd7136092" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.150Z" + "updatedAt": "2025-12-04T20:16:57.778Z", + "postProcessHash": 
"6ef16870f4f8d45f0b30cc6c9373fbfba2e12813f228e1eb1bc341e40d4ec294" } } }, "581431969901be3a99a89764a4cd843b136cf34d9c36a58c385d297bcf0b5576": { "848b4e2ed1094aeeb74cb89d7d3f155262e075c04ec6a136f164406460b1c404": { "jp": { - "updatedAt": "2025-12-02T22:57:45.150Z" + "updatedAt": "2025-12-04T20:16:57.778Z", + "postProcessHash": "9cd40743eba518104799e1fbd150de5f268299cbb974e63020ff4e7ce49438bd" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.153Z" + "updatedAt": "2025-12-04T20:16:57.781Z", + "postProcessHash": "5f3bd11c85a9a2dad8aab7484a36dbf45bf85baf66e642759a96e5c7b33bfa8a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.149Z" + "updatedAt": "2025-12-04T20:16:57.778Z", + "postProcessHash": "6d6793c8610a3629e1dec8f2147a1ce66c64ddfb82572b7a82f59ca87eeed78e" } } }, "90b8b253ec086b1363c721e07a29dbd20c3e79932831c40618a9e15eaed1259d": { "558092fa5958f7bf2b9c27c89f455619f6ca6f3513e83b59425458536609e8ef": { "jp": { - "updatedAt": "2025-12-02T22:57:28.757Z" + "updatedAt": "2025-12-04T20:16:57.800Z", + "postProcessHash": "0b7ee556656f336e7cc1b432771a317ea784b3f1f5bbee99f16d7e49450ed8d1" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.755Z" + "updatedAt": "2025-12-04T20:16:57.799Z", + "postProcessHash": "95fa91b06b664a692091ed41f1dc9c6b179915cbc0b64cbe8e548729c0652441" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.756Z" + "updatedAt": "2025-12-04T20:16:57.800Z", + "postProcessHash": "5067a69353b28d4a530ff08bb1d8d3ceca3df2e24af4c32958b8226243e09826" } } }, "b22d1260a64a32ed7c646aebdc8304e5522445a10e936e31715082f3976c0efb": { "0350b0c4a0edef07c101045887230f235288aae9414af376658d84671b54adbe": { "jp": { - "updatedAt": "2025-12-02T22:57:45.151Z" + "updatedAt": "2025-12-04T20:16:57.779Z", + "postProcessHash": "12f7274991c714800206b8f46c9e0ee754aa1a3a504a0cc3aa9c035cdeed6c1a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.757Z" + "updatedAt": "2025-12-04T20:16:57.800Z", + "postProcessHash": "94a3bec799d124f474fd3fdc202acbc6fbe3a73a9f0bafe5bb5a10f462f5025f" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.755Z" + "updatedAt": "2025-12-04T20:16:57.799Z", + "postProcessHash": "a40bd800967ed55ef9d25971bd3a3656b462599c801b2d88ec679dcb2a63420f" } } }, "ba3d45a637c836f2218890eff93fee4103508fa1c470944799207121717e02a5": { "f3fd1aa8bafa81bb6a7e865a5de62823158a0afcc7ff7586bf136a8b47ee3a88": { "jp": { - "updatedAt": "2025-12-02T22:57:45.152Z" + "updatedAt": "2025-12-04T20:16:57.780Z", + "postProcessHash": "84a0d63ffa335e65eb5dfa8a84e19d4f5c3d3798426a4e9a88b067d3766ba780" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.753Z" + "updatedAt": "2025-12-04T20:16:57.782Z", + "postProcessHash": "371f9505f83f8a3f1583ad155231a8b2f4d589e7a0e316230300ab4eba355444" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.150Z" + "updatedAt": "2025-12-04T20:16:57.778Z", + "postProcessHash": "c80ff5fc8bcaecc2a69449eeeb45ce32c1208637ce0d8d4a1aaef8f57ad6f542" } } }, "fb6facb17dc3579b44508a305bcb4895b64ecd0ac72b1f50f97559b26bc78b2c": { "ad02c360d5787e1cd581329efbb507dd02fe16448697b4344569b5bc44e930ea": { "jp": { - "updatedAt": "2025-12-02T22:57:28.764Z" + "updatedAt": "2025-12-04T20:16:57.802Z", + "postProcessHash": "1135f956cab2cce629707b969e309f8731ab86bdf62aa687e52974bd9d2056f8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.765Z" + "updatedAt": "2025-12-04T20:16:57.803Z", + "postProcessHash": "b34ba0424c0a1b9a33bf874116d07cb423a34f00b5f6b3b596e166c7d1aa241d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.763Z" + "updatedAt": "2025-12-04T20:16:57.802Z", + "postProcessHash": "40098e525ee9e446c30af8b121d4a16543ef3808e219e86b556f1b8eb83a491d" } } }, "035ee5282a6d20879fad6bfb6f79b4341721a15ea3c52c10046b1dd3709aa16c": { "58cd6998f41fdded6a804039b2debea7d2278499d73c45aac0012b7439df220c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.166Z" + "updatedAt": "2025-12-04T20:16:57.858Z", + "postProcessHash": "e2229d07b793c18b8a013e9fda5c20527e0233ecf58ceefd6b419b98d9a7ff08" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.166Z" + "updatedAt": "2025-12-04T20:16:57.856Z", + "postProcessHash": 
"b49526a2f651360a1a34eab7557ca06ecd6e89f7856e957bef515a74f82d1635" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.172Z" + "updatedAt": "2025-12-04T20:16:57.858Z", + "postProcessHash": "f8264286be3baf0cfbb8c9d3e80c7b388c45c53246531ac01067f9765b6b1452" } } }, "26fd7d38f92eb5055170efb295d4a4f87a521a38805a47e252302040001b2050": { "6311029c9bad9285962dc8c797429aff225c5d236c038434dbd0c88cfb8a7048": { "jp": { - "updatedAt": "2025-12-02T22:57:13.161Z" + "updatedAt": "2025-12-04T20:16:57.833Z", + "postProcessHash": "d9b5346483ccf6334a5105ca1c30ff8dbbff248c1080bd2b1fc4d5d745aca48d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.193Z" + "updatedAt": "2025-12-04T20:16:57.851Z", + "postProcessHash": "24dc4f8d296569e12ec97723c7fa07d56d086ef04ccafe5253da48eb305b765e" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.186Z" + "updatedAt": "2025-12-04T20:16:57.847Z", + "postProcessHash": "392c23af8f16584bf49c5883da5f4e977982041c039ebb7dd2d9a3a98586428e" } } }, "3f43afba791f6baf15364b9b47e22c85a9f1b3dd6af0e12ec732f9dcec39457f": { "1dd4bcf22efaf403e36fb2a77e769a0046ad25b9ce5480ba0ffe16c707a0ef4e": { "jp": { - "updatedAt": "2025-12-02T22:57:13.201Z" + "updatedAt": "2025-12-04T20:16:57.854Z", + "postProcessHash": "dd8fb975228a3458ba44dc1147524b9063fa2889c67c6e5c6ae7f930716c042d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.194Z" + "updatedAt": "2025-12-04T20:16:57.851Z", + "postProcessHash": "635608063008778d6447fa8358008b12c3079ced85ecee9a34b5c43f1607eb3b" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.192Z" + "updatedAt": "2025-12-04T20:16:57.860Z", + "postProcessHash": "aaaa5bd1fc11bfbe1fd5a31b330f54fee44c568df82f0755fe36a4deebee72bb" } } }, "645f7fd9f0334b6f31287f3ff16746bdf9b9befb1bef269261f6079af9ff22a2": { "4cfca9fae37346c2e6b247de1cc83bb1880d5d141f5ad266dea6ae52b8cce258": { "jp": { - "updatedAt": "2025-12-02T22:57:13.192Z" + "updatedAt": "2025-12-04T20:16:57.850Z", + "postProcessHash": "abacd8ac876d3c9b874ca4022906c9c48605657aefef9d8da7b95de92440dcd1" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.148Z" + "updatedAt": "2025-12-04T20:16:57.830Z", + "postProcessHash": "b15220dff7fa0c2bbb6b7ecb357a7a2a4f965e102cab4f74aaa581c1b6f15770" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.199Z" + "updatedAt": "2025-12-04T20:16:57.853Z", + "postProcessHash": "f9f5ccfa1e8f2a332f864e4f30fe3d9cd78d061d3b0b5b38002be9890b0d0c2a" } } }, "870cee0b248ecbcf72715dfd0eeb85ec9af5efaca8d3edcf0fe8c5264910fd76": { "31443088162bd3a031a32984a7f4bfd930cc979d324a47439b26f35ddd40c4c4": { "jp": { - "updatedAt": "2025-12-02T22:57:13.200Z" + "updatedAt": "2025-12-04T20:16:57.853Z", + "postProcessHash": "9b49ad6232da1b55e3433b962c3b120951c38292fb74af0b0a2973d8a04eea17" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.205Z" + "updatedAt": "2025-12-04T20:16:57.855Z", + "postProcessHash": "e10211c1d3b3116624202c30f3f42e13c5d68630413e5540ae3ba5ecfa38cb54" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.195Z" + "updatedAt": "2025-12-04T20:16:57.851Z", + "postProcessHash": "7a7368b9afa511a300e041416f2a06da520f5b88ae0025a83c31d904a0a7fd40" } } }, "87cdbf09a8306f33d341ac3e84a3332c186b170f3eaade4500b0517c76c52c33": { "27bd6d01dce2d6441ee156267183789fdfad03cbf3cae1fe51042763a3ae5190": { "jp": { - "updatedAt": "2025-12-02T22:57:13.194Z" + "updatedAt": "2025-12-04T20:16:57.851Z", + "postProcessHash": "10de940bc422fd1ea8847a42eed793ba17e3676d57e0b9bae9ab2c9240f1d90f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.190Z" + "updatedAt": "2025-12-04T20:16:57.860Z", + "postProcessHash": "a6cc17570dfc61b9dff9629fc9e8f0c46e380456b65e37fc674ea84c6a2eb1e5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.204Z" + "updatedAt": "2025-12-04T20:16:57.855Z", + "postProcessHash": "17746fc6089ed959715c59597a889785160b2c747d16d8de309c88d405be6383" } } }, "03d4f9de31c6bf8adc70ca8cc91ea13e8e9e9c9401061a886ff406f2ee77507e": { "31a8fa488c7303d5b196d590f58b9ffddcbbaf82dd7d661a3d06b19f60b7ddc5": { "jp": { - "updatedAt": "2025-12-02T22:57:13.180Z" + "updatedAt": "2025-12-04T20:16:57.843Z", + "postProcessHash": 
"9d4a8dad949638de7706ea258e84172c8d91bc3f7e099c4fd41fafd0e2fbab50" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.797Z" + "updatedAt": "2025-12-04T20:16:57.869Z", + "postProcessHash": "61cd00e72e82b630013f61eb7ab60d5583b76e1cb2846efd1e6d39c79f87cf3c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.800Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "da3d7ccbd4be86514f0b8d76db690c64fe25f00e1bf3959f2679b42d2b8f71f5" } } }, "185920906ded891a9d2e00cce1434c3336837203f6a4afa9c0afd1752f259e14": { "fb5ace8ecf41cd7a84a0650f9d96ead8a0c11e0b73eb701d4b8a50861ed41f3c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.790Z" + "updatedAt": "2025-12-04T20:16:57.853Z", + "postProcessHash": "17aba8c9eda6d586a1be9d92764b56cd8a2f522b5d580a4aaf2b18cb67980fa1" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.208Z" + "updatedAt": "2025-12-04T20:16:57.875Z", + "postProcessHash": "98323875a729d6ef81e69611d73b2ca33b1cd569c050af967a8f88b3d5514a47" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.210Z" + "updatedAt": "2025-12-04T20:16:57.849Z", + "postProcessHash": "e996a5702da585fc6cc429ddd01451db98e75baa509ca86608f2efa75476d799" } } }, "3b5b38cf7b3fbbf741ef360cdeaf09b58c18acb3ff66337f95d902be5f6db59c": { "b37e005c51f403fc9b37bb6c5b5edef44101e2fc840f20186238b36701cc8e6f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.199Z" + "updatedAt": "2025-12-04T20:16:57.853Z", + "postProcessHash": "ca1e341b07be4ef743b7314f5656f68624f5fab78dbeecb1551b9283840daf8c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.203Z" + "updatedAt": "2025-12-04T20:16:57.854Z", + "postProcessHash": "786369bd7cd308758497dccb0edbe468640e646f9fd0b91e24591fb5635d68ff" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.190Z" + "updatedAt": "2025-12-04T20:16:57.849Z", + "postProcessHash": "c5b05d5fd2bb665f846cc6464dbf0c97a5a2c3699366999859be3f8ae45394a2" } } }, "3bc42dea80614a09ae6a300caa882b3109109bbf2c1ff3e4a3cad15872847cb5": { "90eb1bd6cd2087520e2d3b6a42056c3549761f9a48d001c400844b96b08b2d5e": { "jp": { - "updatedAt": 
"2025-12-02T22:57:13.200Z" + "updatedAt": "2025-12-04T20:16:57.853Z", + "postProcessHash": "0fbfafe6753379c795c3bece846e4097879575b3ef463243b783bb47af88703e" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.200Z" + "updatedAt": "2025-12-04T20:16:57.853Z", + "postProcessHash": "ce445b8e39d3fbed2d151131d8b116da9a9bb6476f2047517c3450a49a13dab7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.198Z" + "updatedAt": "2025-12-04T20:16:57.859Z", + "postProcessHash": "3135375419003a3f02fb6a7f9cbab1803d20daa094bca4ba908e095553722fb6" } } }, "4864254e07b5f2ba04547ffdc42c9fa734db92774140cb47efb6c312ff52493e": { "6dadcbfab042a7bcad0c4076a815d1b10666957ab124f50642fb026d185c6859": { "jp": { - "updatedAt": "2025-12-02T22:57:13.208Z" + "updatedAt": "2025-12-04T20:16:57.846Z", + "postProcessHash": "a0c189af7b622e1a17524651a9364f81de68d2702c7ff16d9057a387b69af9c9" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.189Z" + "updatedAt": "2025-12-04T20:16:57.875Z", + "postProcessHash": "817c782da1fe7d732496466147d5ade788f24630ef63881d9ca25474da785692" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.202Z" + "updatedAt": "2025-12-04T20:16:57.862Z", + "postProcessHash": "79c83c3aa154c79bf8726bf8465feb8e26c43c289f3e683a60cbeadbb5e73947" } } }, "4b4055e2a3996b0cc1db8bb8b7f1a428a61fcab906f4eb7fc9e8525523570823": { "fe2aceb75f41309c99fba4ee2a1fcbdba1e53d1591a97e9fee22b69867854012": { "jp": { - "updatedAt": "2025-12-02T22:57:28.796Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "af98ff505e460c0110e1a1662ff63c6baab28f333acdd0088144e7cbe184a696" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.802Z" + "updatedAt": "2025-12-04T20:16:57.873Z", + "postProcessHash": "948421c3e1abf23e8120eb54d5708aa41f709dfd2d448cbf3254cca3fef07358" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.802Z" + "updatedAt": "2025-12-04T20:16:57.872Z", + "postProcessHash": "84ad3d5c36bd5075d34e4b48e4df942398316c37917718843549b39f9718080a" } } }, "4c57ae2a858123d1bbd05031233c5f830692e6ff38484e60425dc1e644619e86": { 
"ac07bacf3135df09429ba59c3085014c51cd2dd6322c81c9cf515a50ac42020d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.793Z" + "updatedAt": "2025-12-04T20:16:57.863Z", + "postProcessHash": "6111b3143335a4494b11b1e9e094bad047f45fd314afc01605b45a74f7c2cd96" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.792Z" + "updatedAt": "2025-12-04T20:16:57.862Z", + "postProcessHash": "7639e4543fb4c76832aef9b34537392b3cb0c95171e9f08a14d2b741632619b3" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.792Z" + "updatedAt": "2025-12-04T20:16:57.862Z", + "postProcessHash": "b72a001a1436505d89e571b38956253d9cf50706d70a7e9b21e87518c99f3207" } } }, "5f4dd4a5e3b9c2038ce5d97add1c57db4cab04802675890f9a71c7e24d65298e": { "54f6ee288acad5771ea6bb244846d3f7f6f97153a3e95cef843610f79d82f51f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.192Z" + "updatedAt": "2025-12-04T20:16:57.850Z", + "postProcessHash": "e056bc809c839d0ad66263647f12f71912a49f7c57b9d4e1169fff2283c4f5d3" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.185Z" + "updatedAt": "2025-12-04T20:16:57.847Z", + "postProcessHash": "7cc3a1bcfad0e799329cfa70d25ca3db5417d3104ea3f2808a92edcd3fe04c77" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.195Z" + "updatedAt": "2025-12-04T20:16:57.851Z", + "postProcessHash": "621e006f1700c63e06745812dbd783d4cf18e1e5383ddbae3fe350237b519d92" } } }, "8c9ac06d9f96470f385b45eb7382ea57d23824bef86ddd9dcd04eb31af945385": { "8fd53472854410898a96195caacb583e709b2c67f304949a81fcdc9a6ab77a22": { "ru": { - "updatedAt": "2025-12-02T22:57:13.199Z" + "updatedAt": "2025-12-04T20:16:57.860Z", + "postProcessHash": "b5d6797ffdc4684cbb0c61ce1959479b14134a77037579d9a1c105a5c2f1ec7c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.198Z" + "updatedAt": "2025-12-04T20:16:57.852Z", + "postProcessHash": "21f99dd641294eb9b2ccb285f893daf868667d6c03eda18025b46d13a446cffe" }, "jp": { - "updatedAt": "2025-12-02T22:57:13.203Z" + "updatedAt": "2025-12-04T20:16:57.854Z", + "postProcessHash": 
"7c834d32b1e8d0fd34bcfeaa5721e8aaa93c80c7a8f3e5e3775c7b6abc6ad07e" } } }, "96f086ac06293e9e587823d8e326b7bdd10741ec2cca41ecf709e6dfda01a137": { "8cde4367a08c4c85a443e691e36a03de277bcadbc7b5b8042f83da242fb60262": { "jp": { - "updatedAt": "2025-12-02T22:57:13.197Z" + "updatedAt": "2025-12-04T20:16:57.852Z", + "postProcessHash": "b53cae564ec2e6a926df37c3b324647c061ab8c942313758e53941fb911b7649" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.190Z" + "updatedAt": "2025-12-04T20:16:57.849Z", + "postProcessHash": "5f0614e8ddbe6cacd50bea84167b26eb00120970046ae5cdb7002ef4be0f300d" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.195Z" + "updatedAt": "2025-12-04T20:16:57.860Z", + "postProcessHash": "508adbd82073da69ec35818ff2669bd57e163613d9287b2fb34a01a12cd257f7" } } }, "98763ad1765b4f7ce59ab7c28c03d9f16eb7ba20340f1fd72f141425b73dfcda": { "2b4ac034aba018ed0128e4b4b5e46817e96795dc002eb687680ef694d17118a7": { "jp": { - "updatedAt": "2025-12-02T22:57:13.193Z" + "updatedAt": "2025-12-04T20:16:57.850Z", + "postProcessHash": "b244aff5353bea41ae51c0ae589f9b007cef05696a1deb51e2aef51de0a16d74" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.203Z" + "updatedAt": "2025-12-04T20:16:57.854Z", + "postProcessHash": "3c364369e548400b5b8f68d69de904605b5d9634aa74981e0e6160f037162899" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.201Z" + "updatedAt": "2025-12-04T20:16:57.861Z", + "postProcessHash": "240662d35f9cbd70a717e10ea9f8661ec34e9e300fb7c684e75dccccf968888d" } } }, "a1f67d04d8c6c016319715cd37f1aaa7fea045040cd960873db250061b59677d": { "c042f748c77a461dd754ffe542382a34bd504df511e412aaa671006d2a6ce920": { "jp": { - "updatedAt": "2025-12-02T22:57:13.196Z" + "updatedAt": "2025-12-04T20:16:57.859Z", + "postProcessHash": "397cb04223eb44d1482af620be7685558167e7bc1f37f6ff7b8bcd72ff38b953" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.201Z" + "updatedAt": "2025-12-04T20:16:57.860Z", + "postProcessHash": "77b90fbfdc78bbde470a584ef730f0f3793375ebf5469e5a2cbfbfc8cbfc4c48" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:13.164Z" + "updatedAt": "2025-12-04T20:16:57.859Z", + "postProcessHash": "fc399a9f5b01cd8dd7d0b77db118005fa08ede69a3b4600f41869bafec281bcb" } } }, "b6e6ba59aea8d42356d10f15f3e251c9ecdf84b70f6b284cc535f8f2715be871": { "78c8f7d218a9c211659cb2bb3308ce5d14d1718fcdc5e47d42d5c5f55050e6f9": { "jp": { - "updatedAt": "2025-12-02T22:57:13.172Z" + "updatedAt": "2025-12-04T20:16:57.855Z", + "postProcessHash": "1af561a397e1f2b0ae403f5afadf68e6368b3740a6722a520c35b8eb47f87b11" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.165Z" + "updatedAt": "2025-12-04T20:16:57.854Z", + "postProcessHash": "c0e5eff4dfa6784e506a8553ddb2cbccb342b7309e3be761ab1a1f34694145be" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.165Z" + "updatedAt": "2025-12-04T20:16:57.861Z", + "postProcessHash": "666358cce77ec4294d864f1aad959cf8814c750f4d9c13a2a22a87db1fa17a55" } } }, "b96f31274279db19ee455ef4a211f35232718d535097413acc9e87b2c16cdee5": { "d1a30df1933d77a7366535efca514780aa4f237e66085e619643f85b025ea495": { "jp": { - "updatedAt": "2025-12-02T22:57:13.188Z" + "updatedAt": "2025-12-04T20:16:57.848Z", + "postProcessHash": "792a6e342647ea48c99b305adc66920f6b7eb58715d1de859c9b68c26329c3ed" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.196Z" + "updatedAt": "2025-12-04T20:16:57.852Z", + "postProcessHash": "bd2fd3bf12938f24739bc7b5097f2705ca70c79876fe8a06c7895492d91a4aef" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.154Z" + "updatedAt": "2025-12-04T20:16:57.831Z", + "postProcessHash": "0d4436ed8265bca29df34212dccf91afdd828ad27c3e42d94cc0b07008205d06" } } }, "be5bae686c5d8c7787f030404d9391d2b796570ebe3949ebccadac637ae696ad": { "aa76d4c663800697eb6cffaf9162ddacf8d4a6e9e85ae8732672b1aa668497b2": { "jp": { - "updatedAt": "2025-12-02T22:57:13.166Z" + "updatedAt": "2025-12-04T20:16:57.834Z", + "postProcessHash": "e074968d784f976e2b6d377111766a1470a671f18d70d0d5bbc9e7ca0ca60ecc" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.162Z" + "updatedAt": "2025-12-04T20:16:57.833Z", + "postProcessHash": 
"9d532dec0a6331be42abfb9b9057b604cdfce9536b7ae647f94fc2641e2c8c5c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.164Z" + "updatedAt": "2025-12-04T20:16:57.834Z", + "postProcessHash": "9009838fea16ced2e5cf8322f2a19e91e4ec944e001b4c923a189cc484d707a8" } } }, "c61d725ce51260e373784d5a559f17b1c985d873f35f4f40d34e5dc3c9d30214": { "164319294d8a4a2d8ae935edd6e5941fde821158fce1cb0fdc3c94aa7eba994f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.163Z" + "updatedAt": "2025-12-04T20:16:57.834Z", + "postProcessHash": "3d4bb348e802446513f7de4ff23905a2cd4d6dacfb23fa3a0baabde89ce6b659" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.149Z" + "updatedAt": "2025-12-04T20:16:57.816Z", + "postProcessHash": "764d34c033809dcc02ece51d8b2bcea4e2cc9c65f9e1f83401ff83d9e4c906f5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.163Z" + "updatedAt": "2025-12-04T20:16:57.861Z", + "postProcessHash": "e94c6be79ea28963118ea49992489524259fb3d81bb25bd7130f06bd2431db5a" } } }, "c9a3c995b2d1f1da16df65c84fc5fcd7d61a80112b46a37925da4d4c5cdfec2c": { "fe45037d34e9b052151f9190e1da1d3bf5cd89744c552cf345b160f37129f8f8": { "jp": { - "updatedAt": "2025-12-02T22:57:13.205Z" + "updatedAt": "2025-12-04T20:16:57.861Z", + "postProcessHash": "bcd5b75d93a82b4d290daa33ab72d0b54fd4a5ee8118bfde1189ef29e81f93e1" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.202Z" + "updatedAt": "2025-12-04T20:16:57.854Z", + "postProcessHash": "f31b7b0d8d0d8031c519968c9e50bbc66e9408ab6a3870365211a11fddade1aa" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.204Z" + "updatedAt": "2025-12-04T20:16:57.860Z", + "postProcessHash": "99004e3710aaf1af12829154b3057ca1d90d1e0c1c31bb520cefad9e6af19791" } } }, "e87d7bb771e6e969df1f4f17a2cea74b1703104f920ba5110ee4c2bc95819b7f": { "c626b9222d67c0a16c11e25def509ff96d4a34afadbccdcc1676284d3fb3c55c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.150Z" + "updatedAt": "2025-12-04T20:16:57.830Z", + "postProcessHash": "284e640bfe912bb7e4403cc8506a4b2ca589529414c76f895a3cc52a2079c800" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.186Z" + "updatedAt": "2025-12-04T20:16:57.859Z", + "postProcessHash": "0286bf5647ba331f08c9d8324722fa10cc46007b701ed99f07787ddc5c36e7c2" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.191Z" + "updatedAt": "2025-12-04T20:16:57.849Z", + "postProcessHash": "c70231b20a97585083aba0222fc952f559c71302b5deb4b8e4a5c53b8738b8c0" } } }, "f366eb4cbbf4ae87e0ea8145cfd5006bd57589104335fc046ede417d016c390d": { "e26bd50b67b6a44512d1f83c42aa88dd3b0ee7eea44771e913a93704b405e585": { "jp": { - "updatedAt": "2025-12-02T22:57:13.161Z" + "updatedAt": "2025-12-04T20:16:57.833Z", + "postProcessHash": "0c68cf491e9333fc7b3760dd51d09ebaf3b00ab1edd349dfc2534f5b85bcf8d9" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.163Z" + "updatedAt": "2025-12-04T20:16:57.833Z", + "postProcessHash": "d42d0c574516c4d0645d46342fa743d64b7dc074d711c2f60952f80ecfe94cf5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.162Z" + "updatedAt": "2025-12-04T20:16:57.833Z", + "postProcessHash": "bfe9489d7b160957108ab21680a4082d7bd00cc47e6ceafc564fc3f74e6e0caf" } } }, "0dec45ecddb0d4b9ff3311f5a670eaeb053be15ec02969e2e3cc776a6771ff5c": { "77a1b67ca7c88505859a9611495e54062c95a3d5051d05c9862ba6120252576d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.815Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "3878d919508835373102707335a5a124a853aaf72c41230d23030942be2e76dd" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.822Z" + "updatedAt": "2025-12-04T20:16:57.871Z", + "postProcessHash": "6bebf36bb854770d6ac7f8962417ceeaa9e694e1305011efecaa254ebe0786f6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.811Z" + "updatedAt": "2025-12-04T20:16:57.864Z", + "postProcessHash": "dcd125291b52cfaee5018b16b2b8d5b4f07d202dc8fce9e24a8489830df7479a" } } }, "1345e1194d63be447e8235ac3810d70f7853efd69e98e071d82ffea7cffd7a32": { "40371c6acad0719623ab143c6991d629d5eeef18fd54755245385719989fae91": { "jp": { - "updatedAt": "2025-12-02T22:57:28.817Z" + "updatedAt": "2025-12-04T20:16:57.869Z", + "postProcessHash": 
"264b8a354e88aa7386a3fde62cd9a810f409df6fdc75ecbf805451ddb23c6832" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.811Z" + "updatedAt": "2025-12-04T20:16:57.864Z", + "postProcessHash": "bb8bae1d541af25fd66b49904afde7aaed07d5fe62f0ef706fd33e0b050626ab" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.813Z" + "updatedAt": "2025-12-04T20:16:57.867Z", + "postProcessHash": "9154967fe3c5037555f883c28d43ebd7f9a963693812ed2121b373135ef87326" } } }, "1784873802e351d4cbfd164226e7d919a480bb1d6312139fa09de23c15d16a8b": { "8742e923d01dd09dc7d8778dca915632a84b942a268948d3212bfca23e4e87e2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.793Z" + "updatedAt": "2025-12-04T20:16:57.865Z", + "postProcessHash": "ba7790130e9847c679550260303e7bd90d29779990d6a01db4d3d0636832461a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.797Z" + "updatedAt": "2025-12-04T20:16:57.869Z", + "postProcessHash": "fb23458ae75693c8bf1bb6c7ec5998493219348445c758f848487f3fe869ef03" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.795Z" + "updatedAt": "2025-12-04T20:16:57.874Z", + "postProcessHash": "90ac8e7e636384c34cfbd1039a6fb5c2eb9f6e527100f8822a03300892423488" } } }, "1976a270e928ec95aa014d1eb571385ad93c7acfac83fd172543fcf63d413493": { "28f4800b7936b39a171e2fb6c8317b4c9829a963ca30e0d8f2cb33e3e1dba27f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.819Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "9c5e6083c62927e10a73aff23d771c1a4f64db63a1f23f87414d9e53e0e2f75e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.812Z" + "updatedAt": "2025-12-04T20:16:57.865Z", + "postProcessHash": "00e124094f669e8c8bf09cc520286bd77728faeb3ff3f6b1f71817a05875b6a0" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.206Z" + "updatedAt": "2025-12-04T20:16:57.844Z", + "postProcessHash": "31e9210d4396d546263f943540e14b096f498e017068b43794faef186b4785e6" } } }, "19d053d8db1755b3bac1323b8dc5bdf881a37b3de8c55e8397cfd48c70b492c7": { "a35e75c19a0f228c55c8e74114787fa88e13457d020f241643da1e080c35d9ae": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.824Z" + "updatedAt": "2025-12-04T20:16:57.873Z", + "postProcessHash": "41f1f122ed49aea4f379b562e4ff9153ba9b9acc5b4df2ca44c56b9f6848902f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.811Z" + "updatedAt": "2025-12-04T20:16:57.864Z", + "postProcessHash": "5c80258df833983c7b274708fa062ca0e3efa0bf48adeedc2fe08df377e78211" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.205Z" + "updatedAt": "2025-12-04T20:16:57.844Z", + "postProcessHash": "906cc962434989601007d3957f5aa5adc48130c09e72f84d656bf5a97e2f20cb" } } }, "1de644041acf945417d447dae1559f7cba704ddb7f42f4989d75f53b3432bcc7": { "0d354a4bc3cf5327de48753ad84ff21b24119bc6b87f048f6f36a86e9a56461f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.796Z" + "updatedAt": "2025-12-04T20:16:57.867Z", + "postProcessHash": "6e2dff9ad0146dd7635c9d17238f45532cfd4e9c77baf7206fca280661d82586" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.795Z" + "updatedAt": "2025-12-04T20:16:57.866Z", + "postProcessHash": "c38c3bd35f52876fb2b80c04c383769141a2e1168112ec5ffdcadfea6674927f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.794Z" + "updatedAt": "2025-12-04T20:16:57.865Z", + "postProcessHash": "8bf8e0a2c822167174c041a57248197b68656796618771b050fda1c53641866a" } } }, "21df29d894f5394f8a93a1ff43ddfcea466286f726a703a29d7f5ad5f777ca4f": { "f9004a0faa2530c5a49f802aa2e8e063889d07b4b5779757539ed40941914621": { "jp": { - "updatedAt": "2025-12-02T22:57:28.808Z" + "updatedAt": "2025-12-04T20:16:57.846Z", + "postProcessHash": "86cbfcbbccccda6b1c586f4633473a2aed07648d4219b254f5c2b0b5a740349f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.808Z" + "updatedAt": "2025-12-04T20:16:57.888Z", + "postProcessHash": "f84baa85de5904086a72d87af510cc90de8821bb04b4da2ec7c5cbf3d33048aa" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.809Z" + "updatedAt": "2025-12-04T20:16:57.863Z", + "postProcessHash": "972ef2602e1592fdcf609d8e878c4b072c9b81bc31eb741ca82d784ef5cd54c9" } } }, "22ff9a2316c586c12132ac52204a80c3282c99ea70504b739a00fc4e769b9090": { 
"9b6474c5f66a5e775df7e704ab5583bc77d7b503d80449e41bcb0fdca582d72f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.805Z" + "updatedAt": "2025-12-04T20:16:57.874Z", + "postProcessHash": "6ce4e20adc483b9eb412a6ca5b244acbb226d6b82e1408cc9c710afd82e15199" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.183Z" + "updatedAt": "2025-12-04T20:16:57.845Z", + "postProcessHash": "ebc87644cdabdb09ee9892ab16418d3b56d467710c44e9aa29aef0a702d37cbf" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.806Z" + "updatedAt": "2025-12-04T20:16:57.874Z", + "postProcessHash": "1860a7ea75cf99c380b45e9a1d8a370a4973f732e14beddeba1a719bd6988d7b" } } }, "642f1cdcfe6481dcca55bd2f485397c27b2cb519506bae85d0903d1022a9a534": { "d58e38a4b38f0454d5c08c7d2887270f277c732f8c21e5a62fa24568ae4fc2a9": { "jp": { - "updatedAt": "2025-12-02T22:57:28.800Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "1a9f83ddc292362420c6a52de2657bc7271f5c21ba3ab0abd2ad1bc2a8b1107d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.182Z" + "updatedAt": "2025-12-04T20:16:57.844Z", + "postProcessHash": "22e211ced8488ecf55bca008b238d7ac47d7adabf9e5bf2e3aadc8871c61418f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.801Z" + "updatedAt": "2025-12-04T20:16:57.871Z", + "postProcessHash": "f64d3c780a504461888778408b202e10c4a9fb705b84295aa98d66dd14c51490" } } }, "76e148edd42e2339581c7f24e0a25ab51ee37d3723b355157641afd3cf2a92ac": { "96f0f82692a94d11ec4bd22df9bf9c367d91f54e7f111247f17715678d4f8a7c": { "jp": { - "updatedAt": "2025-12-02T22:57:13.197Z" + "updatedAt": "2025-12-04T20:16:57.852Z", + "postProcessHash": "df55ed088f3c9193ef4a9840731f673deb78f7a7a219e084bbf959d429b57d69" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.189Z" + "updatedAt": "2025-12-04T20:16:57.848Z", + "postProcessHash": "87ca586acfde913d4ce97da52a9b9813f1d1c4a076f6bd82679ff6390930efd4" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.191Z" + "updatedAt": "2025-12-04T20:16:57.849Z", + "postProcessHash": 
"50478e4300bedd8a55a138b668e7a6d5575c7ba27f82675dd67a25ea88581634" } } }, "877ff646acb9d8b60cc0a8c397ec6865271899314d2f8d8c3bc6835ea0a51d87": { "cf8035df5e02498f9892ec6d01d716e4e210be81d6a338a2a670b395f2d05b5f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.194Z" + "updatedAt": "2025-12-04T20:16:57.851Z", + "postProcessHash": "fcb554e37e7af4c053e1e672331da8564ae20285875f2210850e6a0609794365" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.188Z" + "updatedAt": "2025-12-04T20:16:57.848Z", + "postProcessHash": "65034cf5fce14a18bac91cc847960dfc3da465cc9a759c15b74b2cc209d8c5ea" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.187Z" + "updatedAt": "2025-12-04T20:16:57.861Z", + "postProcessHash": "9319090b82e0165e7795cae925d2fc35275c892060b98661ed21ddb13a1ced39" } } }, "ba2b228d4949b83493253e6cce36fa61e4aab29868007f5c4dea719bd97fe4e3": { "bb371d742e1c3d8bcdd77214bf030643a0331f8f48e7727cbd847a8a32b85ac5": { "jp": { - "updatedAt": "2025-12-02T22:57:13.198Z" + "updatedAt": "2025-12-04T20:16:57.853Z", + "postProcessHash": "514a925382e7a5201b4d202fdd21b6f8527498b7239b26432da0b044323f0439" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.185Z" + "updatedAt": "2025-12-04T20:16:57.861Z", + "postProcessHash": "b5029fa6b38c05daee2427f694bcc4de7d3c45e00a7991839c51801d6c8662f9" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.154Z" + "updatedAt": "2025-12-04T20:16:57.831Z", + "postProcessHash": "8dc81aec40cf12dfdac8ec24b879365aeb6b7a1dab892347e3c8e63f740d96a3" } } }, "c88c05312ecb48fece611ecb971d8437aee67aab577a01d65950c88e236c100a": { "d28f12f9ff28bee751ec769892ca255d368223c72a14abe462c9cf6ad965a8cc": { "jp": { - "updatedAt": "2025-12-02T22:57:28.798Z" + "updatedAt": "2025-12-04T20:16:57.869Z", + "postProcessHash": "918ad5171e00dde35e8ceb4a21227d70175a98a165f3fbb2744b9e45a2677eda" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.803Z" + "updatedAt": "2025-12-04T20:16:57.873Z", + "postProcessHash": "a8482fa3c4cf8bf55633b046eb0ed82451e1489e4d7153f1248153f3bb973d84" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.802Z" + "updatedAt": "2025-12-04T20:16:57.873Z", + "postProcessHash": "56c91b4ff2d87498b4173aa8a265302f5546e84e4c80c1f657939dfd25c2b812" } } }, "d517690990eb5a5034e28f3526bde41c42990306742079c31f30f4ed4524ed91": { "9c79376ce670521bff71e976361e6729afb8128c48c2bd62e07e55c58efa6cbc": { "jp": { - "updatedAt": "2025-12-02T22:57:13.184Z" + "updatedAt": "2025-12-04T20:16:57.833Z", + "postProcessHash": "957dfd0d6384b88503c8e585ae98dc5e0c273cd6aedf00de4bea5e3f405c5727" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.196Z" + "updatedAt": "2025-12-04T20:16:57.852Z", + "postProcessHash": "54755606d721f0dcfc6f2a2cadca5cde84cb6c80f3e4b913cbddcf5d4ebba60c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.183Z" + "updatedAt": "2025-12-04T20:16:57.833Z", + "postProcessHash": "2c35252f68f1a923640589b00e0f8ea0275b103cad7af80698565b052a5e87fe" } } }, "e226489ddbcee1a5f588fea5844e21dcac309588b3ec1f6bbc9f7bfd26b0953b": { "5792c89f06fcaed31fc80316244e3ff2495629cc4d68214bf2ad0fc8b2cafcae": { "jp": { - "updatedAt": "2025-12-02T22:57:13.184Z" + "updatedAt": "2025-12-04T20:16:57.833Z", + "postProcessHash": "57bdd7362459e516623260a58040d69a7a2e1654f40a25903d815f563ad7caf1" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.185Z" + "updatedAt": "2025-12-04T20:16:57.860Z", + "postProcessHash": "e65fab7028c7749170d441414b1180e83cb45c68f48da136059e9d47598cfc57" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.187Z" + "updatedAt": "2025-12-04T20:16:57.847Z", + "postProcessHash": "6d02fd7f4ac3123851eaa4d294bd5679844a586bbd0b7b55b21e07da1802294b" } } }, "e3904a052cbf5a3387388c389ae010ddc49649dbbbff19900f769f6e6cbfa1ee": { "e3e518cc255f67640d601fecd3cfb11ea7e915ddf282acc6eabba8311aae5b22": { "jp": { - "updatedAt": "2025-12-02T22:57:28.790Z" + "updatedAt": "2025-12-04T20:16:57.852Z", + "postProcessHash": "7915ad17bc085b7ec17f0144206c4a2207cab99de9a6fd3243b28ae3cf206c84" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.211Z" + "updatedAt": "2025-12-04T20:16:57.852Z", + "postProcessHash": 
"f0280b151f5d1abcbf656b7d1e622f5012fa583f4e0757f7f4b5a8eead7e1dce" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.790Z" + "updatedAt": "2025-12-04T20:16:57.852Z", + "postProcessHash": "1511a9b0d60c7c3f5b5a39c0c60791b6d91f7741c4afc3cf4b77ffe6fb334b41" } } }, "e6ad4f2ee58b9c424f0cc4e12e443aa3bb9dfb641432accc87e403a8b0597b0b": { "d64cf4716347332440eb8c9bd7192e0eae84a3f3eb49ad6ba4155f87567e3861": { "jp": { - "updatedAt": "2025-12-02T22:57:28.791Z" + "updatedAt": "2025-12-04T20:16:57.862Z", + "postProcessHash": "7d9deb042363000045617733cf181e0e372434880588266736556441925c13e9" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.791Z" + "updatedAt": "2025-12-04T20:16:57.862Z", + "postProcessHash": "2d1bee15b8a931170f51bf73787dbb9b01b2a3a6f1d3e50a368339c2ac5c22d1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.792Z" + "updatedAt": "2025-12-04T20:16:57.862Z", + "postProcessHash": "b554123c51646d20702b19c35e17d2e7396e326346254e10bfaac707da27525c" } } }, "e8d810b58d2fc954739ecb8eae76ec7772a7459c01a08dd48ba208a5ab4b2b58": { "0d3df994d73dcce5dc7c4ae8f510488dca241f13863b2cb49c97f6056079afb1": { "jp": { - "updatedAt": "2025-12-02T22:57:13.209Z" + "updatedAt": "2025-12-04T20:16:57.874Z", + "postProcessHash": "1da7a534734fdb80651bdc99bf056a4ad1c82f53a1d3e572d24408624de1db4b" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.209Z" + "updatedAt": "2025-12-04T20:16:57.847Z", + "postProcessHash": "f86abc0e6cf798945f18aabb41c62bd3e74bad4c5f748c59300a7d5ddcbfa292" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.211Z" + "updatedAt": "2025-12-04T20:16:57.850Z", + "postProcessHash": "5c155421ad507b5748d625f5815793cb034f91f2b5fee29985cea0a41dd8570f" } } }, "ee906a548fde378c55bde17a104978853c964efcc0ac2037f2cc5f90ff301836": { "f49e9e3f91b64b3519c5cc4cdc59ffcf9a84b52eba96cc9a68e95e42dec254a2": { "jp": { - "updatedAt": "2025-12-02T22:57:13.209Z" + "updatedAt": "2025-12-04T20:16:57.875Z", + "postProcessHash": "b817e573aec41425bc346fc4ccc846f18eb70867ba5a2bb1929b46f4044d50a1" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.210Z" + "updatedAt": "2025-12-04T20:16:57.848Z", + "postProcessHash": "16a7838061abd7362de6abd0de9885d4cd599be510e2172c833e622cf0eea177" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.211Z" + "updatedAt": "2025-12-04T20:16:57.850Z", + "postProcessHash": "886f63c6915cdfa20765f0c93d93966227d2efa4ddecdd0573c991d5ff95e564" } } }, "f17585a5d8e2bdd6a2ebea5f856955881ef4c473fd73048cf4f26e56bdcb5db2": { "9e7753f5e285750271319abb9baa46c784486772a2b4da88514c28c5141c5c81": { "jp": { - "updatedAt": "2025-12-02T22:57:28.806Z" + "updatedAt": "2025-12-04T20:16:57.874Z", + "postProcessHash": "1bceacb4672367f8d44a241a3754cd82454b970db609d86be5696123ad5459b2" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.181Z" + "updatedAt": "2025-12-04T20:16:57.844Z", + "postProcessHash": "8b0a78860492900df2f0f53ac1ae4ac7f54f8dcb3313f9035fae8b87dc556500" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.806Z" + "updatedAt": "2025-12-04T20:16:57.874Z", + "postProcessHash": "ec5d358a26e1b211a660ac13d764521c98294f781f89b058e23a2071d95b1261" } } }, "fdfddb9175ea6844a8f625eb6ff292798d8dda51dbc62ca44009000f3177a4c8": { "a1fbebb2555661587982370786b093295909d4be9fcca7e32ae5eff02acae18d": { "jp": { - "updatedAt": "2025-12-02T22:57:13.188Z" + "updatedAt": "2025-12-04T20:16:57.848Z", + "postProcessHash": "bcf0b7534c579de5d06bfd7131c8063bc180e7f1503a2717026df704a17358cc" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.186Z" + "updatedAt": "2025-12-04T20:16:57.847Z", + "postProcessHash": "f3b5ff21c5e81dde8155a3e73108c3b04846ae1d871cdc641ea2d7046862d5f7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.153Z" + "updatedAt": "2025-12-04T20:16:57.831Z", + "postProcessHash": "e84fff1d045cf9c71092d91955562c704761eb8e4de0be20cce15599e8ac06f6" } } }, "04fc2fc59d087b4841db1401316e4d1c9ac88f144242faabf25ec2e969a5215b": { "414e7c4dfb6cd3da8443de0d53c94c82fe3258fa5fdaf93915afe2a8ec3736d4": { "jp": { - "updatedAt": "2025-12-02T22:57:28.829Z" + "updatedAt": "2025-12-04T20:16:57.886Z", + "postProcessHash": 
"34771a3855369b70075c89682eb246030899b2597f5d7477c49d50d82973990b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.829Z" + "updatedAt": "2025-12-04T20:16:57.886Z", + "postProcessHash": "d936c6c3fe40c206d39139b147128a3e0862ff950513cdd59d30aacbd76863cd" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.812Z" + "updatedAt": "2025-12-04T20:16:57.865Z", + "postProcessHash": "8f3c28fe7c62594402a7f8d27d1582934cadc2eba4f8bc87b7b3d0e75219027d" } } }, "2fe2ff96c504c59daad55285eb365e9e69fcc5eddd301d8a0409670d1de5a9ac": { "79af085e05f9fd1374cba79aa1eea65a5fa7bcadf0fcbabfc3df348faf04e6e8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.817Z" + "updatedAt": "2025-12-04T20:16:57.883Z", + "postProcessHash": "6793ff84b8af7d5d8b7da81647f5cc3b4051f37bba8f934deaa1fa048b942794" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.828Z" + "updatedAt": "2025-12-04T20:16:57.886Z", + "postProcessHash": "95d415dc64a9f8e8e348ad1e8aaf96140f9954d2b314efdc691084a644878156" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.820Z" + "updatedAt": "2025-12-04T20:16:57.871Z", + "postProcessHash": "486a98f503013431b94e683a374be1173e56c396393b30a357cc8c3f36be8f1c" } } }, "32c8d946bfccbad7f54bc00de27ceee1cc1719758ec7a678b9763d7236502014": { "6c958d1bfa513f4a8e0811e9c383ecdf775c2aa645e088ea7d02462f9209a69c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.817Z" + "updatedAt": "2025-12-04T20:16:57.869Z", + "postProcessHash": "09192e3e213aec09b8739b8ca76927cb47ae1931bef0394c1fc8a2d566aa41d4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.813Z" + "updatedAt": "2025-12-04T20:16:57.866Z", + "postProcessHash": "3e40e4930f6f47417242edc9ca907236ef8ae3dae126b9bae198c55ec8c43243" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.812Z" + "updatedAt": "2025-12-04T20:16:57.865Z", + "postProcessHash": "dce00b6aa4729edc895c305821b237174224c992efff4e656658184ed365736d" } } }, "341eea9182cfeebd2c27c019d06a39d1fcf951c990bcd80fa61f11ffc6f9e196": { "aba92e4ddf93c8ac27c276aa33d276f9987cda30270a7b50881edac3ee8d0b71": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.821Z" + "updatedAt": "2025-12-04T20:16:57.871Z", + "postProcessHash": "33b0e8257b8c2f4f4ff28bd76c8f5cb50b29926015cdbe9c6a3d3140680c5fac" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.807Z" + "updatedAt": "2025-12-04T20:16:57.845Z", + "postProcessHash": "d7e3b1da7be143e33e2973304e3d9b7b7f1bbb8a91df883b75b84e7571084d21" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.207Z" + "updatedAt": "2025-12-04T20:16:57.845Z", + "postProcessHash": "c889b844f42e4450548262b4874575c28e375220a7c7f4e7caf10500ecaa2d54" } } }, "3fe31c561edbb5416b22ecceae952bb5b07567cc07d75cd64ad4a2caca7689f8": { "af620cd5ed38d2654712e19961c6712bdc7c780d345e73f17ae49396a20d6df0": { "jp": { - "updatedAt": "2025-12-02T22:57:28.799Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "552ca3c3025e1b660cde0afb3552c932d40d1e28e17650a0d92d6efd360000b9" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.799Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "bfa1c0d4a034ed09f1d51e3d64db06a614897cb7cb1c7fca57aac4714d6a9c5c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.798Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "368a6907bf13f87c188b962943377089df692c223e1498a2fe4e969486775527" } } }, "4afdda2989ef4d77a80eb6666ee0e5fd90ac3afbba1e33f8e39a07be3bbd203f": { "6d99a0d2cef83d17f6510958c4402246edefbb9b9d564c2e37e017791950e3bd": { "jp": { - "updatedAt": "2025-12-02T22:57:28.815Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "56ca848420db0579611181a512335e8e9c20e6a4a62bcedc9a400b448580fd49" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.822Z" + "updatedAt": "2025-12-04T20:16:57.872Z", + "postProcessHash": "f3dede27606f3796bcacba25eadb245278de1f535f359b5fcb8b11fa4a22708b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.819Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "e452b308611a16ff6a5d49bb1169de90c144ab983a5530a85eb50a30af675d7e" } } }, "4ecdaa59771417d8a6341e9feb60dbd9c4d4fbb10361d6cf230a66334329d458": { 
"32e97893f5bdae1c411c78d8f927f38c3f5f53f548071542f0aaa587e832cecb": { "jp": { - "updatedAt": "2025-12-02T22:57:28.853Z" + "updatedAt": "2025-12-04T20:16:57.888Z", + "postProcessHash": "f383c684f798a441c0bf9e216b0805a3e83e4d9835be6d4bd97ae6b18f933dd6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.853Z" + "updatedAt": "2025-12-04T20:16:57.887Z", + "postProcessHash": "26ad5ef4ab9de56dbfbfaea7bb9cf7fb13c2d0c1672b1be89d33e2f61a8bbebf" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.830Z" + "updatedAt": "2025-12-04T20:16:57.887Z", + "postProcessHash": "c8f094ef0c4b7a6e85c9d0bba8459a5b45c984ba601f219e64102a1c7f300133" } } }, "5ed43729b9d1c584d6d2715ce2c8e0e8690a779f998a5295f954f2f562471776": { "1691e237ea64aacab998e397d87c92e5419d9695a9c24f1829f61653d169f1f3": { "jp": { - "updatedAt": "2025-12-02T22:57:13.182Z" + "updatedAt": "2025-12-04T20:16:57.844Z", + "postProcessHash": "651056230433ee9dd9d54555bae0723ec2c8c29263db4a33a7898c90e47d0f30" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.796Z" + "updatedAt": "2025-12-04T20:16:57.867Z", + "postProcessHash": "2d3a7df8d35aee33fe591e5045369d87bd245532382ccb83f38215e7dab9aaf1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.803Z" + "updatedAt": "2025-12-04T20:16:57.874Z", + "postProcessHash": "69b530b44617a0cde638a41373c328a4841acefd0806983bd5ed0cf51ce5ac1d" } } }, "6b19fbc50a3d75e95082802f1b3acf6a3fdda3ff18cd375f0468fb5136f2256d": { "3dcab33a3b2dc5934e1b739f426935f58ec2cc8e37d9a43754b1941d524c7eb7": { "ru": { - "updatedAt": "2025-12-02T22:57:28.850Z" + "updatedAt": "2025-12-04T20:16:57.885Z", + "postProcessHash": "97ce10d0e7cb39bf4fa679af139b44fc13b35d8f92a119a4113ad7b66cfb7575" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.843Z" + "updatedAt": "2025-12-04T20:16:57.881Z", + "postProcessHash": "02e63d393d46e21e219a6f3e7cca7d892ada6c7f78913ad6e824c92db8bb8b64" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.849Z" + "updatedAt": "2025-12-04T20:16:57.884Z", + "postProcessHash": 
"dbb913cdac833b96615d68cd0e6018a4a9679c368f74884035eb6e9f9a9d2650" } } }, "7043bd98baa35080107f5165fe9bbec5ef39eb9956052fa0c10ef9ac22039a33": { "e6b73b30c4502fd5f9cd04636be35210ae5ea65dc8343c3daaa83eba16905924": { "jp": { - "updatedAt": "2025-12-02T22:57:28.815Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "8a1d61040594d0d9100de611970f617272c26a7c61bce51b6c9128637eff016d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.820Z" + "updatedAt": "2025-12-04T20:16:57.871Z", + "postProcessHash": "662951e59bf9b4f70f9d52f9df37f94dd279c3218b9daca00073a5d29b56b15d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.814Z" + "updatedAt": "2025-12-04T20:16:57.867Z", + "postProcessHash": "a76e4822fc70671bd0dd9a751ceab7c2ca15ac69ccef98eed56fa8aa9c336e8e" } } }, "73cc61c275b13e314a195a2bcdc4cbfb3fba91139f9fd1bffb19f48a659d4e6a": { "190e7c7b34bba92cb96c18d30898280711152aa225a02af84331070d834800de": { "jp": { - "updatedAt": "2025-12-02T22:57:28.850Z" + "updatedAt": "2025-12-04T20:16:57.885Z", + "postProcessHash": "69e0ba42d38ec3783b3d6329d5afaa50069a9c0128bb75417f160609d741e31e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.843Z" + "updatedAt": "2025-12-04T20:16:57.881Z", + "postProcessHash": "7381da4ce5840a9d072cdeaf58ee6732466cdf2c1cfd7bd8462576340ec67134" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.847Z" + "updatedAt": "2025-12-04T20:16:57.884Z", + "postProcessHash": "b2e3fc3f022ebf971aea816ff0a390da74602ab9b3d63167b6b48215c32548a6" } } }, "7478bdb164a78a0066fd05a6a86be0fa7a2ddd64b6f73b9baf2265c59d70f4c4": { "e97df2367ee337a5ad2b8ce514b44485caf7b24462a66eac4a3d178503301830": { "jp": { - "updatedAt": "2025-12-02T22:57:28.803Z" + "updatedAt": "2025-12-04T20:16:57.873Z", + "postProcessHash": "ee44710816026ff858f7397a9d8fc7762d7ca3bd0a4a0803924d3ef8396cd70f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.796Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "00734b6601a1867da908013690211d8699cd2a04634816b3f5ba80b77e22e13f" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.801Z" + "updatedAt": "2025-12-04T20:16:57.872Z", + "postProcessHash": "45aeb32c58e18c79ba802a04a57740e8187a27ddfc5dd140d8ebdebb5aa26a4f" } } }, "789c0931dffcacd3c5e4bd954c1cc49f734d35378bd8d9f099bac0b7d7de0017": { "58519a4d43db394ea6d5c15ae1e4f7bfc823bcba6a23e04e1f1b0fc5aea36241": { "jp": { - "updatedAt": "2025-12-02T22:57:28.831Z" + "updatedAt": "2025-12-04T20:16:57.887Z", + "postProcessHash": "ad99f675c5196766388a11b9fcbda3b965900d6bcc756e80ecb78f1f76b4e20c" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.831Z" + "updatedAt": "2025-12-04T20:16:57.887Z", + "postProcessHash": "8577a057783fab21101f49096d66716772e6749437b13500875d37f844207839" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.830Z" + "updatedAt": "2025-12-04T20:16:57.887Z", + "postProcessHash": "e1db3d5c02ac3150a0b25616dc1496bf61ebbcf5105974aec463fbc2372a7303" } } }, "85409384bc3d4ff1f1449d76a33ced011be9773bdbf0758e6975a6dbd1ee1dae": { "1fee80d8af00c415d442c78b9ad825b9a0656bc47f1eb00d9ac9cec8430f1454": { "jp": { - "updatedAt": "2025-12-02T22:57:28.797Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "6a315de42fc209c5b1287ecb783f74ae407f8af05b4f539fdc2872f2128c2a12" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.801Z" + "updatedAt": "2025-12-04T20:16:57.872Z", + "postProcessHash": "9672b42b60ad516f490fb3281e40065f600eb7bb8d21b4b7bff69c41c5cc9817" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.799Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "a970a84cd18592943bfffefb2b45be71f43302f80ded0a69e8e54ca9cf54c542" } } }, "940bcfdd1a4ad18a6a6ccd9181dfd460e21675b41985028b535c556f22904357": { "8379073d04e59c3c4b33a28508240fa2ad889504e267a63230a17f0b31b60377": { "jp": { - "updatedAt": "2025-12-02T22:57:28.825Z" + "updatedAt": "2025-12-04T20:16:57.885Z", + "postProcessHash": "0aade2e43a61130da226049874fe0947ecb5ed3483b82e1a09e817603ac898cb" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.829Z" + "updatedAt": "2025-12-04T20:16:57.887Z", + "postProcessHash": 
"41629d1c56c4d1882a13d92e7c7084fabab50811b91d70e2a4214c15a15654ec" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.830Z" + "updatedAt": "2025-12-04T20:16:57.887Z", + "postProcessHash": "75447afdcc9676b853d6209512eb08553a8d71aa23d87490013108c5acc9b96d" } } }, "ad44e0a653a277028da523b8bb2ede38b5fb8ae3edb129aec78609264961e45b": { "c58dfcddfe9b317538f8fc75e87174efab26fa62ab436b5d3a1921bdcdb71dcc": { "jp": { - "updatedAt": "2025-12-02T22:57:13.206Z" + "updatedAt": "2025-12-04T20:16:57.844Z", + "postProcessHash": "c2c012e4d44253d8bd5233146c87fcfcf756c06a115e2d2cad56beb33b8f1911" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.816Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "7df127040f9fa4dba62cee9ea6840e07bc9e073f3e9f88678af580d8b9b4dbea" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.818Z" + "updatedAt": "2025-12-04T20:16:57.869Z", + "postProcessHash": "09d2121aad813a230f229040ca6cdc165bdc93f86ac2b7b027dc9481d9b61a27" } } }, "b0371f0c5ed81dd8c1a94c3d4fbb5068eda546a915ea97e900025b7967fdc506": { "1adc889763f86e0775ccdc2cb7db8ac95b53182b5f48d36f86a8daf7373c5e8a": { "jp": { - "updatedAt": "2025-12-02T22:57:13.181Z" + "updatedAt": "2025-12-04T20:16:57.874Z", + "postProcessHash": "dd71d8b97392330654ce47574d5ac1509fa1c33f221014933f958cf070cef650" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.800Z" + "updatedAt": "2025-12-04T20:16:57.871Z", + "postProcessHash": "8fe7995ff7dd434884de0496045c5412a74a45b5810451b0b168dda92755f308" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.795Z" + "updatedAt": "2025-12-04T20:16:57.867Z", + "postProcessHash": "6507e7e363ca8c0a2f81d676688fcfa1de6b072261724dcac7b1ab5516db89ac" } } }, "c720ce0e77810fdc639cfe83c2df1fe9c3d97ef4dd59cba6540e1d9e354f6866": { "3f956529d37242046b0834f1c686e59dd0dda8c1b7de96710b47b1ab8e5544f6": { "jp": { - "updatedAt": "2025-12-02T22:57:28.818Z" + "updatedAt": "2025-12-04T20:16:57.869Z", + "postProcessHash": "5b36f94ece8e230d4e65b9d6eead5961c5f51da20c20e25cf1c7f4c477bb22d9" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.207Z" + "updatedAt": "2025-12-04T20:16:57.845Z", + "postProcessHash": "2ccd73fe365b2ea6bc10a9c900e4086812c9609599c8529a2bb77b3377e8d061" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.807Z" + "updatedAt": "2025-12-04T20:16:57.845Z", + "postProcessHash": "3a80224ca78b369f5fa27bcce394ee05cfdfca55a9d77d26c29d571818cc7fdc" } } }, "dfd805b622edd8955d58dd44846aeefbda562b1c575f0740533a458f2478f495": { "c61769f8b34a280fa8e6d8215850f12fe517dd969c26c4527ce9543b9b4052d6": { "jp": { - "updatedAt": "2025-12-02T22:57:28.793Z" + "updatedAt": "2025-12-04T20:16:57.865Z", + "postProcessHash": "f54df4ae5d531f39347d88045bd1712f08a0894bfdeb8a2d5d94f557d70c958e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.798Z" + "updatedAt": "2025-12-04T20:16:57.869Z", + "postProcessHash": "ad1d5fd67676975863f1b32eede946980fd86a6149acdbccccb38f018f93fc3e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.794Z" + "updatedAt": "2025-12-04T20:16:57.866Z", + "postProcessHash": "1531ee8b3e45c6100af1c4c49e3caba2bb4a5361e57cf076c257dbe9e16a2e84" } } }, @@ -19431,44 +23747,67 @@ "zh": { "updatedAt": "2025-12-02T22:57:28.804Z" } + }, + "322fe8c1dc3f914114b9612a364b492415419450af0b86100c9c57287d3a878a": { + "zh": { + "updatedAt": "2025-12-04T20:16:57.875Z", + "postProcessHash": "540df2a2ad43c66f47f954c3b8004106f04e475051adbb2ebe39596ae9dda6c5" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:57.875Z", + "postProcessHash": "5ca664da3330bf44d30cac2365d985df2d8440c8eb2b73238a2940200589b8fa" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:57.875Z", + "postProcessHash": "0adcee1d1cf9875be3fad442e19aa1fecc7a1e8431a1745d75116d0ba42b39bb" + } } }, "03b5ecbbf39334e6da0c384a22e5e44c1c2d4e7293956c81e85ebc5a1f9684da": { "a8ec8b1cfed8dd821d7646fedd89af692c1d5c39ff7e6c8263486a44277b6811": { "jp": { - "updatedAt": "2025-12-02T22:57:28.842Z" + "updatedAt": "2025-12-04T20:16:57.888Z", + "postProcessHash": "45d102edc911ab5c31659ddd649bee507b4ff2a6507667527f3f1b51d45562a1" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.845Z" + "updatedAt": "2025-12-04T20:16:57.882Z", + "postProcessHash": "c3e43851b83f34525653db6796d9a7ab3985d99c0b7f358cd05c8172d11c1778" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.852Z" + "updatedAt": "2025-12-04T20:16:57.885Z", + "postProcessHash": "6e6cbaebd1c20bb8aa802a7ec73035c318aac8d150e2aa543ebd2971965b65fa" } } }, "0b126951a3c780c939a55fe4567f097c8702408c235f214c5763699ad6daaca4": { "5b529866221693a79922a1408a19f5b678c1f0fe4b7ca31e7401ad0a4ce64dfa": { "jp": { - "updatedAt": "2025-12-02T22:57:45.189Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "304bfa8a6272dc7d2cbabdf30b84b3f56dfb045703e740b64cb239d5aa88f1f3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.207Z" + "updatedAt": "2025-12-04T20:16:57.901Z", + "postProcessHash": "3c08112b8d99509e4ee21c06401567fa49f2ee48beef9d898f577a23e64702d2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.205Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "5ab2f0eeb9aec29fc5eae9ff2ea46eb9ee21e24bac473de2d0cd90362987f40f" } } }, "0eafecab32cbe234424b7ea9a0db39d81bfbd85c2d891597fa1309dac8735c8a": { "94efd6e0e379a3b02b71963dbf0699cd5c5ab603e5cbabbb278630c8bc3eed6e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.851Z" + "updatedAt": "2025-12-04T20:16:57.885Z", + "postProcessHash": "17a73eff1be8e48d8815924f4706985ccf7a297d5bb4f070161b975eab4e5cc1" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.852Z" + "updatedAt": "2025-12-04T20:16:57.886Z", + "postProcessHash": "b3f69a11426de6fdeb9f7b53656197cf7f569dc061da7bd3580abd0b42f3d383" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.852Z" + "updatedAt": "2025-12-04T20:16:57.886Z", + "postProcessHash": "5a860b2ec6d808b98a0576fdc6b994e943e80c9490cc73622182e0c463b5cb94" } } }, @@ -19486,403 +23825,496 @@ }, "21ab0993ec46252ab7b40a1b418b9c04325c81c889a8af72daa16bc54b1f51e6": { "ru": { - "updatedAt": "2025-12-02T22:57:28.809Z" + "updatedAt": "2025-12-04T20:16:57.863Z", + "postProcessHash": 
"a2f33db00bafee7f9768502aa3de84e28a0b35c8efb4768162bede07ee7df2a8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.810Z" + "updatedAt": "2025-12-04T20:16:57.888Z", + "postProcessHash": "5becf268ae67b7567c4e998c7286e22148a6f0bd22a7b550a9833c65f29eb9e4" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.810Z" + "updatedAt": "2025-12-04T20:16:57.888Z", + "postProcessHash": "7808a9badec99348eab7fe796579bc6c0ab9bfba2b242452121fc60e3123a7a1" } } }, "22a6c0463fdb5f5bd56c1f342f979b7e0fbc638e39a22abae139379b580611b6": { "c126bede64139b7c6ab143d42c036651e266197fad3b70012de0b058cfc8a7b4": { "jp": { - "updatedAt": "2025-12-02T22:57:45.203Z" + "updatedAt": "2025-12-04T20:16:57.899Z", + "postProcessHash": "df14d4a4c4b5c565f3da870bde6bbe6a452044d2160f00bd81418b350606080e" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.190Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "2b2cd4e11a723d9cafd4858f905fea1fa5adf507f159e58eaeee5015e279a3c6" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.202Z" + "updatedAt": "2025-12-04T20:16:57.899Z", + "postProcessHash": "0c03e72a9c6567cce381ede5e48034acf4ad74d99ea8a194d375ea3e101f8636" } } }, "2f0ce2fe6b5d1318ca2c2c11f3ca3100561f2c3b056eac0c92885f76ad381df8": { "22f366f08d6beb4fd69cd03348a69d6ad0fa2634f22a96d663380fcc3e61900c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.841Z" + "updatedAt": "2025-12-04T20:16:57.879Z", + "postProcessHash": "420411cc96e91149e8d1ebd3b74bc315a3d3adb6eef25ba5da22ce6fe0518831" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.844Z" + "updatedAt": "2025-12-04T20:16:57.882Z", + "postProcessHash": "83a526eb85793371548fc386cdfcd7a9f9073b8a6d2a33913a1a986ca3c4ed58" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.846Z" + "updatedAt": "2025-12-04T20:16:57.888Z", + "postProcessHash": "f566503786d1b86a98399c85e646df8744220a22a40b3a1889211ea714a048b5" } } }, "34bd47b9631a90da1872337df6289e0170c67b0cdd5f5773b7367e05d2dcfe48": { "7bea2cf57bd47e48dbaa0fb6eb99c5614d61a80b75f4b14d7d22036a5315b2a2": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.206Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "3f1afd0003549be2632cf0c4bb558afe0b5f1e317209aba10051cf265a776b26" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.200Z" + "updatedAt": "2025-12-04T20:16:57.898Z", + "postProcessHash": "b2aa6607e0e7d2448323827d91cee88ea1573b708da75c5cfeaaf04501bff451" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.855Z" + "updatedAt": "2025-12-04T20:16:57.879Z", + "postProcessHash": "659b20ca9997f3c36f6b45d98eb9291e0c561052ef184fae03b51831c504257d" } } }, "3fae7c4513dfdc82bcd5d84b957baba453f6cf9cb2c3086a653e40e66ecab9e5": { "ebb4c00f401d9fc73b63d71739322aba770f129d6784c881ec5e9cd702ebc982": { "jp": { - "updatedAt": "2025-12-02T22:57:28.847Z" + "updatedAt": "2025-12-04T20:16:57.883Z", + "postProcessHash": "1723174181eb45621cb95bfb6dbd756c37955ec7376e9c50d5fd6e3f653e1b60" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.838Z" + "updatedAt": "2025-12-04T20:16:57.877Z", + "postProcessHash": "f85af05f4fbc6caf11294c8594a0f823782a4b7005013edd855956a4c459e853" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.849Z" + "updatedAt": "2025-12-04T20:16:57.884Z", + "postProcessHash": "0992069d4470c29f2d8e372e42d39a05d2303e843ab4bccd8ef7bd8922e198f6" } } }, "8681a5dfe4cb1dc88d34f905cd6f0b880732c556d84f4c6c1a78c2a42a1e2e94": { "937c3315f8641ae220b02e6527a850efc428a4de748f9fc10c3b23118f915818": { "jp": { - "updatedAt": "2025-12-02T22:57:28.842Z" + "updatedAt": "2025-12-04T20:16:57.880Z", + "postProcessHash": "e175012db0f964f6a79911a51c12f28c55a97a99e5a175a8886ceed5c9a12509" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.844Z" + "updatedAt": "2025-12-04T20:16:57.882Z", + "postProcessHash": "c23ca98ee59c8188cf74321b403a225ff54b5faefebf4bdf827fe8f6bed03895" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.843Z" + "updatedAt": "2025-12-04T20:16:57.881Z", + "postProcessHash": "16dbc38e16086163769bf83f790f7a8f32a4edde6fa108f4a3a2cc29e977c095" } } }, "9679382c066536c0e977c5bada387315bb3174921875fc2375dab0f8ecb14a9b": { 
"775c06f4143e15814d67624ccd103ecbff901762e4be69292f9800d11986493a": { "jp": { - "updatedAt": "2025-12-02T22:57:28.846Z" + "updatedAt": "2025-12-04T20:16:57.883Z", + "postProcessHash": "9705a5d1c9b72412ef200f0e65d035e220927aefd739416e1e633adcf9b9aef6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.841Z" + "updatedAt": "2025-12-04T20:16:57.880Z", + "postProcessHash": "5a5bd5470b147c93e3d09e5a98f849c921236d98b8496646c0aa17720c156df4" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.807Z" + "updatedAt": "2025-12-04T20:16:57.862Z", + "postProcessHash": "cff387bbec6170f88d03ec27ee8cf416188b63bdae56b60016edf0771587260d" } } }, "9f914435087a98e271276ebb87f255c29380082ebf766e89899a246c457e4677": { "71530532e2635eadb067e7bfc1e67c37d37113e6474b6d00295249b91f5e556d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.821Z" + "updatedAt": "2025-12-04T20:16:57.871Z", + "postProcessHash": "bb403c4c6451d41c4be772e85067cd6525548c98249a0fece15fbf729db00d57" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.821Z" + "updatedAt": "2025-12-04T20:16:57.871Z", + "postProcessHash": "988499514a8ff5ea02057a88e504cdb62a1af881e22a15c7f444f36daf71a6d8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.822Z" + "updatedAt": "2025-12-04T20:16:57.872Z", + "postProcessHash": "304ac2fbb61cfd9ed79c8a7c5cc3c253f7ca45dd20380d989334a524371c533f" } } }, "b5043154caba774e4d5afd58609e8705791d168d7b610c441a9f5eb0c01aebe8": { "8640bb0e91d0ce2469cf06735ac41d782b10893d26d5a5e4bdd88f4ddcf19c10": { "jp": { - "updatedAt": "2025-12-02T22:57:28.824Z" + "updatedAt": "2025-12-04T20:16:57.873Z", + "postProcessHash": "7e56601a8778d066afd82cd888e6b98975e414a743f124ba21c50c072f97a562" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.826Z" + "updatedAt": "2025-12-04T20:16:57.885Z", + "postProcessHash": "e09fd6e43c3c5edbce6d33986d804a170927a2a3fb8253cb48ed078c3aa97328" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.825Z" + "updatedAt": "2025-12-04T20:16:57.885Z", + "postProcessHash": 
"0511f8bb68c7a0ab03eb0a3b5a1171bb46bfdee145ac316f2f4f965e02fd0104" } } }, "b6b46b2ddce58f83297d4fd3e22a20c0689c8846b02b00d6c901ad29353143df": { "6526c7597b3e43dfe18fbc51f8dfea10476408a65acfc8c77d19c20114264de2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.814Z" + "updatedAt": "2025-12-04T20:16:57.867Z", + "postProcessHash": "cf882a615cc788a3dea86467f0aa984af4882ce3e7fdf5453ccc6ae84d3093aa" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.823Z" + "updatedAt": "2025-12-04T20:16:57.872Z", + "postProcessHash": "f81ed251b67849bcd7cdab599d052c4e83be831fc64128f45f245a3efcf09acc" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.816Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "e292900cda9e5da3050874df5d066b2b983937a604dcf7aef206df312682fe65" } } }, "b760d26fdf8b09ae16032e0dbdd66a6e812e5b85cfc1a2dce387a41c031415a5": { "2a83ac2cbaf9b2ed36fecb623007bef63f6aaaf537e37429095c3057b999a156": { "jp": { - "updatedAt": "2025-12-02T22:57:28.823Z" + "updatedAt": "2025-12-04T20:16:57.872Z", + "postProcessHash": "3d202caaf223b1e4ad7d6375133a2a2781554dceb3584d6aa755ae85dca925b8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.813Z" + "updatedAt": "2025-12-04T20:16:57.866Z", + "postProcessHash": "6215b2fa26ee923af659ad622bd02828ee717d586ab5ea9a33b1678a5edb690e" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.207Z" + "updatedAt": "2025-12-04T20:16:57.845Z", + "postProcessHash": "0014a11ebcd9886f835705275d63273f24d550a694b5d9b66de2700e7de6805a" } } }, "c94404af6396786f2d99e4b9e86fe62f37fba23be9fb0992cb4462421350617d": { "8e9c8e608b5e9c9eb4f01785fa62ca818e1a1957a5723d6cb412ed71f639a50b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.825Z" + "updatedAt": "2025-12-04T20:16:57.873Z", + "postProcessHash": "3d888b3f0f556a299f1dbc5d507a9cdf8fb8cfef4d66101c1a129d6824fe7009" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.824Z" + "updatedAt": "2025-12-04T20:16:57.873Z", + "postProcessHash": "84f52b029d075625aa67c20c2609cddbb954acbfc721ce31ce874145f6ba301e" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.827Z" + "updatedAt": "2025-12-04T20:16:57.886Z", + "postProcessHash": "64605067e9f90909ead88b1aac520ad5a306c25b8fcc746b81542c9bd2bd7fbb" } } }, "cb7281a29c8577f9362237b726ab73efa4133f66aa1f532e94603029a6608325": { "e7e9ff403010f7419e6fe70d3329c7fb4d95f62d59d52fda8025ee90af8ad89c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.851Z" + "updatedAt": "2025-12-04T20:16:57.885Z", + "postProcessHash": "a566495ba4b7f285354c15226950e9f2e3ec118c7273badf22e1eb95af4c0e12" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.841Z" + "updatedAt": "2025-12-04T20:16:57.879Z", + "postProcessHash": "793ddbd4c8b47639d47308898a4c6e9a6f720810b40c023068ec0c087ab6cc6d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.848Z" + "updatedAt": "2025-12-04T20:16:57.884Z", + "postProcessHash": "a21f16e62780db0aa0627209b7018658c201fc59af53306d64ecf0b9a4f15b0a" } } }, "cdf00c31e8da5ad17f2b40732cf7e7baf65150deaf7488eac143f7201d1dfb3e": { "3c8db57986756c0b913b89d2204dd19e77508a68267dc6a6d737df290161badc": { "jp": { - "updatedAt": "2025-12-02T22:57:28.823Z" + "updatedAt": "2025-12-04T20:16:57.872Z", + "postProcessHash": "1282d52a42d8e711c7b9c99b0989e274172a685cd4d0aec1b6d387ad54f496f8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.816Z" + "updatedAt": "2025-12-04T20:16:57.868Z", + "postProcessHash": "e31ad8fdead4a1cbdbdb9c8e897087df60f0101252e4ec3061cb57eca7707f0f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.819Z" + "updatedAt": "2025-12-04T20:16:57.870Z", + "postProcessHash": "3fe501c8bb5a3286cba447647dbab0d3b13f548282ea07770dcf7cc389389312" } } }, "d28f5c5276140aee0909af043384a73dc6d1e54e307092d06f03528d2b1110ec": { "c4f358e96fb5460080efb17e46f53d378939fef04b5fcad4e3e2c5a580a10128": { "jp": { - "updatedAt": "2025-12-02T22:57:28.848Z" + "updatedAt": "2025-12-04T20:16:57.884Z", + "postProcessHash": "ce545d05ba927ebb6758f9b1a7d015783d0f71cf4233f2a7ef5b1ae7216c280c" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.845Z" + "updatedAt": "2025-12-04T20:16:57.883Z", + "postProcessHash": 
"f728f8e8771a198e826729d367d4a1786cee2fba2225eed9aa78e8ba7eceeef7" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.842Z" + "updatedAt": "2025-12-04T20:16:57.881Z", + "postProcessHash": "ca004f5bdb73149d8a9cc89f1bc7f43288431f7caebb5f8635f24575a2003210" } } }, "04a08061427f75ae80f6c5be1bc33f6ed46cb17ac49c325b49ad3ed082b48721": { "8c2b821e3c5410720085eae977687f3169e4a39395d1aed6e45d331e39dc20b7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.212Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "502f7f2224468321a87d848f4ba378852646b7291ccb7f527e9567691c836206" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.214Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": "342fe40e7449e7a4b57b3aa2e0527b36428598dc57f0f706b39a586460161b26" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.221Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "69443e9f7be327a9ce2c5754542f3354fbbb5bb76c05a89978c8e18b8201054e" } } }, "0f4329370fc5999889692e3e2374c65bf3f4dd5e8903e64957d654e1c712ee1e": { "87fcfd05b5f0e870d641b6800c171abf3d47bc7484fb7612f4151d70caaaee3c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.854Z" + "updatedAt": "2025-12-04T20:16:57.913Z", + "postProcessHash": "08c64340c404c5ed8814c4e3e0da7bcddb5d406049904d36ab209b6e14adf9b9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.227Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "396be4281242e992e7d1f4a3136fbc299f1edeed5e80a2539ccb92f9450bdfb4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.224Z" + "updatedAt": "2025-12-04T20:16:57.894Z", + "postProcessHash": "95ab71529516c47649974c2ec908f50a9b91e75f1dd7e233ab4dfcfe25253da3" } } }, "100c02e77cc38427381d9c58741ebe9d9d8964c870d4cbb14624da2f386e6691": { "2d845a508a5f777e5f61b8dae330312410e821c6f517150d000bebfbc18e03df": { "zh": { - "updatedAt": "2025-12-02T22:57:45.223Z" + "updatedAt": "2025-12-04T20:16:57.894Z", + "postProcessHash": "dd7759328f986fde9b37d86078cc307604a45ef2726aeb28d2bb6cf4c5bfc864" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:45.218Z" + "updatedAt": "2025-12-04T20:16:57.892Z", + "postProcessHash": "3c4ac7567071a71fe14bb3dcdc4734d46ee6a563c1022116fea4965808891a1e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.855Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "680b1204dc8114bf5acfceea262b076ac3fcf2825f6737b5a74d8889ab6c7e15" } } }, "1680a9db2b149a398008cc3048b13dba799f74c5bfd3549470992ac1fdd41eea": { "2b8b81210547bd248aa80daed1df50ad236049f83eec7fed484a31e64906811f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.199Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "8ccc1e3d48c0ad89cf0666bab5d2038055edc6c73256a30e29d3b12d9ca9257e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.832Z" + "updatedAt": "2025-12-04T20:16:57.876Z", + "postProcessHash": "0f6d838ba6dda5f689eb1df53b0c18019303301268852d42dec067a8819eb9e4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.190Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "c0325a80fcf7c600872265c2bdf1abd5eb3ef0ae544e033fde00ddbfbdad7267" } } }, "1b89e2e1ad09ff845cbc6d24f7a759d61540214cea8a5c79bc2e68f266ebcbba": { "9d8c96f15a9c91e38b4c55448e86a206752b8e56970d31964de0de00beac4133": { "jp": { - "updatedAt": "2025-12-02T22:57:45.226Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "d0c29dee04f700938770ae33095cc7901e30ed9fb7b1debf118cdc42f790f35e" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.226Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "3ad902b68bbff11590172307b9ebad17e52adfe02064d79149810788cf327f72" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.227Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "865fe1da7ddfa6dd8af7766f527f1edc685e6074a3e9dc764c8596fa673b655d" } } }, "25332a58ba046cb340c206ff61639fed4457a1aad56ffaa7b53917205f1bb761": { "ca54f12c897481de5b60e4f4170eccc7217a2e000c56dcbfd023eac144ae760c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.856Z" + "updatedAt": "2025-12-04T20:16:57.880Z", + "postProcessHash": 
"35812bac96225c5b56234946079e73b78a4aafb688f2f5a4602546fcbdb98fac" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.198Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "ba5edbf9c64cfbb8dc37e06d64ba18fa6215d73ceb9174a4f772761eb60cfb05" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.203Z" + "updatedAt": "2025-12-04T20:16:57.899Z", + "postProcessHash": "db489a99c4afeebc2628b989f3c4c8a315c2cd60a9067cf24c6612f0577832a8" } } }, "45912b7cfa3611f18ba43d31b4bf270d57e4bcee3fdf2ac5e2ff6ded3b672601": { "25bd45fdbb02d82cf7af7820d3acc7ccf1701c6afe3cfae317a6b4ac9289a67d": { "jp": { - "updatedAt": "2025-12-02T22:57:13.213Z" + "updatedAt": "2025-12-04T20:16:57.913Z", + "postProcessHash": "d32afef8080dcb97c082eac36f36897d194366b3c4416b4fec2d283cf742dc88" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.213Z" + "updatedAt": "2025-12-04T20:16:57.913Z", + "postProcessHash": "04406d7576ffe2a73ff025631d89a9249ffe058f6c1069df3a0f9b8149abb22e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.225Z" + "updatedAt": "2025-12-04T20:16:57.895Z", + "postProcessHash": "31e64e3687ef0bb0aaff26b36d647dde95bb607f9a1daf3cc5975669214310de" } } }, "5a251aa88d6ebebbfbc12913926476ff0da32b3862d705b6ecb28ea2c559b45f": { "b32b63bf76eb2d854a947bc3926ad7d875cc3ed3eeec677de22a5a760014a32d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.199Z" + "updatedAt": "2025-12-04T20:16:57.898Z", + "postProcessHash": "989b46f24aede5b3af482b932b6d136bc76105006dbc54df97ada08bd92b3992" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.201Z" + "updatedAt": "2025-12-04T20:16:57.898Z", + "postProcessHash": "60b9563bac641b8868e1fa544bb5ca8509f65b86b6590d2994d793cf1ba45332" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.203Z" + "updatedAt": "2025-12-04T20:16:57.899Z", + "postProcessHash": "955960fa780eea9b71e4a1a671db7420666ef03fede1037e6301a3f5341396a0" } } }, "696be7be6ffadd8471bfb91d7ba6ec45956dc7e449f3fc81dbaa6fa67d66b3be": { "8aa635a63a82ddcda9a254960f313fdd8f129e472d9fe8d3e6dc10d1b38c37ad": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.840Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "4dc797f49a8a8287394c9bbf8723b831b094de0890d5ff6e8121b529c3ec5cb1" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.201Z" + "updatedAt": "2025-12-04T20:16:57.898Z", + "postProcessHash": "19dbaa2dd53ab0d7c8c7e713803d772838f9123b95c9c45bbfe5fff35b7dd501" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.859Z" + "updatedAt": "2025-12-04T20:16:57.882Z", + "postProcessHash": "0121943389bc34cd24cc8641068ad137dd33d5b7d99cac5e5a78021fabdd445a" } } }, "79e7241d6edd82b0dc1989f7d3211668c2f24f997b5fb74c55c6db437b7da25e": { "be2734886fbef14228e09151309f47d77c7dc82d6b8d68b9d3f8b6dedeaa8944": { "jp": { - "updatedAt": "2025-12-02T22:57:28.840Z" + "updatedAt": "2025-12-04T20:16:57.879Z", + "postProcessHash": "d41172d34f33648a494e75d32eddb012eab121dae1ce7329da70e5a4a45dc421" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.839Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "2380e757b48565a6bb1c96afa7e406d5364e7b1e8e2214002713580540c45540" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.846Z" + "updatedAt": "2025-12-04T20:16:57.883Z", + "postProcessHash": "8b8f420ba891f5036e4d4a060ad1dd39f2d6cf0e84301100ef5d7d7a1f1fd316" } } }, "7aca79eee9aaf67fab4127f69bfa7778f63bc7a7f6b384bee18e809c672f7b49": { "55febc4e35972c34cb1792867e0fc3cfea4841faadf9de0e30f4502a613b8363": { "jp": { - "updatedAt": "2025-12-02T22:57:45.207Z" + "updatedAt": "2025-12-04T20:16:57.913Z", + "postProcessHash": "9de09f5e9c3164e4583e1b42828d6ddd484727545ebf1a70d02a9dc8ca1bce6b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.206Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "f84f653cabca9ac4864ceec978f15ef9c00aa87186bb5cb55b587ae03f6f4406" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.859Z" + "updatedAt": "2025-12-04T20:16:57.895Z", + "postProcessHash": "a096528db5ce114db80fe19bfa4821fdac881262272aec537c1f588445b28a01" } } }, "92f16152dca7a77dde912f0a5b22ce16b22c2dc499873bbedb28221aa56e8739": { 
"f3fafaf3dde2049ce02a32a17ef225150db00d0562e505ad5431a06ed8974f2b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.223Z" + "updatedAt": "2025-12-04T20:16:57.894Z", + "postProcessHash": "133c7686b9a631cf247dde3093cfbc0b8be229703250524191088d220732806e" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.854Z" + "updatedAt": "2025-12-04T20:16:57.877Z", + "postProcessHash": "3797f9ba10903400a090effce4b252b507f88a29dc9b4e9e394dad6560729c23" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.212Z" + "updatedAt": "2025-12-04T20:16:57.899Z", + "postProcessHash": "d0392fd2193f8aba28ce480cbc6c081d1e2f315ceb55ac394cadf61b4791b525" } } }, "972010f567fed406e9bc7e9fa61c22b7128c4779998665424ab71c46338d1f3e": { "ae02d48d7b29f026ead3f4af508a4e2b3a97657cb5051628dcbbee9111248f7f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.848Z" + "updatedAt": "2025-12-04T20:16:57.884Z", + "postProcessHash": "d331b64a04953166407735126184d0113eec67318331bd6822571d5c291c0c6d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.847Z" + "updatedAt": "2025-12-04T20:16:57.883Z", + "postProcessHash": "d14e7342169a700c3904269a73245389c00b8d04d8ba249519104735e53a28ea" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.853Z" + "updatedAt": "2025-12-04T20:16:57.886Z", + "postProcessHash": "942f028e720f0e0867e4730801ecedf79000d2d67192f5ef9cf119c9b0a60722" } } }, "a15ab08919f6acfa97670cff9afca686c2351120dfd9d4f8deb2b45ddb99aa0a": { "d344fdb9b77fe64b9863b88b7aea7e3a8e4c7d7db3d3d7a7d7584b626a3c8054": { "jp": { - "updatedAt": "2025-12-02T22:57:45.191Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "81f73e3fc11134a99646de9136329692550d9de6f87ad15daf138b00ad623752" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.202Z" + "updatedAt": "2025-12-04T20:16:57.898Z", + "postProcessHash": "5ad34d5aecc89f1175d98cd318e5c5dab995ebf69e5efa715a49ba5cd107fb7e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.858Z" + "updatedAt": "2025-12-04T20:16:57.881Z", + "postProcessHash": 
"4ee1d70f38580b8db0a2e894285440020ca0c0c04fbcfd8f085a5b150b1df734" } } }, "bb1dea393979951d316dea0be45235c346fe0c28cfe6756a5876f4804290c7e3": { "d3ecf8e3f0da56d9ba8034a953040427b08dc7fa1c165a2173308415b8a6d17e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.191Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "9b438dfbccc8ed411f3b92c526eaa1715a664f25cf7f97e9a6d5516ee4f165a3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.190Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "8919c17c288a2f9b66dd3b0ad5f141f5df9e33df5cab700ca87e0e04195f9b0f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.839Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "d425b0e86b13a4f8392e109a10bd32f065c8f4fe4ce4290b81be1d35f9ae285a" } } }, "cdb1f009589e1e0b485965e6b6f62f110d284ec9f225d0eb9717cf9f54e381c0": { "694473bb486e1e21cb8814dc53f5204436b1e5ffbd3f851984bd46f00c011179": { "jp": { - "updatedAt": "2025-12-02T22:57:28.832Z" + "updatedAt": "2025-12-04T20:16:57.875Z", + "postProcessHash": "72052c16381b05eeda913dfdddf6e898b79f3b71a79d80126babfb974b763b65" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.832Z" + "updatedAt": "2025-12-04T20:16:57.876Z", + "postProcessHash": "a8c1f4153baa16f9852ed6b233141436da9f6682cde56da0549916ef5bf05a35" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.861Z" + "updatedAt": "2025-12-04T20:16:57.895Z", + "postProcessHash": "3ce26fb8500933f8e6dcfd94effe589d648e3e6fcdac654b4e5f77e130fde301" } } }, @@ -19900,1443 +24332,1776 @@ }, "7f17768c7754fe62726af95719e525e92c0e64ec5573a51db338fa863d1513be": { "ru": { - "updatedAt": "2025-12-02T22:57:28.809Z" + "updatedAt": "2025-12-04T20:16:57.863Z", + "postProcessHash": "b9d54a8c3e95c9258ac3179e002411b85c8c8f3f7f1edd416565224e8fe686a2" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.810Z" + "updatedAt": "2025-12-04T20:16:57.888Z", + "postProcessHash": "96feb1806cfe0332a6f850050bd6dd0b506b1e488178d557cae55afbb7495b14" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.833Z" + 
"updatedAt": "2025-12-04T20:16:57.888Z", + "postProcessHash": "e5895702ed9dda7110c1423d89a210e3e1ed8816ac398a27a49c4f9ecef6528f" } } }, "d7c9bac812afb3a149a06c61bd4d64e33efbdacc006619f813e625307caa403f": { "bdd5ad8ff2c6c4cbf81696dcd7cf80196be279d10a61a61d0f45caee15d90df1": { "jp": { - "updatedAt": "2025-12-02T22:57:45.198Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "257c4c8344e461d76920ae8393f24c87edc697499389fd9decd8d185c3cad248" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.205Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "46ba61cca9be2e547d2d5bf8ded5211a740fd21e322f95af8d1952a3e4c9aeed" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.204Z" + "updatedAt": "2025-12-04T20:16:57.899Z", + "postProcessHash": "ef8e692da66816ef7cfebd024a6957b5804226891b31c3e88df7a4fb1bb91d56" } } }, "dd8bec416e1a990e1a7ef46ce2f7761b51432155f4c149641fdc484fffcbe786": { "e43ff80310727083fa06482849132d96578ddd46a8478a49dd3bf42b62882609": { "jp": { - "updatedAt": "2025-12-02T22:57:28.855Z" + "updatedAt": "2025-12-04T20:16:57.879Z", + "postProcessHash": "5fc50c2e29c575645a23990b85191d09859d81958931815d4a9f46d1359e32d8" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.201Z" + "updatedAt": "2025-12-04T20:16:57.898Z", + "postProcessHash": "f332b5aab8022ba892057b0c2c48265f9b88e7f7b5f275fc8c8668922d73ef1a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.202Z" + "updatedAt": "2025-12-04T20:16:57.898Z", + "postProcessHash": "9ac918e6a5811102a441d2472bb0d6570f6c1891fb4f5f9049c6bdfe8cccf601" } } }, "de693811d680fc3f30e32c9bc40614fb35f73f55847f45a16264e614a65d74cd": { "8fa72ae7500048bac519db43150657d9500e969b9167f548ec14e8f2a73052c7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.845Z" + "updatedAt": "2025-12-04T20:16:57.882Z", + "postProcessHash": "a49e7c2502529ff17655e6d109f58ed4010dfe4d5c2f57de3648714708ce6d1c" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.849Z" + "updatedAt": "2025-12-04T20:16:57.884Z", + "postProcessHash": 
"371a72820c4144fd514e9c488b8f163a3640be9a89ec71be27a5de1cc5a7d9e3" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.850Z" + "updatedAt": "2025-12-04T20:16:57.884Z", + "postProcessHash": "85ef2a78a583092d90f79dd14f0f627199b81d14b1d3de9db7f34051cadedfc6" } } }, "f05e54b97f0cc26e7e6d19993081fe3b3e87410932334968bcda276a9ed28bd3": { "1d9ae46b239c5f237c2f10a2d4e4c6dbc9261c9c864accb4c80b847fe59481d8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.858Z" + "updatedAt": "2025-12-04T20:16:57.881Z", + "postProcessHash": "9734bb932d822831b488180ffdadb1af13dc2c692b57d16ddab01a07d73b40ee" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.856Z" + "updatedAt": "2025-12-04T20:16:57.880Z", + "postProcessHash": "e26d763fcb1b080ce4295e3f9b3c190e182bd32ee4404a2364a569e4d0d865a2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.200Z" + "updatedAt": "2025-12-04T20:16:57.898Z", + "postProcessHash": "caa91c3943bbca0f545030dde2dc2514f20793055bd694372b1a665f20382ab0" } } }, "fe5b39e56c19c291226f7e3197f67720f7eec98c8343fadf7b1a283589869ee7": { "afa11621c4420fe8e64e6a032e92ea848928d0c35428ff0c7a1b50f7215c04fe": { "jp": { - "updatedAt": "2025-12-02T22:57:28.856Z" + "updatedAt": "2025-12-04T20:16:57.879Z", + "postProcessHash": "23a24ee37ccf631ff821742f1ff91e57757c794a960ff0bbc8feaebd9d561d86" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.844Z" + "updatedAt": "2025-12-04T20:16:57.882Z", + "postProcessHash": "bde8597c11dae1ed8502fce9e77a3243fcc8ea95f40a94406c08b40cff1c59f0" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.839Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "9b4b2308899858a518e76208a706d20c271a8667e2f8a6aac51b2ed2362890cc" } } }, "010d68e65065006bb03ec5afd6da3fb00d5d932dc58d86d356a1fb32041700a1": { "097920bd8ae55b0c4c40422f164718639bf789af17784fc7d268a39285332660": { "jp": { - "updatedAt": "2025-12-02T22:57:45.214Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": "8d490b9e4d6b796553eb7e2a775ca73c95a3c90800d9dcdda7e98e66c1418966" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.223Z" + "updatedAt": "2025-12-04T20:16:57.894Z", + "postProcessHash": "ff609f5fd51b2d633f938eab16cdf0fa437808a8fc06a8dd6e0d333e3c6f174f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.213Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": "c6d0305f0d417d25208474e1cf5ee49cc3ae47a1ab1fec50cbd36bb817d18256" } } }, "194312689f754af1eadafa36fb316871d927e7555a7e9237115b13fdf9c16217": { "efdd19891bd36c4b5ee32e3469c4609b62a971eec1305634c7e49ed5d594e5f0": { "jp": { - "updatedAt": "2025-12-02T22:57:45.218Z" + "updatedAt": "2025-12-04T20:16:57.892Z", + "postProcessHash": "f7ef0e1622c746dcdea97a3c6bccbc6b8bd5353064d062c845228b2913ba3b3f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.215Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": "c3aa3d8b033edcdfd9a3f4c4b499ba6914a7447a3643be74544904a78aec2715" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.225Z" + "updatedAt": "2025-12-04T20:16:57.895Z", + "postProcessHash": "31551cfaa3803ce7cba54dfc8ca1a585aaa8d4c11c8d1479379c84ad946d2a28" } } }, "1d9872faa89c7d85b9aedea5b9a72b7f79022036a883f0d76368ba0aab461711": { "70eda1446c7a201ec8f6c37abb74c39a9535a96ae3e057af6538915558876b9a": { "jp": { - "updatedAt": "2025-12-02T22:57:13.216Z" + "updatedAt": "2025-12-04T20:16:57.889Z", + "postProcessHash": "ae81fe1966f5a4c45d670c83f098b0d12155d8ada4dee6045d3791e39c77b4ba" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.223Z" + "updatedAt": "2025-12-04T20:16:57.908Z", + "postProcessHash": "aa2b29e1bb81f2d44fb14f8d3d389576f36e43e6aa03cba814f1ce8604ccc87a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.219Z" + "updatedAt": "2025-12-04T20:16:57.909Z", + "postProcessHash": "7752b12cedd7d0fcdb5f09102f50d6a655ac8a84ab0a18f0cf8817a562b3c6bc" } } }, "20fbec5fbf3b5f7168ad991d57410b2a6e132fb9884af790cd2c4a29f361d02f": { "36ab0d9536cb78b918f577f351ad01da73a11122ce416a9654035e7dd9a193bd": { "jp": { - "updatedAt": "2025-12-02T22:57:13.229Z" + "updatedAt": "2025-12-04T20:16:57.910Z", + "postProcessHash": 
"9f9584caa285c993155f6f2415dc82914da8901cefa5435423a4098aeb2c0d62" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.220Z" + "updatedAt": "2025-12-04T20:16:57.907Z", + "postProcessHash": "265c6c141ae0e4292a36f48478df04ff3e66477b7799d130dd1ff61674160094" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.234Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": "a07979f6a12ca64831c95dcb3a647dcea5fc18471deea037c55737477779e0ec" } } }, "286853d39a677e8828ecbe663218f27fedd5bf2bf0e04f6a0845b378f6e8eb8f": { "b384e9d652969f7c44b75186494dd5743f6f7d29a2d07cdc6516f906170b8ecf": { "jp": { - "updatedAt": "2025-12-02T22:57:45.220Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "aecdb64ddd77fd6d4a61049bbb68f1a19acb919c9f3722023e4010a735c88a99" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.211Z" + "updatedAt": "2025-12-04T20:16:57.879Z", + "postProcessHash": "b8daa3af318a4d423de0cd05c4de55d8f78be0ae61950a973a3840b88c0027e1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.210Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "824bc9b08383112117b04fa220490cb946a4122f6caea18bd8eceae812f14e90" } } }, "31135bad715065dcea06e31337e3a5dd947f27dc411676ba95164d339409a83d": { "763ca58dfeaadfb6457a37642666f8a6557e78cf6969b41e8b1c31735f7e55f1": { "jp": { - "updatedAt": "2025-12-02T22:57:13.233Z" + "updatedAt": "2025-12-04T20:16:57.911Z", + "postProcessHash": "6ca824ba0609e0dcc9ff9606f8b4713ca7df5fb353689077361826b05c383df1" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.234Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": "8a15932f2724a339e2b405de2ec37456c1bb0669a1b59a433307f6cc334c8652" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.230Z" + "updatedAt": "2025-12-04T20:16:57.911Z", + "postProcessHash": "f990906b4d4b9d9dcb5a6125d59b2450fb970fb59109da00f4d5f66ca253b2de" } } }, "3d5ae5ca94ad055105e113940b4e5f4f01c26351d5e0aa85b01fb3569699f7c7": { "db7ea4892aba1694aea64f46778e44e4d3a93c6f1d8d5290b4d72c844116181b": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.220Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "3fe7353c61decc5d72c2aa911caead430389d94e172bb0df3305e2331e3d2f8e" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.218Z" + "updatedAt": "2025-12-04T20:16:57.892Z", + "postProcessHash": "93049eeec93adf7a881563ce84798565ff49f3dd736cfc5ec25333d52ae0016b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.213Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "81fb031eeaf9a9fef4286a44ca4724f24dbd18d8da14672620cff794f05e0e97" } } }, "42c6ee1d7586b75ddf294b270cd91e6cbfc04990b03c458263060339691f65f0": { "e28070dd3b8f9c8e1de1f84e6213088ded4997089a0463fdced52aa0d7126bee": { "jp": { - "updatedAt": "2025-12-02T22:57:13.228Z" + "updatedAt": "2025-12-04T20:16:57.910Z", + "postProcessHash": "e8586a930782ebd681965c9b32dd48dff3fc5a5e10dc146c809314c6920caaf4" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.229Z" + "updatedAt": "2025-12-04T20:16:57.910Z", + "postProcessHash": "1f48741210908e6a400270f29ac658564a0ba4c062307c57f46be7c6d0d7a121" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.233Z" + "updatedAt": "2025-12-04T20:16:57.911Z", + "postProcessHash": "b7017a739356eda90fcb52ba7e0c678f76cb3a728ab3c57d91541029a94d97ef" } } }, "957a8d1238fb98455672f68cf73445d00c58150afae706f656904ea7f56bbef7": { "438d8f6bebdf4c4f748f67bb045a037db4fe70bfbe607e05bf05fab5e60702e8": { "jp": { - "updatedAt": "2025-12-02T22:57:45.212Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "67277e181deaa5900b706ca3041f2ad0a0f71cfe200da192f3fdba6652ee21ee" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.216Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": "4a079fae31e0dab4f8849ca2a5e1100fd6f7676b38aa436212749c9d11ed9f74" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.221Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "64eb0254394380f4a58ee3b855ff33e5439307efad9f1d43b2068541745eeb88" } } }, "bddea4d6e2b142218cf0aa18075b105f560306487a43f98ae93666cc5b0a2088": { 
"d82b30f533151c915ffd2fccf00cb93c7247a81a9af41c32c0b6ee0a941f1dc4": { "jp": { - "updatedAt": "2025-12-02T22:57:28.839Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "924a576751b9e5582800c7297c2aa53fffdec58e85aef82bf14acf85dbb560ea" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.206Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "09ddf4bada82aed2ba05c7072af7c23fcbf26ccfb2dfb1fd394914481f82fcee" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.205Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "920be0430f96ed262ca57011eae8e8851d08d729d0590e061f3a4d7bd6293108" } } }, "bf43a73f5fb45ab9aa1813ec5b3c6567e2f43085622a3981fc47bbafb9f28c10": { "e5f80b1293069b81103b1bd7abde9c4afd1e877bec64781bf8b20adfa5b92acd": { "jp": { - "updatedAt": "2025-12-02T22:57:45.222Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "127a3535d46775f90a8e6321082f47e55ca07cbbef30e0e8cf3505429a381d5d" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.224Z" + "updatedAt": "2025-12-04T20:16:57.895Z", + "postProcessHash": "ee554c086a7e74a84a478f3ce03f9f94f037bbbd3129e369d3b2c2ec2764c891" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.854Z" + "updatedAt": "2025-12-04T20:16:57.876Z", + "postProcessHash": "7cb809d79f82199d1692b2a2e0eb2fa7af89c84afd31039c77550645289e7efa" } } }, "c2092e34e0d63b8d13f5c437a2c60457a006bad8bb89baf8c2cc3dceafc6ec29": { "afa4682df8ae8c2d39481ae157b1d008ea8cf2cf75aa79ffcfdf3cacb4d9b0be": { "jp": { - "updatedAt": "2025-12-02T22:57:45.190Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "275b700d69d510c157ec344f7dd0c5a8eb429b3697d6c0143afc3494b8b8cb65" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.857Z" + "updatedAt": "2025-12-04T20:16:57.881Z", + "postProcessHash": "4f74204a39c490f963aa7b30e7c2f49452e7cbff84f7df10953a4c66bd563d8a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.860Z" + "updatedAt": "2025-12-04T20:16:57.882Z", + "postProcessHash": 
"226597b35772c4b368dfa82bd2ad022c4604a23cfd7b6738bc9974b4ccbda8df" } } }, "cf7f50d7a1e362e6ebac5f8205b53d0c8eb6dd0efeecc010f23b8d8f09ea8f80": { "b7f62ebe9c2d110ae4ae2cca482b48cb6a82bf22cf7a6a11933cd85ee6309d22": { "jp": { - "updatedAt": "2025-12-02T22:57:28.859Z" + "updatedAt": "2025-12-04T20:16:57.881Z", + "postProcessHash": "70b1fabdafb4eb5fb7451d9741a3dfb52edb472bd8ab728fc30d14c6ad169738" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.859Z" + "updatedAt": "2025-12-04T20:16:57.882Z", + "postProcessHash": "42723f4d21e00e9391b75c61d826345654d7a0818b5df5b8bff8677e703425d4" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.860Z" + "updatedAt": "2025-12-04T20:16:57.883Z", + "postProcessHash": "d1d0bd5f8628d3d39e78a84c994cb6a83b7b567c673bf11b7b3573b8f75db45f" } } }, "d1838a9b36c0c0fbc1be851ae978af65ba7e34ab07c37daf5e5c0c741129fd76": { "14af3666e0c05efc3ffcab87b38768b94b93945123edbdb09cb8537e7a7d07b0": { "jp": { - "updatedAt": "2025-12-02T22:57:45.191Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "60032f1510e4442720d95ee40018a00d0facbb473432166b2186a286c458f671" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.206Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "c0ddd07a5f78ba3609b509cabb2eb82390a3de5aae9fe33bb3107866ebdf5e35" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.200Z" + "updatedAt": "2025-12-04T20:16:57.901Z", + "postProcessHash": "6b5d5d9cbe14e7a6dd99395088c6278ef56d0b172666d6c20208cb333072bea4" } } }, "d488e5566dd6bf95742db0d7525010310bd38f5971c4a87992a3ec793feba8bf": { "5ba7a81bc990b2456dc8374342d01a7253db45b5183ee93be9b51553586efb4f": { "jp": { - "updatedAt": "2025-12-02T22:57:45.217Z" + "updatedAt": "2025-12-04T20:16:57.892Z", + "postProcessHash": "c1853aaa72262b93f00cb8632114451ecbc1703b22836dc8103a61ec14bd51f9" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.219Z" + "updatedAt": "2025-12-04T20:16:57.892Z", + "postProcessHash": "9d870a92e362b3168b61db8499ee6e04e552c640673c868acea8cb7a04880456" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.211Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "32bbecc91f58fc799133abab45d11f5e606520ef704741ef9f551e33ec8bc338" } } }, "dbd27b188ad3cd04691439c723e924796170d0bfdf59a9e9b53d90caca0178bd": { "80b5c91060724e755120c034531a62ece1e13c4c261ac38e2e448b4e2d0e61c2": { "jp": { - "updatedAt": "2025-12-02T22:57:13.212Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "a52de9c3bab8a01f0373f4ea29900ab5319819de81ca22dfdb209011c259b063" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.220Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "b01b14f2a35c5e8443999fc41a31761ae741dde445f0eea2cf3d4488a50089ae" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.226Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "c26aadf1268a9fd7c03d3d6e5b46ed9aef287f3c5f06456454305c3f8f3287d9" } } }, "df4069454374e8aa9593f9687f16b9e3b26d64e2b0082ac22a7123faaef82740": { "e7ed1c4a6adc17da8ad5806d7ebfbb340ba0839bd13951ceea09267bd14c0a6b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.214Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": "f9a98f823cc1861db4565537072d19fbe660e4acbd405a51c07ab6c305bb5056" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.225Z" + "updatedAt": "2025-12-04T20:16:57.895Z", + "postProcessHash": "79838d0d41f478fb34ac2990c33f9c99657867fc9da21a868a380d1cdf056fc7" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.222Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "d4dc60eea0590ff677fb60d5b3e7f94420380748928b65fc0ef06f8d9ce4d2a5" } } }, "e1f18ff34031035a08fe64318b680c893d2d37fb3ac9d30c908d0671a1180f50": { "85e7c92ceca8e1da3949120488020e40a9d10af04a565222bb41223f27a16de2": { "jp": { - "updatedAt": "2025-12-02T22:57:45.217Z" + "updatedAt": "2025-12-04T20:16:57.892Z", + "postProcessHash": "7c3faf9cff1e32d8c7dc7e9e6812af0603e54de17752d306de9c0562a71974ea" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.215Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": 
"8bcbf110cff7bf92af0a40850a4063b2a520bf6cd347ccd94a7bf0f96b0eb9a1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.213Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "0b934ec494a3d55ac07d9ef85c375d23b5d52fe8fd9f892af795950d8def6196" } } }, "e4102128c26bcb3f9ad172af76f46d964de749c24c132d5348f9ee3e3de5951e": { "9f2615fd10d6b26b0f5f878a17f58c2100fb6bca45e41b0b5783df222e6dc6e1": { "jp": { - "updatedAt": "2025-12-02T22:57:28.860Z" + "updatedAt": "2025-12-04T20:16:57.895Z", + "postProcessHash": "371fdffd8c87ccbbfc6a1b72510d1b2eafaafabed522856fc7acda97cc670df8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.857Z" + "updatedAt": "2025-12-04T20:16:57.880Z", + "postProcessHash": "b6cfb2542c9cbf132a0af095ac50de15cd38b0805de7150027b8b17379f4964f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.189Z" + "updatedAt": "2025-12-04T20:16:57.896Z", + "postProcessHash": "1a3dac6e022cebd3d3b608fb79d9ca5221cc155723f98d89dae758ecc5bdc31b" } } }, "e6493689e0bffb010f12f340386981233dcc9a2f28df11fd9a6e6066d3c5ce8a": { "8de8a8317a3584199eab7b620cccbff20a6c44103452bed63f66cf645cda12ea": { "jp": { - "updatedAt": "2025-12-02T22:57:45.207Z" + "updatedAt": "2025-12-04T20:16:57.900Z", + "postProcessHash": "fc07d3422e7800ece1222c7cc0f7c544a63afa05b8d21edc119264017f7793ed" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.208Z" + "updatedAt": "2025-12-04T20:16:57.901Z", + "postProcessHash": "f497414b08f8dcfb983dba049c462ee47f7a19ca2745142ac3c513f464aa1acc" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.204Z" + "updatedAt": "2025-12-04T20:16:57.899Z", + "postProcessHash": "b36287be4d4c032165873fb0d7d4306ada50c307d2aea4c0e7049213b808f144" } } }, "f1b73f2ce3c7b6d1ca6f0f28439acb8cc45586fb6f3d1fda35224a8483871689": { "84181ea71df19456b8c88cf67e1c18c054443ce40152a17b3fe3d33911ecc651": { "jp": { - "updatedAt": "2025-12-02T22:57:45.204Z" + "updatedAt": "2025-12-04T20:16:57.899Z", + "postProcessHash": "88c46cac66e42099b8c1956e8eb1cc132dba7c665bffda0549abeec99881b972" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:45.199Z" + "updatedAt": "2025-12-04T20:16:57.897Z", + "postProcessHash": "cbb9f0de216406e01421ad92adfdfdabaadafcb87952f9630df1ef7a993e64b8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.857Z" + "updatedAt": "2025-12-04T20:16:57.880Z", + "postProcessHash": "21d848b50e628e4697bac22570e1c2d526c30c5f85d4593d2554563e1018deff" } } }, "0060f21968d741d1a0f39b19ac2622ebb5065bdb709b03e138eef82e28e31244": { "2670f637399d04628da2e0f038d37565f781605423d4d054185eb0cd33613948": { "jp": { - "updatedAt": "2025-12-02T22:57:13.218Z" + "updatedAt": "2025-12-04T20:16:57.903Z", + "postProcessHash": "cc58a73461fac3d3b1946529e11e588340bc74bc20c2b1b0f26cee8011dbd55f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.227Z" + "updatedAt": "2025-12-04T20:16:57.909Z", + "postProcessHash": "9e5ae7b89f7e3bf3472249c620ec99b8f1ab7d4026e85a55ac028dc47d4bf3f7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.222Z" + "updatedAt": "2025-12-04T20:16:57.907Z", + "postProcessHash": "fe4cb09f79343c8a3290fae6eec5048c86fd83b1b8df1f951eae95e4ab92bf93" } } }, "1e131dfad9800937839c4d2f0e5ef58daa6a99e44ee4ff5ea4e66de6069c7c37": { "f76c1f685c5d14375dafd9a42aa84e6f31aeb5b84b0d8c24a2915f02c875d4ca": { "jp": { - "updatedAt": "2025-12-02T22:57:13.240Z" + "updatedAt": "2025-12-04T20:16:57.904Z", + "postProcessHash": "2ba3c00ff60f4a6bedf6754f550c43608e028c0c10586d2d7daa702c7032a0a2" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.231Z" + "updatedAt": "2025-12-04T20:16:57.920Z", + "postProcessHash": "00ab2ad06f32e67fa98705b28d3346e29258b16bed12e39ce43f64cd27f6f8b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.235Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "cc477d5f950cec44900e9c6f7365daf78f922d54fd4a247f3674aae6a7f2ac2b" } } }, "2149ec9c3299895bf0097e125705ba36a1e04efee5f43e59c08371caad0cfd45": { "349711e0368c3473e04141d6855c62a92897b88020143c2fd44659089f128368": { "jp": { - "updatedAt": "2025-12-02T22:57:45.236Z" + "updatedAt": "2025-12-04T20:16:57.948Z", + "postProcessHash": 
"4fddafed2a43e5e009106c14d5a3c9ee2f3fa09349d5f1371cd88deb1ee38e2b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.237Z" + "updatedAt": "2025-12-04T20:16:57.949Z", + "postProcessHash": "55deddfa92037ec8176d60c79e221c90b1433bccef974c5eecad24cfda4fba6f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.237Z" + "updatedAt": "2025-12-04T20:16:57.948Z", + "postProcessHash": "525eeb574f90a79595344ab779b936aff120f9c4526e22cdac7d1b7d76c3351a" } } }, "2a714f7169c51c1804757b5577385bc512ba198c41b0cd228e98a66dc148abb9": { "47388640fbbd48eba401a20cf2754eced76dbed9147e6841f469e2f4acc14075": { "jp": { - "updatedAt": "2025-12-02T22:57:13.235Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": "2d54b861afa7a6527ae0630101281dab3239387521453c1a856ada9c1b0de61f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.232Z" + "updatedAt": "2025-12-04T20:16:57.911Z", + "postProcessHash": "e93cfc4f0e5847cd974160fe40a906ecbabf56664040e8ce00adcfda3cb10aa9" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.226Z" + "updatedAt": "2025-12-04T20:16:57.909Z", + "postProcessHash": "0ef0d5ac815af5580000cee78d1888578ef5fad0b6c42a30be050d215bd55c1d" } } }, "3f24f2c556bcea3bd4a8da649d898ac0d1aa590efbf76127ecbd252c8df9b55c": { "87fc99663ddeaf7b1d38d03a534b4d0b7cbb70edc9c3b460d5735be114f9f413": { "jp": { - "updatedAt": "2025-12-02T22:57:45.211Z" + "updatedAt": "2025-12-04T20:16:57.889Z", + "postProcessHash": "5f38df9dc6c08a5b03f7d8480d8aa9507ec1656722519a1a4a416f0b13fb1eeb" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.223Z" + "updatedAt": "2025-12-04T20:16:57.908Z", + "postProcessHash": "bac1934bdcd500f3c4f4750359be0f2fa5d6c5bda248b021ca91948ca42cb2ad" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.216Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "2eef56187ab1145d04d01ad653a58e66fc3f990beb026697d0b31996d323c429" } } }, "42ee6b1cf2d60dae66ca7799b6e3c96a470d6fdbdff801031e35cb9e1891dfdc": { "ab655d464095f3f0a801879f7e0058f71ddf7741b59f1ac855f58f9f7d807344": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.211Z" + "updatedAt": "2025-12-04T20:16:57.879Z", + "postProcessHash": "e95b7682df9aaf2361898e7544eb99f652587cd31c613a95c196625a217e0369" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.215Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": "07440b2517ff08bdaf5992888e7b2c278612665e3033774846e854821957ca70" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.221Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "96ac34ba183b9dcf66bb8a9d38f272e5db232d564952c6144ab620ddc068f490" } } }, "46e9f56a57a3931558fcf69333139681d05b4c2f69040e2cfe7a939c976963f3": { "e1fe2166283accd68531dcb58d1682b96cdfd9ca452ab7df14ceb9a7623b7419": { "jp": { - "updatedAt": "2025-12-02T22:57:45.234Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": "8dcb3d6dff45c09da61fc2c9d3352cc069e6351448be4918765c3b56fcdec51a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.236Z" + "updatedAt": "2025-12-04T20:16:57.948Z", + "postProcessHash": "5d7bba5dbfd8ce3f148c3d1714ec6b782b64d8ed693d6148978f82f1d5cc07bd" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.230Z" + "updatedAt": "2025-12-04T20:16:57.920Z", + "postProcessHash": "c6df338bb7c56a4e1cdba0a8840146c7c1c2868873fd03dfaeefb4c482480079" } } }, "46ff568b059cec990fbf679bc4bed642abea08d09f7bafd4747a7036515b95cc": { "714391bd24db523bc05255d05254efcc0766f0f4b43e9f23aaaa7548eef953df": { "jp": { - "updatedAt": "2025-12-02T22:57:45.224Z" + "updatedAt": "2025-12-04T20:16:57.894Z", + "postProcessHash": "d04cc21b5f3ce9b2bc712a69ff4da36cd771fd553bae290e4551c3f3e85255c6" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.217Z" + "updatedAt": "2025-12-04T20:16:57.892Z", + "postProcessHash": "697726d542dd0f996bc7148048de9a17ffdfbbbb88ec848cf5ecd5d3a069a624" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.216Z" + "updatedAt": "2025-12-04T20:16:57.891Z", + "postProcessHash": "4769d265d27475661961bd1d2d820227edf33e8e834526f025573f3a2cd2a3eb" } } }, "4fbdb5b1520dff0a17e0429f575aa6011097f81752684475262c7ae6aa200bed": { 
"1ecda987c93d49e1fca1c7c93d39044137cd955db9a36fbd10169f0b85cbdbe1": { "jp": { - "updatedAt": "2025-12-02T22:57:13.226Z" + "updatedAt": "2025-12-04T20:16:57.909Z", + "postProcessHash": "b167678ac61abd011deb00a1d914afafbdf6be97721c55dc546818de73ce60d0" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.231Z" + "updatedAt": "2025-12-04T20:16:57.911Z", + "postProcessHash": "5c59e6325b701042a58da3c448112e8c37391b1518d13ce2faea666d7d8e816c" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.210Z" + "updatedAt": "2025-12-04T20:16:57.889Z", + "postProcessHash": "9030d1af79fa3e8b6cddf1c1549d90575e58bed469c26e6132493723576291ab" } } }, "5083281bd3b547bf9df36adfb2bfba73c9e0cc795d0090fcfa111ce30996f661": { "b09f8b5ff58806fe3abfa9da3b343ccfd7b8e980a6c46bd43dc32927ebac6ce0": { "jp": { - "updatedAt": "2025-12-02T22:57:13.222Z" + "updatedAt": "2025-12-04T20:16:57.907Z", + "postProcessHash": "318a2c3da1e6202517fe1911bbf38d5f836a7b98df903ff8c2d1fb5df15d07d1" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.219Z" + "updatedAt": "2025-12-04T20:16:57.906Z", + "postProcessHash": "6135cde851af74e11fde3e31f09460b65ff67c460906fb832944c08f84ea5ec9" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.236Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": "2fe3b6b01619791a4494a5e46215183c82420babcb7f543cd764104819d9d5c4" } } }, "5b010922cc17b528fce9cb609955f868e53ad71f5f8622066d24f7b3953f893a": { "83759792792ece10deacdd4a65c5c3b089a7e420df7df362574464fe94fb9408": { "jp": { - "updatedAt": "2025-12-02T22:57:13.244Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "831c2eaae1a897743e2604f2c8cad8da12ce62d84883995cf2e4cfd18e04917d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.245Z" + "updatedAt": "2025-12-04T20:16:57.906Z", + "postProcessHash": "f82dab472885987c5e715de851d9fdd99515ae0f80bdc5797f50ae39f278b3d1" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.234Z" + "updatedAt": "2025-12-04T20:16:57.923Z", + "postProcessHash": 
"6cfbb113fcf2cf8c8c37ac3da9aa0e3e865009d590f609a981870eca6f68b761" } } }, "5c4315a7496585196dea68631d46489e99dab1c8daac61b452a0c580a509d21d": { "4c3fab4892c9a5c579a3017bb4cbe36c271aad9734d4760fecd5bc4ac75d16d6": { "jp": { - "updatedAt": "2025-12-02T22:57:13.222Z" + "updatedAt": "2025-12-04T20:16:57.908Z", + "postProcessHash": "c0ba7e1d3041218e81500928ae417059dd1bd19de1b107d68c7dce812d274014" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.221Z" + "updatedAt": "2025-12-04T20:16:57.907Z", + "postProcessHash": "a3bbb234a689fa3b6a49feeeeba45dbc965e50a060fa826402a1fdf370582ddc" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.226Z" + "updatedAt": "2025-12-04T20:16:57.909Z", + "postProcessHash": "895709378dc79f407194b50b01b8b73e62e6b76077477b6b928d9f7f8b5a70d2" } } }, "857a5c3ed29e79d55112d1802865f308f93fcc1035cbad65451f1392ced56b55": { "daf1366f4d86b97aac48da95d72257524192b5104b5dcfd34230427de3762a51": { "jp": { - "updatedAt": "2025-12-02T22:57:13.231Z" + "updatedAt": "2025-12-04T20:16:57.911Z", + "postProcessHash": "d1962e1f05c5e03ed344ba7b8defdd542a50e830d79bcf1c9a94f81290236cac" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.225Z" + "updatedAt": "2025-12-04T20:16:57.909Z", + "postProcessHash": "5d65a0e7ccec40cbe5f7b3bf7e32eee17bfd42b1e89b0c2db32e5b50af3b4260" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.228Z" + "updatedAt": "2025-12-04T20:16:57.910Z", + "postProcessHash": "e041e7108a02093d8853a3df07ca246fb30a1c0f740ec1436b80839b5325a937" } } }, "8677ca6f754c9510b46dc0569151e8695270e1ddc3a7791067d3b1b9e5ed0ce4": { "daef99c2eee6c12072def84a2de12f54a7398d20df2b000023b0e91f2100e934": { "ru": { - "updatedAt": "2025-12-02T22:57:13.228Z" + "updatedAt": "2025-12-04T20:16:57.910Z", + "postProcessHash": "7fe865cc69774f8ae7a7a53055b00aa3687e67ef4a216c6ca2fc6e4c2cd70263" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.222Z" + "updatedAt": "2025-12-04T20:16:57.908Z", + "postProcessHash": "8f87709ca0c8a1d2392c9d90dab8f78a3d64de2b95d17628ff3db743a2c2672e" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:45.209Z" + "updatedAt": "2025-12-04T20:16:57.889Z", + "postProcessHash": "8f574dfc770fa58dbb940b5eeb6ac46c54a589ed516829fcded1807976a56cf1" } } }, "9056916609446b5b12baca0332da8e5e8ad117eb3017488e4c5391bf09af1c65": { "5c1ac19a6dd8304196f8b5c3c4538997259c7d50017642a246b97a60197a70c3": { "jp": { - "updatedAt": "2025-12-02T22:57:45.214Z" + "updatedAt": "2025-12-04T20:16:57.907Z", + "postProcessHash": "1397b41e76bbac1baf75e0a6f14ec9372aab5fc0c3017f91c9bfc6d765416b99" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.221Z" + "updatedAt": "2025-12-04T20:16:57.910Z", + "postProcessHash": "3b988885b8d47a0c13dfaa8add35756be5d035ad11827a7571e12f8681faf3ab" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.219Z" + "updatedAt": "2025-12-04T20:16:57.893Z", + "postProcessHash": "3cb0ea5af5990414d97e94b3ebce6a84a2c7aa8b8b3dfaaa8062196185296960" } } }, "93878162d293d38a3f960218a0ee8b1904f199878f15fb0a11f80cc5c6b78ae4": { "38c7da17603cc8d822478a774e4a0851139aaaf988b5e6ac6aebd7c75546c08b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.212Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "28164fb4c37ecfee48a4599a1fd657ef756b7efae7055b8a5e44f04a3ab7f013" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.854Z" + "updatedAt": "2025-12-04T20:16:57.877Z", + "postProcessHash": "15079f8bef55569c92fd762cb6a026949284380fc2a904cbe1d84c0322f23ccf" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.216Z" + "updatedAt": "2025-12-04T20:16:57.892Z", + "postProcessHash": "787bb5df3b7509f70fb007848c446813f000e03ba5d5bce7dc2020c556a15c7e" } } }, "9dd17b4dded970a85df00496de13a74873211a7c3eabb2bfaf1670710eaff639": { "3c77cf690b82a05ac07374c03da339b16bb18f1f69cfa9c51ba296c56cc2f48f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.221Z" + "updatedAt": "2025-12-04T20:16:57.907Z", + "postProcessHash": "f2a9861fc5f546100618f83cbb9716457f3dfc63d89443ed4931c236f59fa1f3" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.235Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": 
"da97c5ce7dc18f47f175e57bfebad570a657dc63a6685acab22274efe91a4fcf" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.217Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "97f4e2c19882c1e5b24107758f3ae2de3889d0d2af9bfd26ef61bb6ce60c260e" } } }, "c9c4898b83cd686a39de5d1507a5f2308fdf824b67d0f19322fe25b8230ae68e": { "81003853e9247deae604d51fc5acc18e581a4e0c4f0d79dae6b9207ceefe7142": { "jp": { - "updatedAt": "2025-12-02T22:57:13.235Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": "3bd212560e6e0111ee3986061feb8d5e2ffacc5975ae5fc59e057e010a0d53e9" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.236Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": "a130806480e20ef0b8d79a7bf6e69ab26a728d6a0427d3e67e3eb5eb9ebad8a5" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.233Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": "136f401f5e86f082c08a9ae4ac315c5cab2cd7da274dc2e4de417914454e2d7e" } } }, "dc429afc1c845ad436d31b61fb908e473d3a84f5a8919f5d78c6cc647e6e44b7": { "f94a5951c5c6355f3214aef3392f0e31f245f1c2a14bce98a45d190388085326": { "jp": { - "updatedAt": "2025-12-02T22:57:13.218Z" + "updatedAt": "2025-12-04T20:16:57.903Z", + "postProcessHash": "c77fa378ca7ab8068d30ec532c0a704d56f40cd1c9d80b09965cc53367b08b79" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.216Z" + "updatedAt": "2025-12-04T20:16:57.889Z", + "postProcessHash": "cceee4e1e3d053eb476e7990d4edd206d3b74fa75c9a91d86c4ee7aeb95703f7" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.221Z" + "updatedAt": "2025-12-04T20:16:57.907Z", + "postProcessHash": "ad35b896d8d07108ed611671322c55cfe7763a148af0d77233b190cad1a1b8f3" } } }, "e8b753d96adf0305cf90b9e579ac4cf927e2e7f187ad62582b9b9a11bab53b3c": { "d6af628ddd5106feb87f50888fafc6fb21ea322d5d658688b385daaa6e2bbc05": { "jp": { - "updatedAt": "2025-12-02T22:57:45.213Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "c37464751b49d7dc9ab7ca689436e0472d8d8ad446cd32d188958d21ce31b22a" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.855Z" + "updatedAt": "2025-12-04T20:16:57.878Z", + "postProcessHash": "149f5b23be317c723e8646ed6485f31297947ae8406e20b5840b1f2d5c6cf9fa" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.222Z" + "updatedAt": "2025-12-04T20:16:57.894Z", + "postProcessHash": "e94f0449814e7dd193ddcd53767d403e4d464e8858652ef3782248f1bb62a1b2" } } }, "eaa9435dae8d90063d0ef13fb0d0245e00f3c444e99fd608251e1fbdb283ad76": { "5671b5319cbc284bc1f4dff7b698d72202dcbb66b153aa004c508aa68e5dff04": { "jp": { - "updatedAt": "2025-12-02T22:57:13.218Z" + "updatedAt": "2025-12-04T20:16:57.903Z", + "postProcessHash": "91e03d6468dfd6b782e5cb7fdea5502516b884bd2cb871ce9d55c5d15d9b8739" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.224Z" + "updatedAt": "2025-12-04T20:16:57.908Z", + "postProcessHash": "8ce55bcf546f97c13886c399501449a1d7c44d330730828a9f01d5ea5f8db94f" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.220Z" + "updatedAt": "2025-12-04T20:16:57.906Z", + "postProcessHash": "c1d11c3e92774ee4598b5c0873de893ab41cf59d5061aa5f67fa70a7582070d1" } } }, "0045741a471f4dac9a3db4c43669d28583bac040167b1d39d7e25215fcda5ccc": { "dab964b634db47350d340e0931ec7aea4b46dc1764c4d7c24c6cf164792b3f29": { "jp": { - "updatedAt": "2025-12-02T22:57:45.243Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": "253f313883ca88f552e24b940fa89166c4ce2007d3cf15dab12db8c1556274dc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.245Z" + "updatedAt": "2025-12-04T20:16:57.917Z", + "postProcessHash": "8d8501fdeba4d1e6cc2c22505c479b6d7d42cc63f4301dfc89a84e9c48370529" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.246Z" + "updatedAt": "2025-12-04T20:16:57.918Z", + "postProcessHash": "fa9967a51b217d869bbd33441a42b260115d2763afd057569187493e8a2b0f0c" } } }, "189388fe355c19cd463ff375adbd81bb8d731d323bbf7cf2cdbbc3058b2bd826": { "5ed3b23bcc3bc844b8a42267d9198f127c4ab515a87acd7da5858ed9dd6fe278": { "jp": { - "updatedAt": "2025-12-02T22:57:45.230Z" + "updatedAt": "2025-12-04T20:16:57.920Z", + "postProcessHash": 
"abcaacdfd0918028eedc8e1675d31cac3658dc986e4b5e4dcd5c74d599c9bcbc" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.246Z" + "updatedAt": "2025-12-04T20:16:57.907Z", + "postProcessHash": "f8b5160672802d2f898d0ea72ec77dc6eb1b534956acd58382a310cae83be527" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.232Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": "b29d6266450e4123bdac5f2761d96c0846b7730e4eeb280cfe677727763f1f79" } } }, "23c47eb4902870785fffe1e4baa6e41d6084e1f924e6ae197c27e7b51f843750": { "052d52be77d868d3d26620fa34155f9eb31b5090d664d799d412457b60c3f050": { "jp": { - "updatedAt": "2025-12-02T22:57:13.244Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "9880b3f692fd9bbe392c3f804f94a86933eb950b2692bafea1e96fe7a278d32d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.246Z" + "updatedAt": "2025-12-04T20:16:57.906Z", + "postProcessHash": "77b8f90180fb62f9ba63d8b4d1050f5f08d8340da7d576126491fbf965598692" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.234Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": "81315cee4f7953ca914170308756a150d85ac0ebd52e53bbb60a2935fd81e765" } } }, "242658032c19f8878ea27fde8bfaf1c2d950073ef6e50d896370f00b777e974b": { "9dced94c1aa74f4a1989dad0844123eff9d336fc99be750b0bd645446ef2190c": { "jp": { - "updatedAt": "2025-12-02T22:57:45.241Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "6eebe742e3d95fb68ddf4fec51c9c4f9700b384e92223d41a924d209a3fcdce1" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.237Z" + "updatedAt": "2025-12-04T20:16:57.901Z", + "postProcessHash": "c4473b4318c9a8dd30ab59bfd381ebcc8c7d6beed1f6ddb44706c3228affc399" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.245Z" + "updatedAt": "2025-12-04T20:16:57.917Z", + "postProcessHash": "33200ec6518a946f76b3a8582a331bfb9d2aea06c499662885ee0af3024a28bd" } } }, "302b0ecec3c1a1792dd0c359652c37057d430e76b3e96aa7c8afde8a7172dc09": { "3c7b42d588a09f60f2e03cc7401555f547ab89b4421ae45faee2a50a6b0b0401": { "zh": { - "updatedAt": 
"2025-12-02T22:57:45.255Z" + "updatedAt": "2025-12-04T20:16:57.923Z", + "postProcessHash": "3200075edb9f6a9d14181dc62e71b0a8be5c14788665f050bc5ee9df4fc8ad41" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.245Z" + "updatedAt": "2025-12-04T20:16:57.917Z", + "postProcessHash": "541d08d59589d8d674fef4c1e5c4154b4976c9b6c2be4deb9a2e5420ca511c8f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.250Z" + "updatedAt": "2025-12-04T20:16:57.920Z", + "postProcessHash": "dad616fc7c44a99b06f7e0bedfec6e3508fbfd4b33368eb725ee361666d3d1d5" } } }, "52bc7e8ec1f25b547908a73830b6d7664f88e3007b6ea89191268490da4b6c29": { "e4376d6532a2d24e4f86f129429881de208f3ea0ab1bcb5f5e31cb841a06df0e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.256Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "4f0a70789c21def14e1ae925d1e18c00626f199c780dcfe11e004d12c510c63b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.248Z" + "updatedAt": "2025-12-04T20:16:57.919Z", + "postProcessHash": "a58320623ed8adf648396b6ef87306705b6141740aca0375394328f7bcb2dd38" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.242Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": "676d55342c6fa37b2a30e79dda42e222e4b142eaad18a5a5e1a784c44fb275e5" } } }, "55d7240c880120e92dc6163e0ae953ba2e5f00fe1352161637e7b7057888a3b6": { "d8020d4cb0f5381e78c97181bfa0e7bd2ff6585f606db5db616fcb0afaff7589": { "jp": { - "updatedAt": "2025-12-02T22:57:45.243Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": "1289a1a1da9a5bdb46791e920d5705fce21840b5916197eb420c6423a27d3f05" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.248Z" + "updatedAt": "2025-12-04T20:16:57.919Z", + "postProcessHash": "d7e1635d4d41141e40e1704d9d77e9e77484a1131ff5897e6a0edd9ca9cf6654" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.246Z" + "updatedAt": "2025-12-04T20:16:57.918Z", + "postProcessHash": "e7ac6012c55217cc76b01be4dbd89db584781c3e3b1edf90071a39c1200fcb7f" } } }, "59b41159dfba51bfc26167978b1127378d106b8d443bfaa28a298294319587b0": { 
"c8e3b18d83b85dea1ed5df57b3bcb5d76702cc3807eb0d8ccc3a2a6bcd46acfc": { "jp": { - "updatedAt": "2025-12-02T22:57:45.241Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": "e67ac546168913b1f53633f0395feb870b498e561ae836f1782b2758d28f21e5" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.240Z" + "updatedAt": "2025-12-04T20:16:57.904Z", + "postProcessHash": "a16be56f9cbc0307968b323146d9d96c6f6bf9480c3e2f71fca5ee833f8323c8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.244Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": "2726fe821b9e6770de3498abec06903396e63ad35b4e572f286b41f6211998df" } } }, "5a412fdae8fb53a15204e66324bb2d0da4e638bc75ac56e67179382d206d7374": { "02f14c2f65f281503e41f11f04ee9f6cd6ab49c4babb7d84453226444e626ce3": { "jp": { - "updatedAt": "2025-12-02T22:57:13.239Z" + "updatedAt": "2025-12-04T20:16:57.902Z", + "postProcessHash": "b36939c1c095d5f9db64b50ee763a7e4c5e090f58a76db743a1b22c348fb1275" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.240Z" + "updatedAt": "2025-12-04T20:16:57.903Z", + "postProcessHash": "cba825dea2cadbcee154a06d6d027a523a0cca4caf6fc4a502a1075721d314f8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.228Z" + "updatedAt": "2025-12-04T20:16:57.918Z", + "postProcessHash": "bb13d47507cae46fff9db7678162017e6f1095616553ab5645c1a63f2ca64679" } } }, "65b791b7c4a125ca183cc9f15c013f5460cca336367cbe0b2dfc01f119a90d1c": { "1a1f03cbe833217e0e2c1ae7fd100d78ebbcc8c0657e571385e72c88889a8da5": { "jp": { - "updatedAt": "2025-12-02T22:57:13.242Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "b84abfe9abbe8379aab76e95cc36c62a9c92e8284eaf593bdc29ca0697fa78b3" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.243Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "41250871f25032f498b6cde08b68f462bcee6b11b45bc76649351c94e0a2fdff" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.232Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": 
"4ce8e506cf60860f0c376cf54ace478930d2d12a3915f95121d25fb54257f9a4" } } }, "6fbd798e9fc4be572840b3ebe3124e7c1982606aa96d7b42be53bd6c1ee9676b": { "123889af8c3d0c2ab264480c584493f0491363fd067fa94edea8459b1555318f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.247Z" + "updatedAt": "2025-12-04T20:16:57.908Z", + "postProcessHash": "c41d686d84d7e82af819622013a54cf91a55c7bc98438b76110339d07a6bebb1" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.230Z" + "updatedAt": "2025-12-04T20:16:57.920Z", + "postProcessHash": "8afbd968baa8f2b7588186c2600cc9497a4b0d5068fd897eaf20b2be9a3bdf85" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.231Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "823d672c357b5cefb3c1252c7e4b008c0fe130f7390195543c0a137d6e18e9ef" } } }, "a44ac107c2f03ea1cfc68d15bea4e84005ab3111943ebc6245e22ba05bffe8e9": { "30eb0a47b3a70c9804063775a6d033975254804002f913220b776bebe7566da8": { "jp": { - "updatedAt": "2025-12-02T22:57:13.220Z" + "updatedAt": "2025-12-04T20:16:57.906Z", + "postProcessHash": "db80899e8677e8b7cd296655e7c30064b55314ee85abeeaaa79fd777acae0452" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.231Z" + "updatedAt": "2025-12-04T20:16:57.911Z", + "postProcessHash": "ae58b24cefff500895152ef4f760ea0cbe44eeb2b1df87d4c1d7dbb43fde474c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.224Z" + "updatedAt": "2025-12-04T20:16:57.908Z", + "postProcessHash": "afd24e56e05d3e278520ff13cfd8b0cfa7b33c7ded8e421370b913bec8ae29a2" } } }, "b8b5935e6157dbf3000442e0ae9da10c119186446dab9b0b6ba59ecd8e081b43": { "3aec0cecfebdb1bcc89b6b5e6d7edb63838928162cbed60f94e123b0001dc3e2": { "jp": { - "updatedAt": "2025-12-02T22:57:45.233Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": "42d28a1f6d1dcb9a8c6ef8238d2200eae5c104f4c668b38911a9356b0caabf0c" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.238Z" + "updatedAt": "2025-12-04T20:16:57.949Z", + "postProcessHash": "ca35e36525bdf9432ed7ad3e00a3749842f286722a5f4aefdcc1ff6dc3d767ec" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.237Z" + "updatedAt": "2025-12-04T20:16:57.948Z", + "postProcessHash": "4e5838c149a6cbd29ccd166406eff21175b2472c6cb214758adc0d56721e84a2" } } }, "c2b6b4b09ba9a1b69a2623b9e76c0169f2800d8215a3a24ec9aaddb566e07410": { "a509a683d08d5f5fa0027e4566599afd99d8661f1932316929ed7b7f5f1434fc": { "jp": { - "updatedAt": "2025-12-02T22:57:13.238Z" + "updatedAt": "2025-12-04T20:16:57.902Z", + "postProcessHash": "ceaeff49976cc1124bc18a08d46c82592131f1978205b3ca62c388c091dc0969" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.219Z" + "updatedAt": "2025-12-04T20:16:57.904Z", + "postProcessHash": "53fb502465af9ed2a9c260b2330a378a244c3bc009b8572aa786c75d52d38f3b" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.232Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": "d763f50709395f8b2b57edd5b1c5a47ec79a137ad61f77a12fbfc948a2dd58b9" } } }, "c78d724ce19757f519a89ae81413bdcf8c707c62709608c1fcd90f8f2ad2737c": { "ed98e153a80901d835f37a02ef176c4789e69c4833533e0096f7181d92ddda23": { "jp": { - "updatedAt": "2025-12-02T22:57:13.240Z" + "updatedAt": "2025-12-04T20:16:57.904Z", + "postProcessHash": "122cc6a693135cb41fcd89d1c2d06ce9bbf0b721029eda2c882305e38dc538bf" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.227Z" + "updatedAt": "2025-12-04T20:16:57.917Z", + "postProcessHash": "7b85e9aa329fdfb40d37b0b12549cfe0a311c394b31604a61461e2a8ba64e7fa" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.242Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "d5761d18e32c91d07dad12bc89721a6775683856f6980c2aa3dff4ef56d59b4d" } } }, "ca734035b219f4714c9e6c2cdca7a1904792cff5ed4cbd21e39a0c5b2a486565": { "73b78bfc9381e1ef4959ec2997ac7ae0499ef6be647ea0c493a48b57261785b7": { "jp": { - "updatedAt": "2025-12-02T22:57:13.236Z" + "updatedAt": "2025-12-04T20:16:57.912Z", + "postProcessHash": "40d76e963faee1adeb008767b59a9d2e513836dfddf09194a06c1f4d5dfa132e" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.227Z" + "updatedAt": "2025-12-04T20:16:57.909Z", + "postProcessHash": 
"c4d8be8468be156e31fa69788bbd0dce8511421457715964470ca2928dde8743" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.230Z" + "updatedAt": "2025-12-04T20:16:57.910Z", + "postProcessHash": "6774fb80354fae4bf2121a6b6b494506baef2aec3545a22f3aa2d2753151c78e" } } }, "d7e629dfded6aa789e7b13dbe976a72e204135dfeb9119292f63ce16cd39473c": { "995d171ddfcf778e23a9288af9f2f3b5372f8ce14a4ce8feb377503b79703cf2": { "jp": { - "updatedAt": "2025-12-02T22:57:45.228Z" + "updatedAt": "2025-12-04T20:16:57.918Z", + "postProcessHash": "5388d433ae54148ab732d974e4489ded32c5171e1cd93f1a4a64b5b54c3fe81f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.231Z" + "updatedAt": "2025-12-04T20:16:57.920Z", + "postProcessHash": "a5b3a2dce520aca20e1875fbba94b102a043df22fb5869065b599775e7726fed" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.247Z" + "updatedAt": "2025-12-04T20:16:57.908Z", + "postProcessHash": "40b6400294e641705938f93ba2ed244ae67a9ef8aec20fbc8a8452488e648cf3" } } }, "da9d4e8b0bdf930b4854e634849ad3b851aaff67143620d95a9ae1e5cb3a7b9a": { "bbba21070424707e5a6f8591bd4bfaa20069a36dd6b196fdc7050d7a1ab8486f": { "jp": { - "updatedAt": "2025-12-02T22:57:13.215Z" + "updatedAt": "2025-12-04T20:16:57.902Z", + "postProcessHash": "b8ea3057cca6467dc70da699f47a64da6f588b0cd1e93483e59ada9bb04d4585" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.235Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "7101e6984112ca0b13886f5d094bbfcabbb14580b48f86822d0aeeb80c162871" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.243Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "25e9303898d8a23c6127a29445d30e93172781e3ab44b2e93866a0e62044f372" } } }, "e774d95a2c81c53102c61249027c7f00d0f3179aabfad8f71a51ddceb6505a11": { "e0d72b4c4c836c1bd36aac8338da95ae7abce2d57528db5e7d5f1ed3d95b6f29": { "jp": { - "updatedAt": "2025-12-02T22:57:45.229Z" + "updatedAt": "2025-12-04T20:16:57.919Z", + "postProcessHash": "05f57619159b0a17948f253932ba901dd9aad60c705281b8aa296b7485f1924a" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.214Z" + "updatedAt": "2025-12-04T20:16:57.901Z", + "postProcessHash": "e302a20a39469e11b1b33152054f8f29c77bb2a9e919462b88c504b6de6d2059" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.231Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "96985dbf88cebc1a9b410c2c988c0c29fe3e63e6aea7f2552c0daefb0da2c5ab" } } }, "eaef488a67183c3737450e1c070243954054aca5bcd96f3b4148d73f6a7399fa": { "893797365249d93ec499eaffe4b1ed5f848af3451b59dc62b1c2c0828602a016": { "jp": { - "updatedAt": "2025-12-02T22:57:13.232Z" + "updatedAt": "2025-12-04T20:16:57.911Z", + "postProcessHash": "3e0d8e0be4eeafbd21a89e483fde570aa88f054119baf5de519ee7e7e2e50485" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.217Z" + "updatedAt": "2025-12-04T20:16:57.903Z", + "postProcessHash": "7818a3911cced07ab48a7e6020f9a34e4f6d064e8db01a5a047dea8c99ba25fb" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.224Z" + "updatedAt": "2025-12-04T20:16:57.909Z", + "postProcessHash": "adac287473e4ffd886c1ca7b8a214915a86ff1b46cdd1821d3b15e3041b6a336" } } }, "ec49a0cee949d263027a7b97accd10ea82850898c06f8611df19e985e58a554b": { "33e16cb7d3af2bae2f39127844a9524539563891c9e3db379b8d508c23f9b634": { "jp": { - "updatedAt": "2025-12-02T22:57:13.244Z" + "updatedAt": "2025-12-04T20:16:57.906Z", + "postProcessHash": "9869876106f62f5ea990c02d07ea0f0bfdde310c78048cbcd1a8ec99c32507ef" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.228Z" + "updatedAt": "2025-12-04T20:16:57.918Z", + "postProcessHash": "a1da742b0336c273e603c22d853e3e9de062e9b048a538c754f17ccdae53049c" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.215Z" + "updatedAt": "2025-12-04T20:16:57.902Z", + "postProcessHash": "850da214bc48cd4c9cf7cf3471df3e752a7da8d1c4351b2d1bebf528deb41211" } } }, "ffb2e794247dc89ebed0e232b0ca7c0962e63c5651c684b4d99f74958eba032f": { "3e28ee25ce5b288bcfcc6aa247be220c6686ae678dc50aa107da3672ec9cea32": { "jp": { - "updatedAt": "2025-12-02T22:57:13.225Z" + "updatedAt": "2025-12-04T20:16:57.917Z", + "postProcessHash": 
"38fd00417f3b4b4a349b8d59dcf93e8061db27cc7371b66907189fac34cd738f" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.229Z" + "updatedAt": "2025-12-04T20:16:57.910Z", + "postProcessHash": "7834fa00bbeced0af131a1d44efb3b8131fabc90b63799a70bd607f037a7bcea" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.217Z" + "updatedAt": "2025-12-04T20:16:57.890Z", + "postProcessHash": "9ab170e6fda55aa5525990fd88fe7386ec0622b81b45716a9e383850162f0b17" } } }, "10bf6a851bc722dc218ed84feeaf049930bd2d7b38be10d0175a4b45da4c9e3c": { "72a26e0ef3fe81a02e1eaba48c8ec2828431893b8e50ba8b3dd2152f58c16698": { "jp": { - "updatedAt": "2025-12-02T22:57:45.255Z" + "updatedAt": "2025-12-04T20:16:57.946Z", + "postProcessHash": "b67e314dc7e5e94b4d1a600dc8b4a9502fe2d8711b56b4c19ead717be608449f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.254Z" + "updatedAt": "2025-12-04T20:16:57.923Z", + "postProcessHash": "af1d2a37f9ef1545f6e4ee2f4a151a8775b096d267b2ed38f02d71fa44a6318e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.242Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": "9a81c2fac1ec6fc5698268ebea202b6e2c0092ad5cfe99f1b3227c5b3edce00e" } } }, "2ba4aedf1481fd714296b22477ae890f08dba4b0496e12c98e62fe2811b6431f": { "e6c19e03fd150258214beab57caf618b7ccc0baf4e6d85d9c67796cb3ea9fd44": { "jp": { - "updatedAt": "2025-12-02T22:57:28.878Z" + "updatedAt": "2025-12-04T20:16:57.962Z", + "postProcessHash": "1ce4a83ce9ceb2a62e1ffba36f2c00b642b8d7cbe45101644c23b95d02afd54c" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.875Z" + "updatedAt": "2025-12-04T20:16:57.961Z", + "postProcessHash": "07d15be89228b5dff4901d85f02fd77421cb13748c4b497d145b82f4a5a4226f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.875Z" + "updatedAt": "2025-12-04T20:16:57.961Z", + "postProcessHash": "70fe15c022d7e5edb65062a2899f6e4c03b5dfe26c3641b393125ee635cdb642" } } }, "2cbf8ac76941d9ddeefe32e9f176ff03397d09339a8d40eb2cfc57efa00fc1d7": { "2d3d7395ba3898aa08ea4bb981e7bffd7607a25fc091046d7a6a359bc9c589ba": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.249Z" + "updatedAt": "2025-12-04T20:16:57.960Z", + "postProcessHash": "3722fc52a2ae4065ab22cdf8feb8c2d9dbd29f3263e4ab7531341f6f586de342" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.253Z" + "updatedAt": "2025-12-04T20:16:57.961Z", + "postProcessHash": "e1d512395e207ef8e04a25c768f9d254549c871061b24c62523f08e1d5dfccb7" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.251Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "d4a76132e7d460a1dee73c242bfac29f39584eb7df213b52db437026a75a4dfc" } } }, "2cf9993a309ce837e0def1fde3b9ec81b984bdc367d668342cfcfe3647301013": { "f44de4bedc5c963bcfdfb8f911d7420b96d114fbac92a40412a2594ce4bc5180": { "jp": { - "updatedAt": "2025-12-02T22:57:45.247Z" + "updatedAt": "2025-12-04T20:16:57.919Z", + "postProcessHash": "5d0d9a8ac6c7c013d7084e4cbab8cb5624510b60a81c7b238c504b8dc6fe6015" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.258Z" + "updatedAt": "2025-12-04T20:16:57.948Z", + "postProcessHash": "c9b4db3b46adb10ab5ac2688ad55da1888f573542d307ed49b8bbe4e4f01b229" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.249Z" + "updatedAt": "2025-12-04T20:16:57.919Z", + "postProcessHash": "cfb6fd115e4d763cede023db093d28cfb4d6dad4b58d0075c652e1b3658c38c6" } } }, "3682e2d45de97f6b173cd748b8b4d7583b7f1420f40557e91bf935dd09b009da": { "28eeefee37cae95ff6cae2142c3e8807b596db44875ceafb1b3e3c2b4f5b62be": { "jp": { - "updatedAt": "2025-12-02T22:57:45.250Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "f91370ed8fcfbdf27ee7f841f2d9704351608b0c8f82b06d7dc71737e370bd32" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.244Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": "7ccd7ce55f7941d66d4163ee04c4e9938b50de16f7f34dd37db447f5c3df847a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.254Z" + "updatedAt": "2025-12-04T20:16:57.923Z", + "postProcessHash": "9e122dc904efd71dae61d6cdd24dd4398507b418ee601c036e30890e176b2d94" } } }, "49403ebf7c98c9603a561ef10166db22cbd8708cc533f76c0feedc9aabdcf4ff": { 
"512f607384640e8f1cbaf19b2b517930edc16a84b9a618f37f91116a4393bef7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.879Z" + "updatedAt": "2025-12-04T20:16:57.963Z", + "postProcessHash": "b2b5fcbcf9928708a663e75756e70d039277e0d03eaadcd92ee60b153378e490" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.879Z" + "updatedAt": "2025-12-04T20:16:57.963Z", + "postProcessHash": "d6dc5a24d45660c2d627c1b5b43f27ff6566c5189ab4dc0d0988131fbb9ebc62" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.880Z" + "updatedAt": "2025-12-04T20:16:57.963Z", + "postProcessHash": "b5a7e8b0cfa00645f52db0133d948bee3543bbc31836f7a25478a8cb2342ebf7" } } }, "4c4a469c4038db0bd30d547c74475eb77e6b3c4d4eb98a9b5406301541d45581": { "32eae8f070a25e27b3cb7b763fb46241c3e69525a2c4d2ba527136f413a778a2": { "jp": { - "updatedAt": "2025-12-02T22:57:45.246Z" + "updatedAt": "2025-12-04T20:16:57.917Z", + "postProcessHash": "96030444fecde77bd7218a86998d4a642f8789b2925bf20d6a1efabc0c68708d" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.239Z" + "updatedAt": "2025-12-04T20:16:57.903Z", + "postProcessHash": "f4a7ea046be1de3a34a74a37adf809c84f6fd18d9576a58fb54faae029d7d1a6" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.248Z" + "updatedAt": "2025-12-04T20:16:57.919Z", + "postProcessHash": "2a36463d38bf179226371c425d35e9fec64b6d93e2202df601740f92a3127e39" } } }, "5e529ee6f1c6b44d742cab16c2436b0f98d61cee3d67b6c243eb91fc94e5747a": { "b5eaa7df44d170d16be268ccac271b07809b8f738fe7f6bc1658432e3f8af2ad": { "ru": { - "updatedAt": "2025-12-02T22:57:28.878Z" + "updatedAt": "2025-12-04T20:16:57.963Z", + "postProcessHash": "9d2735ef3aa5f78e478e6e1b2252148764c4ca0fa837897822ca1f3197aedce5" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.872Z" + "updatedAt": "2025-12-04T20:16:57.959Z", + "postProcessHash": "6e7a908c68f330d41a79fbb4ed880c439c07b2287b5c5f83ba2fa6e6c7bd1bf4" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.874Z" + "updatedAt": "2025-12-04T20:16:57.961Z", + "postProcessHash": 
"e605296d5b2c979092b737b1f2e0b9684d0c124d7638b7f9695eb5d6fef843d8" } } }, "69dc87a0a0efcdc9ce162824232e0caf45af3973a79857510730075407dab81b": { "f55102d7e2ca214c7f9f0866a2bb860df9999592d3a40c6d9b97a2ca5a47cf98": { "jp": { - "updatedAt": "2025-12-02T22:57:45.253Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": "1a8674c0b859bcdb8548c8c274bc46a5a6c60cb9b8b6b9c254cc046682562acc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.251Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "9aa5997be323d13dc0887d6f082a614c89012ca409f397cb953d684f4a34191a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.257Z" + "updatedAt": "2025-12-04T20:16:57.948Z", + "postProcessHash": "a287e030a8cbce7ad6ff7e3fbaf3e7579887c298206009586d27c5d8af22cab4" } } }, "7cf646c7ec8330a693b4b1f30fc05c3ef68f7af5200b4c3d5be55f5e6c627d12": { "b392f20796bafccc3efe1e80f4e6ac3a7db083acc7209c5e540ddcfe853a6127": { "jp": { - "updatedAt": "2025-12-02T22:57:13.238Z" + "updatedAt": "2025-12-04T20:16:57.902Z", + "postProcessHash": "f39796a6598b6f76fe678e5f12878633bf0cc9cfdb53855a2403d1a6572df26b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.241Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": "058dbeb00bd553770b04419f99cf9e2457607c542b8eb58944a6f4bc815bdf54" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.257Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "31639d0beae3efee63d1b035c0fba8de85e9868e1ede30e4358b30028dc63116" } } }, "8b692c2ad787a446b25292433cebf4bef12b92c8e1c334682420d14be45948e3": { "59296f60723eaca7cd5a35c2a97534cb75c9c73d8715867db0a0e547de415157": { "jp": { - "updatedAt": "2025-12-02T22:57:45.250Z" + "updatedAt": "2025-12-04T20:16:57.920Z", + "postProcessHash": "ab2d8d07a8913d481abd103c31f20bdcec790686852d9d5fd0e67910fd32b254" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.242Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": "8da1c3b0bbc80029c72cc233faaedb1439be2b901a8f8f1b77afcf46156d899c" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.253Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": "9a8c9bc26d690d410b8352ebe40e265486227efdfb6ba98072cf61e32bc75410" } } }, "92ec8f6b08ecfb56cf3d8225b5aff3170cfbbd0aa5775ef3532b3a6f5090f16a": { "24d1012de894e965ee2332b480daaca127319bc8cedb17d9ff8c5d9d4b57de00": { "jp": { - "updatedAt": "2025-12-02T22:57:13.242Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "76b5f8ddf66a2beebe30528b0b76328db876f097d5f9382321f43c63fcc961ec" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.243Z" + "updatedAt": "2025-12-04T20:16:57.905Z", + "postProcessHash": "f56a701c0cdfbb3edbde27ee75a7d0438e56d0603fee3913c14da1206c216ffa" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.245Z" + "updatedAt": "2025-12-04T20:16:57.906Z", + "postProcessHash": "7deb477967eb0070ab3d0cb2c8a22fe920b33fc9e92d243b2ac90e4f8a3ac949" } } }, "9f34b6230075d04ee88d715b8efa4b4287ac5ef974d0bc4c4940ad96532f8fcc": { "8527ee18d786491e874ba6c6733def703ace3ed743538e924d577e8b8cf2ded0": { "jp": { - "updatedAt": "2025-12-02T22:57:13.245Z" + "updatedAt": "2025-12-04T20:16:57.906Z", + "postProcessHash": "8e76294d340e2e1113f36a5c7474f76dfd527743b04f16a88f26e64da1165c93" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.229Z" + "updatedAt": "2025-12-04T20:16:57.919Z", + "postProcessHash": "af2e42c5876527570a46792475848b388708a8a1dfec4a5941a38c11f5bdaaca" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.241Z" + "updatedAt": "2025-12-04T20:16:57.904Z", + "postProcessHash": "ebd31ead7b9a49265e285238a8b50b9291211a6620a8a4a78cdfc039ed16b170" } } }, "9f6597744edd6252f669f69c58d2636f8aa9a6b09dbc8b995f9479c4221e22e7": { "308c3f9e814a2ad27043440f48438bae8864dd4493497ab0a517cc656aa82356": { "jp": { - "updatedAt": "2025-12-02T22:57:45.256Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "79e79764cedd543692cad3a720ec3ae11a5a4d4a193bb12128246a0250c94e4b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.258Z" + "updatedAt": "2025-12-04T20:16:57.948Z", + "postProcessHash": 
"18f633dcc4cbfbec91d39b5fb6b3f659d9207bf4f83f102ca3616bce6b286696" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.249Z" + "updatedAt": "2025-12-04T20:16:57.920Z", + "postProcessHash": "348300fe99ff5be116735639e58599ec245dc94cefa23fe8df975ab99a602e02" } } }, "ac52e240a096d2b15ce8bfe0c48a2efac10eda017b425c2339c5001cfcb72318": { "56334f7f1fa03f9b3a42096ca5749c43c65a9573954fa56e40e339606f36c1c8": { "jp": { - "updatedAt": "2025-12-02T22:57:13.239Z" + "updatedAt": "2025-12-04T20:16:57.903Z", + "postProcessHash": "418d14d38959f7759012e6fd062d9e66f0f214908dabab01f8e22fd165beee7c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.241Z" + "updatedAt": "2025-12-04T20:16:57.904Z", + "postProcessHash": "dbeb9c2aca63434eb35a375ce6afbe0842bdbbe6a533e58afd1f5dd8fdcd37fb" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.215Z" + "updatedAt": "2025-12-04T20:16:57.902Z", + "postProcessHash": "9cba57a60cf5785a27c59ac9074b567325b4da0f255b6a608ff98c63f1d7b0b7" } } }, "ac7c945a9a70e136f7bf663953e5789b51065cda16bb4013fffa3f1f5633a518": { "79c8e3c46a6ede7e07368f66bfdc60525ced4d42f656a8f57a26ee701ec28b66": { "jp": { - "updatedAt": "2025-12-02T22:57:45.259Z" + "updatedAt": "2025-12-04T20:16:57.949Z", + "postProcessHash": "04ce7f74fbc0737625dfa9081b573386074d1a5cac11405c3166588dfdc9c81f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.259Z" + "updatedAt": "2025-12-04T20:16:57.949Z", + "postProcessHash": "68a4f0ba5f2a3ee201b00107731df1e409af127ba43c98bca0228db3fca6b4f4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.258Z" + "updatedAt": "2025-12-04T20:16:57.949Z", + "postProcessHash": "ff6b70345579dadc751de1d5b982a40e0616e20f8527dfa9f67ad69ed491c6b1" } } }, "c36157e661a0ed678a48034a7b5806bdd2feedb466d46088c035d8bde2fd79e9": { "4b9ecaa4510afe985e77b7c0bf367ca64dcfa7463bb738f45d328855c7efc166": { "jp": { - "updatedAt": "2025-12-02T22:57:13.238Z" + "updatedAt": "2025-12-04T20:16:57.902Z", + "postProcessHash": "d0ab89f2f4c56f0c6cf6cd474c45e49404faab89fbf8aade7a9b53139c58c126" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:13.238Z" + "updatedAt": "2025-12-04T20:16:57.902Z", + "postProcessHash": "76ce18c038da85f76b2ad0f27a922216ff10b9b18c02cc6e9f0f790a1c01c6d8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.252Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "31fa69e28ffecc9ffd9212c8179dbbee1531b0ae80ba82cb7fb3b68c4ba6028e" } } }, "c3d15c85d4784a496cd8acb62a731024d5bb9915807be3522653ec7b1167d18a": { "608f13e19408e1adf4e6688ec8886b26bf677b304247727063c881c2d33f3968": { "jp": { - "updatedAt": "2025-12-02T22:57:45.236Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "e44023d532fccbfe25b2b31afe55177162fc0bd62bf8c3bba74c89bab0f2f21c" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.246Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": "1994ac962db52d4a53fd6a4db78f3d94879865d969b8ef5f11a674dbb8d7121a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.233Z" + "updatedAt": "2025-12-04T20:16:57.922Z", + "postProcessHash": "1fba3693f3c26be6a16e04a3b8dd859aa2c9ad51349f495a2a53b28e145b67af" } } }, "cd116d178423eaa55d4970d5d78d398dc1e5099ee13c6221d781e9ee5978b899": { "ec13b6563341c4b7d66f4d675ef48acbc1e40f169c0016ceecaeff7982621eca": { "jp": { - "updatedAt": "2025-12-02T22:57:45.235Z" + "updatedAt": "2025-12-04T20:16:57.946Z", + "postProcessHash": "3293c946ec5a2c45c682363f00fb87e8582c457a8ace8f3e43f3ddfe891ef7b2" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.229Z" + "updatedAt": "2025-12-04T20:16:57.919Z", + "postProcessHash": "cbc3a1dfd0baa8b0956d745fcac7def0f1f0b84f7f03aab566294d84edb9af58" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.232Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "3089b3e08e0044685873547cc3997a5ad76c9541e763ea491f9cce3e4a1511a2" } } }, "d0d17f6390066626b3cd9b1b5cf3bfbe37d88dad9a6142c1db99eeec90102fa3": { "f10f076ae99bcca2c49fc911b738e76676d074aa2444ae614ac526d5065f04f7": { "jp": { - "updatedAt": "2025-12-02T22:57:45.244Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": 
"f7a7fc4330db3938f5cb4da71fc385869ee6a5d262f62b00fc7a1a932a052e9e" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.246Z" + "updatedAt": "2025-12-04T20:16:57.917Z", + "postProcessHash": "a5f56cdb4a1a56bc0af8c98f3465f2cb1a4c1d45e2cae841badbe1a65ebefe72" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.240Z" + "updatedAt": "2025-12-04T20:16:57.904Z", + "postProcessHash": "2c5980a59b0f382b73396932387960579fec4445297f5de20d50f0966395a984" } } }, "f12a63823b74d3b2b90d31871ee06bcf19ba66effba17bcc94c800ce464bb39c": { "5f9e4fad6300cfb262a29845e8e0aaa91d2938f09671d81c5ae2b2c69f9a6483": { "jp": { - "updatedAt": "2025-12-02T22:57:13.214Z" + "updatedAt": "2025-12-04T20:16:57.901Z", + "postProcessHash": "f9810040c6cb233ee3417d84469a8b876ce9f61aa5b04aef17a277fc70242342" }, "ru": { - "updatedAt": "2025-12-02T22:57:13.214Z" + "updatedAt": "2025-12-04T20:16:57.901Z", + "postProcessHash": "5400f9cb834c2589848836aa91217e2813727288d9e7cf2403616d01b415fce0" }, "zh": { - "updatedAt": "2025-12-02T22:57:13.241Z" + "updatedAt": "2025-12-04T20:16:57.904Z", + "postProcessHash": "dcda78eba0a38c6b3370add3b416c02d1231d70b045b16bd47d8fd2d3b01450d" } } }, "15e69bdeb4774e041a333e57689381522781cd859797d0c321068053bd1ac55d": { "ecfdec0409be257ba876146227e2e778ae5f272c3aa56e2fbc1cacb35dd43ca1": { "jp": { - "updatedAt": "2025-12-02T22:57:28.898Z" + "updatedAt": "2025-12-04T20:16:57.977Z", + "postProcessHash": "a03bbca893e0cd2cfc23e79b0acc9ac5a546f066e051ed11f90bd798649de7f2" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.896Z" + "updatedAt": "2025-12-04T20:16:57.976Z", + "postProcessHash": "42e17945fd539f512397c478e7ae671764a4d278dad79a4f9259c6f403f5998e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.898Z" + "updatedAt": "2025-12-04T20:16:57.977Z", + "postProcessHash": "17f1486fbf6e93eb0b2487158866cb2b81ecfea2aa032e1434b4397c3dfc0cad" } } }, "2441b704f1648bc3443c9b054ec8854f3764cbbd77801b8747d10f0c1380e055": { "8946d488f9c46e6c14fad461ca002a664b5a2d6561da01977d53a7c95d31e4bc": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.897Z" + "updatedAt": "2025-12-04T20:16:57.977Z", + "postProcessHash": "8f62b78e498ed7fe41789cfee52083b63c341e0933f095db32f61576cc4b4f91" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.901Z" + "updatedAt": "2025-12-04T20:16:57.980Z", + "postProcessHash": "9119a7762b5f4a7e03e896bca1b587151f478a4ae522671beaf2a24f4369ee0a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.901Z" + "updatedAt": "2025-12-04T20:16:57.979Z", + "postProcessHash": "9bc173d87a8bc3e7b4901bb1609b4997e23f7014d4ee69bf3e734929093b0cb6" } } }, "253c517a16655bd1af2910bca26a946ec5b5257507a84e5c1083bc68edcbaaae": { "383175d865a3e8e5eeeec2ad520a6706a7fe906490a2365a6c124bbbd35fbaea": { "jp": { - "updatedAt": "2025-12-02T22:57:28.863Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "43aea793b5cf8221dcd2f4a8c4cd2d0e5c3e911a6561fb339de954a4b6cddf43" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.865Z" + "updatedAt": "2025-12-04T20:16:57.954Z", + "postProcessHash": "e281bec70544a0f39febe4e9f758385e1f2ad8fb9992882e8efa6ee9b07014d2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.864Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "3842d35f7adccd887bcb67fb8f4f4b19752611a82aefe1f6fa0ba0352eb41ead" } } }, "2c3512a703d975c2b75e7502a141cd8a3e8b086796e9dd5b92d66f1f2a58358c": { "f1c375550607f160ff41977c4e39aad3343f7094f427e196bc55d8e72c22aed3": { "jp": { - "updatedAt": "2025-12-02T22:57:28.888Z" + "updatedAt": "2025-12-04T20:16:57.954Z", + "postProcessHash": "d7ad8208511c7aea613680bf796cf42fda0a0b0ed15632825e46aa442b442fe8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.894Z" + "updatedAt": "2025-12-04T20:16:57.974Z", + "postProcessHash": "bf073d8e07ad0da3827549db6b8db6e750a01a12b27ff409b1231c5cad41f944" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.895Z" + "updatedAt": "2025-12-04T20:16:57.976Z", + "postProcessHash": "08cd4394425921666998d9d59f134304cbb5aa8acfa8a4d42a9c7bba4a2a7444" } } }, "371cb4852709d9ca0ffc244925c1336472d7b3607e49eb600409ac2634d29c9d": { 
"2c08ba9df01012e99f6db6d87ed3274138d3991bb7ef1df26cf943bbe938c83c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.885Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "096f16e681336a2ea06af0c13920feaa0259ad024599aced6a5ad48156e973e3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.891Z" + "updatedAt": "2025-12-04T20:16:57.955Z", + "postProcessHash": "8188d4725789c52fd30bb0931460a7c4c15860edab7d7a34df46e771292dd2bd" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.888Z" + "updatedAt": "2025-12-04T20:16:57.953Z", + "postProcessHash": "89442a5a1d2c716829a8486224becc217c1e2c21a3f1ab5cb0c6c8f679524152" } } }, "38065e7c3b022c9edd666529a176fb393cfb28490dd15161ec6ac71c2d9529db": { "35e6467692a1dada24e738d0c85e6530cad77f3c956b13d30d9734eec88985a5": { "jp": { - "updatedAt": "2025-12-02T22:57:28.866Z" + "updatedAt": "2025-12-04T20:16:57.956Z", + "postProcessHash": "e5b0a9566318bde340263da9186cb0126efed874aca6d986b671f8e0803ff73f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.868Z" + "updatedAt": "2025-12-04T20:16:57.957Z", + "postProcessHash": "b6c61470e8bced13b70862917e96d49e87acc10cf73b346070221ab105b3a065" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.867Z" + "updatedAt": "2025-12-04T20:16:57.956Z", + "postProcessHash": "78036361117d1443c66d679ddfc8746b0c3ccf4bb1dac9fb3a19efcd95dcc27a" } } }, "3c1dbc013406b1c31a215c47a6b9edb7f3dcaf68974dc2c38989fd26dd392af4": { "54d4adf41787f75b127c52923ea0abbe3e269714267d20e9e3f8f38afabbaf56": { "jp": { - "updatedAt": "2025-12-02T22:57:45.238Z" + "updatedAt": "2025-12-04T20:16:57.914Z", + "postProcessHash": "7d15bf1371fa44c6d79c4afc732bf47c58f481c2ab0b4c11e31d24aaddacd9ef" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.239Z" + "updatedAt": "2025-12-04T20:16:57.914Z", + "postProcessHash": "be55a85cbbd1c824bcc6c722b07a5053912eed0618093602fe6f47b9dcb919b4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.240Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": 
"e32cd339fffd144ed0bbfea1e68ec0baa7a383d6fe11c423fd31a25cf6d2492b" } } }, "3d0840c01249868fda2bd1e95b3f042cdf2c618bd34004df654106ee3d7fe77b": { "abd6f88511214360a8b3d4a7acb1e68208916aae6edb5e22025418320d437381": { "jp": { - "updatedAt": "2025-12-02T22:57:28.902Z" + "updatedAt": "2025-12-04T20:16:57.980Z", + "postProcessHash": "85bc529fbc04502daf2a78c397ab1fd3ab661a54f9121319afe9fe1b5adb1417" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.902Z" + "updatedAt": "2025-12-04T20:16:57.980Z", + "postProcessHash": "3b999a83f48325ad7464dfba413f252ab0356c40cb865a2eba33964e50b7609d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.899Z" + "updatedAt": "2025-12-04T20:16:57.978Z", + "postProcessHash": "538e6c09feb94701871a4f30b31c56e2d61c90ea913acde234ca18c891a078fb" } } }, "3eb17266fde17cf983c1426830939c4712a727fd7eeca3116f2fe348d7489f01": { "d7d5ceeef5f34571ef1e4827cc0966f80aabd85dc08e22be3a3583aa8cbe8a2f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.894Z" + "updatedAt": "2025-12-04T20:16:57.974Z", + "postProcessHash": "c35dbc5e683229e1a9c257c36cecb673cb533f68a7a0bf5d94377b3ab87e0cf6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.895Z" + "updatedAt": "2025-12-04T20:16:57.975Z", + "postProcessHash": "ab21c5d536783771bab280754e2184fa157b7fa5eba452ca8db4c5b9a8ce0e88" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.895Z" + "updatedAt": "2025-12-04T20:16:57.975Z", + "postProcessHash": "44cde42de95cbe4da1101d5b2d3663c95fa8bea423b98284749132e44b3ba039" } } }, "6bf7c7b51f6adc00dec7d08e30d4d16d28e682b5d83a2a9112cfe37d49b6b1ad": { "3faae72ad8b1f70ba0b49e66e434c0ca46525d70f145c05758337bee07817ae9": { "jp": { - "updatedAt": "2025-12-02T22:57:28.870Z" + "updatedAt": "2025-12-04T20:16:57.958Z", + "postProcessHash": "e1182a5db4208222fe15925dda4a654183f3a52c0d2c9f53478d3ab0f9d894f8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.875Z" + "updatedAt": "2025-12-04T20:16:57.961Z", + "postProcessHash": "0775977bbbd380da3ed7f284d2f48f946c39ff6118b563d7e0860388a6235875" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:45.243Z" + "updatedAt": "2025-12-04T20:16:57.957Z", + "postProcessHash": "25181e4fa5c3cb9319646e1a063aaf2e20853d2600aeb85383b453803e378180" } } }, "84bcc067be4c969ca78c33fa50f4efff4f2a2daacca3a415c5c86d0fceedd5ac": { "2eb8e19e71aa05266f701be373a387f43f2c6751db4a43fdf67169c2efcd862a": { "jp": { - "updatedAt": "2025-12-02T22:57:45.255Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "559fff0530c2e420ae7899c767f6d3383c92b16f2c0375a8058d75486b346dc2" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.247Z" + "updatedAt": "2025-12-04T20:16:57.918Z", + "postProcessHash": "fa78487273e51d713ad71633e3db7ec910f393faa4a4e8aee1c099c4b6dc64c9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.247Z" + "updatedAt": "2025-12-04T20:16:57.918Z", + "postProcessHash": "d33e83d1d0fe44bb86db139731cd7e30935e1dc292ce15ca994e16b9ea90317a" } } }, "85d48d85dd722310026bcee5e22617e344f2aacd9f8e9ec67d816fdb2703a37e": { "92cdab1f6b712fe93f35828375006e26f4c9671ddb601b08780bfafa9a16e196": { "jp": { - "updatedAt": "2025-12-02T22:57:45.251Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "3d6b48ada141a87f3111b73b43f409814b3e103054086bdc8bb2bec23df17659" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.244Z" + "updatedAt": "2025-12-04T20:16:57.916Z", + "postProcessHash": "3c9a0cdb268aa82b52d9a5edcd31748db9e78ac0dc3edd557728333cb1c00302" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.256Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "ca83d40fa47d4c27ad9fe958e697eab41d6ad1e0ef625bc90cd5d204952f4e44" } } }, "8d8defb12045ea6e4b617d20e5212582181c730d58236e675147eba18be53d95": { "c53f9e7ae5db8452601cd25c2b2d9ef7eb21620b4522dce992bc50fa2ca137a0": { "jp": { - "updatedAt": "2025-12-02T22:57:28.865Z" + "updatedAt": "2025-12-04T20:16:57.953Z", + "postProcessHash": "aac93fff6c3df97402d7fde9fad33b164adc39afd83d65053e66501dc50ac036" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.873Z" + "updatedAt": "2025-12-04T20:16:57.960Z", + "postProcessHash": 
"0980609b75e4f836dc1632d414e35f3f1d4c2aee9aa8e96a36d3563408aaacec" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.867Z" + "updatedAt": "2025-12-04T20:16:57.956Z", + "postProcessHash": "91d1383fcca67bf170f975e1e97172a5d2ab495825aa4542e42848a44bc36ea8" } } }, "a5236951d982490ee0af310dad8356d6d6153f403e1ee58f4ce2f1c0eda6a81a": { "c1b636cd594663b0ead8b055a758d770ff99552ec72b5c80bc4f4e7f722236c1": { "jp": { - "updatedAt": "2025-12-02T22:57:28.876Z" + "updatedAt": "2025-12-04T20:16:57.962Z", + "postProcessHash": "de4e9a14e4e1f6616fd2700b7ce589c60739b3e6c62e6cf766e6892d9b2450d7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.866Z" + "updatedAt": "2025-12-04T20:16:57.955Z", + "postProcessHash": "a93f02c1e27240b2f04444f5fab55783d4b23eb9f3a7d40b96cdcf220cbe253f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.870Z" + "updatedAt": "2025-12-04T20:16:57.958Z", + "postProcessHash": "78fb27dfd605d27bba57fa35520f2bdca35214a271d7a4db04bb0a7908c3a6e6" } } }, "d4c8c149a2085ffd9c567e330ccc163bc309990242e7b28d9b404761f935ba4e": { "37cd2110dc9673e6ecc3c129fd27e5e27a8e403857f4a2d17738870cab29a747": { "jp": { - "updatedAt": "2025-12-02T22:57:28.871Z" + "updatedAt": "2025-12-04T20:16:57.959Z", + "postProcessHash": "9a9dd94f0e695a6a9c72468d5ce2dfe05dcedf4211088f624ebf68a553222c55" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.876Z" + "updatedAt": "2025-12-04T20:16:57.961Z", + "postProcessHash": "ba33f9e3bf0ac8f7ee449b01aa97d88d62447cd36bb6ff214b5f3503f9cf9211" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.878Z" + "updatedAt": "2025-12-04T20:16:57.962Z", + "postProcessHash": "f05544f1c281ba16e0c2d373c20c34513c6ece3aef4b0c66a5bd0d8ba260bb2d" } } }, "d9be63b990bb973f2145b0fede5008f532e3efe16cc74b19670e7c30fb33cce3": { "6520ef784c8cb65030b31629babb751b59c90c4785704dd342ccc7196be05ee1": { "jp": { - "updatedAt": "2025-12-02T22:57:28.870Z" + "updatedAt": "2025-12-04T20:16:57.958Z", + "postProcessHash": "405e479ae2e28c7fa56238cdce80f48aade70aa71b6a76fecd7ff044bdbf341d" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.868Z" + "updatedAt": "2025-12-04T20:16:57.957Z", + "postProcessHash": "36479702c28d41b6ca10ad00a0d49ee353d8ec9b6dd1f0efd5f735439be5963d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.873Z" + "updatedAt": "2025-12-04T20:16:57.960Z", + "postProcessHash": "72da0ca21be7cce62cba92c6d1afe11f692f011f1c9f4c7b05d6ff522caa0c99" } } }, "eb49ba497d8db0f37c1298e8ea9f8be1b244b82d159157e8ede112df8f3c919d": { "4b16adf3d0e0aeab42ce3ab01c36acb9cff5de72d7b2802148d15353f359ea9b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.871Z" + "updatedAt": "2025-12-04T20:16:57.959Z", + "postProcessHash": "857024e574341293293eb04f891a61dce21681dce4e8f64a1e3937d01a3af76c" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.870Z" + "updatedAt": "2025-12-04T20:16:57.958Z", + "postProcessHash": "58e0f030bc8929f072f5deff5978b284cac3c8dd23bf3ea2d1399657f865040c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.869Z" + "updatedAt": "2025-12-04T20:16:57.958Z", + "postProcessHash": "57848e10599e7ccff1322b40db22559bedde4331aae5227f963fa50f293e44f3" } } }, "ed2113745ac93661c6152589c4303163561a52fecfcb50853a532d0c4d3c4c8c": { "91a36f6307074f27f0253a1a697372b4dbbadd48aaa0cb2381adb6ffad7ec3ee": { "jp": { - "updatedAt": "2025-12-02T22:57:28.872Z" + "updatedAt": "2025-12-04T20:16:57.959Z", + "postProcessHash": "5417ee55dd54a625f24fad6b3ed7da3088d2c769d157fb09f00061bc0872dc17" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.873Z" + "updatedAt": "2025-12-04T20:16:57.960Z", + "postProcessHash": "f6091e046a80bdf89d0a5bbbc23edccea6c1844140a7321f12c618e313cfc766" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.871Z" + "updatedAt": "2025-12-04T20:16:57.959Z", + "postProcessHash": "0ca2897bf935e1d9db3fdad309eb70073c61f66d2c4da2cf2fdc507c372ad2a2" } } }, "f7ba33421a28aa3de7f23177b5e40153a4f0e0efc37a2106a3e8b5708fe45005": { "4211afcb557ca12ed79b2828ba3000b6bfc93501ef7266a7012e6f73ca63a27b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.868Z" + "updatedAt": "2025-12-04T20:16:57.957Z", + "postProcessHash": 
"0dec70d780b4d80f5c8f8a862938ee148db5dbdf69c68b4cc46609acf1b87699" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.875Z" + "updatedAt": "2025-12-04T20:16:57.961Z", + "postProcessHash": "208734603e447e0f5f44669c876232ea99cd78245be78a57be60add29deb58c1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.877Z" + "updatedAt": "2025-12-04T20:16:57.962Z", + "postProcessHash": "d8e26471246a2324809be8ac33f56368a38793af40b731d579bc40b45145b082" } } }, "fae26c9194eff01a95214ca36a03f887f3e266e90a64a4b894ad55f02c179bb2": { "7386d025ae2748ca0b87ecef00be245390faaaae8fa265f80c33e3480d854a49": { "jp": { - "updatedAt": "2025-12-02T22:57:13.237Z" + "updatedAt": "2025-12-04T20:16:57.901Z", + "postProcessHash": "ea4507c46597c071a2f386822000ec4b51c0c0e5864086bb1649634b53db2d3f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.252Z" + "updatedAt": "2025-12-04T20:16:57.921Z", + "postProcessHash": "2a40b160a1320b043fdd890a5d4dd33215628bcf9bceaea64d5b2d744064e9a9" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.257Z" + "updatedAt": "2025-12-04T20:16:57.947Z", + "postProcessHash": "40c483395c3e7c8ad3a0a20970f5208d0775180d2a017c93615d7785c231aca3" } } }, @@ -21354,637 +26119,784 @@ }, "ef588f2b6385c55726c920e57be588ac227d274976872debd444eae9c0c673b4": { "ru": { - "updatedAt": "2025-12-02T22:57:45.240Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": "139f1f8ebaa1b4f043784be40ddf9daef8f3a3f45d35dd21bbd7f2e7f17ce395" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.862Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": "eef307f96f009426144636796ea74e077a85374e001f8e1d96d2bea963cdd1cf" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.862Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": "0a81e0d72c5b01b4efd4b1253cd8bf7144f47e3921da750fff0b7df58216bbf2" } } }, "04c615906de14bff138af4cdd85c3c07b4fc5433296761dca010e8ef60f78e93": { "91810a26e7bbbe9ffcd2f092006cc98930eec1fb41bd4802d4297bf1f45413c7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.902Z" + "updatedAt": 
"2025-12-04T20:16:57.951Z", + "postProcessHash": "2a1090308e19f6be11ce6b386ee6ee0823b5f5f9f9b7f535961c5382cb3bee64" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.930Z" + "updatedAt": "2025-12-04T20:16:57.977Z", + "postProcessHash": "1134a8c5dc114979dbbaea6953657bff6e21c94ef9ccef6e998dda29d88894f8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.916Z" + "updatedAt": "2025-12-04T20:16:57.970Z", + "postProcessHash": "2b29f96541147fa2dfff41017c4d38d354afa44612ddc93ceb6e2708ace74199" } } }, "1580309aeb8bf89a02431ce4e3958695fd0114d89488a627aab1a37097044adc": { "a04bc210be5bcbbe776786b33eff75770784c182f110822abfb00ecf17ff032d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.900Z" + "updatedAt": "2025-12-04T20:16:57.979Z", + "postProcessHash": "3752c7865eb0b53f55216ed4cb69725772774425e14db91467d5ed492d28dcd6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.890Z" + "updatedAt": "2025-12-04T20:16:57.955Z", + "postProcessHash": "b6c4aa915e125d3abeade6a8b010ea7bb778a9c888dd1e33719cc5af04f43261" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.897Z" + "updatedAt": "2025-12-04T20:16:57.976Z", + "postProcessHash": "7ebfaf32045ba4aa2a7ee03731f510b1c3d201fc6ae2e8fad44b1d6fe6309894" } } }, "176150c0e3d077975a3bf364d1abf67e535d6c7aead2f176b61c34aca79abd59": { "844838ff96f065aabb06386cc366cf66f183135f983db2d969bbf61b47c89398": { "jp": { - "updatedAt": "2025-12-02T22:57:28.884Z" + "updatedAt": "2025-12-04T20:16:57.968Z", + "postProcessHash": "dfcc5498de6dfeaa3915e95305314aa56a44319eea291164fafe7245448c0483" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.883Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": "7f720cc301dc8ad23304ebb8a6ed16df32277fbb39a229a9a794a314e86397f8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.881Z" + "updatedAt": "2025-12-04T20:16:57.950Z", + "postProcessHash": "a45b55eaeb7a3f687962780cde27f9a04639513386923b7e5473528fbc03a14c" } } }, "1a55a8d8cd9d21c74eaa692dca8aac6491f16ba3aee28f43616128e2d9ef200b": { 
"da55650acb4be1e891fe2ae5f1756740a01821cd992f3a8ca4695951fa27e52c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.886Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "effc9bead930e7bb1eca17e01c80d50d3453906cfd5e3be62a1b08c579a95917" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.884Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": "54f13f1f3e6e9c0a48f2f0ecf2662c95fb214782fa0239f41fb2952c33561624" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.889Z" + "updatedAt": "2025-12-04T20:16:57.954Z", + "postProcessHash": "28f1708a9b997a6253c73986620664075db4700929e26a80df9ecb8615007307" } } }, "1e61b3b890446ac615cfca4d43e26692a7bc7544426233b862918b5d3fb722da": { "68327a573af2128ef9f8b75c6d3764adaef0d6d6a2518cca36e25acebd3d72ff": { "jp": { - "updatedAt": "2025-12-02T22:57:28.893Z" + "updatedAt": "2025-12-04T20:16:57.956Z", + "postProcessHash": "d8ccedaaa4b0014d9ac7b08a3c8ebab3a9c06ed4eedabeed8ee0f8b04671f6ed" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.892Z" + "updatedAt": "2025-12-04T20:16:57.956Z", + "postProcessHash": "dd72d0a86dcbbf6717bfbaf93203244bfe1ca0db2d99c44988868099ae307ed8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.890Z" + "updatedAt": "2025-12-04T20:16:57.954Z", + "postProcessHash": "25005b34e41bc623a47e2de56ba027387731ece9827e2061dc9a478b4bc9c6a0" } } }, "2365f342aa73537207eea12c5ea5e59b84982495f018fb65d762d8ced77d7432": { "303a2bb1adcbfc7e719c1aac71a6de6454f8a1ba771cf607483f97b277db1bd4": { "jp": { - "updatedAt": "2025-12-02T22:57:28.883Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": "e9a3954d8156e504747867473f52e9ca2dc923c827454def4fb1fa39a3e5b168" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.881Z" + "updatedAt": "2025-12-04T20:16:57.950Z", + "postProcessHash": "66ca2d64baada5fb5cafa50a8a82d0b85a451c114aff8e0e505706aaa69ac23e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.920Z" + "updatedAt": "2025-12-04T20:16:57.971Z", + "postProcessHash": 
"e81685ad9f2005b647782af40003aeabd47d1b81ea164c60347218ebe4b10658" } } }, "361b5b1d32de2ebb3e52e8460adeb4b22ec4bc8ca04ceb0e717fedc703a31195": { "10b62158d3216eb8065dd2ff7515e8754275c4c7f5c6d4eed8d2ede3b37286ee": { "zh": { - "updatedAt": "2025-12-02T22:57:28.926Z" + "updatedAt": "2025-12-04T20:16:57.973Z", + "postProcessHash": "d810624cf7423b29ff4d81c67231b4fa0491a15ad5ca328d0c417dc74ad36607" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.927Z" + "updatedAt": "2025-12-04T20:16:57.974Z", + "postProcessHash": "a3242824c76148e6858ef4eb30666ebafe25d4f252726ae0c6175a4d8f8e29c6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.927Z" + "updatedAt": "2025-12-04T20:16:57.974Z", + "postProcessHash": "2d7216a06ef21322ac07fa7399875ee367fc3eec6ca069398ad32ff06f787c39" } } }, "3e3f9cdd02598c16b281b93fb32c30b1be85298c6b705aa31bfbce0e5880e103": { "e9242354e112109aceb1f980cb5bd9997a81807b4b2b9ad51d2e395d6925d743": { "jp": { - "updatedAt": "2025-12-02T22:57:28.880Z" + "updatedAt": "2025-12-04T20:16:57.950Z", + "postProcessHash": "a1e6c645176e39da205781de5e5b82e52700f81cc95963109308d15b29ecfee7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.920Z" + "updatedAt": "2025-12-04T20:16:57.971Z", + "postProcessHash": "169481fca9e352f90c0c1212e90690e9a4b701b8b33a9f6bee037d757e46fac6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.919Z" + "updatedAt": "2025-12-04T20:16:57.971Z", + "postProcessHash": "f19650aebbae837c804d88a04fb43ab808e27170f9d660ecd2ba408ab2db1170" } } }, "47db68ab348b969e40c4783275dbc11e1b7c3f0d1e0f7993066d41cd80abc360": { "eb30b9830f751c6d73c63c4e71376e8e862a1b79d67ead319e3a93512cfb332c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.899Z" + "updatedAt": "2025-12-04T20:16:57.978Z", + "postProcessHash": "e275018c47ac0dbbe36610c0f9f68bc798e0b654f6e613a735ffb5527cd42ed7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.896Z" + "updatedAt": "2025-12-04T20:16:57.976Z", + "postProcessHash": "cef7ad5cd5e09daa97d9a8194a604a8b58b98ba83a2b298c85b1c80cecadf3a8" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.896Z" + "updatedAt": "2025-12-04T20:16:57.976Z", + "postProcessHash": "38cbc736578d66c521d0186ed302280ddb55455f3e2a5a86d89a122a0a499bd3" } } }, "49e360371f0bc0d697298f4470438952e521fabefd1b9e98218955be3cdbbcc0": { "974e376db0d1f6bc3a3c2778b18c785b8cbb420855a07c1b3d0cfb100fdf6562": { "jp": { - "updatedAt": "2025-12-02T22:57:28.903Z" + "updatedAt": "2025-12-04T20:16:57.964Z", + "postProcessHash": "7a8fca3087fb12294ffae6bd5f755bbfd4859fba68d88f67ae8475906a11a345" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.916Z" + "updatedAt": "2025-12-04T20:16:57.970Z", + "postProcessHash": "fdeb776de39e39edd8ee304e483c358eb4ab48a7fb4f81d2adecf6593f83a217" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.904Z" + "updatedAt": "2025-12-04T20:16:57.964Z", + "postProcessHash": "84fceb0d3971f255f49edfdba3398df1ccc4bf29049df35cd7945e3a030caf5f" } } }, "58cd3f4391882ce670046b8d82826c3c127fcee3b6aa2afc15ff717cd3d10d71": { "5015c123581af2b4d332b12ea65e8e6ccfdf0a8a5c76d9fab3a9a30aedfe8767": { "jp": { - "updatedAt": "2025-12-02T22:57:28.893Z" + "updatedAt": "2025-12-04T20:16:57.957Z", + "postProcessHash": "591179ed9f26d8c1fa32c8e31ece93ab9055d0fea537721122ea2707a1463022" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.894Z" + "updatedAt": "2025-12-04T20:16:57.974Z", + "postProcessHash": "97c8f03440aae761b3ba00d1ecd4cbfc5cb5f9b7593c69252fa595dbee01ae11" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.889Z" + "updatedAt": "2025-12-04T20:16:57.954Z", + "postProcessHash": "932abb07d594494ecacd915de1d9c4fb79871976556b0029797bfece1ac15346" } } }, "5d6ff265e282770018f2a3801b1d623cdca059cd587edf9408ad75b7f0427f29": { "7bf23f00d17d99986e4f0927c2dad27c8d9b95293b0f84a3bd9420e9a2cd90c4": { "jp": { - "updatedAt": "2025-12-02T22:57:28.868Z" + "updatedAt": "2025-12-04T20:16:57.957Z", + "postProcessHash": "d9ce87c4939decd0e700415f899f83f8901a3bcbe315ce65b6119467479cd426" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.867Z" + "updatedAt": "2025-12-04T20:16:57.957Z", + "postProcessHash": 
"75eb8d64c4561bb70642e8e7f3b0a17a29702d7ed5d760aa489aafe4a75ffaf8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.865Z" + "updatedAt": "2025-12-04T20:16:57.954Z", + "postProcessHash": "5d510d09e1c66161714c4824958e919cdd8ce3cb73c16132b745a3c275a0099a" } } }, "92e00a40688842f014f868586a36e069a52b6ebff0afa9668aa0116030f149f7": { "507162d0c5f858cea0fe1b5f4cfb599166143072817567489682c950f1313b5a": { "jp": { - "updatedAt": "2025-12-02T22:57:28.864Z" + "updatedAt": "2025-12-04T20:16:57.953Z", + "postProcessHash": "f1c5f7d435ff322b01d3cdd0aa445f8471fcb516056f9258e49a3cc06aeffb7d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.866Z" + "updatedAt": "2025-12-04T20:16:57.955Z", + "postProcessHash": "dccb52874d3bd89a3767117080eb13a26a60e692dcee7c62f36a8d975885c4bb" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.239Z" + "updatedAt": "2025-12-04T20:16:57.914Z", + "postProcessHash": "b4aef90cb7abcfaf79d16b01ce5094b5ab27549ef2cfd0b27011e1e803d1a0ca" } } }, "94305f7921348204002c5fceee250d8a329b22e97297f5de432297ca6b6ce190": { "68e6800c1c85abed9956b13cc6c1349b8178fe6cfb23ebcc8aa5475efd99f8e7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.861Z" + "updatedAt": "2025-12-04T20:16:57.950Z", + "postProcessHash": "71c24855c314f95e77bd52d5b5fa4f3681c547c5c9746488462131c986893156" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.898Z" + "updatedAt": "2025-12-04T20:16:57.978Z", + "postProcessHash": "222389a28cdb74644d904b69f237c945cb5b0ec2a13468ef137ea85b70db676c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.891Z" + "updatedAt": "2025-12-04T20:16:57.955Z", + "postProcessHash": "642ae1e256456eacb5a1f4f5f5c968be83c8279208fafdb34bae7b84d3bfaf9d" } } }, "978146b52bf1385e45bd326ef044217c2dcdc8bb47040c12f8ac16274fa8addc": { "229b20a3b9f2e01d63cbf0aa22d459b44b4535cff9593d53b6edbfdd28847fdf": { "jp": { - "updatedAt": "2025-12-02T22:57:28.885Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "b9ca39067947cef786dbd137409b88594b9d2f32977f4f40ffa8b59aa773fb6c" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.887Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "bc00c70892679b9adc8a4223dcab7537a42c1bcde9b98bbad7e5de779a3b1d93" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.889Z" + "updatedAt": "2025-12-04T20:16:57.954Z", + "postProcessHash": "7ac8c3f71c8511ad69ef17a67ed998ee9d4fab1d190947413c40a9fe7bf707b2" } } }, "b65057e512e1d5ba2b482da61eb41e96a0451b3633379d8bfcd74a74bc5c5255": { "d590e32dca83cbf697fbc724c2b52de9f53b427e55f5e82add0e7c98c670b72f": { "jp": { - "updatedAt": "2025-12-02T22:57:28.877Z" + "updatedAt": "2025-12-04T20:16:57.962Z", + "postProcessHash": "cd3957b6edc9174fa530f6540701f7de4ea76598d72f34931de299793e0e5363" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.877Z" + "updatedAt": "2025-12-04T20:16:57.962Z", + "postProcessHash": "e1f8b2419bcad2b4edb12090ff5a2b9290cb16d1a3fc70fc060aa10c4d1fcdb2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.876Z" + "updatedAt": "2025-12-04T20:16:57.962Z", + "postProcessHash": "259d58e26c6bc32d85257157d3f93bfff6a2467960b0f4d8998049f7bd8d0ac8" } } }, "bbc79010b259fcfbd187a6891a0f4fb7b780904c181f0266b6753f4d179bbd0b": { "9124cca07daf9271adc7984d01efad4c1a6d47441c45c6be540d3204e5502916": { "jp": { - "updatedAt": "2025-12-02T22:57:28.863Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": "87704c635095c368e5f89e5ed060240042cc2c41f3b91d595e2f10de09f53111" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.863Z" + "updatedAt": "2025-12-04T20:16:57.915Z", + "postProcessHash": "234b7328cc115ab404dd2808d118530081f93e0319a6d00f93a54177e379a11a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.239Z" + "updatedAt": "2025-12-04T20:16:57.914Z", + "postProcessHash": "0ec7cdac4c09347144587a641e279b7facccbc0f3654b68637b003460c370984" } } }, "c04de4891f93a0ba91486fc9aaf76205c21818b034acf58a753695af7332b3ac": { "783554b75229a238156945270a3356288601a5016510ae7113ea4d4f746a89d9": { "jp": { - "updatedAt": "2025-12-02T22:57:28.899Z" + "updatedAt": "2025-12-04T20:16:57.979Z", + "postProcessHash": 
"32043e15d00a4300bb37977e199d7ca3d5f637da13d6de9d95bc4bec3579e347" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.261Z" + "updatedAt": "2025-12-04T20:16:57.950Z", + "postProcessHash": "eccc6675486c6da863129b95b0828a39beb61bce814751b1d449c8f7acfb7f9b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.900Z" + "updatedAt": "2025-12-04T20:16:57.979Z", + "postProcessHash": "0928567d5c1e5356bd7b0d0e3068bb8cc53b42f12202862bec62273b165e9c58" } } }, "c5ee15352746ad76714767dc88162427e77db4c02b35d0258b67bb1a35882ab6": { "1e07570b89f9d1753c7c6fa5c9dc7f96cd00626361968edca1ee15a898637fe7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.874Z" + "updatedAt": "2025-12-04T20:16:57.960Z", + "postProcessHash": "9ce72dba4c3c592586466b44a6d9343f95bdaa488e54aa97200ba4311566f9da" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.879Z" + "updatedAt": "2025-12-04T20:16:57.963Z", + "postProcessHash": "dad2b1d8dc4e8992de3666ad6d01d181bed3cea27bc574277da6af804ed9939f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.878Z" + "updatedAt": "2025-12-04T20:16:57.963Z", + "postProcessHash": "8e91c3a574b898640eb2380fb15073ef9148917d621622dddc82c38ae60e13d9" } } }, "c9f381cce8333661e63bd1e01d8c4f1774748ca4686351ffff148b88e9e703cb": { "e4a9139614a7f11d3b10e77e31631df6b358e364a358b51b7e9d35e161a62d0c": { "jp": { - "updatedAt": "2025-12-02T22:57:28.887Z" + "updatedAt": "2025-12-04T20:16:57.953Z", + "postProcessHash": "705053e27bd76029ee3cda704e216687410dd156f266f6392976505d2c80e060" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.892Z" + "updatedAt": "2025-12-04T20:16:57.955Z", + "postProcessHash": "00480164cf4bd4f91ff5f2ac9ee361f4a789e31c01e5b035358be468525c509a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.887Z" + "updatedAt": "2025-12-04T20:16:57.953Z", + "postProcessHash": "87449791129ef10364b3c39e8b80392b1834f45c3c2cf456f739f33de0fbc29c" } } }, "decba6568d82bbae43bf10ae33288e0bb54460fab2d76fb910a5037c036d8b31": { "b3961ee327c6fafcf4999b1abd14b74444d3905528c75bc8bb8c2bfbefbe9765": { "jp": { - "updatedAt": 
"2025-12-02T22:57:45.260Z" + "updatedAt": "2025-12-04T20:16:57.914Z", + "postProcessHash": "8ca57091aeb944193c91aec088d9b72009caafa29b96d4ea385a375d7f96cd2f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.865Z" + "updatedAt": "2025-12-04T20:16:57.953Z", + "postProcessHash": "9239d03284dfc27647b5182fc083c43ec90423b9e967e9542c487d96a8d7b2d1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.864Z" + "updatedAt": "2025-12-04T20:16:57.953Z", + "postProcessHash": "0ee8e82715a4b02b696033e738e4577e6d4fa96011ebd7d3717e528031a29a40" } } }, "f8499afd2bca127eb328fcbbb1d86926a4b6ed99899c57bf912940e11e81fa53": { "57d37a6031f92bd82e315b49237fe134b84352ea376fc2fb6ae7f50d8a63cb03": { "jp": { - "updatedAt": "2025-12-02T22:57:28.872Z" + "updatedAt": "2025-12-04T20:16:57.959Z", + "postProcessHash": "6493b503e9c7d7db7ae546ad8b9518ab546c59c1fbccec074965e8e2ee3c17b3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.874Z" + "updatedAt": "2025-12-04T20:16:57.960Z", + "postProcessHash": "061a487b8433820dc5e9e0fb286d96cf7c73c5af12c6f8017455431a0ef8d63a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.869Z" + "updatedAt": "2025-12-04T20:16:57.958Z", + "postProcessHash": "81357eb326959a9abe7459c72e3455dd2aea4cd21fe7de5fc8ec2cc7829c0c36" } } }, "00801f2886d2097d3f3fd23c2495271df83abfb95d59a9c9a2b4a905b8ec2d19": { "20cf324bd963db14b9a1a4346dec4811329f6ebe733b3eeeaba7616399e4d20d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.270Z" + "updatedAt": "2025-12-04T20:16:57.993Z", + "postProcessHash": "2062274e89a9bf7c25c6d3f2d0efc1541cdce2c3fe2c4ba3254b87e378aef52f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.268Z" + "updatedAt": "2025-12-04T20:16:57.993Z", + "postProcessHash": "ad7a25248430104921d117f8785046fc45f16f621ff898fd4917759fe5c1d99a" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.274Z" + "updatedAt": "2025-12-04T20:16:57.996Z", + "postProcessHash": "e6569a6cf785adba8128ebaf2dd593dc6c57bb37a3ecb7866615729c91ecf8e9" } } }, "0d7f085589a701521498ae4f2032eff79402e3efaae1bf069e42f610cc1714dc": { 
"65b6c024a83d6653e55cb1503b9816b66a3ad761b629019961fe3f8f698afb45": { "jp": { - "updatedAt": "2025-12-02T22:57:28.935Z" + "updatedAt": "2025-12-04T20:16:57.968Z", + "postProcessHash": "cdc7cd4b6ec4dbc9ee6185c15fcded5ef93e26b7103eb330a278bbcbab74b21a" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.262Z" + "updatedAt": "2025-12-04T20:16:57.989Z", + "postProcessHash": "f43070632472ba49b38dc89f947add96ca933bdaede54ad1d7f258e06a82115f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.266Z" + "updatedAt": "2025-12-04T20:16:57.991Z", + "postProcessHash": "212fb0c4f57097656f6b239e1b0f6afe7e70e3fa56f20d6e48f158f2657779ed" } } }, "1b24b02c3b8b44ef65014e1185ac74c302c13f1cd510990f907cbfb6af75565c": { "153f09d0dc6e1710e949f8df69bcf6dddffcd2f29e7b48e271192abe56431443": { "ru": { - "updatedAt": "2025-12-02T22:57:45.263Z" + "updatedAt": "2025-12-04T20:16:57.990Z", + "postProcessHash": "bbb65618b78009fc62eec1238d14cc907afa2e8ba85e015b7690a17e2d53f03f" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.261Z" + "updatedAt": "2025-12-04T20:16:57.969Z", + "postProcessHash": "75543c2f6ca499eaa7b068a9ac524ba93e4b9aa63c1af6080ded4d7b17c289bf" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.264Z" + "updatedAt": "2025-12-04T20:16:57.990Z", + "postProcessHash": "548724a7936970faf6f84f22e6e4fd12f4d08690871ddffd5cb2076225b4f092" } } }, "1d3ae6305b61a5daa4272a2fdf5bc89befcde6b3c3cd8ac506e835ebca98d2ce": { "7cfed78448288b1e3ce81098eb348b43d832571045d5f68b5c05727141c3c15b": { "jp": { - "updatedAt": "2025-12-02T22:57:45.279Z" + "updatedAt": "2025-12-04T20:16:57.999Z", + "postProcessHash": "5fa6c43c5d42bdc0ce133a3f1dcf51886e56607350fab6c9d44a5068b3b49f9f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.276Z" + "updatedAt": "2025-12-04T20:16:57.996Z", + "postProcessHash": "24b3e741d8a1460b7c9688333f70c9a680c19309fc0008feb501a472c33edc6e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.276Z" + "updatedAt": "2025-12-04T20:16:57.997Z", + "postProcessHash": 
"238f6416bbf77ebaef9e5d212c05ce1d0ed8653bac812449cc66d36e53b9c4fa" } } }, "221df8cc4bd4c59f72e569ef5c6a2072eeed57f90181a227e34cf111231768d7": { "c38114543f910f77d0865008910f7e9c6395ef18ca1ffab216e250ed274cc4f4": { "jp": { - "updatedAt": "2025-12-02T22:57:45.269Z" + "updatedAt": "2025-12-04T20:16:57.993Z", + "postProcessHash": "4f5ade0138784f61cd6e20797ab15e605c1cf3d35f8f34e96de3ea8ac8d05e97" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.264Z" + "updatedAt": "2025-12-04T20:16:57.991Z", + "postProcessHash": "9761a64d4b7aa1e83ac758c888cef83c787ab44964412f14a8f7768e4297b2ba" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.261Z" + "updatedAt": "2025-12-04T20:16:57.969Z", + "postProcessHash": "c7dfd7c5b1e3fc9cbf147edd08072de3e9dd7680bcb70efd1e0374e7ad0bdb10" } } }, "2dbf7fe23f006182359a9db8a0997fc25605a170bbf502414f10a4d0445f3741": { "a3d059702798e7975e6104e13702831f09dab10cf354c44b13f40788c8b697a6": { "jp": { - "updatedAt": "2025-12-02T22:57:45.265Z" + "updatedAt": "2025-12-04T20:16:57.991Z", + "postProcessHash": "21e4b25901ffd5be77b4ab569d2602b4ddbbef84b0f88352518ae9baf5ae398b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.271Z" + "updatedAt": "2025-12-04T20:16:57.994Z", + "postProcessHash": "823900b1283282d4b5275f6f9b9c553f331ca881d4a04df6dbda5dc41f6ceb98" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.272Z" + "updatedAt": "2025-12-04T20:16:57.995Z", + "postProcessHash": "48dc628dc2677794e6a054092ba912cae0653f3546c52bc480b64b7443ee9f8d" } } }, "36a66d817a53f3419052a70bb1815a864c606b97c1626029b4822b58ad82c762": { "3d820438e1d508017cfc5d486b3974a03a6f0475286a479dfda2cf575d825e99": { "jp": { - "updatedAt": "2025-12-02T22:57:28.925Z" + "updatedAt": "2025-12-04T20:16:57.973Z", + "postProcessHash": "5f0e4aba98abc592b9726e3eabbb9b43f69a85a45f87ce5b53389a47e822e9a3" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.924Z" + "updatedAt": "2025-12-04T20:16:57.972Z", + "postProcessHash": "04cfd59b8aa4ea2b351947cc409e0ed5592714b93559f88727e31871f274eff4" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.925Z" + "updatedAt": "2025-12-04T20:16:57.973Z", + "postProcessHash": "084cad5340868e39d05fab1ce8d073367dc8092b3785ce2f084ab01176e23876" } } }, "424e2c03bd385b0d797e5742bd8f37c67a6d1d9878707ee374ab10fc13b79f63": { "a39308aed08887cbbf1b7ddcfcc47a901be42991586b7b0c15366672b1a8486a": { "jp": { - "updatedAt": "2025-12-02T22:57:28.931Z" + "updatedAt": "2025-12-04T20:16:57.998Z", + "postProcessHash": "fb6141666abe56b207fa9666b4c5df770e8d2bfda88ff708e31950018842786d" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.932Z" + "updatedAt": "2025-12-04T20:16:57.999Z", + "postProcessHash": "1518fa434b60154809d2fc7c076fd287a61b7be5b2962ed4a9647e9954d1fc46" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.931Z" + "updatedAt": "2025-12-04T20:16:57.980Z", + "postProcessHash": "3c23bfe9e39f1bfc385d6586cfd5a345a7e90968bd612191bd30800d2f2f88af" } } }, "43e8a84fbf33b51194a80d037248d79de4e1436f50520586eff76e3d3f2af304": { "f19d15b264b03da92de17398ccc6c09d43af2b4d24b0b7c5e1a05393cd4b3fa6": { "jp": { - "updatedAt": "2025-12-02T22:57:45.272Z" + "updatedAt": "2025-12-04T20:16:57.995Z", + "postProcessHash": "16a5d961a77bfaf265b5e159fdf353c7f22f4392f89aeba8f9d278264a803589" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.274Z" + "updatedAt": "2025-12-04T20:16:57.996Z", + "postProcessHash": "a17ba46561227fea7d2efe5f77cb19bb73a46c146c8ce441258265b7f8e46425" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.929Z" + "updatedAt": "2025-12-04T20:16:57.994Z", + "postProcessHash": "739f2c7a5aa3bb011da67b9ed9f60d5c3b46a688627fe5c5bb944d23d8bf7c46" } } }, "519d5b1a64a00744511c1f3e47df4f483237ba323bcad90f4c2eca4ce9a37794": { "f9c93f24237acc26028d821a685b28dcc71dc3b5ef28ed3f611cd0074fd7d695": { "jp": { - "updatedAt": "2025-12-02T22:57:28.883Z" + "updatedAt": "2025-12-04T20:16:57.981Z", + "postProcessHash": "b8a73a50f0be9b04552097c496626087e4371b6f0b1f16681756b5b1440ff4c8" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.884Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": 
"88d237d75338bca796af5bc25eae4aaeb01972e128716c84937e8c0f74229fa6" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.891Z" + "updatedAt": "2025-12-04T20:16:57.955Z", + "postProcessHash": "8ef9d2f776046709e0ad74fea167eb08e9d1125efb24300f1121477fa7b01111" } } }, "595165a4c673965a825c2516944ed6da341d1802ba4af0d1f8e1442aba248fa8": { "8396ae84019ca44433161f57c91a29f40404e3a589100e8cca8e8000206607f9": { "jp": { - "updatedAt": "2025-12-02T22:57:28.917Z" + "updatedAt": "2025-12-04T20:16:57.970Z", + "postProcessHash": "03a3154ba4f99d31775c62de341551a0904a15612a5e106ecd610731c6c89b68" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.903Z" + "updatedAt": "2025-12-04T20:16:57.963Z", + "postProcessHash": "a7301fc43a16530f9c0772c315c12c3631b968b71e3ca09e3fb2bd633d9a43a3" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.903Z" + "updatedAt": "2025-12-04T20:16:57.964Z", + "postProcessHash": "9b8d80857be18ce53140734003199f2c285f1ee4de1f4cbb828a0cefce47b3a0" } } }, "7e455500c000c9cf2e608bee5ea8ceda40748f754e86eb2dfa6fb808fff46087": { "bad6198b79924e96476294bbd990cd527edc29dacccf3bc3408a2a70258e5f0b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.928Z" + "updatedAt": "2025-12-04T20:16:57.975Z", + "postProcessHash": "7536119e8bb960242d159afb7e219109e33183f9b24941c9db2f6f6d1769adda" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.928Z" + "updatedAt": "2025-12-04T20:16:57.975Z", + "postProcessHash": "7788b3d1d48180baf7b8b1a6b2fde46f49dbf5a562ea3fd92cae27f28f88678b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.927Z" + "updatedAt": "2025-12-04T20:16:57.974Z", + "postProcessHash": "25a825feaec0d17c57564ff96ca6b2cd00227601feb528b6fbb90954f8af8adf" } } }, "976d169b47215323ef4cab38c850345f280e34b651c35ee7a506d07e901ec587": { "91662735bc3f121c2f531adc960066dfb766691e7210f186029e52bc32f80b4a": { "jp": { - "updatedAt": "2025-12-02T22:57:28.917Z" + "updatedAt": "2025-12-04T20:16:57.970Z", + "postProcessHash": "25bf6c4210e412fe0dc57e35bd4d77f2104703c3fc971bb9e0027b363e6ec3a1" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.918Z" + "updatedAt": "2025-12-04T20:16:57.971Z", + "postProcessHash": "15731bf08b4141dc85d2d9382827760110448ff2a5a2b6e3337ebf6d606e86b5" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.921Z" + "updatedAt": "2025-12-04T20:16:57.972Z", + "postProcessHash": "1a313c2f807ceb551d36fb9f6f8a2da70ef9522d668db3f6808a80eb8e3c4dee" } } }, "a2d877584716bec8ddf5f64a0ba5fd7a0a6b67f9077bed82dda87ee72bfffb8c": { "8d6d45dafb5a931c179b3f202896d1e34592ec42eecee9e2f9c96e83bc4cc999": { "jp": { - "updatedAt": "2025-12-02T22:57:28.862Z" + "updatedAt": "2025-12-04T20:16:57.950Z", + "postProcessHash": "ef2cf38acfb532a1def6af379c4c7f59b8e3e90f17aa0fafc450b94604a26b82" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.892Z" + "updatedAt": "2025-12-04T20:16:57.956Z", + "postProcessHash": "cbfcaeac9e3f730a1cebb16b7b423995a7925f5f9536da2002b314d7f1ac2bc8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.890Z" + "updatedAt": "2025-12-04T20:16:57.955Z", + "postProcessHash": "472e3fdec26971246627471c587a7ddfe9acfa5ff246a718d1ae1528dd3b01f1" } } }, "a5c7b243af8ea45f4cac1779bcbf974f63ad2778759dea05635eca542de84b9b": { "d7c29ef5219d22555b84953c119240e3967ba43e9caba2c80886d14046eb7fc2": { "jp": { - "updatedAt": "2025-12-02T22:57:28.881Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": "9e8b61081dfefe4cf26f5af9fe7ccea595ca71de37b9f717eb56a4467986fdb9" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.904Z" + "updatedAt": "2025-12-04T20:16:57.964Z", + "postProcessHash": "baa386238c29cedac978290e6bf3e1b4fb39b7120ae275651fa513cba1782775" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.905Z" + "updatedAt": "2025-12-04T20:16:57.964Z", + "postProcessHash": "67385a38c74c6206e6d6497e2ce040344e9f75d1af9135a37a172bf28cc0e9a6" } } }, "d20916d14ade0ee04f39675be5d395d4a057b6b2238ab20a85bf425d1e48c431": { "1ba41582c1e8ebc8a0609ed6a4c503280d425de63584ec900b123ce79c518b7b": { "jp": { - "updatedAt": "2025-12-02T22:57:28.888Z" + "updatedAt": "2025-12-04T20:16:57.953Z", + "postProcessHash": 
"b37bbf8f70d4ff381bc44de27f923cda8afd8932af6ab0ccdf56f247e157dbc3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.259Z" + "updatedAt": "2025-12-04T20:16:57.914Z", + "postProcessHash": "ef09090d0b1da406f30f2d2ef88e5500baa3004a3fd50e8c52cdca750ea19497" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.260Z" + "updatedAt": "2025-12-04T20:16:57.914Z", + "postProcessHash": "47578b12eab28e0038d122bb7cc16afb06ce4397f5509efb013a7ac9e72ca880" } } }, "e4ba3f71170ffd976d7ad1047d155c73155719b1d252f0fe0608a02ffa3d64ca": { "a6ee74f4a5fa3c471abd0d72cdd9151b4614ba229d109564ac3a2e5c5454bd4e": { "jp": { - "updatedAt": "2025-12-02T22:57:28.904Z" + "updatedAt": "2025-12-04T20:16:57.964Z", + "postProcessHash": "8fdeac88a355bc5e9c999c4759031a94714f86fb830be308b4a7df95b8d9281b" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.903Z" + "updatedAt": "2025-12-04T20:16:57.964Z", + "postProcessHash": "0878fa62cad5719c555c5ad67562e732776ba2e3f9623709b80a866a17f43cad" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.929Z" + "updatedAt": "2025-12-04T20:16:57.975Z", + "postProcessHash": "1513b2feeb6b3569185823c04eb51218b668e9bd67efee9e911db33dbb61fd87" } } }, "e7b858b48d1c9c70d32c523d9dc6357d0917ee69b16fa5c6a88fd2a2cfac0098": { "092cf9506a86a0643021a3bc1abcb0426387f5124df02aa60181da49a76114c0": { "jp": { - "updatedAt": "2025-12-02T22:57:28.882Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": "057de8c114d5e2565d5aac3f8a2a3c6d16b8b36d2acef7ae318230128fac4672" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.882Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": "6e4de5767563bb9fb18427dbdaf2f3b76ad39a6f2d17edbe3bdb343bf64a6247" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.905Z" + "updatedAt": "2025-12-04T20:16:57.965Z", + "postProcessHash": "fd871f843763493e362e886fa1f04008f9da769b46dbc4035b3a13d31fc4414f" } } }, "eb1a1f01631b05bf1533ffd2c55c66414eb49c1741c154d4907afc8f73f7235f": { "9a41183439ccb685d921031462bb9652422322a842f20a13f989ee4825c98e54": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.926Z" + "updatedAt": "2025-12-04T20:16:57.973Z", + "postProcessHash": "08416915362840ec089904d32bead6d37b2acb975c31dfc085817ef110e254a4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.926Z" + "updatedAt": "2025-12-04T20:16:57.973Z", + "postProcessHash": "3311bb4ccafde6d15ee9008273ccf12f5fd335975bbed37aa0e3a766c34100d1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.915Z" + "updatedAt": "2025-12-04T20:16:57.969Z", + "postProcessHash": "526184f7801764b5f50fa68cd5f0f0f72a186a4d85b3d8fd25bb4d78b980a68d" } } }, "ecc50ef743da07587f50e2e433c891d853c4145a43e14073bee65beca199ca9d": { "e3d9d895a670833c385d032550d1d2f2e8ecc66328713f84bde5f6eb645a9a70": { "jp": { - "updatedAt": "2025-12-02T22:57:28.886Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "d93ad14edcecb069f7720e7428250c1be552c9a43c8ff2c6d9fbfaf95e08ef01" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.885Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "f6aef821614e275d5b699a1113412f5ae0ebd1936d79e8141ef849bc916ffc2b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.886Z" + "updatedAt": "2025-12-04T20:16:57.952Z", + "postProcessHash": "9c7efdd2a2cb429c34cf4f497e453d024463dde894387383ddc87ddaf6af140b" } } }, "f811cef1e60d3d76b1890136803c78a8a4f1f5c0702d5e269d8ea318cf5bc7b7": { "8ed2a0a54a6b4cc5249d9184642124cf15bfe670fcebd8151de225c2a95e77c4": { "jp": { - "updatedAt": "2025-12-02T22:57:28.889Z" + "updatedAt": "2025-12-04T20:16:57.954Z", + "postProcessHash": "07a969b74918842b9b40ad5f0930842ca3fd27f46d7038b22ef57217837cb43f" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.882Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": "ea1434f7d0b25aa9966cdac1ce8a014d401fdfb9dbfdef2db4615d3fedfb0eb7" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.884Z" + "updatedAt": "2025-12-04T20:16:57.951Z", + "postProcessHash": "d455dd0c48bae06de0dd49ba68a2dce7f2d8fc7c5c5fea3dbcd582c0f1280730" } } }, "037cf7beb5f9d2290c0e219686b1115e8e4b773c79f541f5c81f9a4989e58cd3": { 
"3f6353039db49376892bd891e326535ed8f03543ad08cc2ad5b7bbbe193ee94e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.264Z" + "updatedAt": "2025-12-04T20:16:57.990Z", + "postProcessHash": "3a8d7ffaa9444e7a288d168a8bee0cc4f5d25733fa495c1df9c6b900dc125a7b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.268Z" + "updatedAt": "2025-12-04T20:16:57.993Z", + "postProcessHash": "38cdec688fafad29cb0196e7d52dcf0ebc79d468fa122d17030733a904c9f5ca" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.266Z" + "updatedAt": "2025-12-04T20:16:57.991Z", + "postProcessHash": "ae3addb58b08330f4cf9dcdb567bc37cfb5c2a6a385e959bd8ecd347d82f5333" } } }, "0a6b34520ca8168f8c366dbf6721239ffec9e0995481a49f17e32bfdf43182b3": { "d12d9428ec537b38678164b4a2d6a7eab105d1f3658778da83f05b64228fece8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.940Z" + "updatedAt": "2025-12-04T20:16:58.015Z", + "postProcessHash": "aae8639dfcb4d05330da61c726224576925db03e664398ea060182ed154b13f6" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.937Z" + "updatedAt": "2025-12-04T20:16:58.014Z", + "postProcessHash": "930b770730d099618132c293163290633606fc2c848d9efcebd7699b4cc3f891" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.263Z" + "updatedAt": "2025-12-04T20:16:58.015Z", + "postProcessHash": "5c3048791b049b2df77d766b505499c6c7ed470e015ac6ac589c3093d7a14d67" } } }, "391cd20c30f8013777a8a8300b0127fdc765340375b9fa4c319adee3b24ec843": { "c91f5ec1d83b0cec76d7a0b4857bf98e46315d814f9cad3887ee4296fdb30001": { "jp": { - "updatedAt": "2025-12-02T22:57:45.275Z" + "updatedAt": "2025-12-04T20:16:57.996Z", + "postProcessHash": "70a072707fe323afb3ac6f2839e68970867074fa091ebe73ae2c4c8444ca0327" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.277Z" + "updatedAt": "2025-12-04T20:16:57.997Z", + "postProcessHash": "bd726caaacccd38757f2982b4bc52df9cc44a8d6a863f34fb019a452ffbb1d3e" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.275Z" + "updatedAt": "2025-12-04T20:16:57.996Z", + "postProcessHash": 
"270278f2f4d02433981d96c624cb89b1553a35e806084a3ffa132dd1618e75c4" } } }, "4fb36325d48e506203d8d75bcf6b70879d8bb4bd5ac0aef7b03cf1d784b85934": { "e592ec6dc8b770289b11562b8d28fce8a2ed7c9589b8caa85832638eef552890": { "jp": { - "updatedAt": "2025-12-02T22:57:28.942Z" + "updatedAt": "2025-12-04T20:16:58.016Z", + "postProcessHash": "c05dadae1f20728daa76a1a652991a29230c7a741017a0ed4adde3ee2b6265f4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.945Z" + "updatedAt": "2025-12-04T20:16:58.017Z", + "postProcessHash": "eb24b6e7d280da615609f641194603f6156614c17156243ab5950ae4063903ef" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.944Z" + "updatedAt": "2025-12-04T20:16:58.017Z", + "postProcessHash": "bc5425ea7f0bb2e9cb1b2087317f634bc42752bbf106c263317117bb37ed2b84" } } }, "54668b892baede02e5b6a5cbaf788773fafac8277e523ed65fc920f4ea6df2de": { "0163d4482566b616b6e411361068fbb4094a1c1d66cab5c5f906a2faf1fe96f8": { "jp": { - "updatedAt": "2025-12-02T22:57:28.937Z" + "updatedAt": "2025-12-04T20:16:57.989Z", + "postProcessHash": "7395fdbd8a4d7865e0a4d9b53e89d3c4a16a2daea8f097d7c14965c6b7ee6ce7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.937Z" + "updatedAt": "2025-12-04T20:16:57.989Z", + "postProcessHash": "0a97782489196b977e009683bbf33b16d0e5e868b49aea49c6df31eeab803ccf" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.940Z" + "updatedAt": "2025-12-04T20:16:57.989Z", + "postProcessHash": "5e7c4df17c64524380cc1c650caba6350a4192f770a9f28045b4ff3f34b8a792" } } }, @@ -22002,208 +26914,256 @@ }, "babb148b7bd1d5550fd997022b79bfa7c7fc825242898f499f8001b7d0a27451": { "zh": { - "updatedAt": "2025-12-02T22:57:28.913Z" + "updatedAt": "2025-12-04T20:16:57.967Z", + "postProcessHash": "e3ac99e61b71bb4a8f3b754e5a0e0a3faac10ecd59075e256d4cfd7184a3ee65" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.914Z" + "updatedAt": "2025-12-04T20:16:57.999Z", + "postProcessHash": "3f30e5fb2f2047cfe887240f73487c2b10bb219bd1e1a7a499447b032b603394" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.914Z" + "updatedAt": 
"2025-12-04T20:16:57.968Z", + "postProcessHash": "10da1283692051fb2bec6fc31bd3e9d239c152999a54014a0fa4dc87fdf2c02c" } } }, "5ad0090f8bb37d66ad4ec384fd8c183a6ce6d85bd5c293bdc484cc9f40bbfc3d": { "fa3251d9fbc086f42e5d133962432d1e0e3306745b593aa2bc755f7b16f5bfa2": { "jp": { - "updatedAt": "2025-12-02T22:57:45.266Z" + "updatedAt": "2025-12-04T20:16:57.992Z", + "postProcessHash": "0ad57c45005714a50777ebc21dbf3d96788cbb02f7c7ffa0569f7f0e887b5ec3" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.267Z" + "updatedAt": "2025-12-04T20:16:57.992Z", + "postProcessHash": "18b79f24f8fe0a4489a35a2c8c06011840f2353728daf767833c520c068a01eb" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.265Z" + "updatedAt": "2025-12-04T20:16:57.991Z", + "postProcessHash": "170b108784098a488eb5f393325aeb451002811c0eae6ad44593322bfca34dd1" } } }, "67deff08df6c97036b3da071e7956e16555880aeb53c7d8ac63d1316e5f89993": { "8b19006f70430697684ec4194432408cb6d68b05965376bdeba185e83774be1d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.265Z" + "updatedAt": "2025-12-04T20:16:57.991Z", + "postProcessHash": "e94aedf66d637b6f275980467625fe2ced852b26c96348a34e422d2ca1f5d695" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.264Z" + "updatedAt": "2025-12-04T20:16:57.990Z", + "postProcessHash": "948347908e015afdbe6635bf35f74bc8fe85bdf5e88d596d604d1c4fd8685bba" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.936Z" + "updatedAt": "2025-12-04T20:16:57.969Z", + "postProcessHash": "2f58e50197f8aca8c51a76870f33e79c5cb0577bbce8a3d80b7fef2e5c87b277" } } }, "72054126de2c0ba649ef4842d3a88e42bc8fbabd3ec579abd629308399d48364": { "f53eec1c24f726e22bbfdd53d757a2f052bbadb6e11837183028dab74cbef510": { "jp": { - "updatedAt": "2025-12-02T22:57:28.920Z" + "updatedAt": "2025-12-04T20:16:57.971Z", + "postProcessHash": "db6e04c46a89993043fdc233cfc4f01614ae311ae0aa9166fde68bbb9fae5310" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.921Z" + "updatedAt": "2025-12-04T20:16:57.972Z", + "postProcessHash": 
"2e0345e868c14464f470b16f9afcff38db5afb0aaffa421faa3895f30bff1071" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.915Z" + "updatedAt": "2025-12-04T20:16:57.969Z", + "postProcessHash": "d2f7a27acf3c587c848427f284be7956df1daf6f1259aa523deae38633ca2fad" } } }, "79354c33a23d98f8b63fe6e965aef5d6b18cdc962e36d20a3b148d8cf335f86c": { "a1b7db6e0aac3869ff670ca64a57cc2cb592944192a99aea022777ca4d6ae73a": { "jp": { - "updatedAt": "2025-12-02T22:57:28.929Z" + "updatedAt": "2025-12-04T20:16:57.976Z", + "postProcessHash": "e05a793bffceddbfc93630ce0ed9374eb911b7e668639a3d664c025a953d7381" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.929Z" + "updatedAt": "2025-12-04T20:16:57.975Z", + "postProcessHash": "8d4276ed81e19ac6df6d65d79e880e2eb2a8c8fe2404f50b9d29bfb82ed2c1c3" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.930Z" + "updatedAt": "2025-12-04T20:16:57.977Z", + "postProcessHash": "b2ffec7bfdc2c7be20b720d8c1c2cbafcbf64ba3719b07859aae6234f79b0369" } } }, "8ef8c9df9ddafcf602e81139aa8e6731772a4658d96021c4f2181a1d5e213669": { "bbc0b523bb0b92fbabe619f5570db6bf3895fcc93bc57a5e31d9f3b2110f036d": { "jp": { - "updatedAt": "2025-12-02T22:57:45.278Z" + "updatedAt": "2025-12-04T20:16:57.998Z", + "postProcessHash": "2ace1cf9bd4d631b9f16200194887e26a4121ce1026928bfb46d428f9e7af2cc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.279Z" + "updatedAt": "2025-12-04T20:16:57.998Z", + "postProcessHash": "3e48c7c0aae614a42986488ec4f3326f2d10b3ecf38d7cf64ab547f9eed8f431" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.278Z" + "updatedAt": "2025-12-04T20:16:57.998Z", + "postProcessHash": "de07d4a2b8f44b2e4bb703916d55aa0cb744bcb580ccda88a8fea1721d4286d1" } } }, "9a882460cbd2fdc9c5ff521d87a5f2d2b7ccd55f1ba81bfb3906e7ca923d1c1e": { "437e57c81c3f0872003cb47aa8df2359ae68ecc690d887ec26b6e38a740144f6": { "jp": { - "updatedAt": "2025-12-02T22:57:28.922Z" + "updatedAt": "2025-12-04T20:16:57.972Z", + "postProcessHash": "9396d5bd50d716c07ee78824c8b338348d0c6eea75190ff0d8f4d1f70a6d92bd" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.922Z" + "updatedAt": "2025-12-04T20:16:57.972Z", + "postProcessHash": "9167e54acfdf70e9ecbbbc732c5ffce61d92be6b5dbf7032e126c00f7bf7bfe1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.925Z" + "updatedAt": "2025-12-04T20:16:57.973Z", + "postProcessHash": "d479c2fe0dc8c0ea513cd749ee41985de8b730eba039b746e694253e03c23326" } } }, "ad780b9bfd73ed606b7968549e04e8b3334085724088340ad05f2447559d540f": { "2bddef7ed07c45258897c9370efaa505180d67c313bb2d16ef2c830e5636aa00": { "jp": { - "updatedAt": "2025-12-02T22:57:45.262Z" + "updatedAt": "2025-12-04T20:16:57.990Z", + "postProcessHash": "a8e7ef842ff8960a1fa33d98d059a72c14eb538a90144a86ae34bfc538a0c894" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.262Z" + "updatedAt": "2025-12-04T20:16:57.989Z", + "postProcessHash": "ab8724c194d83d71ead6579f107c4c4c513907057048b74a1339776792cdf33f" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.936Z" + "updatedAt": "2025-12-04T20:16:57.968Z", + "postProcessHash": "d313b64bcf9a377701cd5c8d37aabca63fa293c6dabe91b593fbdb193dcb8b68" } } }, "ae79c700aca5153218493e8a943d16630b2f7ea345ab07e3105236857b43d93b": { "b1e073c8374abc5e997e5c6b5beb49db3202f0731072d2c28d7fbb0d58ae5e38": { "jp": { - "updatedAt": "2025-12-02T22:57:45.273Z" + "updatedAt": "2025-12-04T20:16:57.995Z", + "postProcessHash": "be6e10f5ad3cd913986200ba5313d56d95fd20d4f09a4c470481d65e87a275d6" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.270Z" + "updatedAt": "2025-12-04T20:16:57.994Z", + "postProcessHash": "71918a8b2e540c66ad8eb67dbd7571f6be10be9a101077f39fbb590eba7702de" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.271Z" + "updatedAt": "2025-12-04T20:16:57.994Z", + "postProcessHash": "23ff635818cc2f345d93b8454bb43e54b7795c73f9a5fbd7c41dd36fea601821" } } }, "cad443b0bb3344ed063f7aa4c7fc2b79aced5e32830119e2376d8bc59ea14c52": { "7d224b4658e83885570c772a1a61546603db3deadf2539b9ba2ed630cb97e6a6": { "jp": { - "updatedAt": "2025-12-02T22:57:28.921Z" + "updatedAt": "2025-12-04T20:16:57.972Z", + "postProcessHash": 
"2ceefce04ffb0694dcede58b65e6bc3f584f63af1c476c005c3ba69b0681ad58" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.918Z" + "updatedAt": "2025-12-04T20:16:57.970Z", + "postProcessHash": "4baa3386d48a5df9aaee378d023d5b47abb5bf6d1ccc3c8ded54f9d947124dd8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.922Z" + "updatedAt": "2025-12-04T20:16:57.972Z", + "postProcessHash": "46da44c1b548528a24954e49e2bb7338079bd3965c6b0bb7838a96d08e7729d0" } } }, "ceefbdcea6747301b15ae01324b1afd1ac12aa220ed2fe99add6fbe53f6c7269": { "5840e875e6ec0ff5abbf5480df1b95d85a50786763ab037f67b711d24e4e67c7": { "jp": { - "updatedAt": "2025-12-02T22:57:28.920Z" + "updatedAt": "2025-12-04T20:16:57.972Z", + "postProcessHash": "4d82cdeeb64d7528c55a9b20653eb13bb0c88a7cb0440919427d4ea49c2f3a02" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.915Z" + "updatedAt": "2025-12-04T20:16:57.968Z", + "postProcessHash": "3f229ad78931abbf0ce78e847e5b255bc3480901f102ac1225dbc77819a55051" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.925Z" + "updatedAt": "2025-12-04T20:16:57.973Z", + "postProcessHash": "beaeb1c0510d9cc44fb0e9e0f19c97617e2e171797f0e8c9b1d40c5dcb44614b" } } }, "d4d0c35c5f0beed1c59fef3df7f5bfb3c862a52553491c973702a3bc2127649b": { "57ffcbf7d6cac66182cfea77cf8aba9e7c9e489b22f114253119e9ff7f8c1f83": { "jp": { - "updatedAt": "2025-12-02T22:57:28.905Z" + "updatedAt": "2025-12-04T20:16:57.965Z", + "postProcessHash": "c747eeccac4ad64e673b826b973232bf8fda23017a0bf4bda7184a75ea1c50d7" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.930Z" + "updatedAt": "2025-12-04T20:16:57.978Z", + "postProcessHash": "6761e3a9136b3f04dfb4959659c033d7868ad6de4fad5a67856181787bca68c7" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.931Z" + "updatedAt": "2025-12-04T20:16:57.980Z", + "postProcessHash": "8dd15bfd773d922efabf72c8b6066f064cd4909171ab6fde399352cf1cb6aed1" } } }, "e14b170922435b64e35287ad9833a81f16ff54cafad9dec0721b50d4150e5eff": { "a7e402c7578841050808aadfed7d6deea52ece0e68f8352e2e942645abf29aa1": { "jp": { - "updatedAt": 
"2025-12-02T22:57:28.935Z" + "updatedAt": "2025-12-04T20:16:57.968Z", + "postProcessHash": "a6953d113273aaaf5f9cd703e87b279b4191d2c6925050fca0e97811613ba5c4" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.936Z" + "updatedAt": "2025-12-04T20:16:57.969Z", + "postProcessHash": "cc7399a1177f64d4266fe77442152db5e030f4423e01c0b46db6571d37853efe" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.263Z" + "updatedAt": "2025-12-04T20:16:57.990Z", + "postProcessHash": "249050770e8ba433100f495b721cfcd972b66d79aca737d1e8f4334d3b0698c1" } } }, "f613caf640545aa0661fb14392a49a46e530351b4d92bd137405952d82f5b4c8": { "d8b96ae66a4502def2f78fdd03f27807df147056c6b3fc7bc330500d5a9451ba": { "jp": { - "updatedAt": "2025-12-02T22:57:28.918Z" + "updatedAt": "2025-12-04T20:16:57.971Z", + "postProcessHash": "0e8b26dd5b1e873d9eab512084bfa901fb3a3426b8615cef9a4b8f20f803b2aa" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.917Z" + "updatedAt": "2025-12-04T20:16:57.970Z", + "postProcessHash": "5bf2cf700b10fd148eb1ef5c5968aeb7ce8346c381fe84963dd7ebeef294b729" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.919Z" + "updatedAt": "2025-12-04T20:16:57.971Z", + "postProcessHash": "f558a308f7e5e125364863d7ae132277ce6e42ce3062a74cddc32eff5b474337" } } }, "648b00935dbd871b726296d698650b456ca7a653fa072fd74ce364e23a608928": { "ebc9c5357fa68d5d160cb6ddf6f938a834ac5bfc24d527684b3b9feaa9bc6a60": { "jp": { - "updatedAt": "2025-12-02T22:57:45.269Z" + "updatedAt": "2025-12-04T20:16:57.993Z", + "postProcessHash": "aa393e1ab562a4e50991927c50fdbf9a901863c6d2f595dceee2b83749f13612" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.267Z" + "updatedAt": "2025-12-04T20:16:57.992Z", + "postProcessHash": "7434826143cbf1fd2618fcbf2d7098ff1d8f6501ee54dc1c692b7982e045697b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.267Z" + "updatedAt": "2025-12-04T20:16:57.992Z", + "postProcessHash": "ef7456f9afb2c0a32928a0d4f22decaad436bb7e700ae28a1ee0c582dc2d1d57" } } }, "6d063f7195776042aa3f0a6d982cef56abab4e4b689ea926e2fc79ed09f5a2ff": { 
"cdca3b6d03d5aff13d620991a578cf9aae185e67396d308d55838c9401281d25": { "jp": { - "updatedAt": "2025-12-02T22:57:45.270Z" + "updatedAt": "2025-12-04T20:16:58.016Z", + "postProcessHash": "bb751cc4d1e54e875c52006b54e91cd5c9807e28e1bb0bf1bd7a4c8fdee6ef8b" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.271Z" + "updatedAt": "2025-12-04T20:16:57.994Z", + "postProcessHash": "14490cec8cb6f1b85cd7232bee78f8a502d315d882b8bb92ac60be619f6466b8" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.269Z" + "updatedAt": "2025-12-04T20:16:57.993Z", + "postProcessHash": "4ef36ed6a257a406313ec91a7559450737d378d88b4e0edf2ce54cfc254aa8e3" } } }, @@ -22221,117 +27181,144 @@ }, "000b1489bccc8788cf74aa6329f6c98ad06511f167f46f1b934a958a5c6ce2b4": { "ru": { - "updatedAt": "2025-12-02T22:57:28.912Z" + "updatedAt": "2025-12-04T20:16:57.965Z", + "postProcessHash": "f1a6e9f5e808e64096b5eb06777a9fd630a8e0b0613a766c022a4f1327c78d1a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.913Z" + "updatedAt": "2025-12-04T20:16:57.965Z", + "postProcessHash": "842b3a0eb92fe1f815ffcd1a3d32ab257e797edddb7ac763e54ba23a8aaedae2" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.913Z" + "updatedAt": "2025-12-04T20:16:57.966Z", + "postProcessHash": "b632dd29c7f5132925c1246ed5664daad9f1dd2673d73d4cc689a280623d337f" } } }, "99b41ad75a6b23d70cb86b644a533c095785f9bb812c802ab52b650473d678ce": { "aa16d1a33d3312895cbf47d1ede82586dfb4df0a3507111d6cc8823a5446a979": { "jp": { - "updatedAt": "2025-12-02T22:57:28.944Z" + "updatedAt": "2025-12-04T20:16:58.017Z", + "postProcessHash": "32ce42679a2c92d69a092e8b1a9c411c4905801b729f74b0290da8383e209201" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.941Z" + "updatedAt": "2025-12-04T20:16:58.015Z", + "postProcessHash": "6c169ae4d235a9c1d0dfb4562680280faa583409ffe0def6f758e109e58e28b2" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.942Z" + "updatedAt": "2025-12-04T20:16:58.016Z", + "postProcessHash": "7c9c40b3e5b49e192277c1beed49ad527e4478e74e0094f006074cd0aac0f4a3" } } }, 
"be4a5f793e39d6e7b18691ba8685878af8c580f898c9f09efc5b93e0979b3902": { "b95eddde3a53a14028e00000ea72057696b55e352e2a30cb66fda415c9ba5d5e": { "jp": { - "updatedAt": "2025-12-02T22:57:45.268Z" + "updatedAt": "2025-12-04T20:16:57.992Z", + "postProcessHash": "ebe04f6b0e5add0052a46c4d98f037461beb28f2cea4d340dc0e8d9b671af0da" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.273Z" + "updatedAt": "2025-12-04T20:16:57.995Z", + "postProcessHash": "35062a645098ab1cb42eb22ea43f02c0ad74c81ace097c9abb272aa41ed6c219" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.272Z" + "updatedAt": "2025-12-04T20:16:57.994Z", + "postProcessHash": "9c40422306542d1b33bd23f9ecc50d931d4acc34ad63b373ab07fc00b0644d58" } } }, "c6fb4739e8e0ce948c34c03ed0f585498d9b45c24d566dfb8456926c4160207b": { "1d24888ce8aa77edfe5838c52a804ab3149a5d9497f036556a3e08576311a7ea": { "jp": { - "updatedAt": "2025-12-02T22:57:28.943Z" + "updatedAt": "2025-12-04T20:16:58.016Z", + "postProcessHash": "206c86770d60be69d78c5041054e095526a3550ca874a551f77b1fc18f589b05" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.943Z" + "updatedAt": "2025-12-04T20:16:58.017Z", + "postProcessHash": "447b1d3c7230d123b3a8612b66738be2957ac64e17db606124af37b5914d2f1b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.943Z" + "updatedAt": "2025-12-04T20:16:58.017Z", + "postProcessHash": "e52cc34ea054e84b2b867b68a6ac9a2d817ff02ed573315a5175d52f0e08c58d" } } }, "d917e72b0a533c5af5b78c94fe1c05954dfd7ee48fb7ef7ab50f924f25fd68d2": { "b98abd6c9ba813c4b4a7cd9bc3018c8d18d3b4e71c0ec5233cf5d8da0a0f0441": { "jp": { - "updatedAt": "2025-12-02T22:57:45.274Z" + "updatedAt": "2025-12-04T20:16:57.995Z", + "postProcessHash": "6d7e1e04dc4de4b7762b9c2add2cb0513137fa933e485911757fddf10ff36100" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.277Z" + "updatedAt": "2025-12-04T20:16:57.997Z", + "postProcessHash": "a25b67ac981a813e93454a065c02f474a7db097493dd107887a4dfecfa883dcf" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.277Z" + "updatedAt": "2025-12-04T20:16:57.997Z", + 
"postProcessHash": "8c2d80fbca315caf65320cbdb9619ebf8416e3818e4d3e39e7b71bd675cd1cd7" } } }, "e05df611d62735d38ef4d916bb8f4ebe7a8d79a8773dcc1e94584527d5291d29": { "6ed109f9852559b92ce5667c817e8c2bc706b8ada65ecb41dd89ea0a07d5a71d": { "jp": { - "updatedAt": "2025-12-02T22:57:28.941Z" + "updatedAt": "2025-12-04T20:16:58.015Z", + "postProcessHash": "b236d87529da82a82c443d416b4d3f238299db9ffcc9d45ef6c2f3318c021580" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.941Z" + "updatedAt": "2025-12-04T20:16:58.015Z", + "postProcessHash": "ba16ea7423c96bbf6ae7981c949a17edadef3b84f8b242805232c3ad44a315ba" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.941Z" + "updatedAt": "2025-12-04T20:16:58.015Z", + "postProcessHash": "e910f76cd143d052e6fde292752e8dbb209fc9a9650cc2f075077d3956a809df" } } }, "8e4cc87be65a0de0b75cdf694f1e368b68e721094e28ad05d1ab2af1aa7c97c2": { "b4c7e25600e2e0bab1150a0a7777cdce0d61b9c3e50a9c73e33bae121c92cbba": { "jp": { - "updatedAt": "2025-12-02T22:57:45.340Z" + "updatedAt": "2025-12-04T20:16:58.102Z", + "postProcessHash": "713a33d07f67aa81c5e94df9357525eb992cd84e5b8de4b33c75065ae90183e7" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.340Z" + "updatedAt": "2025-12-04T20:16:58.102Z", + "postProcessHash": "5cb63e2cbfc805a66153f2a0b7e0eb50431e18a659f8dd0df4e892bb4889105b" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.340Z" + "updatedAt": "2025-12-04T20:16:58.102Z", + "postProcessHash": "fcdb53f0eef5ee6d7bcf511efe0e736257a95d4c5d0341c48b91fd888eebe2f4" } } }, "9dbbdc5c5acc11dc5874d8f84c2ec9210659a18cdd63bcc17e5b9addd0e11761": { "ca5dbd38b58fcc4d7a89bbb3e287de8dd7982f758f2a8e314589026ceed00758": { "jp": { - "updatedAt": "2025-12-02T22:57:45.334Z" + "updatedAt": "2025-12-04T20:16:58.078Z", + "postProcessHash": "72aac5265ba0e946d0a5714203f68b03fa3e12de2b70363bc1853794f46c5d5f" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.357Z" + "updatedAt": "2025-12-04T20:16:58.114Z", + "postProcessHash": "d0d5c65fb4be1b4d607387f7433e1efab92bd7c402186625d19603b205f0460b" }, "zh": 
{ - "updatedAt": "2025-12-02T22:57:28.990Z" + "updatedAt": "2025-12-04T20:16:58.077Z", + "postProcessHash": "ef1caaf6885cdaad84bb404e6a152ab1f7f0cbb3c3fb7e190f4453cfa676f118" } } }, "a1ae550295a483325655e321e7db058409614a56e29a23b67cbb7b001c387ca1": { "8978ba1f0ad1f751ccb53c78a3aacb61cbebe5e747e9d35fcdd7d9a45f55b790": { "jp": { - "updatedAt": "2025-12-02T22:57:28.991Z" + "updatedAt": "2025-12-04T20:16:58.077Z", + "postProcessHash": "7f3bcd4393f59fbfb6d61e50595b4f619303931f2fd4b1fb0045b25aa8c067fc" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.336Z" + "updatedAt": "2025-12-04T20:16:58.078Z", + "postProcessHash": "be6f2c0efbd26f47ff866ca052b7bcd3556397e5e4d44a33fc127de0f2bdaee4" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.337Z" + "updatedAt": "2025-12-04T20:16:58.097Z", + "postProcessHash": "4d17055463206d0da16a17b80df59d29714f48198e709b60d85fcf69b5a4a6d3" } } }, @@ -22384,13 +27371,16 @@ }, "544caacf28d1d13be0e179fce871929bdd7c09bb92703dd56b759837518ff545": { "ru": { - "updatedAt": "2025-12-02T22:57:28.587Z" + "updatedAt": "2025-12-04T20:16:57.296Z", + "postProcessHash": "f61f247425677a4ef3c2da3e14f2f63b3af97e4d01ce43e682f0c253fa99dc4a" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.588Z" + "updatedAt": "2025-12-04T20:16:57.296Z", + "postProcessHash": "2536314589a3c6d12865da6e8b09f893360f610a913bb2e64a9c5b58b1dd6c1b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.588Z" + "updatedAt": "2025-12-04T20:16:57.296Z", + "postProcessHash": "818319fbb58b7264bf77fdef73c74e6d47b77a0742928a12f99095e04b3a8e2e" } } }, @@ -22405,13 +27395,16 @@ }, "4123bf4754603cd137b2c347ddc2ecbf727880d70156ebaba4224dfc6513ccdf": { "jp": { - "updatedAt": "2025-12-02T22:57:45.135Z" + "updatedAt": "2025-12-04T20:16:57.784Z", + "postProcessHash": "b09f38bb2497a5e2738410acd1fa3eceecc540d3197faefaec25321fedbf9b53" }, "zh": { - "updatedAt": "2025-12-02T22:57:45.139Z" + "updatedAt": "2025-12-04T20:16:57.786Z", + "postProcessHash": "8996dd0bffd21e07932ff2d132b2eb8f580bc28a13bde7ac0f1011ccff2dfafb" }, 
"ru": { - "updatedAt": "2025-12-02T22:57:45.141Z" + "updatedAt": "2025-12-04T20:16:57.787Z", + "postProcessHash": "6622e66d490958fd3aa95bf0528a886b0ccd5141311d8c9d77e63857d2cd716e" } } }, @@ -22426,13 +27419,16 @@ }, "027d2488fe130352aa6298012c4d7a267915572c0674765bb593b91d195456d9": { "zh": { - "updatedAt": "2025-12-02T22:57:53.528Z" + "updatedAt": "2025-12-04T20:16:57.292Z", + "postProcessHash": "cddc24770a56c74f224fc8a924029eb2e83f8d58535d69f2831bf97238a2d949" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.529Z" + "updatedAt": "2025-12-04T20:16:57.293Z", + "postProcessHash": "ac201609e6222d857e5f1c6f0ae325b1d2288b3ad77ea808d0906c0823c2c050" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.851Z" + "updatedAt": "2025-12-04T20:16:57.266Z", + "postProcessHash": "24582a05f2069fa46e18a7cb381f2fe2345e0ad28b3db4593ee7cc26e3bc840a" } }, "b63564ec20730ecb181b7257817dbde4f1541c1b85a389cbe3cd4e6e203c48c5": { @@ -22458,13 +27454,16 @@ }, "2020a467b74c2031b09501bd31ebb2d005e1c3d366aa4673be3ded168b7cf3c3": { "jp": { - "updatedAt": "2025-12-02T22:57:28.758Z" + "updatedAt": "2025-12-04T20:16:57.800Z", + "postProcessHash": "75c60a85252e8ac6bebbcb87449ac0d95c378b782a3cf89015b17b0037fe5aac" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.761Z" + "updatedAt": "2025-12-04T20:16:57.801Z", + "postProcessHash": "4926ee8caf607b5c9d11526c23729c9b4696ad0118d4b4ade61fabb5d9f18702" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.762Z" + "updatedAt": "2025-12-04T20:16:57.802Z", + "postProcessHash": "b32ebbb92a17c951dd50a1d149a4f823a45853441a35c933d264ff73d93c2f3f" } } }, @@ -22490,13 +27489,16 @@ }, "1e00e7ce8c07b67a72f3c30424ca0c2d930cacc231adf5a1336f323772ff2edc": { "jp": { - "updatedAt": "2025-12-02T22:57:12.903Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": "d58f98a14dd94e685f166a44d468cdc1c0de1c4652f89710c26549e79f0d4093" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.903Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": 
"af0a63611bf0bf1021afc1ccc48db0131596e4a09654a210756d9b4fbae92911" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.904Z" + "updatedAt": "2025-12-04T20:16:57.100Z", + "postProcessHash": "2a27c63a71c49cc6722c8f17686cfe9fb45fabd5c0daf4c1594727e1e296ba10" } } }, @@ -22522,13 +27524,16 @@ }, "a99976a7a738ebb33cada2f4d924528e1f6779ca2332591b2c1eaf27105ec883": { "zh": { - "updatedAt": "2025-12-02T22:57:44.786Z" + "updatedAt": "2025-12-04T20:16:57.211Z", + "postProcessHash": "b40dbbba7e989fa9cae3d49b1ad3584a08ea3cb1bb6948112e26d1b0cabb39ff" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.445Z" + "updatedAt": "2025-12-04T20:16:57.211Z", + "postProcessHash": "35247abc761d249da7c7cd1664d0eb0ec0717f3cd6840fc13a796bce7c6c6364" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.445Z" + "updatedAt": "2025-12-04T20:16:57.211Z", + "postProcessHash": "a3ae6705ce3d926d87770e140ba2b303777239252aa6fb61f48c5ccb6771a084" } } }, @@ -22554,13 +27559,16 @@ }, "82debe159b38d56f0f7e43e16823ebbfccd913c0fde77cb1d097d676eb7fedb7": { "zh": { - "updatedAt": "2025-12-02T22:57:45.100Z" + "updatedAt": "2025-12-04T20:16:57.725Z", + "postProcessHash": "5c48a6036ce8e595022a70b9e858d66352a6518534b20c1ec4e85167e058fcf3" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.100Z" + "updatedAt": "2025-12-04T20:16:57.725Z", + "postProcessHash": "0a3ebe55e7cce6d0b2ba9332a43f6bd4eaec3bb4d75a676ad183d96b7bee0249" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.100Z" + "updatedAt": "2025-12-04T20:16:57.725Z", + "postProcessHash": "8b4bf1c526076b08848728fd206d4819f32b5cf7115256d25af040ce33c2d199" } } }, @@ -22586,13 +27594,16 @@ }, "18ed02e06f16dfce881d97046fffde26c9f0db28c8ce1161a1f73a89b58682a6": { "zh": { - "updatedAt": "2025-12-02T22:57:45.209Z" + "updatedAt": "2025-12-04T20:16:57.913Z", + "postProcessHash": "b94a749587f8245881eb320e74216a80c7f65f80a755716e944d145f632095a5" }, "jp": { - "updatedAt": "2025-12-02T22:57:45.208Z" + "updatedAt": "2025-12-04T20:16:57.913Z", + "postProcessHash": 
"da396aae55b7ccc27cba684a3234b5302bbfcc71fc924b8b44ec47f97c396fb6" }, "ru": { - "updatedAt": "2025-12-02T22:57:45.209Z" + "updatedAt": "2025-12-04T20:16:57.889Z", + "postProcessHash": "cb3ad973df28701bb8f4875fabcfd0e62e58786f5800c5d924dca3f687c3216c" } }, "74c98dffdc89494df3b2003df2cb04d33db014bcc399c607e4568610d46b2932": { @@ -22637,26 +27648,32 @@ }, "0af616e387db07695b2962dde0bbbd92c2ccccdb78cfa45a093fafcc97b3918c": { "jp": { - "updatedAt": "2025-12-02T22:57:44.966Z" + "updatedAt": "2025-12-04T20:16:57.581Z", + "postProcessHash": "e5779be869a7afe8b83fdaeac50fca27c4a92a937316811accb2fd746d1cd2ba" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.966Z" + "updatedAt": "2025-12-04T20:16:57.581Z", + "postProcessHash": "ea77db6ccfc387f20723fbdcae62dfb32442a45fd78f30f7bd556e4c516f10f3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.967Z" + "updatedAt": "2025-12-04T20:16:57.601Z", + "postProcessHash": "a2060bc28db6d7d600c8c1c10a4fb4eb1ec63bf24e66502014a9a3b41f3282f4" } } }, "466fe68cf77ba8d2f7e6b11a62dcea8f2b8466f8161a1a4fb8352442e971815f": { "0fb852baff9f99f784eb97ea0fe1e81f329d845d7e142f0cf03f1c59b7c10b6e": { "ru": { - "updatedAt": "2025-12-02T22:57:12.823Z" + "updatedAt": "2025-12-04T20:16:56.990Z", + "postProcessHash": "a2f861b06cedc87cff28f0e87a874f0ca6810222da9f8e6f3d226dc39ee35ee6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.554Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "35ad145cf388d16f5d4a4011168a1fc7287db6205d3337996d98df0726ff1b0f" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.816Z" + "updatedAt": "2025-12-04T20:16:56.985Z", + "postProcessHash": "e2ef30245dbda7c76ba90432299bb9dba01f4246d5ec47d7fb135e93ffb2a722" } }, "35ad145cf388d16f5d4a4011168a1fc7287db6205d3337996d98df0726ff1b0f": { @@ -22671,26 +27688,32 @@ "16c5698666ea7909d9e1753e9b13a5de1a08200f19d637afa8cab711a0379f73": { "38ea5377628be1984cefdabbe1181d528ddf34276864ec19a7193979c8dca03a": { "zh": { - "updatedAt": "2025-12-02T22:57:12.824Z" + "updatedAt": 
"2025-12-04T20:16:56.990Z", + "postProcessHash": "38ea5377628be1984cefdabbe1181d528ddf34276864ec19a7193979c8dca03a" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.826Z" + "updatedAt": "2025-12-04T20:16:56.991Z", + "postProcessHash": "38ea5377628be1984cefdabbe1181d528ddf34276864ec19a7193979c8dca03a" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.822Z" + "updatedAt": "2025-12-04T20:16:56.989Z", + "postProcessHash": "38ea5377628be1984cefdabbe1181d528ddf34276864ec19a7193979c8dca03a" } } }, "85911f3bccb6d5539862e976203980d7d51391821089a818a002e7424e1242da": { "d7b1a435f7e4fe293383e5e8731be7cd7008caf825855a2e246a89ce3676aa9a": { "zh": { - "updatedAt": "2025-12-02T22:57:12.825Z" + "updatedAt": "2025-12-04T20:16:56.990Z", + "postProcessHash": "14f020971de21060c9dbbdd97881e50c823b8decb3af6933b6786b810c784b05" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.557Z" + "updatedAt": "2025-12-04T20:16:57.000Z", + "postProcessHash": "4ca4488b6240f0969f58775cdd28b38ea19b72d09ed672806dd6066698e103b1" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.820Z" + "updatedAt": "2025-12-04T20:16:56.988Z", + "postProcessHash": "2e8935b774707a1fc1094bda03142082e4ee9cad3e7528e694913863d61304b0" } }, "4ca4488b6240f0969f58775cdd28b38ea19b72d09ed672806dd6066698e103b1": { @@ -22705,13 +27728,16 @@ "237a635525e427bffb1c840b646e1b41486b8ccabc7712217a3d66d8c582f1b8": { "727edae2b97b38f4fc6c0b0dd353075d4fe831d345dda64ac9471ceaf897e490": { "zh": { - "updatedAt": "2025-12-02T22:57:12.825Z" + "updatedAt": "2025-12-04T20:16:56.991Z", + "postProcessHash": "b1a18bb55dc19c1ae6d59cc1d7b85fd42acff628d3ca1636bfb8236189f4e211" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.823Z" + "updatedAt": "2025-12-04T20:16:56.989Z", + "postProcessHash": "b1a18bb55dc19c1ae6d59cc1d7b85fd42acff628d3ca1636bfb8236189f4e211" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.822Z" + "updatedAt": "2025-12-04T20:16:56.989Z", + "postProcessHash": "b1a18bb55dc19c1ae6d59cc1d7b85fd42acff628d3ca1636bfb8236189f4e211" } }, 
"b1a18bb55dc19c1ae6d59cc1d7b85fd42acff628d3ca1636bfb8236189f4e211": { @@ -22729,13 +27755,16 @@ "7f4450440bea714d4def4ce9d273c25160fbc93f8195d945039db1f03871b626": { "98ef39e86680ea8421985ec9e48a11480382a84780d7c51e21ba7c7c08ba5de3": { "zh": { - "updatedAt": "2025-12-02T22:57:12.793Z" + "updatedAt": "2025-12-04T20:16:56.991Z", + "postProcessHash": "f6d43fb3b1bf3cb544f95ff4148624ff3f59c0d98fc9699c1d5e9a552673770f" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.793Z" + "updatedAt": "2025-12-04T20:16:56.991Z", + "postProcessHash": "046a9d5b6025a5438969893155368430145da58084c1a1cdb51b2f442bd31728" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.790Z" + "updatedAt": "2025-12-04T20:16:56.989Z", + "postProcessHash": "5405f106a4b9fb9d494ea8c8b80b6348537da13d5055b9c84176e33b694b0168" } }, "046a9d5b6025a5438969893155368430145da58084c1a1cdb51b2f442bd31728": { @@ -22753,13 +27782,16 @@ "96339230d0b0662c9043872f701165e62b1dd1a9ee98448c3678014c12742331": { "f9dcd7d2195374981d74d8864cbac9660f4fe55a672e340bfa424e86bd032bd1": { "zh": { - "updatedAt": "2025-12-02T22:57:12.826Z" + "updatedAt": "2025-12-04T20:16:56.991Z", + "postProcessHash": "17fc1ce3e7724e08185d6334367acd1551db3403000c885aee694b8cee1b6ffb" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.818Z" + "updatedAt": "2025-12-04T20:16:56.987Z", + "postProcessHash": "6e8878bf1b9d227317cb48a45ab9134707488c84ad6fd609253a2e8ba3d90635" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.817Z" + "updatedAt": "2025-12-04T20:16:56.986Z", + "postProcessHash": "e30256fb827d3f41b02c4a19420ee45e6f4b22c9f126bc600b0e5d0ad8d78885" } }, "6e8878bf1b9d227317cb48a45ab9134707488c84ad6fd609253a2e8ba3d90635": { @@ -22774,13 +27806,16 @@ "7b1152a9f1bfab485338afd2d917ac4d27b6ac598d4df8c416b5d34f5f2f2dc6": { "e85d9475b25d51b62300a450688edb90649a6b929805c4c6c7dc02c5c82425fb": { "ru": { - "updatedAt": "2025-12-02T22:57:12.793Z" + "updatedAt": "2025-12-04T20:16:56.991Z", + "postProcessHash": "4414fe2db8210cb07462ea37b5f252d3d1a08aea83c8048ed4bdfdf8e502b069" }, 
"zh": { - "updatedAt": "2025-12-02T22:57:12.780Z" + "updatedAt": "2025-12-04T20:16:56.982Z", + "postProcessHash": "77b3f7333c54264798591573269c51eb187a6530077b8c89d00454a32b3814c7" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.794Z" + "updatedAt": "2025-12-04T20:16:56.992Z", + "postProcessHash": "a1eab45bcabefaf3193bf256217b0716bcdd3e9659c09f81d14218a16fb060f8" } }, "77b3f7333c54264798591573269c51eb187a6530077b8c89d00454a32b3814c7": { @@ -22795,13 +27830,16 @@ "ff3c9f598e696982267c2ce9a91a552bebc66583c1163dc1c4b27f82c5102f1d": { "128e8ba5fd3b5e0981c42ebd31c5b3e87b6845262805a4f4bff3b70534bfda44": { "ru": { - "updatedAt": "2025-12-02T22:57:12.827Z" + "updatedAt": "2025-12-04T20:16:56.991Z", + "postProcessHash": "ce53f996740d70842ef68af1655918f3eb1bc9df687c4df73d2d93719080f97c" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.817Z" + "updatedAt": "2025-12-04T20:16:56.986Z", + "postProcessHash": "21345561752f0d94aea8069e30373e2f370e4ba9287cafef0820ff4937dc0a05" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.813Z" + "updatedAt": "2025-12-04T20:16:56.981Z", + "postProcessHash": "aede207cd5ce2c77e07aaa05b0def28eb8fc4c9a032f7e14fee2a8f389750a35" } }, "21345561752f0d94aea8069e30373e2f370e4ba9287cafef0820ff4937dc0a05": { @@ -22816,13 +27854,16 @@ "0361e95538168e72e0cf9076b4f8a823f82bca2acba30f30499d1d7ab6a5509f": { "d46f5caa45acdc3ea0cac4ee761116eca50f70acb1faa2569b6101636d3704f8": { "zh": { - "updatedAt": "2025-12-02T22:57:12.827Z" + "updatedAt": "2025-12-04T20:16:56.992Z", + "postProcessHash": "f0a97c134d3228bd813b9fb2631caeb8537d464977843611fc44a3fd1c12d58e" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.555Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "7eff5614e108ffbe8fdbb7cb7a60ce43f1c34abb8dce2015fa7c6e289db7874f" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.556Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "8b472718c3b01cc58d0348751f5b161326d90be2ff4adeed0263bb244d13b231" } }, 
"7eff5614e108ffbe8fdbb7cb7a60ce43f1c34abb8dce2015fa7c6e289db7874f": { @@ -22837,13 +27878,16 @@ "4914840b74cd4cd05b93446005c1a3f9b45c7e7816eb8b20c953782a78417420": { "66ffb1d1eb8cc149ea48f7ecfeda0ca180b36051bed03928a1992c631dc4c19a": { "zh": { - "updatedAt": "2025-12-02T22:57:44.553Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "2387c90b6734fabbf5e34855d9b7087a56a8459199666d9324c3f8ecb8753f8c" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.814Z" + "updatedAt": "2025-12-04T20:16:56.983Z", + "postProcessHash": "40abf84cb8f86fa1a58b9ec5523ea457c40aaf25e3b348ce068ffc50600529bd" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.821Z" + "updatedAt": "2025-12-04T20:16:56.988Z", + "postProcessHash": "ebfbd37d08e77529f334617e1c65036b6807197b47ecf072231bef4ded2bb5d9" } }, "40abf84cb8f86fa1a58b9ec5523ea457c40aaf25e3b348ce068ffc50600529bd": { @@ -22858,13 +27902,16 @@ "7c40f4e2df36269b352d83d988edf0d606726b28f6527552e7eea3bbecafdef3": { "199bb81cde4d12c23b1adc97c7e2bce05a479079d23a4bb65c6826ef95452990": { "ru": { - "updatedAt": "2025-12-02T22:57:12.794Z" + "updatedAt": "2025-12-04T20:16:56.992Z", + "postProcessHash": "8993be5bf11b6be7d72aa4c611b0e7bad5fc512439bb9d1b6e206fbbbd7cb05a" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.787Z" + "updatedAt": "2025-12-04T20:16:56.986Z", + "postProcessHash": "370a91b8533a723d2e4b1549c35c6735c837f2f50c1c4d609903126372f45d30" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.794Z" + "updatedAt": "2025-12-04T20:16:56.992Z", + "postProcessHash": "f100216dddcd5aa6d5b2a119631e6cd22084bdd495347e2d2db5afc49cf02124" } }, "370a91b8533a723d2e4b1549c35c6735c837f2f50c1c4d609903126372f45d30": { @@ -22879,13 +27926,16 @@ "1eff56196650aabbed5f57974122db842d54e3093cc55755e2f4b980a957f4ac": { "598e57a0788cdc232382a72f993fe05e0d9a2ec8e815e0b23e6780d39b245171": { "zh": { - "updatedAt": "2025-12-02T22:57:44.555Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "029477b5ba4e00e1984a59313be1408fa79f1ed3c7374fc6e054cd934ae87d9f" }, 
"ru": { - "updatedAt": "2025-12-02T22:57:12.816Z" + "updatedAt": "2025-12-04T20:16:56.985Z", + "postProcessHash": "4adeee00af2b7dc1689945fa1a3ea72870eb8d82635d23c24f5afacdaee2d9cc" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.821Z" + "updatedAt": "2025-12-04T20:16:56.988Z", + "postProcessHash": "1b1d29c91919156ad092d3fafffbdffcefecd1464f1867980a3effbd3fd97ed9" } }, "4adeee00af2b7dc1689945fa1a3ea72870eb8d82635d23c24f5afacdaee2d9cc": { @@ -22900,13 +27950,16 @@ "3c95fa2e161d494b4ae0ef9bf3131f3b028f13b824f5b7ede9ad688d11b58387": { "904fe0150e0e8c168afe250519fee5a4c27e23da832c312dcab667da64fa503d": { "zh": { - "updatedAt": "2025-12-02T22:57:44.556Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "663fef6d20480babb22d86296b66037222d2a2650250a2874eb7d07cc37b3547" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.815Z" + "updatedAt": "2025-12-04T20:16:56.983Z", + "postProcessHash": "296904d83e2f05abd0d201b63756e4993fc070bdb04cab19f7310a5f4982f1f8" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.824Z" + "updatedAt": "2025-12-04T20:16:56.990Z", + "postProcessHash": "fb4b61ca36d8a883539ca6dce8170010ea218fcd51a23bc648986737dcd53384" } }, "296904d83e2f05abd0d201b63756e4993fc070bdb04cab19f7310a5f4982f1f8": { @@ -22921,13 +27974,16 @@ "19260fee9e23907e67f7f4589d997bab22cbabd4ffa0aa96806703a3b19aad78": { "1352a2dbb90191a61432180810a0431b454c526d658886e1c33fdb1c71cfc2bc": { "zh": { - "updatedAt": "2025-12-02T22:57:12.813Z" + "updatedAt": "2025-12-04T20:16:56.981Z", + "postProcessHash": "a935a5a09bc41067f7e9c61a4009822ddbfaeaf43ca1b00e268f2e7ea3a4e899" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.819Z" + "updatedAt": "2025-12-04T20:16:56.987Z", + "postProcessHash": "7032d2644260720142d73bb5705b9bb1dd26018cb12c421cb43c6bd87452858c" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.814Z" + "updatedAt": "2025-12-04T20:16:56.983Z", + "postProcessHash": "19e5d22dfabc6b3fdd63975fc744155573e26a0e5e36750f12a91be21edd7e34" } }, 
"7032d2644260720142d73bb5705b9bb1dd26018cb12c421cb43c6bd87452858c": { @@ -22942,13 +27998,16 @@ "c71190c424029f1f3166b0dc0c975e43b747cc77aaa7477e6c8834baafd715ec": { "40fb6fb53bc03ff95d4c2a5b88f33db598b6bbba4a8c8273a31dff8b7c9a3fcd": { "zh": { - "updatedAt": "2025-12-02T22:57:12.779Z" + "updatedAt": "2025-12-04T20:16:56.982Z", + "postProcessHash": "05e82ce73958e29dd50beb65180a9ece7ed4537018031676393d39297799eccb" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.789Z" + "updatedAt": "2025-12-04T20:16:56.988Z", + "postProcessHash": "35eb622d4a1673d7a2b49ac6d4fbe5151e7dad205dad7c16e0e36879a5bbb7da" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.789Z" + "updatedAt": "2025-12-04T20:16:56.987Z", + "postProcessHash": "54be6d29af47684e74d868f4b06c2de53bcb843f4152ac7556e0c16fc88d749a" } }, "35eb622d4a1673d7a2b49ac6d4fbe5151e7dad205dad7c16e0e36879a5bbb7da": { @@ -22963,13 +28022,16 @@ "3490c72ebec2d9960e4cc311de931030fc0f1de3f2421d0d2a30876926a983e9": { "20143fdffbf6f144ae3f0a848c2c4135b1dd5359078f18a35f86e5ad0368f0bc": { "ru": { - "updatedAt": "2025-12-02T22:57:12.783Z" + "updatedAt": "2025-12-04T20:16:56.984Z", + "postProcessHash": "0e900ad1be8eb2f429c4841464e0b4498a05a4709ef5a077723cbb0fbe51a0d8" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.791Z" + "updatedAt": "2025-12-04T20:16:56.989Z", + "postProcessHash": "716d4abd1d53aff3d2fbee3ec30720b9388d98d71e41c650e6551e5ee79417a5" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.791Z" + "updatedAt": "2025-12-04T20:16:56.989Z", + "postProcessHash": "97b5b0db4d259d57c1a14c378a7372983c27ab7a03505119698e6d7525f02926" } }, "716d4abd1d53aff3d2fbee3ec30720b9388d98d71e41c650e6551e5ee79417a5": { @@ -22984,13 +28046,16 @@ "df1cbab9f5b7839553ad76ad0b3799099daaf2d5817b6bc1eea8369de5c5842a": { "3a49b42cc312e4959cc3883b924f895ba1f241473240bcbd42a5ff859048c600": { "zh": { - "updatedAt": "2025-12-02T22:57:44.568Z" + "updatedAt": "2025-12-04T20:16:56.998Z", + "postProcessHash": "5f07fd1c30c64c511b3526fef59eacc817affb1338679fe14f3a924d4969b25a" }, 
"ru": { - "updatedAt": "2025-12-02T22:57:44.581Z" + "updatedAt": "2025-12-04T20:16:57.005Z", + "postProcessHash": "f9d645e97cee935ded980b682b04f4dfb10ed1ef757f1c0a7fa4cacd7075b0c7" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.568Z" + "updatedAt": "2025-12-04T20:16:56.998Z", + "postProcessHash": "d942958f2d16a67f98516c8ba7d5e09365cf17b1713224474006387c823f1f17" } }, "66576350fa60992186887698075dc59ba76bb735288c2751fa40b91ce10698f2": { @@ -23005,13 +28070,16 @@ "d133c163191364466953c00a3494895f7b213291fa7eec0a3286c15ab6588c48": { "5b79efc25b16535ce983e05832f4052257d44d2790af29323a727be1048bc054": { "ru": { - "updatedAt": "2025-12-02T22:57:12.784Z" + "updatedAt": "2025-12-04T20:16:56.984Z", + "postProcessHash": "1c12f6285ada04ba5ce9852d4510890ef1abfe836ea02241cb3472a9e193134d" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.782Z" + "updatedAt": "2025-12-04T20:16:56.983Z", + "postProcessHash": "de7e11301702e7f96f6fbd025a9103d0ed8c30b19a1bb2d879dbd161c1061ad6" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.781Z" + "updatedAt": "2025-12-04T20:16:56.982Z", + "postProcessHash": "179118fead23799b810921a3e169f3bea8d6683c40c860a67c860b97a38b3b54" } }, "de7e11301702e7f96f6fbd025a9103d0ed8c30b19a1bb2d879dbd161c1061ad6": { @@ -23026,13 +28094,16 @@ "5ae13595aec14e94efae48ed27bd30882ef99ca22e926c6eecac01f4a69b6e60": { "4c6c9c998098906955cd0a416322eaf10b8ceb9a33df69bb90b4e0206e58399d": { "ru": { - "updatedAt": "2025-12-02T22:57:12.784Z" + "updatedAt": "2025-12-04T20:16:56.984Z", + "postProcessHash": "e095111c0493f9d0840646462377d2df8319e71207872038a26812e8e26fc1b7" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.787Z" + "updatedAt": "2025-12-04T20:16:56.986Z", + "postProcessHash": "832ebe1b67ef5ee07034a93035676b1d6ba9f009d34428f33f25ec2daaa43771" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.783Z" + "updatedAt": "2025-12-04T20:16:56.984Z", + "postProcessHash": "be346f653831042915548f58d754323148c15bcc68ce4d4da3f3f1cb6843b67e" } }, 
"832ebe1b67ef5ee07034a93035676b1d6ba9f009d34428f33f25ec2daaa43771": { @@ -23047,13 +28118,16 @@ "52f1e721b650aa5a8bb67053afa7caf447a7332e92f416526d36e8941d726d04": { "8c41257fcdc2d116e76c9a1609bc65adf58513acff260b8f2aa36d74bccf31da": { "zh": { - "updatedAt": "2025-12-02T22:57:12.815Z" + "updatedAt": "2025-12-04T20:16:56.985Z", + "postProcessHash": "12bd4e4ebf5e50aa1242bea2b6d42c8a189578955d473976c1297fe908646b19" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.819Z" + "updatedAt": "2025-12-04T20:16:56.988Z", + "postProcessHash": "630e5b84780be36656bc937645ed65fb88179d11247d1e85ea1205ed29e6f931" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.820Z" + "updatedAt": "2025-12-04T20:16:56.988Z", + "postProcessHash": "82e84b3390f9f8829658dd58e2d16c166aaf2e96fa5c3350c51960492bd5f800" } }, "630e5b84780be36656bc937645ed65fb88179d11247d1e85ea1205ed29e6f931": { @@ -23068,13 +28142,16 @@ "5a0ce1710868a408e43b0c9859a80ada3b08b93b0d26cb45f2ea004556e9d2b3": { "ccdecf590d1994e9c17ae91e353b32d2f66c08e379ce1eeb73f06a674afd8375": { "ru": { - "updatedAt": "2025-12-02T22:57:12.785Z" + "updatedAt": "2025-12-04T20:16:56.985Z", + "postProcessHash": "f6556e33a7a453d18a0af0e707e23c87b926a1ed049f3aa11215b62fd2069a41" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.786Z" + "updatedAt": "2025-12-04T20:16:56.986Z", + "postProcessHash": "a2d654d0961b9427057876a5b47403d5864939d9a0cc302f7941e73ea9093498" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.786Z" + "updatedAt": "2025-12-04T20:16:56.986Z", + "postProcessHash": "cec459b53ed81777f738433c26adec08ac1214e5d748451d10347b50bf3fdc2f" } }, "a2d654d0961b9427057876a5b47403d5864939d9a0cc302f7941e73ea9093498": { @@ -23089,13 +28166,16 @@ "9b3e13e23b506d9d9ec9b2c5fbf8b9d2a62e1de7d0175c5f6330498124203aac": { "86c47ff8f3b3666e1a6b49b2c8302b448389e1e3b41ab3b1450e055082821549": { "ru": { - "updatedAt": "2025-12-02T22:57:12.780Z" + "updatedAt": "2025-12-04T20:16:56.982Z", + "postProcessHash": "c0defa7229fee6b27c2a2a3244e13cc4c0ed6f0f4c6f6605a86a70bf36b99648" }, 
"zh": { - "updatedAt": "2025-12-02T22:57:12.782Z" + "updatedAt": "2025-12-04T20:16:56.983Z", + "postProcessHash": "8c1816d77d3551c7d6dd5710ccc8274f66e5809dd3cea3606629893483ebfef7" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.792Z" + "updatedAt": "2025-12-04T20:16:56.991Z", + "postProcessHash": "96a408123b4168c543a77c179b015d8f620e6307650d53bdf90c2f19ec9ab2a9" } }, "8c1816d77d3551c7d6dd5710ccc8274f66e5809dd3cea3606629893483ebfef7": { @@ -23110,13 +28190,16 @@ "30f843a3827d19f26bae893b6a89699d15924309d3ee0d771f1309eb391c8171": { "a5eb46f97ff75367e3c2a77e86b555adee47157db34a73cbb68c4faa8e14d033": { "ru": { - "updatedAt": "2025-12-02T22:57:12.817Z" + "updatedAt": "2025-12-04T20:16:56.986Z", + "postProcessHash": "da0c38fbfabd7c396006ed747fbe0f6aa5c0dfd8e816c461ffaab1707e3a8e7b" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.791Z" + "updatedAt": "2025-12-04T20:16:56.989Z", + "postProcessHash": "655ba8e4e20f3b5f89cae3033f51649118b5face2393e69b8ed2d63f7c170bed" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.825Z" + "updatedAt": "2025-12-04T20:16:56.990Z", + "postProcessHash": "89278bc5a6e84647177b28cdc478fb16c00eb59607b5d452a4714361a554537e" } }, "655ba8e4e20f3b5f89cae3033f51649118b5face2393e69b8ed2d63f7c170bed": { @@ -23131,13 +28214,16 @@ "15cacb127be1afdc884be3ff13c61ff48d4ae41e28740309f5f445002fb0fa90": { "a9c8fa4f53951ce4026e170171a0517a80777e9037e5bb2f16eab83d3ffaa9cc": { "zh": { - "updatedAt": "2025-12-02T22:57:12.788Z" + "updatedAt": "2025-12-04T20:16:56.987Z", + "postProcessHash": "98be0adb514ab99e364abdbbab872ca95c8b60c312d3df36ed607421c9f38c2b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.792Z" + "updatedAt": "2025-12-04T20:16:56.990Z", + "postProcessHash": "c6b1ffeb8a927241e2108dbeb02a8cbb166d5b270f1e7cdf770147d6ef83a7d2" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.785Z" + "updatedAt": "2025-12-04T20:16:56.985Z", + "postProcessHash": "100a6e4170215f9fe15c083b750d4be45500a2d7d7846dc224623388d9b9ad91" } }, 
"c6b1ffeb8a927241e2108dbeb02a8cbb166d5b270f1e7cdf770147d6ef83a7d2": { @@ -23152,13 +28238,16 @@ "941b4aa0aa9dbadd0a190a16a820e2bcff3884350dd172d2d70c5e4bc21490d1": { "429135ca177730d77f47327bd61c6aecd212a21d1a4625d711d13a6e0c6886bd": { "ru": { - "updatedAt": "2025-12-02T22:57:44.569Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "429135ca177730d77f47327bd61c6aecd212a21d1a4625d711d13a6e0c6886bd" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.570Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "d744ea0501987d0d0496e17c8100a30396b41d2cb02d4b4937b9c75678cffd0f" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.569Z" + "updatedAt": "2025-12-04T20:16:56.998Z", + "postProcessHash": "429135ca177730d77f47327bd61c6aecd212a21d1a4625d711d13a6e0c6886bd" } }, "d744ea0501987d0d0496e17c8100a30396b41d2cb02d4b4937b9c75678cffd0f": { @@ -23173,13 +28262,16 @@ "43aa5066af84a8c935f0fb2dab57ea37c855c50a8c4bf2fe5da1196726ec9767": { "8102f53c258449f037fd5c8bfbe1d4547d061cf4c8af817be8f9e6c45a4504b0": { "ru": { - "updatedAt": "2025-12-02T22:57:12.818Z" + "updatedAt": "2025-12-04T20:16:56.987Z", + "postProcessHash": "c5c494264909d5f706f37779f7ef7d2444cb21e0a76c174cfa993154925c11f1" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.820Z" + "updatedAt": "2025-12-04T20:16:56.988Z", + "postProcessHash": "bc18044844f416597eef2c300fc30d72ea362c8100b916b3cde37fd6397a9e41" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.818Z" + "updatedAt": "2025-12-04T20:16:56.987Z", + "postProcessHash": "db0d66c6403ee0503be5db1613463d024904161282eccf111024893eea2c5ac1" } }, "bc18044844f416597eef2c300fc30d72ea362c8100b916b3cde37fd6397a9e41": { @@ -23194,13 +28286,16 @@ "a3a2fbdc5aafe02b0407589bc3e1a8e94202c17584b7025219f1bfd6b9bf4a39": { "4874e6e4325e8473fce83ceca9411bf266bf400e8eb78d3c9e8eec128469d820": { "zh": { - "updatedAt": "2025-12-02T22:57:12.819Z" + "updatedAt": "2025-12-04T20:16:56.987Z", + "postProcessHash": "dbe129e41240b3563a495e332c1a7537b68ce199d3cb0d5d8493e657c151ef94" }, 
"ru": { - "updatedAt": "2025-12-02T22:57:12.823Z" + "updatedAt": "2025-12-04T20:16:56.990Z", + "postProcessHash": "1b128db269c12be2125d03f195c663118806c04caea0bed54648c79f2879ccee" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.570Z" + "updatedAt": "2025-12-04T20:16:56.999Z", + "postProcessHash": "77dc02c6a2dda09ee7e27e8c8b6c9974f8c700d5e3fc9d9a5857aa16e47a4bc5" } }, "1b128db269c12be2125d03f195c663118806c04caea0bed54648c79f2879ccee": { @@ -23215,13 +28310,16 @@ "4877e91053b08c2c45734e5085ccf9117e8354554dd8460e2ec3e3afe7aa0ab7": { "1e4f5fb2eb3f3d09c80229402157ba0cccbf2f37d7521185e9cbb71109edeb84": { "ru": { - "updatedAt": "2025-12-02T22:57:12.789Z" + "updatedAt": "2025-12-04T20:16:56.987Z", + "postProcessHash": "c416d8bbe6b0a14640ce093aaca194afac08b23afaa8a014e84781695a67e2cb" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.790Z" + "updatedAt": "2025-12-04T20:16:56.988Z", + "postProcessHash": "ff50c271592348dfa10d95b4d2fa83784b90178a9865e6dcf8c7996829ea7358" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.788Z" + "updatedAt": "2025-12-04T20:16:56.986Z", + "postProcessHash": "d8baabec471b1d32f76513bd2dff9b6de15793884ce64d64d293b113f1e10fab" } }, "ff50c271592348dfa10d95b4d2fa83784b90178a9865e6dcf8c7996829ea7358": { @@ -23236,117 +28334,144 @@ "a444951bd73cb75b037df1739eb17fc3c4057630058e2cd15b863d55feb1e497": { "be2b70c111bb68681c2eb58d9d87da824e86dac80806aaf1af31eb7e683ee46c": { "zh": { - "updatedAt": "2025-12-02T22:57:28.548Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "a69a8cb01764a876458f03b69a78516212b48aaa83276822a31cbe95a9280447" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.557Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "f5b00425366d9e5f9f1d74bd931c71c3afdeb85f0fe83d98f4796346c102a5c4" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.562Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "41a6112aac463dfc4595f0588fc535754fdabd82c8456928dba0310427926364" } } }, 
"b61feee503868b9ae36d297816fda3d2e834c0f1ae6f4deeefcdd9b66b895886": { "4ef342336cc701c4e8d32cd01c1302bec119023fab8a7c695a4baae3e097696f": { "zh": { - "updatedAt": "2025-12-02T22:57:44.557Z" + "updatedAt": "2025-12-04T20:16:57.000Z", + "postProcessHash": "897d22cdc95fedfb5bc831477c535f31271b80876df77597db415be5838b4fe8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.563Z" + "updatedAt": "2025-12-04T20:16:57.002Z", + "postProcessHash": "61c41b1a73941c06c3deeb890532ae7848b0430f64dbbb45e0d8c7076d999431" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.564Z" + "updatedAt": "2025-12-04T20:16:57.003Z", + "postProcessHash": "bd5bbeaad62d6e4229654b54bce7fb7f79bb7acc0a4df7c38d5a8e1e36fae921" } } }, "b2e9e9045947db36c00975d8bf16f27ba366df3f4c68a977779fbf5a78b77948": { "046cb0e8076cf8c0b6c68469e0acc454e928a24cf0dfeb0b83292ecb2957f821": { "zh": { - "updatedAt": "2025-12-02T22:57:28.548Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "69f8c78a9dd7192f9cf0a1c5436633d814683918858fa04bb2f446ea9d679f7a" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.558Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "4b6701c830b320543ff22211f68de3dcb9496b2ae82fd87a53da2108a258f77f" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.556Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "50a15c131b000a30b244c4f99accb805712b621a48522d7106ef8d095399bfed" } } }, "a8580441e057aef43ff213b00764e321caa0062300adad449c1147c6a00554d7": { "803165c43e8eb2cc396419bba2e85a710e5a34fa1c1f8c024a4ef0cd296866fa": { "ru": { - "updatedAt": "2025-12-02T22:57:12.833Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "f31c14c692c2d439870e2c7150c30d1c0a074c5b4fb93ca45f9bae4a87c3d2b9" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.846Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "30f3a8d9094d10e45dcb119b0a61e90ea813d4635d0115fd60c9791d38e94584" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.834Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + 
"postProcessHash": "ffb811afaa4dfdf9543f7657d891b8b239a93d69a0e4e11bc9e7e696d0cc60c6" } } }, "581f0a6e4c0d192c8606c68934251365ad7ea4136bd5acf7058f58a76f6d5710": { "ee59cd484bdaa73a60bc061cc701d580ffd417f73fdcd689e3fdd983d9f475d2": { "zh": { - "updatedAt": "2025-12-02T22:57:12.863Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "50c626934c76a76e2e75fa9d7c049c0df13b0b5725e77db2d25a13168fd3603f" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.864Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + "postProcessHash": "8107a19e4db88645849555abe6df6ec8e3ef104ac58f8840dbb8a407706a8628" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.671Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": "bfe276699542536286e67e0c4110add1d5cecdca9ff9eb4090bb3dbde9e278c1" } } }, "8d435bf9e6c99e8e1a52f439de6bcbecd2baf3265ece4535053d1e1416ca45c2": { "0c0d01e2f586c0d713dccf1bdfde13a36570342ea30a52d1914566a1af56d594": { "ru": { - "updatedAt": "2025-12-02T22:57:12.833Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "b5fdf0954a5691554dd1c793a5b4d719fcc7984c40ee13ec4f67410eab88c006" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.843Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "43b5ba9d9e7e98e549020b79dde6974e9828c53be7d59b16e36f99c5594da813" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.841Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "127f17089634bba7ad88f57be5d5a89bbe812d477de0e1f40a59c9dbcd2b000c" } } }, "ff15f334dd81c6f832484d8628568a040ff836d4668005abe916911afbffe911": { "5255a26915e56655751575c9c47141ed725215520f648de9ddb2650d95ec7c9d": { "ru": { - "updatedAt": "2025-12-02T22:57:28.548Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "c60574c31a4505724187b7b5cad83eee803524309299521a30db0ebf8bd17aec" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.556Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "3d6724177214f5e12e01de50929a105a2af94385a9af23c66014280f323017dc" }, "jp": 
{ - "updatedAt": "2025-12-02T22:57:28.557Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "ae0eaf09bfb68236c84975e8362f394e74b2ded55ef62b036fe243b01ff40435" } } }, "84d3a07f6bb23015f78e31d1cc93e61eaf670a2dcee7c14342d97b32fb037866": { "e5b0ff50a5b4e2b593b51ad0606dd79a8525ea9ba7bc58e22bd24ad8c5a925cc": { "ru": { - "updatedAt": "2025-12-02T22:57:28.549Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "07b6f56c0b906579b68fd0fd1a209a9c7877cf56147e377a2f16765f515d1293" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.562Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "579f32273f08ca6cc55b0d613a1c72d20317a18e17b2e7511a0e12b1ac2e0566" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.549Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "aba8a8a188565b506f8100d134b363a24314ce75b612fd5ae33b8c2954d405e2" } } }, "54d5d67f63f4e8a40581478b2c6f0684322d03116d22c84c5ebed5934c483f47": { "04a1c4adbd60bd15811afb47b49c06837b0eb88b3c5f243bc17465571d25d192": { "ru": { - "updatedAt": "2025-12-02T22:57:12.834Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "d218f953c26c5b85acae57c8b792f848e540d95a54ec75ebaeccaf5c959f27bb" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.856Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "3f3f40675eb3d2cdfdef0eabec565749eb3a9cf8823f2d4bad4b97b0f020b6fb" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.838Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": "d6074fa42ba9268850aec20b31c2c5758215e6a3735326d3cb91c337fc550304" } } }, @@ -23364,741 +28489,912 @@ }, "44a2121418c10665853a536dedd7553eb6cfcbb6bb546a6e81e42e329c80cc55": { "zh": { - "updatedAt": "2025-12-02T22:57:12.858Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "d6bda9e56bd388821fca08d697b62460ee2da3f7144750f7341c3da0ba109704" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.858Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": 
"e967ecd99a86a93a95b860e26598f57e5b23e313b0c5b3ddc1ea558ff60c3d8a" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.857Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": "188be55915c40ada4125cd538e4ed34362e6f099e6d2ec846e272b53ab64805c" } } }, "e9c8787fbd5d3ab34de4fbc2069baaf46f6986970cc7b8edaffc49a991d61cf1": { "7b366931a91740ebcbb465a17f5142106ecae677c271c9b69d08fa475ef502a6": { "ru": { - "updatedAt": "2025-12-02T22:57:28.549Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "4ead8ffe4624b732fbe4dede640fc270488bd4812504ee0cd5dc64edd8f8d184" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.558Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "60d29a44e24697d707c709e7df3c3f9a75047a9f9278eb33ed079c76cc363d51" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.555Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": "978677d1584966a27761d4f01adcb26eb6b587cf52f014de9038a11c40d5591f" } } }, "14c0bbca8f7e350393ed679d410ca4b9cd58e0c5ee29885f7e65beae7f51c703": { "82258f2bbaceee1cc2b71c162991c1eb92c67498d494693cd385b4bbbb78fedf": { "zh": { - "updatedAt": "2025-12-02T22:57:12.835Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + "postProcessHash": "ae6c618166b1a02456839b9bf23203163673ed14963f4ac519ffe4a98d1be4d0" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.857Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "2532ed3a72f32bd9238b9338740f6f2f40f1bd69d66c019bec758d2438f63850" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.832Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "8ff38c82865454afc973e19b0f425ffb1cfc8af48c150949242c6e38334ad5ea" } } }, "dd1f243e110cd8cd4c72fabd62923f7077ed63859ba2c578b1561943fa5490a9": { "38b8464001ddae6ec2a702908a9a44c1549405c54b818345c5ee01e6079833f1": { "ru": { - "updatedAt": "2025-12-02T22:57:12.865Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + "postProcessHash": "dfd988f9bd931d0f62129fc532f917b6063872e9db7ad418f6fd10456b767cd8" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.668Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "1b51e44d2d6b2b3b520e9f9cab02bed1e37e7741d083185ba87232f90841282a" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.667Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "da1985256a75ae72157e2fc4ca3bf9b061c57e073b251534027a7bb848495691" } } }, "ba14369199fbec0937cc2e6400083d328a65fa21e4191586d4474ff60d50b27a": { "687b275c30319ae8712f2bb22a713be7698df9bf60e3f0a3a92687b0ad5813e5": { "zh": { - "updatedAt": "2025-12-02T22:57:44.641Z" + "updatedAt": "2025-12-04T20:16:57.050Z", + "postProcessHash": "c8d9954849548a3263e9b0eb80061e7d4693c67e20195e446dda88803a55f4d5" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.644Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "87108a83b9186570711f4b9d3511971bfd4b6e9db4b994af38c60d7df0ef54b9" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.650Z" + "updatedAt": "2025-12-04T20:16:57.055Z", + "postProcessHash": "191d52b5d09ba978ab40b360d705b79e405e9e922b3819b2880923ebeeb1a806" } } }, "0aefd7962edb78c98f3d45305d81752ebb2eaf0195c75d6a1cd6e7ad0ef5617a": { "5d1d0d81a87bc16246cc6d195a4d9c9f3090b00692a1dcfb3dd44b533128b6dc": { "ru": { - "updatedAt": "2025-12-02T22:57:12.835Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + "postProcessHash": "25a4d5a1ba038139d8a58c140bd372faf7ee59328c30058d47d051ce6b487902" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.844Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "d9c6b347d5ee84b8a02f64c39d37e1b8d26d18fe687b6f68b91296cca384e5f4" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.835Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": "cfa04f69e3ac27506e0d411b8e1ca17c6b3edf6dc406d6909b5d933d2d8ae94c" } } }, "6b0a1864f6fd70f19415c4e085caeeff45b83244daed33758454b88d9859c692": { "ecc79a94c617ae9c2438b3b427bea3004cc3f1e8a3f90157b36f8157166a99c0": { "ru": { - "updatedAt": "2025-12-02T22:57:12.795Z" + "updatedAt": "2025-12-04T20:16:56.992Z", + "postProcessHash": 
"7328e13acaae794a00686895198fe25cfe5eb067f892c749f14f3899019cbe8f" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.796Z" + "updatedAt": "2025-12-04T20:16:56.992Z", + "postProcessHash": "2cbfcd6a3a217c2df469624b3c932378563e2bf11ca3e6192c22d8bdad69eb17" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.797Z" + "updatedAt": "2025-12-04T20:16:56.993Z", + "postProcessHash": "5edb8c427f4061c4bf532253e23ff06bb241dea7f56f992c9ac0ae9894611a72" } } }, "543d200284e9587853538717503646bf5a945bb43ccdb3b059dbf4eac4c1219f": { "54eb6cb69d7901f33c8b60f1ebf53444695ba214c41ecd088af34c6dde0d4e44": { "ru": { - "updatedAt": "2025-12-02T22:57:12.865Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + "postProcessHash": "09a774cec364865b453196c440ec674f37c8819f6e6e3299646b87a9b8cccd1d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.673Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": "18300940bd3fbe0bce544b3433d34f143c978387e6c622e4704209dd63c3202e" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.865Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": "d5784285e90f51c5e6d51300f6404fd3b1d4add13a1906de20009df03ec7a23c" } } }, "fb3d54543e5565bc4305346ef7c2d5312674405acb6e193ffaf4fb30ddd7ce71": { "df9135ddc19fc1bbbb29d708bd2c3afbd621e4a67a544ede4538a80aa5b420b7": { "zh": { - "updatedAt": "2025-12-02T22:57:12.866Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": "d4e168bc9ba3c7299b663a8c967d8f663174e0d8101eb437e3f876f5f56a82aa" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.867Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": "dc9dec5325ec3b560c03ebb60dfcb69614acd3f1defde76e30af6e73dffe845a" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.863Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "0428993b2a0635368d8d3708970e2d2625a646297617317fedf5f74acf530001" } } }, "14b4676b953c664afb277f933e119c8da2f742590c1a9a4bb7b2beee22a7eb7c": { "5ee021b8f49ccf1b18d5dd6f94a9b7418709365c4195a6b0854ae20f5132dd10": { "ru": { - "updatedAt": 
"2025-12-02T22:57:28.550Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "f7d3aea7e4adb0a83741c09f20eeb272ed66eb3ab09bf033773ae75e9dca8d83" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.552Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "322b9afc639a9106704da961eae5e243afcd2081ba6a9cf78c6a96da5e9a152e" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.553Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "f3d9410444da9ef0d3eac52ca588ad510152ca37a30a3077dabd32c7e3dac7a0" } } }, "0f67bde502826e1dba901c267e553e40b45a88ea2514fac35224b3011c9eee95": { "40ccc189c309d81655c42b58d6550569ed8e72b0cd53cc36991d1ab17eeb62a2": { "ru": { - "updatedAt": "2025-12-02T22:57:28.550Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "333599dc00bf0ed5eaa17824b51287ee845207e8952815a17db448d63e9e7e4c" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.563Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "8933570660f6547d74908b107d4fd178e63f1c1b44ce64193cddefecafc5ba04" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.556Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "ffeb0d89b04c883c01c798c5c901a15dc5addccdea6bfceeb6b9f47b7bb2e713" } } }, "93a056e5b771b1f20f3660dfb370f302960d593ccff14a5684b961c760cac61a": { "b34875547efada966d6f58a27a70b1a17213f7251649cd70a29b9fcfe4aeecfe": { "ru": { - "updatedAt": "2025-12-02T22:57:12.836Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": "8fa36fc7b6781f18ea683d84f55e3c9cb77526c7a5a8450ba81e3a4361a069ea" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.839Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "569971bdf74c2ad9b5664b23a720d68ce2429d66f170ac4259d8bbd15c6826cd" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.837Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": "d3a962ed96c2dc442dea308be94850e0805661c3eb6bbeedaf83ce49e38119ce" } } }, "ebc5db761ec12b7516bddcdbb93d868ef5c7d1458f56a4288fab25b5e45a980e": { 
"e20f9f94eb03e49c98c43e022936ac730a22ccaa64a4911703f457858a10f672": { "ru": { - "updatedAt": "2025-12-02T22:57:28.550Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "343dfcad413c71040fefbd674aefe668766405519d9e0fa38c00675d72fca7be" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.559Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "90820c9820c1c014f58535dd4658a4873488a2c657847aa99b35978254a5a72d" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.563Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "1759f76c0922f45c12f0a7a0e6a42b3b0e76f1ea4cb7d77a59b823b79afd6e9e" } } }, "f016a1612cced253f74884a4791ce47126fba584f3ee773967310982b7597b83": { "cc687fc17daeeb33c7c5bef1a2bc7ce51ba437f92c4354369ab58a024c2123b9": { "ru": { - "updatedAt": "2025-12-02T22:57:28.551Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "6a5618a14c838dcf5923004409d7e1f8ca812655a1cb031a9b1a26779e69a4bd" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.555Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": "c7e64faae60d8f10709fe22b95cef0e05f7789874037f3b456094e010a62b97b" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.555Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "e2f69896abe6a444c8c20a9d5f88d9effa159344247f5f2de38d20e8181668f0" } } }, "f657cce435f5bbd4c37d13d06e137048b8588d93820f3ee19d2b600ed82b6819": { "f4e41d0b3fe1c04866d1690f92f407974255a1b7b269dd34af873b60f54ecb09": { "ru": { - "updatedAt": "2025-12-02T22:57:12.867Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": "8c0214dd58224cfff2b7b1117bcdae1362d1f9f7cf1ae4cb3891e8367e01fb26" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.669Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "7729aa1914b3ae3ec6bde046f8562d1908bc2772e0696dfd0c1233d1972f3af5" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.863Z" + "updatedAt": "2025-12-04T20:16:57.097Z", + "postProcessHash": 
"524c0d09364249bac454cd7267ba2a9e7a79340cdfecbb81781a7d895c9eb8eb" } } }, "5a8a41312c127bc8ee51985dd35b7a34db3722502d9dd3b6517218f83ee15209": { "cdc27bc165065afbf272c456901edc7e818c1288e8bf98aa8115b3cc4184e430": { "ru": { - "updatedAt": "2025-12-02T22:57:12.836Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": "aaab8ea39002d0c440fc5827168d98bdd949cdb200a865f5749e238545956d98" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.854Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "cf32a26a11746b8e1fc32a47f449e7379c3aa3c4f21b94569139b2d8fdc4d84b" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.846Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "122e49ed8886133e84ce86d7a7fb826d78e948ba0d10f27affe3d84fda64aa58" } } }, "bb301384e711a26eac5ab620725ba3651e9a050418e5c4b03409244a6916096a": { "fa37176654ae0b31692c4310f41376cac060e1fac5de1cd5fa4a6795dccc88be": { "ru": { - "updatedAt": "2025-12-02T22:57:28.551Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "fa5471ff89c0226a38e50b999de92d42c1683d5883c57fed41822750f5c08a2b" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.832Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "66f378585eba035adc97329975619f798d8d03c0c7308c6577a26d4086d156cc" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.841Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "f5d1cba45eda9a17dd97ce648afa7a47048f5247cbbb5589efa18b257d58103f" } } }, "be5b2c5f34f09aeff162abaf45ccf882807b091723c8992305ab5dd6d9d85255": { "a4494efc6991ad7d0de3d84b86e624697071ddfce8e39ebd42923fd6777c8531": { "zh": { - "updatedAt": "2025-12-02T22:57:28.552Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "60c6f04bfec3891b311b6c98f310fe5bea538ceb93121b0908b1693775092c79" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.553Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "fa7f17ee6d18ea60185075cb5227ffa2b47976735734b1a15715589d333969ed" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:28.554Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "8619037edd0437b1205746861c21a460513f8e5c092fcb6d01fd73285becfd9f" } } }, "b7ac58ff02407e2eedc607e8ffaadc709667604b213c6400361a10c2a2c6e252": { "ae94f635f518e540a73bbd471cee47b91d539ed719fbffdaf358c667006c4bb0": { "zh": { - "updatedAt": "2025-12-02T22:57:28.552Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "9198365aca422eb022abf08a160a409a8aacdd6df5907b72e87f0f22cc06ca11" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.554Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "8d1884e5b30236d65b1be1d05c476116cea395b4553cdfa3549aeb943fc51286" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.550Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "3339b3303898d39914a3587ccb9abf36df71d277730266f66d6895714b8083b7" } } }, "f2566c10efb98a7e07538653cda7cc2135c5c1aaaef306a48e8e753ebc662a1e": { "86c47ff8f3b3666e1a6b49b2c8302b448389e1e3b41ab3b1450e055082821549": { "zh": { - "updatedAt": "2025-12-02T22:57:12.798Z" + "updatedAt": "2025-12-04T20:16:56.993Z", + "postProcessHash": "8c1816d77d3551c7d6dd5710ccc8274f66e5809dd3cea3606629893483ebfef7" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.798Z" + "updatedAt": "2025-12-04T20:16:56.993Z", + "postProcessHash": "c0defa7229fee6b27c2a2a3244e13cc4c0ed6f0f4c6f6605a86a70bf36b99648" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.810Z" + "updatedAt": "2025-12-04T20:16:56.995Z", + "postProcessHash": "96a408123b4168c543a77c179b015d8f620e6307650d53bdf90c2f19ec9ab2a9" } } }, "c3d6ae1d7c3ab47f1321484233d7e2d4c6960c431966f43a50c94da67e615da5": { "7fe2061b7ffe48c965db16b4f632dfa6a0cb32888881320b91a370311396c437": { "ru": { - "updatedAt": "2025-12-02T22:57:44.663Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": "68c99d929f252ea3754819bd8550b2a2a7794a65733dec05c2fb631087e62dce" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.662Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": 
"0f45fa5978c715f5b70bac97a40700cdb702e95641146e734337da0665b66347" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.662Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": "cbe51f81ff245663766792d694b95ee09dd8d09a492fed40eee4f9251ec512f5" } } }, "6f8f89ce13c70fe1235d08203ef798a559154950245f81065ab893d0e5c542e3": { "f96e0b809311db6c2baef6eea1807c7d62c21afafa50f43dcaed5dc333127e20": { "zh": { - "updatedAt": "2025-12-02T22:57:12.838Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": "ed9e3cf9f31df034560bdea315422808f234d9f9b3f28d82950b4b1f08cd6eb8" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.853Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "d4e7c4f8edc510e1b92d0928a91c4688aede5e5a4e116a12437845e4e733b166" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.854Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": "af42ac78de1b3ef4af093dcb3cab937585c253e6d5c6fffa8cc9537e39d3c8be" } } }, "857f78e82a54d7a2128693b3d739a16697e3d23a8ab3595b336d9da8d6d1d643": { "3fadea060a820d56c666c2cf5cdeb8e49e9c833dfa43de6b17bb735aecf7c763": { "ru": { - "updatedAt": "2025-12-02T22:57:12.838Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": "8d42b622db5275d19c8b2345622ed89f140fdf3c8dc21016e057ced32b953a8a" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.849Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "ae48f70ef7cd6be30bb28a567152140c98665d7b69810e4557151902877df6e6" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.854Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "0fa1d64f7d00e47668b5c6ed702b5207ba1b593291d6965c23fe6a95a4d21ad7" } } }, "98f9d0cfd669fd1fa447820ed42dde75e265419fd66cf20c9292293dd4a825b7": { "ef840aa109bf499596594d13130b402a3f00f31d42de8569556571fe1c214cfc": { "ru": { - "updatedAt": "2025-12-02T22:57:12.868Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "bd159b2a23c7f8c3497a7d329c4ea2ea4da4e350b1ddde5131a909664e590036" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:12.867Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": "0a155db6dab1c70ab5658bf1841efacec4a7ee27ba36ceaba9bd014af7c9151f" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.864Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + "postProcessHash": "70f45a1ef0d746002c2acedc01b7520bac040b80837e2c05be635833b07ce05e" } } }, "0ccba8d2db72b1884bbc46c41967afaeff1aa84c34d44e471d4f0a6956691e16": { "94c625175686dfb070b11d461168883b7020c135e87e95dc215bd6a1888c5c54": { "ru": { - "updatedAt": "2025-12-02T22:57:28.554Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "59bd1cb4c7bd530470c995b1e67c7dabdcf6213fb83ad1d543daf94722afcb69" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.556Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "38c35b4d3cca10d0853f982094bffdd938b92744c668ee76c54a1787b62b291c" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.557Z" + "updatedAt": "2025-12-04T20:16:57.065Z", + "postProcessHash": "dcb769031d0d88c31280de23e339ec94a17a9909c7584b9a1c2a904e2777a343" } } }, "c3624723e67987627989b19cf8887d0607b1cfe3b554bdb9b1a4afe0241fb796": { "394ce4286ff89f65fa6b50578d4a94d4eaf540883591642f71afb2825984bad3": { "zh": { - "updatedAt": "2025-12-02T22:57:44.571Z" + "updatedAt": "2025-12-04T20:16:57.001Z", + "postProcessHash": "a66cd8437683b26f7af6e64d619aabd6068a06add94b6fd451cb715efdb1c2f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.571Z" + "updatedAt": "2025-12-04T20:16:57.002Z", + "postProcessHash": "c5c0b3b4e4ca5818f4374dc058baaa8104f9fda8c2649e574f9970deaec7bd1b" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.572Z" + "updatedAt": "2025-12-04T20:16:57.003Z", + "postProcessHash": "3af582fd0bd98f1c13b20198470f199d3d7217b42d7bfa57ed3d1a32b3b7b0aa" } } }, "3f0eaac3f28ba8b2234626f11889b6f51135f12393d659a739adcfe6bb3acaee": { "b93542926f20e8394566dc0612022ddaf2939a3fdd8e5ae25b2ba31cb94de320": { "zh": { - "updatedAt": "2025-12-02T22:57:12.800Z" + "updatedAt": "2025-12-04T20:16:56.994Z", + "postProcessHash": 
"0e031ad4f20b9cbdf52611ad1fc97d0490095f64cf9bee205cae5dd123428d3c" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.811Z" + "updatedAt": "2025-12-04T20:16:56.996Z", + "postProcessHash": "bf7a090c88fc25b1be905f3362b42619b3c968fac506f55e335238ba09e63dbe" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.797Z" + "updatedAt": "2025-12-04T20:16:56.993Z", + "postProcessHash": "76517663c1b5e21fe516f402de4b06334a38c51059015c4b6d06b8b251767fc5" } } }, "101a525d5bb936cf99909df3325b1ed7ac0b685ee9889c47f517b4323eba52db": { "fead6f3f426b4d09ad7d10dd975751d5778ec0e92cce0f8ec88ce01950911970": { "zh": { - "updatedAt": "2025-12-02T22:57:44.561Z" + "updatedAt": "2025-12-04T20:16:57.001Z", + "postProcessHash": "a66cd8437683b26f7af6e64d619aabd6068a06add94b6fd451cb715efdb1c2f7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.562Z" + "updatedAt": "2025-12-04T20:16:57.002Z", + "postProcessHash": "7c771e7995d98b5339ba5b4491ecc81ed23ca94fc99827cae5103402b977ec9a" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.558Z" + "updatedAt": "2025-12-04T20:16:57.000Z", + "postProcessHash": "3af582fd0bd98f1c13b20198470f199d3d7217b42d7bfa57ed3d1a32b3b7b0aa" } } }, "6bc2d77e531374ac95719fbbe878154b867b374b36d2f4e8485da1fa3a3820c6": { "d33c2c466bd3a7370a079c2bfd8752318550c559a12158bcc434cabdaec31040": { "zh": { - "updatedAt": "2025-12-02T22:57:12.839Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "97d63ebbe33eacb75318984f1bb96c5deb2efab5476d64cd29a50d40ad17119b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.847Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "21f5b073027d0a295a20551badaa77de4bb3cb364434c2320e6f222f61e6fbea" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.832Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "14953c1f212d148bc7b7dc2d3994f16f195b190f0ee1a6321cdb983ff93c32ed" } } }, "43bdb45dd285638fe98614183eaf90571d4631d1e726c04b99db3c3faa08af32": { "4ba84b799e9b0e8d9b223c47606c717ef7d6ddd565986bc7b238eb33165681f5": { "ru": { - "updatedAt": 
"2025-12-02T22:57:12.869Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "061bbbdef7b4442bb86b67d55ec21cec63215c4283cfac8c7cf7744e58f0c06a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.673Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": "a8265952d50ebcfdc89789be02d25b8e6fd34cc838316ac41eaacf191c8f1c7e" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.670Z" + "updatedAt": "2025-12-04T20:16:57.088Z", + "postProcessHash": "a25eb0c3381a78811592f7adf2c919fe3de5884654029b3932871920950b1114" } } }, "fbc3d920f0695e12f892f5ecdcfa4bc88cf0bb49809defb12c39db77838dee89": { "505618685d75d6489d64b01bd2297e8b2e4ce44b92900a9edcf4d95a5eebb475": { "ru": { - "updatedAt": "2025-12-02T22:57:12.801Z" + "updatedAt": "2025-12-04T20:16:56.994Z", + "postProcessHash": "71584fd55158787ba134cbb8d3a0c3613fb8627d2740bffb9b96c977dc3ed2de" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.812Z" + "updatedAt": "2025-12-04T20:16:56.996Z", + "postProcessHash": "9f9ab179d137856b44f914b9f1952d53f661c975160f1af482bbf8f15595bba4" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.810Z" + "updatedAt": "2025-12-04T20:16:56.995Z", + "postProcessHash": "6b37e8d88ff04277cda84645346cffc2e4efbaec3a1127dc857f68a3f9f2f387" } } }, "67e6b09bfe484e48895cf047e4050cb1f08398f2f779e27c7acf3ff69d9c5e8d": { "7b905336c6f753917b4e006f53075b8ba27cb105a18643882989eab9b01e424f": { "ru": { - "updatedAt": "2025-12-02T22:57:12.840Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "492d0f5d8d0aaf468917753fb9f24560fec8ed2b2bf0cd4a22c35bb08165cc5f" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.855Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "74c7e38513e5e670975cf582e5e4335ce7795bd2dcdce704e81dc8eca802e45b" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.834Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "f53d5bde6532d08d476d40468e4ed0f725e2d2c9b51b7a93f6628213e3fbb99d" } } }, "736363d0859d8864ef39d3c2b3906d5ee9e8520ec754a5daaa253102669dbfe3": { 
"4c2ab8cb337c681d306ce35ffbf49cc6acb8d68b78b1f946b2757bbefd07e898": { "zh": { - "updatedAt": "2025-12-02T22:57:12.840Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "9a818e8efcf144aed66c1b1f400b3420628916ac5a200a0980347f5bfe80824d" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.847Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "0dd508fc178a876624b3a6ee87cc3dce094bf54a591f560039c6ce4956a5ab0d" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.843Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "6103f6733c2182c6a52501f1695ed0c67a515b0d263b5d4530942613f2b88d1b" } } }, "f9122446fb42667abd4d27483874244c927449544300488e6be5e19f0f5c5196": { "fc2f22b778e933aded713c592fc3c7f36b5925e3b1dddf870b9f00d0d86f3078": { "ru": { - "updatedAt": "2025-12-02T22:57:12.842Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "caa3c48541fe7aaf1918b518972ba1174ba7aa83280c337617c2020bf250cb34" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.847Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "b59296574bb970fdb8ce9d451e4416985b7ad291ff333397defd7ebe7b53709b" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.833Z" + "updatedAt": "2025-12-04T20:16:57.075Z", + "postProcessHash": "7ab50d47c3d979a4e3c4b017a8a567adc7540b2a6ea1300ebe42604e657c5154" } } }, "235b40c46d2961005ce3297b1e97ffe8edc82de828ff56822b9e32359796e9a9": { "c5ef2e83c2e151559f9dd5524371a9d5b3447d2d1d74ee4818d09823d6de408d": { "zh": { - "updatedAt": "2025-12-02T22:57:28.524Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "519f0f8085c6634af570c30a243224affddd15b814021513cc2a5008a6659332" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.558Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "572d5ed8dc3508cfda34ba0960889ddf1f165b5cd4cc2dbab076864f1e407c77" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.830Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": 
"8a8fa81e297ebd605e32db3a795d8ff577af712ba31a991b09f3760322f76400" } } }, "462cdde9af0d98973a369e372516b17fe788292eab3b5888894d73e9cbffb6cd": { "d745f7b346b2c1bf0d164fbdb236d9160be09038c4c9ffee5d2fe13aaa441118": { "zh": { - "updatedAt": "2025-12-02T22:57:44.650Z" + "updatedAt": "2025-12-04T20:16:57.055Z", + "postProcessHash": "de63337cd3cbaa8841cd41506eaa864fcaec48cec5d74792f5ecf4c5e8ad5e5d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.653Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": "f5f72bc47f30e9d554030fbf022a7dc4e63babb4dff5880b006b4c8f2b1e24c4" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.647Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "2f3ee22a3a89cb53a24567da1aa1d4557cb714c57210a9f0fedd825a5c51118b" } } }, "710ad55c0afad6985a560346d1622540e29d92eadcee6064888e0cacbfeda384": { "54f1a9cd08afe76cfdeea722af528c57303609afdc34748e3328885c439ce7bf": { "ru": { - "updatedAt": "2025-12-02T22:57:44.565Z" + "updatedAt": "2025-12-04T20:16:57.003Z", + "postProcessHash": "fdd83de4abe81a4ed4bb122a604e23067c32dffcd83dba38657e520419b3ae3e" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.560Z" + "updatedAt": "2025-12-04T20:16:57.001Z", + "postProcessHash": "d35efe2e0066999e95c6338a2d6836d4ecc51a7416f08e3412a4befa518da96f" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.561Z" + "updatedAt": "2025-12-04T20:16:57.001Z", + "postProcessHash": "b12595353bc9683f72c5322d26ffdf1c3fb19f79e2d1da0b1ef02686a14bebc4" } } }, "0fb5c4c89db0cb83f6bd1cdef9a19071c391929cb24660f2f66de45b10763ba3": { "23aae78ddaf4de455a27e50918cb30da7db97d56977cd4dbe8df7b2e1cd49fc4": { "ru": { - "updatedAt": "2025-12-02T22:57:12.813Z" + "updatedAt": "2025-12-04T20:16:56.996Z", + "postProcessHash": "04191fe3fee59a776d496d7a2ccc8c8402fc28c0e5df8dcbebc0ae1f63933d1a" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.808Z" + "updatedAt": "2025-12-04T20:16:56.995Z", + "postProcessHash": "7ddf021182820cb62a8d1a7b6d5f2e40827ccf8e55664b97716aa90a96208bcc" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:12.811Z" + "updatedAt": "2025-12-04T20:16:56.995Z", + "postProcessHash": "89bd734baf8aba3cf4c713e0df328b7f6a39b086b4811cd4df5760d775952e02" } } }, "45c65db56b87943c8cc881cc85fe81f875a263a988b758817095b2448ebeab1c": { "ef02a49eb6596c142aa773eb78cf22212510b6f1bb9809d02c025e4d34ab82d7": { "zh": { - "updatedAt": "2025-12-02T22:57:28.525Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "4804ad65fb57bdb9aae2087131d4f787992f6dccc19a450e62271cfa5c710c08" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.662Z" + "updatedAt": "2025-12-04T20:16:57.050Z", + "postProcessHash": "30ce8b03c55a5a9b0353c2e07d37c4fb5b4a099916c00a6611284da91756faea" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.525Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "9d7cdeabcb3f07fa3fce79db2fc40799c79179bae0f2e83b3ce69ae1e3c3aa11" } } }, "b58d28384b38660cb355b5217eb858f4bc83ad7155278c40ae4663b230c74fd8": { "f5263d91719fc0aa0d4dc51eba8629ecf707553c3c6fd5144e8f1ca748775d75": { "zh": { - "updatedAt": "2025-12-02T22:57:44.625Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "cdf9dce53ddaf9dba1e246859b599821a0ef153ab4c5acd68408082637706b2d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.634Z" + "updatedAt": "2025-12-04T20:16:57.041Z", + "postProcessHash": "ceac3d8cff8871746ae78dacf2ff3d4af6e139977fd74cff746880ff3e14b226" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.621Z" + "updatedAt": "2025-12-04T20:16:57.048Z", + "postProcessHash": "a7c30d29fcbdf3fd2b581e7fa039d2ea8d752b662b5fb2f5b3078dc67be51d6c" } } }, "8d7c4ba98d5f5bbc6e42b2591da7f2b20f246b15396b0ab2075839fef18b5697": { "157c626f8a13dd4dc09e8313f1bf33c397d35bf379c354eb9d973e648827bef2": { "zh": { - "updatedAt": "2025-12-02T22:57:44.650Z" + "updatedAt": "2025-12-04T20:16:57.055Z", + "postProcessHash": "634870082c95d1522b45f62a7857160bdc67886b683879bb41d0c890fd99adf8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.659Z" + "updatedAt": "2025-12-04T20:16:57.058Z", + "postProcessHash": 
"2fa50f3fc2507386389b9a857e9e5ad2630e09d9fe5d9c2b57b28a3bc7025273" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.645Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "86f4bc498b5d9b8ee1dd49a68754613cb4a82ebe9517132cb39e331dfa168506" } } }, "4d0528f558f80f4881563682077f001ad134becf467e305c86fc84dd7697b089": { "42d9d42562a4f705923103bf4a3b7173addf1f1dd5adc163a37dbd936aa49889": { "ru": { - "updatedAt": "2025-12-02T22:57:28.526Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "5459db3b31761f3ca76ac094a0185198c84ad1144c88192a14e656f78307d20e" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.520Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "42a1736ec7187aa3cafff4334d578962e384feb006b9bf55434a01547cebf836" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.664Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": "fb96153f83493290a1266b006f0bb1e762a46d537480eb58a9d879e8c5b2375a" } } }, "a5d93e69125f512b3e1f00266e424585b846115536039af5f58cae578c2829e3": { "ecacb8f11638f831b9c20da459d9a74e871ae3943e5721f34aba4985e3a9d9eb": { "zh": { - "updatedAt": "2025-12-02T22:57:44.602Z" + "updatedAt": "2025-12-04T20:16:57.023Z", + "postProcessHash": "f2d38856c09a7f650086f8fda185c29472cc15fca22e4cc6a7d5ca0bf79be929" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.600Z" + "updatedAt": "2025-12-04T20:16:57.049Z", + "postProcessHash": "61416b99de4acf86a88713bfba1053e4128c12050492982a6a97df1d26acdd1e" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.603Z" + "updatedAt": "2025-12-04T20:16:57.050Z", + "postProcessHash": "7bfb420411dd27ed774c6384024f85f04b9d1c9f2c493bb98fecb28cd0e2ac5b" } } }, "5bb6610775ffe555ff909d1e5c0cb423ff2c15c044240729c60b0ebe39bbca30": { "d2a5526c06779a9c79d3b4da1256e4feac0aed57c3171d923a4e08990a784158": { "ru": { - "updatedAt": "2025-12-02T22:57:28.526Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "896699c2a91c0403f0c75aff2e8e39126d295ad1fc5ed6b429c59ec2763b27df" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:44.665Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": "d96c713f32d024be1339439e06ae2058473d9435581f4b05eb4e17e7b481cf6e" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.545Z" + "updatedAt": "2025-12-04T20:16:57.074Z", + "postProcessHash": "d4926dd360f6873412aa0cbd75360ab853ef071498311d02bda96b1526beff10" } } }, "2c61f03a4fe808580cff4e8aa1a6939d84eb12b9a43724b98bab278d020bb194": { "4158e73583a46ee61d2835723076f3fd91bdae28b86fb6f4d6ab8870a8146937": { "ru": { - "updatedAt": "2025-12-02T22:57:44.625Z" + "updatedAt": "2025-12-04T20:16:57.024Z", + "postProcessHash": "bb27c0e8977f8883fc2bacb1f2138b7e10bb5a6de31b1dacded084da94bc0316" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.624Z" + "updatedAt": "2025-12-04T20:16:57.022Z", + "postProcessHash": "786adbbb891856df3581844d21e00478e36dfbf842f3c248032df20ecb3f9680" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.623Z" + "updatedAt": "2025-12-04T20:16:57.060Z", + "postProcessHash": "fe25581377d35b8251e8905f61193ae88841991ce7250353d8ab0b3e5c1118df" } } }, "8fb2e5e5d61ff6b4830012f315c75ccd22ef6f64c4ee7685c2cd3215aabfe79d": { "c393d1a8b5995f5444564d2d762f97bb4815829fdfb74c4739bd527681d89cee": { "zh": { - "updatedAt": "2025-12-02T22:57:28.527Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "fce2531e47b053738c341a053a4f1d5a7a259fa5416e8faa549949d51b616f30" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.531Z" + "updatedAt": "2025-12-04T20:16:57.074Z", + "postProcessHash": "d5cab276a8a68815b617467166bae5efa3fd5a25c8a8a98dde613df12f0166f6" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.529Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "7e615d0aae97110e6a8b2113e47faea783e1149ada9e3db30dcdcfada708785d" } } }, "1a54cbb6d0259ab1c0a7866c16289a6efb190e3d138af3035a9b892ce04da57d": { "35875b5d8355a345f2dea01781d4a86cccffca2873f0f1c8151df687559a6ee2": { "ru": { - "updatedAt": "2025-12-02T22:57:44.651Z" + "updatedAt": "2025-12-04T20:16:57.055Z", + "postProcessHash": 
"d2834ccf8d420fcb7db1791af2c366819503f1cae26bb00ff87acf9844376d8a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.656Z" + "updatedAt": "2025-12-04T20:16:57.057Z", + "postProcessHash": "353653f590f914f0e36c71ea1015177fa6319fb43e1990c3375afba58d480b19" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.647Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "2b2d57a64e6e26bc189f091a8ddf381e8e68492f32a6fbb76c295d67f2d91412" } } }, "c5c96baff0600024d6bbb610d9cae24faf4a22e4f54fbcc16da6eea5801d716e": { "75a61fac01b9a0c4dc6479a31dfe0ccf020bf8c906301ce66ddb70adc32e62a1": { "ru": { - "updatedAt": "2025-12-02T22:57:28.564Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "60de08b9e73291dc6c1821537073c223774439ffb9ce26854d2a030e8c02c31a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.559Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "fbddc0aab46e0e38e842a90822d1797618d82eb5afc9e1d022e431d9a547fbef" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.828Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "2c2835f42eb3989f137eb031dd860631073f7b6eb88cdc373a3ba01593d5b27f" } } }, "be5b364ee73eb51fe865a890d10236c2eae4146ef19afc9755721c110139579f": { "e55f970b0157d55548b665f2a95fc93e3875eadfb7a385687eb591b21d592f97": { "zh": { - "updatedAt": "2025-12-02T22:57:44.573Z" + "updatedAt": "2025-12-04T20:16:57.008Z", + "postProcessHash": "e56c845f189f57f778d546fd7b2102b9ed58a2c25d9c8e0dbb92151e3ba279a7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.570Z" + "updatedAt": "2025-12-04T20:16:57.001Z", + "postProcessHash": "aebd0815e50ec9585a3367a1ef158aaefb63ebb42803f65547dbe770a4339bf9" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.572Z" + "updatedAt": "2025-12-04T20:16:57.011Z", + "postProcessHash": "6351a2fe0f0e41d9b085dad38a029886e4ef6d20c99581d6fdc43ba41b92b195" } } }, "4449f60ff9c38182ac376f1ec8ad4f5c377a1d189bf6b8bd0b3f294437ebd1a5": { "b4657b26faf846e566012308f61103c34dbe662b80add135f7d0720222c74ea5": { "zh": { - "updatedAt": 
"2025-12-02T22:57:44.651Z" + "updatedAt": "2025-12-04T20:16:57.055Z", + "postProcessHash": "8073a43fae69ede3a1ae51b2be353a2bf6836ee6bc9657c6471282179c661f61" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.641Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": "5eb68f4888d23e43f2a29ca42ac2029ee1873a27b20ea38b669d7c7872cfdd03" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.648Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "2eea3a7779228eb500f0b41920165c8dc1bf404adf8e44b1365663b944211de5" } } }, "809f990c2475c0e585de5f7677ad5e69d2c480395ed833dfa2922067881e3350": { "1534d3d5fab78c52b36945dc4157e83845141abc6b963eed5bb780b27e5e23e2": { "zh": { - "updatedAt": "2025-12-02T22:57:44.573Z" + "updatedAt": "2025-12-04T20:16:57.003Z", + "postProcessHash": "304d553f81d8f32b45f191a97f030e329dec694c341cebf2f8d4cc6081dfb897" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.577Z" + "updatedAt": "2025-12-04T20:16:57.009Z", + "postProcessHash": "632ec9152907e487aa044775fdfeb4adea2ef628229fbeb0ec7d5111aac09d97" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.578Z" + "updatedAt": "2025-12-04T20:16:57.010Z", + "postProcessHash": "c4ad0bd0aae172ef66ba1eed1dc722613f248c6cc00edca869716b93af48db9b" } } }, "8184344ce9b79685863100b25d75f907caba31a6f26b64caf68881e98ea41913": { "8fe3205e82057a29dc0d8aaa2e33ec896cd304ef416bcfb7264bf8da1fbaaa77": { "zh": { - "updatedAt": "2025-12-02T22:57:44.672Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": "03a5f5a40f8dc3934220239ca07ac797f8920bb49158151891f1bc9ce44975cd" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.672Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": "d2e64b9ff8d18e67bf0e1b1836e9f5cd5bf7b8d5bf5ec71314aefb01d386f5ad" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.669Z" + "updatedAt": "2025-12-04T20:16:57.098Z", + "postProcessHash": "eed284bc3b5f8f5a17ec5f24a1394fdf2dbc3bfd2df7f75f7edb55b647ce0162" } } }, "0c03db74eb0923183ef12e6e957c01e6d8255d17051d0474807d2dfe15494516": { 
"8d293de1b22941bb10fe562a4e677c7c7472f7d882ef5aadce39c9033dabb63f": { "zh": { - "updatedAt": "2025-12-02T22:57:12.829Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "73d55400f41032841cca8bcfbd887d6421af028cf505d45ff03338f36f727988" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.829Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": "40468e117ddc0eca1aa83dca340adb06436325664cd746635eff46ea5f1092f6" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.564Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "64e00eca368f5b1c24f57423cb65e33e9cda41cc4d35a0422b80e06c226dfd62" } } }, "a1c0860ae09b803ff5ed9c9a0c438bd6b2800982753e32c40c910f32979fca1d": { "48ad888591a6dabb0298398a02a18436095ab5c603d344f9156ff7e7ccdb28ae": { "zh": { - "updatedAt": "2025-12-02T22:57:12.855Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "792d1caba3a0258d816ae731fa188a4112af233ef30eb0ade0b591db73f76271" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.849Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "2e1542763054cf7a72ec74976e88e4243e6d66d200e85ef26acc31b6bc656946" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.835Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + "postProcessHash": "25dd126d05d40e6d16e0aaaf096cf838a65034b4f00e945e2d7060079be677a8" } } }, "86a43cc92512a5c918f3385b494d3169c660273f3661eb8dafdc49055b720698": { "60b60a413c29322369042c265eefb3d9aa56d79f8c71fe607cd1ac9eeb60e393": { "zh": { - "updatedAt": "2025-12-02T22:57:12.856Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "ded3161d5311eaee733d7805cd602dbf10699324922d3a0022f76fc063a926e9" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.856Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "3531ba8b2d1f83b7281805e2e08d7f6d69e5998385d3a5ca0050f885a9594b48" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.837Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": 
"071f842f3610436b701fd6b38bd298de096c98654e5a89031c2827a5c22f7738" } } }, "036300ef3b2f4858d6615f663b03ca7a594a026409e0fe0ca41882745b846afc": { "1ad91e7f68dcee666ce7f7d2260270095678629c6052b5b84bf68dc6d54020c4": { "ru": { - "updatedAt": "2025-12-02T22:57:44.575Z" + "updatedAt": "2025-12-04T20:16:57.010Z", + "postProcessHash": "3c2888d7a09e1627ba18e35a9310b6818aa6febecebe1c99265aa6699bee9ec6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.572Z" + "updatedAt": "2025-12-04T20:16:57.010Z", + "postProcessHash": "de04856dad3d8bc67d26f47fd7e33db5e0730335c39fd675e1b193ef9dc5cadf" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.574Z" + "updatedAt": "2025-12-04T20:16:57.009Z", + "postProcessHash": "83f08dfcd635bcf146e52a19e0188e81904d73447650c5799a628e1f6548834f" } } }, "586898784b2000de57eead4932f64db3ae6900471f06aee84b184f3bf8efdf12": { "9c727f0fda6cea3eb8d9add0737f40fd7c2a246e0b779e6a2ea7559741c3af0b": { "zh": { - "updatedAt": "2025-12-02T22:57:44.575Z" + "updatedAt": "2025-12-04T20:16:57.004Z", + "postProcessHash": "55daf6e6e1636043971af6d76ef01f5ce2a1c834320168eefcd3f636da6b9fe7" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.574Z" + "updatedAt": "2025-12-04T20:16:57.011Z", + "postProcessHash": "2a93c2b6162732704de3342ed51f84e6da676ed91b6b248751f2802b84540b09" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.576Z" + "updatedAt": "2025-12-04T20:16:57.004Z", + "postProcessHash": "9cf4cc880cf7f0fcd8a1fd165aa8baa1ce1e12e2aa8f14fcdef93927c4e3b6cb" } } }, @@ -24116,611 +29412,752 @@ }, "5962997760b38b2cb309d629f1dcf48964113a84f277bdc508e98c8bad0fa965": { "zh": { - "updatedAt": "2025-12-02T22:57:44.612Z" + "updatedAt": "2025-12-04T20:16:57.049Z", + "postProcessHash": "e57be5a38a35056e0731c1c24daadd1eadf503373a393ec690f196c7d1cab9ae" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.612Z" + "updatedAt": "2025-12-04T20:16:57.047Z", + "postProcessHash": "5d2c2c3ea0ac2596c96eab4ad2d20d1a72a42fe6d80985d5544abdf8ab2259ad" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.612Z" + "updatedAt": 
"2025-12-04T20:16:57.047Z", + "postProcessHash": "6def404d595e172f0aa922586d045d28eb0f06e23225e801949ed59e974fee7e" } } }, "e79b575c27312875f3076748b2d4de3bfd78216748310c894e316b5c6b915aa6": { "7a7699a4379151bff326d63b86c2e5f5b0c36a7de56625710bbef094f9488e4d": { "zh": { - "updatedAt": "2025-12-02T22:57:12.839Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "3d54240fe31b763edad2e0363a2da20ca7d0492cd7987091c22c8f1621b1f19a" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.855Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "18246fac39c18429893bfabf32cf0c8993b4a61c6236d5bb4e6f97459582422b" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.840Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "2dd83f5a4c2ce5580f27d2ebbfecf5cf162227dac9a63a831b43a22f109ac7a9" } } }, "c74acd4897e7b7ee4b2df0bff72a35b3b8acbfe976eaa8215f2fcfc031f94ccf": { "720c459362ca150d27eb7701d7e48ce41817e1142bf4ebb8b4e2a87705715ada": { "ru": { - "updatedAt": "2025-12-02T22:57:44.621Z" + "updatedAt": "2025-12-04T20:16:57.021Z", + "postProcessHash": "1b4e6cda6189456452254857006b4403498ef4b666c02b1af1328a0022cc1cac" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.628Z" + "updatedAt": "2025-12-04T20:16:57.049Z", + "postProcessHash": "4b6198ef3035f486e26a5153cdbf0c1f9f3fe99a2e51dde5e6b6512898a6806a" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.629Z" + "updatedAt": "2025-12-04T20:16:57.034Z", + "postProcessHash": "6767b83b8b66f340b28a7000993c89ea59d8a039e5bcc60676b2e2de3a5bb9c3" } } }, "503329b0d4a76ca6bed899e9672f8b552300a0c87af309f4216ae734b9861fd2": { "675e12d63a5beef8dc9c071b80bc5249b9dc320e87ed8e63ab1dba75742d1c49": { "zh": { - "updatedAt": "2025-12-02T22:57:44.622Z" + "updatedAt": "2025-12-04T20:16:57.021Z", + "postProcessHash": "b704c606b356f3a171fba70c6ce71bc5c099843b54d30e01dd00ac95741bdb85" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.625Z" + "updatedAt": "2025-12-04T20:16:57.022Z", + "postProcessHash": 
"2d93877d4177e77fb15d317a85a027c58ac401aa16a3a0e0d36b8d7086cb3b83" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.624Z" + "updatedAt": "2025-12-04T20:16:57.022Z", + "postProcessHash": "5201ebeebeb0f38fad123dec53571d050f556a411b01764acc191901d2166595" } } }, "90f0e15a1b59f060a6f0f952d87af6522508eab261e93dd1ff9d2f135297bc7b": { "b323a03a283828a8dd2bdb1310eabc167e779d51e7e53bc928a0c3475022c6ed": { "zh": { - "updatedAt": "2025-12-02T22:57:44.667Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "af197fd421aad318b5acfc40e94f1353338a16286ce2b7940867b5014bebc20c" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.672Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": "ecdd44f368cab2d6b3163104185a7ec046bc8e500863629786ec0844ac478849" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.668Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "5ebd0ae88693b6ace149c505ce402968900d3041055c87ce07b329fb941a2dcf" } } }, "e1777c4c468ab2516b850e57b6f6bc5a611e182371ea737b4494074aa581da40": { "c93f95ca1da1b0eee11a33d644aec21a8b55b826129592b9eba161908812b369": { "ru": { - "updatedAt": "2025-12-02T22:57:44.665Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": "836ee4e7f66847d750737872d9d4a59991e855597fbc8e3bd71bf78abec1c59b" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.523Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "a9b5953af0eb576e8ccd31667226ac22e5dc58fbf9a8b1ffc47658b1f929d2b6" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.545Z" + "updatedAt": "2025-12-04T20:16:57.072Z", + "postProcessHash": "4ad47bfdc7bcdbcdece3681127236bbd12268bb44b79e46724f82eeecd344f11" } } }, "64e0092d1db56a02e6a5bca8f0b5056cf1f521390ec3925bb3e50df81aa7ac85": { "9a5dd87bf7b220294da0bc415b255ea64029a767c79b1e6a895b5d3d57801055": { "zh": { - "updatedAt": "2025-12-02T22:57:28.519Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": "6705213bb38bb5cebc1c31867a4fb1a8ac67499ddc647d49a75b98258dea0828" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.521Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "c0424f2e2a53b60e9ff3116e0f534d15b1c7ce7d255c701b96fcdcec4890c2c6" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.522Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "14f815cddbeb52ab3431dc67df5448f3427371ee5a0030625cea835f6ec3961f" } } }, "d012409948884982e8bdf1e450327b34af2546383469b4fd132b635459e6f305": { "95aa9403608d32399c22cc7fc263d9ab30a605eea3844947170400f89d7e71d1": { "ru": { - "updatedAt": "2025-12-02T22:57:28.525Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "2943af782b90e30c378c93d5a5f7a78c15d59e0ac6a8c5a8411ed22d9a3d56bc" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.519Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "f27c0c2cbf8ea5537615adc29303017de4f60a5a4e9ace846574deb87dd3e36b" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.665Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": "565173b9963589c92d99543cdde600cc8e43f420fe4f56d8f9bf2208312684d9" } } }, "00174dfb396f321fadf5749558a865565bf4dae8cc5d6fa8f305ef68a7f1c6b2": { "d2f79ac832b7a2d7aaa410633fb001b9e95f4660cc65da2bdbe34ab52df0894a": { "ru": { - "updatedAt": "2025-12-02T22:57:28.527Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "a1273037a41f448e543a49cc6b332dcdbdaa76e4508491ee3da2a5c2167f6c54" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.663Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": "3884ace469a387c8b6ac18e67d74191724952ebc944ff8743ce8561ae5b26bc4" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.667Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": "695036bf7e01eeee7d215530e63fc8ddaacffc258d6ea3f9d4f62d2125600d51" } } }, "774db99efcf187fd85ea22f0f07cfb6cf5fb6cc68251b2913b976e914e74a951": { "cc59400f1e7b6cc7c2ce5902dae7bd2a641bff181193f2f3f16b2cc24b094add": { "zh": { - "updatedAt": "2025-12-02T22:57:44.626Z" + "updatedAt": "2025-12-04T20:16:57.028Z", + "postProcessHash": 
"8906063e9e65110a81c13e0b9c9d2e387932429614545c441dfd986b3d5e3ebe" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.632Z" + "updatedAt": "2025-12-04T20:16:57.037Z", + "postProcessHash": "27ee50bd3c1244c541a78248e2ae5bba4ed1718c59c5eae4a9500df066c0aefa" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.621Z" + "updatedAt": "2025-12-04T20:16:57.021Z", + "postProcessHash": "7021bcca7d62ee663124e800241dbcb400244493f00cc139410d912657fc36d2" } } }, "4da7a2a8dcc0e8244d17285e749f8d2f66e7c939010b06d93f9506b5e0443395": { "5d4659d3e6e8c514f951b33a0e387bbd5340061d0fa6ede0b8d63a27a889570a": { "zh": { - "updatedAt": "2025-12-02T22:57:28.528Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "4f6e6c2465dcb5e419cfb4490536a5e632782fa47282d656d2cc41f1a0173de0" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.535Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": "af43641fbb074d300c972b709809ed688e65ec09542fe184fd5297f1a0367746" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.666Z" + "updatedAt": "2025-12-04T20:16:57.073Z", + "postProcessHash": "7c5b8ba9f23aaf091dc52813bddf19fbfb93d209eee0c1e6613c74cd0a30f883" } } }, "8a9dc951991e7089ccd4e1eedd2df9ce190a4888a63408845057666bec28693d": { "3ea6e01fdab2aaecd5561d6a3738320c4c955d0937ec5157cb9ac2e69e3fa30b": { "ru": { - "updatedAt": "2025-12-02T22:57:12.827Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "525b1b5904bd3a4f1ede52fea7480e84df3a895b478e17d8501531e38f4ba0bb" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.829Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "1bd517214395f0da20913a0edf5248fdcdd6177ded8d65d15996da8f3678e9e4" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.565Z" + "updatedAt": "2025-12-04T20:16:57.069Z", + "postProcessHash": "5ac84e9d462918541c5dda231eb1f52b194d0965af30acd1b0f408126ccbc350" } } }, "e0416bafda40f9b0abd3190774a6d8b8b6fecab49f9676913bac6e5e053b382e": { "aa3e533069b101ec06bf29cb5c1935709f54b0a36858f4636f093f238b277647": { "zh": { - "updatedAt": 
"2025-12-02T22:57:44.651Z" + "updatedAt": "2025-12-04T20:16:57.055Z", + "postProcessHash": "6654fea34bc9331452f1f41f95104e4eed60a2895b61576c1e18c6de058c5261" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.645Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "7b6402f41ca1c97095c4db7170bc95383e5618534977b9876f767883e3da27c9" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.661Z" + "updatedAt": "2025-12-04T20:16:57.059Z", + "postProcessHash": "85db98062773c3ced6f8d4c9d920d46b6af70c01ebfc2f063b2897538f311ce0" } } }, "6bec8fb9d627bbc8d58479b40c1ff2e2105bf84d0574e514ce2d4a909b35d280": { "9892fa9d4ee47152dab0a70403163228e13146e378a484ac01ec35395c96a186": { "zh": { - "updatedAt": "2025-12-02T22:57:12.853Z" + "updatedAt": "2025-12-04T20:16:57.081Z", + "postProcessHash": "3f97f5ce3a5135a571368b8ec9dfc44acfe51f8829295ae38a0216ebe2c79852" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.843Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "340ef0ab5180da2ea399e1184659d15f4d800815ffcf1760699f636c136b4d75" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.842Z" + "updatedAt": "2025-12-04T20:16:57.083Z", + "postProcessHash": "6b47d544d572cac89965bbe6eca8b89de878792837d01762c3545866eac862a7" } } }, "d600a99ead8b0977fbdf31462c610327f9207f07a47047e4cfafebac76ac6789": { "ba98a569e23d5a0b5a2bee157907242c18d05d010d12a96d4526528db77500b5": { "zh": { - "updatedAt": "2025-12-02T22:57:44.626Z" + "updatedAt": "2025-12-04T20:16:57.028Z", + "postProcessHash": "b440aba6aedb14d8b49197e5b73a22be15fb4100ffaa22b66edc8fe9700a50b3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.628Z" + "updatedAt": "2025-12-04T20:16:57.033Z", + "postProcessHash": "78385117d611fbb9ccc8936ca1a5e91fadc350a437c5e6213143383b1d4681c8" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.649Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "79f0ac36b636ffa27a801c3b37c5a05877df71e945e61ffe2075b52802e4c1a4" } } }, "6c4d95e5c9add2129eec07c7db776b15731e42064678712cecf1b19d27e9fe1e": { 
"26bab87ac6555b58f09e971a206121597dc934bf1607e0bc1d1c1ca74b3c8ab5": { "zh": { - "updatedAt": "2025-12-02T22:57:44.626Z" + "updatedAt": "2025-12-04T20:16:57.029Z", + "postProcessHash": "03b3c36f8339c6f2fdba3e82b24c140e8cbf1e911121e4e0b6eaecae31c9b1b8" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.620Z" + "updatedAt": "2025-12-04T20:16:57.021Z", + "postProcessHash": "52821f095c2415db10c2dca47b1c3996708e19d4220cf820c1348f82239305b9" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.628Z" + "updatedAt": "2025-12-04T20:16:57.033Z", + "postProcessHash": "2bffbbf8c4e580a60031e783c9b80a917217cdf1b1300cb72155bfaf975d9ab6" } } }, "c97c8d3fc1255144232e48ef1068845cf9a505bf268924eb00d02e4a764b06d4": { "cbf44b30af8d393437b434943a6b72c84ddfbb0c5021ffa6ee01fcee470fce64": { "zh": { - "updatedAt": "2025-12-02T22:57:44.603Z" + "updatedAt": "2025-12-04T20:16:57.031Z", + "postProcessHash": "16318728cd79e90b5a7be57591fab378a0bbd83a947052bc7c5f0b5745651673" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.601Z" + "updatedAt": "2025-12-04T20:16:57.021Z", + "postProcessHash": "d070a83781226d5f96f72ae5d7f9ce826325e7c75aa5e539ad4d19e1060d1adb" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.602Z" + "updatedAt": "2025-12-04T20:16:57.028Z", + "postProcessHash": "d337dec79e7617affc078de5ea0ae84861cb8eb5ad6098536fd3a0f593bca97c" } } }, "aa965228754f809fd54c3e57e8b77d0a2e9c4a048e0e68cef7ae8c333114457a": { "f9ce484d23646e185c37dd955d8f8211aaac0ff9716bb25cc7a6c1dfc7722732": { "zh": { - "updatedAt": "2025-12-02T22:57:44.652Z" + "updatedAt": "2025-12-04T20:16:57.055Z", + "postProcessHash": "9503c3d1f69c8825baef824f1528bc0bb2b3e8dfe0478382f2da8df3ebc509e0" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.659Z" + "updatedAt": "2025-12-04T20:16:57.058Z", + "postProcessHash": "b8d1cb08f2dcc58a31109d5a9acb65feb9decfaba3a792cccacf273b77ee7607" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.642Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": 
"65c95b387a59ecae0ba7efbb103fbd9defdbc7fa7980b825d124594c9ae80945" } } }, "db4a603afaa721633684ab401b86356ad8252b3e4987d3d7f3a1c55750046ef3": { "c71c72e22f263d7e5cb4b6bc6151025b50d1a6999e50ff20143e7d9570eab7e8": { "zh": { - "updatedAt": "2025-12-02T22:57:44.652Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": "f080705bab0cf6c730ca1b98f4b2d1cace5f5ea774a545034c4b22cd807fb41f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.654Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "a83f89a51480901146348052c7bf4d86bb0100e769064d0a7f0470f84f68a630" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.655Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": "00ef1cff36e55b37922eb94720f2f12a81e849ca99532e0c56a261d4bff40328" } } }, "cde7d435635c91652b2386bf1619cb7ad40c1a75333e02d9abeca5d374b5fcd2": { "7ed8aea2f22f07b5e6da1bc31a668115f599b57278bd5f78ed4d027851ee59f9": { "ru": { - "updatedAt": "2025-12-02T22:57:44.653Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": "3cb9d9cb5d5691f81f47d7d526def332c2ec3c80d1d18f50bd6f6cf123863c2b" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.640Z" + "updatedAt": "2025-12-04T20:16:57.050Z", + "postProcessHash": "4d6b555a5612f38c948aded9f09f1833c7f83d0f16535d41fac77b625d50250c" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.646Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "8ce0421aebfb1435ed08e9a1ca65eb5d3a54babb7d02039c007557f3898925a0" } } }, "baa5800841c33574a763c76d84029b7167e28cd0e383b549d3c87bdde30230b1": { "4e66ec48e4681668b3829e07df4225df08079780a33326c20145dbd63d2cf115": { "ru": { - "updatedAt": "2025-12-02T22:57:12.828Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "a3e1431145e4820cfeac27708b19095df00f2180627147668b67ba010acc53cb" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.560Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "4b44f18b0ddedb74045f61c47a88c174bb7714e89989b92f29093af11ae54d97" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:28.561Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "928622a94290e5f9b1135290c7e7ecfecec4a052cf03ce15d427f01e1bdebea1" } } }, "6999f92f0023fe1dd1e922ddaaf1df722f316e49e43a1f46219683d3add8c812": { "9280cf92c0f64187017d3e623d9d06cf5122c9cca98da66abea3317bbf634e3b": { "zh": { - "updatedAt": "2025-12-02T22:57:28.530Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "6ff97b094763880e16d5d718c47c75784ae23f08beb66d4d12f81058fb0dd861" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.663Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": "897b020f973f736f9193a6c129f226d928b1ecb6e212f973125498e8e7cf40cc" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.529Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "3d36090e5d2c4b45b214577349e9f255fec1b189eed042a130f6ec2b593e4a44" } } }, "534c97b1abac407b7ffecba5c237d20ca3ad4c270a44ed90b44e77de585a610d": { "7ba7deb86c597b598ca684677abf36c48f1d224dfbe3c8465bb1e2b40a280f81": { "ru": { - "updatedAt": "2025-12-02T22:57:44.604Z" + "updatedAt": "2025-12-04T20:16:57.032Z", + "postProcessHash": "302200241db8ec029ce1fc1be1327078a42f95f7dc85e57fab7aae64a6f4e229" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.604Z" + "updatedAt": "2025-12-04T20:16:57.049Z", + "postProcessHash": "7d132f8ed0294f97dc3f86f7fa91d2d4082704018343dbfbc7ca190d18431acc" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.608Z" + "updatedAt": "2025-12-04T20:16:57.044Z", + "postProcessHash": "13f52d5e4ad688267a5106fc924dbc7eaff735fdb1c1e8666f68d4f6414883ea" } } }, "2c9a0b8bcdb6bc350cecc5d8a2a8571d9ab75452db549ce31e1fdb37159adb97": { "a30a7c92ea08285c067ff0984feefbb3785f4b1f14d7715bfc668fb4bbc9261f": { "ru": { - "updatedAt": "2025-12-02T22:57:44.627Z" + "updatedAt": "2025-12-04T20:16:57.060Z", + "postProcessHash": "163ee88c0791f2cd745adeb64a5fce934748418d31dff380d5d842e49f43ed6d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.630Z" + "updatedAt": "2025-12-04T20:16:57.036Z", + "postProcessHash": 
"309f6768c941e49d6bd873adbb734378676147513e83acad9f3362887e026322" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.627Z" + "updatedAt": "2025-12-04T20:16:57.059Z", + "postProcessHash": "3282d25f8c692eda0d520714d586580287fdf9d60bce6a95efe8f86598642dbe" } } }, "89191e0f0f3ac7ad6fcbe90e008723be94527b1dc5730c24b0ef28b7567b621a": { "db61043ee1c3c508cdf7d9dd474714bef6965ab628e609c3b20ddf986ef02cc9": { "zh": { - "updatedAt": "2025-12-02T22:57:44.654Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": "3a60ec5c6f32e2ed28830463ddf8cb36ca67363c82cbdcf1c20a922abb8a666f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.660Z" + "updatedAt": "2025-12-04T20:16:57.058Z", + "postProcessHash": "bd104930afdc23bbdef920b1961eefa1416d8526ddbbed7763859b2e49ca7224" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.661Z" + "updatedAt": "2025-12-04T20:16:57.059Z", + "postProcessHash": "1ca59c0d5b9e148aa49b1b71892dd8f7756410aada66cac7c4e4325108c62d79" } } }, "e9514b207fd2f0999e54604bcc5f81ff6fdaee6511cc23ec24b5e33bcbd7a748": { "9824c5507b882758b8df0cd7ac8ec6f8ec745839288f88d8cad0156e2ed55258": { "zh": { - "updatedAt": "2025-12-02T22:57:44.654Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "99e9cd074d76d74d290b9d6d4126a078557e51309a89f6103debf219016fdc92" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.643Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "12b5bec61f8b4c4ea8e3add4e50e71502d03e77656bb8692b2611015ffc5728d" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.644Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "0dbbed0b0696605e7e49845d0710f792f6ab4dd5a146d59be9dde9a2caac8fff" } } }, "bb75403cac8908b2d1e0f7435d3c432ee901f13dfdca991fb73204120a85338c": { "0a7663696896ca536cf8c5b6b0059cce8944689bcec816f2b5c5b41720cbd804": { "zh": { - "updatedAt": "2025-12-02T22:57:28.531Z" + "updatedAt": "2025-12-04T20:16:57.073Z", + "postProcessHash": "bb171f436c182f20fa87920fbd5526429a924f8fb07d16208b586fb167ddf36f" }, "ru": { - "updatedAt": 
"2025-12-02T22:57:28.537Z" + "updatedAt": "2025-12-04T20:16:57.072Z", + "postProcessHash": "db1b290173ecf75bb6219030a729ffc935b53aee3ce33ec9ab8cdb7d3642a673" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.521Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "9e5201f807825a3dbd9bc4122798d7fce0d6cbe7a829d0f0c5332919796d5c2f" } } }, "c66448d10c048389547620c0efc34decc72e9f80bc32daa2c49d957e3c02fa1b": { "1f29d5a37e6fed39b5f9602645e28d9fa470dce74a39a6c598dbd0a16867a37c": { "ru": { - "updatedAt": "2025-12-02T22:57:28.532Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "fb65084f202689f4ee34716a2315054086519aa0cbfe0c514ca7d1c7017d83a1" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.523Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "1af7c5cd828772a70e5756398de03007e47190f46d1528189efceb2618fdec6c" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.528Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "550c171f01618c3f88ebee516dd1c84e88a03ab69928b3ce2e5c8ad50661a6ac" } } }, "7a098dff053dea397b97871863eca7199375f5d95f819134c433310d813f3ae4": { "ea322771a5ea71a865948471da4a31d3c932f43e7f418fbd44d17ba4dd564761": { "zh": { - "updatedAt": "2025-12-02T22:57:44.605Z" + "updatedAt": "2025-12-04T20:16:57.049Z", + "postProcessHash": "cf9d893544817147e146efc1771b9a1ca0f0e5eeba0d457eaa5484318fce0877" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.607Z" + "updatedAt": "2025-12-04T20:16:57.050Z", + "postProcessHash": "1fb2f666517a50ad85b57c6756b3c2fbcbe69aab1e666c39312a266b641a761b" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.606Z" + "updatedAt": "2025-12-04T20:16:57.037Z", + "postProcessHash": "71f525fa17cfbbf6538717b0e1d2b1e002b8322e2cebf89397fa71892eb86fc6" } } }, "a36c558e3cc8eb2a3b03c01a4286bfac9d72237977464d90e7395a10cf2209e0": { "94ce7d6626e94f915dc3f8c3c80748074f7c1a750f5800beccd7406817b5d19f": { "zh": { - "updatedAt": "2025-12-02T22:57:44.655Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": 
"03874ad730b8ccc983836110b6604480a9737c1837b6bfeb35232bd6b437a31f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.660Z" + "updatedAt": "2025-12-04T20:16:57.058Z", + "postProcessHash": "5c669066a06859611730bd1e474aa246bd1ab5c3a3af81808bfb645c65afd830" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.653Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": "1d1e815a48422873ec06758a996fd1e2361d7667443f494ffbe8db3e0bcffb89" } } }, "68ae98d78891d0611568e05de511ec72306b7b3511df399280a7ae2c79b3ee06": { "33c7517467d660435f217ea64c4bf7d1325b67636ba929b3ced122cbffac2355": { "zh": { - "updatedAt": "2025-12-02T22:57:44.574Z" + "updatedAt": "2025-12-04T20:16:57.003Z", + "postProcessHash": "4594edfe86ecb060298ba8335788c3640098f88f47dc0c9d395ae171a6e962bc" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.577Z" + "updatedAt": "2025-12-04T20:16:57.004Z", + "postProcessHash": "ebab7dd27fca2e09d7ac04a138a5490bc5a9f4535402f122012e1ba7d066bd56" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.576Z" + "updatedAt": "2025-12-04T20:16:57.004Z", + "postProcessHash": "82cdc6d922cd3ae6acaca325d604fb683f0de235fdf3bdff892a7bbb0b5b4d4b" } } }, "561284460b1fb2a4c59ce07e83be4fee1a8ff052b66a64ff66141a296715102c": { "30382cd05cdfc447ce68389ab117d0b72fb4faf154b6c67bed6c57d0ed565d98": { "ru": { - "updatedAt": "2025-12-02T22:57:28.534Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": "c2ec88bbed1ffe486869335e72b7c26cc9a05fffcbb098a5636659b0fbb4cd3d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.538Z" + "updatedAt": "2025-12-04T20:16:57.072Z", + "postProcessHash": "78225e046c02cb6715aac32fb41f664e99a36fecbf97f4ddfddc520a8f888b11" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.545Z" + "updatedAt": "2025-12-04T20:16:57.072Z", + "postProcessHash": "2c9a0809aab254d80deefe22e1f422c665e84c1b9eaa75cba91d857bd0ea9f7c" } } }, "8b014d0b3ce023d8c15fd8c5eb2d350cacf9cf3c41dd4b69ff25dd2351d35db0": { "891d96677ae497189e4ef48d65804e3b886d35381aa01b9dd409f5c32ee066aa": { "ru": { - "updatedAt": 
"2025-12-02T22:57:28.535Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": "db2275d81a8a3a743c52a25b99297136dd54e81cb112c2ea658fc6e0c235bf02" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.544Z" + "updatedAt": "2025-12-04T20:16:57.072Z", + "postProcessHash": "88d16b7eeb4ba7bd09d8419a2806607334aecccb9ff20174da139fb528ed8fd4" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.536Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": "1b90d5674bc43919be31dc5a42d57be339a0e221000bd70017b3245af84d6d20" } } }, "82fa28546b5677c4d98f580e1e222959c159ae3d9905e0932fbfebe2ebde8218": { "5207e407e3f1eccc511c0aaa51164bd35e4d15543e26e8e004002a81d42f5b90": { "ru": { - "updatedAt": "2025-12-02T22:57:44.605Z" + "updatedAt": "2025-12-04T20:16:57.033Z", + "postProcessHash": "0e6d9fafac4b1c04ee98d47978a9852b82f14b5d9e48bebb383a365fbeaa02c3" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.606Z" + "updatedAt": "2025-12-04T20:16:57.037Z", + "postProcessHash": "25b9e10658488ec6e14643aeba99834cce0e847f4e40dc8e1b42ce8a48ae56e9" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.608Z" + "updatedAt": "2025-12-04T20:16:57.044Z", + "postProcessHash": "66cbadbb1d85cdc4ed7875ed010ca5639fd440cf4fe3522bed349a999756c542" } } }, "9e8c51287c7f24817ec004d3002c1ce3b305ec30df1100b9d028e5ebc63461bd": { "afbdd8bf1a036d21dd54275c5ec03df46552510b37adf7a05917d6570967651d": { "ru": { - "updatedAt": "2025-12-02T22:57:44.629Z" + "updatedAt": "2025-12-04T20:16:57.060Z", + "postProcessHash": "83d19586289b3184132966e6d4f19dcaf3f6aeaa704c626db16cbdf9180edf07" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.638Z" + "updatedAt": "2025-12-04T20:16:57.045Z", + "postProcessHash": "0a0cbf386d1f932aaf70ca4512a0316ec965267f6804e06dc00ec997aacfc6b0" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.636Z" + "updatedAt": "2025-12-04T20:16:57.042Z", + "postProcessHash": "7c4abcff6123195246443d21f5e8de84806bb2d8cacf71b69764beddd8d931f7" } } }, "2e86bca26b2ac693c6c25e4a60919c546b7872ba88d487d37cba83528dd4c1c0": { 
"82625a723fba7e62c237b3557661bd75bff3e41b4de031a888fc315f70bf8f60": { "ru": { - "updatedAt": "2025-12-02T22:57:44.656Z" + "updatedAt": "2025-12-04T20:16:57.057Z", + "postProcessHash": "0c74ef48906eda3d722896d96351cb0202b348bff5692813cfc602f6434924a8" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.644Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "c94e329d96a4bc5e1fa15e93b3a0908e0f47b06633a44b0c917f3ef8cfdae6ab" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.660Z" + "updatedAt": "2025-12-04T20:16:57.058Z", + "postProcessHash": "ff110cd5a0a09f204d9600a9952215dc751b5fa312144864f47bd764d03cce1d" } } }, "03f4f6675beb54a72bd7e3d62bec8c07f1c24ef51dcd84e88ba10e86e3a5a9b7": { "eb1beb44798239cd7a4b527f6d7acf65bd7638560f8fda08cbea63789789cbab": { "ru": { - "updatedAt": "2025-12-02T22:57:44.630Z" + "updatedAt": "2025-12-04T20:16:57.034Z", + "postProcessHash": "d4e1d3f3a88823acc2cbaffc89f728261f36b891ff7328529017d519f4eef950" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.624Z" + "updatedAt": "2025-12-04T20:16:57.061Z", + "postProcessHash": "5047f562d1ddee3f7676e832e3fc524b46e90aa608de48b80f0c6c3fcdf7926a" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.622Z" + "updatedAt": "2025-12-04T20:16:57.059Z", + "postProcessHash": "c0c69bd145c09734a0bc829c2ee4cf005b3390000d72d973e89699a1b4d43a1d" } } }, "32ffa87be40ab5f31e20d44d8997706429f8284873cee16bf953aa7c8a533e87": { "987df6e0573b5dadab1d721fb8b42546edd8a72a4c4ef547c90da774cfdc0384": { "zh": { - "updatedAt": "2025-12-02T22:57:44.656Z" + "updatedAt": "2025-12-04T20:16:57.057Z", + "postProcessHash": "4706ac0730d08f6f1ab820b3dd23f714ceac70570c4ddbee03d9e03c2454e913" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.649Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "c1c4898801d8e733cb618ea11acb6b5304ecd7aebbf2e3ceaed32c48cadcbaec" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.651Z" + "updatedAt": "2025-12-04T20:16:57.055Z", + "postProcessHash": 
"9c6d049c1e5b458e861d7ea8cbb6cf42e379708db3305aca6458273bc6c6c9cf" } } }, "a2e55a90379e6ffc005d5cc760c9bf50e3a6631ad77cd354c2d442860ad851ea": { "a0801c6bb244ad72c6b1b26969b590462545f49f3c2c06d4078fe79f62be5841": { "ru": { - "updatedAt": "2025-12-02T22:57:44.630Z" + "updatedAt": "2025-12-04T20:16:57.059Z", + "postProcessHash": "a7421101de676d8725f1e9a90719830f048cf7c4f55bba40e976fce06b8a79fc" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.633Z" + "updatedAt": "2025-12-04T20:16:57.037Z", + "postProcessHash": "4f90b32df9ea27d54ae0bfbfbb7d26993d436a1d5d3d5f3eee31df9ce95ceaf6" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.626Z" + "updatedAt": "2025-12-04T20:16:57.027Z", + "postProcessHash": "b9baea647e814630f53f82c26636515f4cd26d5922935bb2ef9c526c09add42d" } } }, "9d795e71c1e5b5159a55b2c1f0aef5b2b5ba275de3636e7962e76d9cac324863": { "e14d02d5377204ff07364b01b4777caa9edee903a191d54d14cd619978c349a5": { "ru": { - "updatedAt": "2025-12-02T22:57:44.631Z" + "updatedAt": "2025-12-04T20:16:57.036Z", + "postProcessHash": "2279678f1560b47713bfc1034590c8ee60d992a32bae2cdff103f6d896c36d61" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.634Z" + "updatedAt": "2025-12-04T20:16:57.038Z", + "postProcessHash": "2b1903bd41c7b9784cd52bf624f62a29c5b76b0e21c8fafc9759546102fa7c90" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.638Z" + "updatedAt": "2025-12-04T20:16:57.048Z", + "postProcessHash": "f23ccfee69eaad1203b82c7da68c9b04735c08f67313ff56801750938b33e103" } } }, "459dcfc8cfcb0c798eda34051037eaf36f0e8bdbf413d5ca0f86faf6d1ae4e24": { "f469d58719f2670441a26ddce21a692caf6821dcb698ad90eba442b062adb5aa": { "ru": { - "updatedAt": "2025-12-02T22:57:44.658Z" + "updatedAt": "2025-12-04T20:16:57.057Z", + "postProcessHash": "ff040cd9fe24236f09181c6e594796a5034f5e98af4de461a9878c4ae1381a30" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.646Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "da574e6ef07be0f3b7b069fb682901bfc2807ab85e8cb83074e2b232b131c843" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:44.655Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": "6b42ca4edbef3cdb1f388d141f95a14eb620e4391540825aeb078d8904beb08d" } } }, "6aaffe8268bf79e8f4faf87000cd0de5d6453e5299b00696c4cf31cfb8d96d5b": { "ddba7dec037a2fad87464c00483c81011ad76357b5c4963561b6fb33a626d74e": { "zh": { - "updatedAt": "2025-12-02T22:57:44.582Z" + "updatedAt": "2025-12-04T20:16:57.011Z", + "postProcessHash": "1da093c7a1809b339d48a2c25456bfd63287b0439f38cb355bba81199fb9315b" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.581Z" + "updatedAt": "2025-12-04T20:16:57.010Z", + "postProcessHash": "7e5b2190eced9ea67d4376578b68958e7363832170b7ddc87109c4bc739b1a09" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.576Z" + "updatedAt": "2025-12-04T20:16:57.004Z", + "postProcessHash": "083a96f41054b505ca0e0b8d8415c28d3907966b1a707aaa4e11f76b11aceaa6" } } }, "299acd2896dbdcc7fc9ec56b51b4a1990b56dd0fe41acb3e57f9cae1bd915ac7": { "99ca8337276f2850a682286f3aa13f69597377997f305892b1182845150c4e2e": { "zh": { - "updatedAt": "2025-12-02T22:57:12.831Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "ef3949259dc24e21fb79e3802c4eea5274192430dfbeb2dfdb7f019999a7cfa5" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.560Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "cdd3554a34db824fba17a52aaaf6976ad99f2df2f092f67a98a24f68d9efa182" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.551Z" + "updatedAt": "2025-12-04T20:16:57.064Z", + "postProcessHash": "0e310b0ef2e824e076993166fa73b95de34030cd34c0eb690fa3ee7bf3f890ea" } } }, "3246877b14617a738e90832de040052391f7c8fc094ca44b2455eef30fbf314e": { "d6d3906022ccc3319721785ef9aa9f57093fc737336e72eddec0d952f2c844d7": { "zh": { - "updatedAt": "2025-12-02T22:57:44.674Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": "0eb77c505f7837923faa463f4268dc3c00e3ca7cc1f11780fc2c3fd6713a825d" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.675Z" + "updatedAt": "2025-12-04T20:16:57.090Z", + "postProcessHash": 
"ef5b34e961c19a6018f60d58cdafb465c5d9c4108984679e3af00c764bbc5642" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.868Z" + "updatedAt": "2025-12-04T20:16:57.078Z", + "postProcessHash": "0c3ad89659eaf31e2f42216e60abe0e45e8a3e180a193469c59f20da91122c51" } } }, "3635e79a2e76bb297d10c5dd4637f4fd94275c1ba1081c959a4f02a8d8049bf6": { "69cff4cb3337c445b437475f175d0c1ab8c863e57aa050035a2284326ea56533": { "zh": { - "updatedAt": "2025-12-02T22:57:44.664Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": "f22406ca08031b6a3394f44a875745cd9ec929db1612c0396b877c977f95d487" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.522Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "a885628c01b5094ed2e356525b3ea3994fdae390b171b6ab920331cad8ff971f" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.537Z" + "updatedAt": "2025-12-04T20:16:57.072Z", + "postProcessHash": "e1e174150adfaf8e8721b473f6dad372592fef8313bb8a881ffd04a62701f46c" } } }, "c3ae4d87db64f55d260d37bff7580e0a1ff638a6c1bebc984889a0f53e882bd1": { "c8ec9fc9c8400c3e7fc2098760f4d554623fe5eaab093ad69821218853b4e3b8": { "ru": { - "updatedAt": "2025-12-02T22:57:44.620Z" + "updatedAt": "2025-12-04T20:16:57.021Z", + "postProcessHash": "e274ae697a0536ab47490a0e2a9d6feeb6ee0d10126b80b4b4e57679f84bff97" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.635Z" + "updatedAt": "2025-12-04T20:16:57.059Z", + "postProcessHash": "3a7e99e7ece61d7d9df531889a6b6d11f05f85d73edcca2f254ca4e2be75e26a" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.635Z" + "updatedAt": "2025-12-04T20:16:57.061Z", + "postProcessHash": "d4a109f15fe81e5bb6cdd804ea6c63f86c09823965630a614d3278e0698b12e5" } } }, "7974b5d9fc7e953fa4aecd07c2f6f9176f90a9a89310ebe7fcb27dff7fdf734a": { "b66740bd12022ccefeb425eba94ee09c08528b3a5b347793bb597e953e4f21b2": { "ru": { - "updatedAt": "2025-12-02T22:57:44.664Z" + "updatedAt": "2025-12-04T20:16:57.051Z", + "postProcessHash": "4fb540261ff285b927f800bb66e4a283bb2310aa16009e7c772a4fe0f13bbae2" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:28.533Z" + "updatedAt": "2025-12-04T20:16:57.073Z", + "postProcessHash": "fa25483e2e6957cb19a580f845a22206763bc0daccca7617ecc4517a8cc7e375" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.530Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "ad3f47fa1324975d1fc0061e3b6e88c57ecf19055f9e0f3e1829841545129dbc" } } }, @@ -24738,494 +30175,608 @@ }, "83c1d1eeea98d87ba3da3231d2806cc372567257839dc62fd43ae5ea90f39c3c": { "zh": { - "updatedAt": "2025-12-02T22:57:44.616Z" + "updatedAt": "2025-12-04T20:16:57.011Z", + "postProcessHash": "4be7b915d531026bac17cc0c58f374b06623b727697945f53b908e6ab6b34918" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.616Z" + "updatedAt": "2025-12-04T20:16:57.011Z", + "postProcessHash": "342fc35f8b80414635b94f25378ec526b7e2779a24172d6c5e83301a76afd6cd" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.616Z" + "updatedAt": "2025-12-04T20:16:57.012Z", + "postProcessHash": "4b6981e212a7a3ce487c1b7ba441a5477ac0d0dd3d2a78d2ce6bd992e6585d5c" } } }, "51f9cca65edfee082630f0b1fb8e3a29f4ab177d7d5452a9abc2e1f9b56e3c53": { "96fa3e43effb19ba6584f2d1ae472b68548bb3a136e72cc23135e36bd3bd7b5a": { "zh": { - "updatedAt": "2025-12-02T22:57:12.830Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": "8c88ff5a375c1ceeffa3f48165c097d6293b3928a22952eaf81e32b21c7c9dc8" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.831Z" + "updatedAt": "2025-12-04T20:16:57.082Z", + "postProcessHash": "e1e4ef61478d32dbc33d8384fe98356d1e9b78dc34b7451bc1ef490a95103b18" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.561Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "48f4ac19ee29aef219cd098515d1ad16b0def9da6a95c000f705025a34a8e224" } } }, "0f09f5442f4b4bac183a39fe7c4ebb5f27e3e93b8fbdd22c1bf04db43e598523": { "8dd4d3197218cd45163cf27ba0c5e57b39a8db91e1ae9ccb34b1ee6871418db0": { "ru": { - "updatedAt": "2025-12-02T22:57:44.605Z" + "updatedAt": "2025-12-04T20:16:57.035Z", + "postProcessHash": 
"3353fd85956bc7ce0c103255a234f7a6dea763aade2040db6e1b6280745d41c6" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.607Z" + "updatedAt": "2025-12-04T20:16:57.042Z", + "postProcessHash": "9a31f945da5957be59092f4b6a3da06aff530461ae6e473a849ae1899830392a" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.609Z" + "updatedAt": "2025-12-04T20:16:57.044Z", + "postProcessHash": "2855a909b86f3e628bbef82144b20cf1e1330bbf64e71b377402e8a44159458e" } } }, "04bd894d54eb7791d6c20efe6b82643d60ba5f94079895df60cd832a967a8b72": { "b4b191db3e0a1686174b935b4a408eec87a5d10accead9bfce53f6fdb0c78147": { "ru": { - "updatedAt": "2025-12-02T22:57:44.631Z" + "updatedAt": "2025-12-04T20:16:57.037Z", + "postProcessHash": "ea8955a39b360c5789c530c3b03d98948318d928fbf66f5dee4500c82e04d74c" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.632Z" + "updatedAt": "2025-12-04T20:16:57.061Z", + "postProcessHash": "a9a3abe3c958caaa8bcbf17296846516ff0bddb242f35491e4949add096d9555" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.632Z" + "updatedAt": "2025-12-04T20:16:57.059Z", + "postProcessHash": "b88a666d313b0230b7cfba55682a3baf805cfa102aec28c09fe178c1094f4a31" } } }, "ee7cf7082c51841ba27fc19b990495b38b92128a79d2a323ecbca6bb723f0e8e": { "7deda54447cba9acce76845c952c2c7f4ee86488c276f4a335c96e4c55dc6bcd": { "ru": { - "updatedAt": "2025-12-02T22:57:44.657Z" + "updatedAt": "2025-12-04T20:16:57.057Z", + "postProcessHash": "00c3737a47fe87076367c5f47d9902ed50003b8ca68e540b557b4ae3dc6bacbe" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.659Z" + "updatedAt": "2025-12-04T20:16:57.058Z", + "postProcessHash": "9be2b5c0b3aa8227805010464eed85f607305f134224b9c9f01260fde9c24152" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.658Z" + "updatedAt": "2025-12-04T20:16:57.058Z", + "postProcessHash": "fda4b48e08eead05bcf991f16a558e29fc34cff1ef0f1f86ba18974e19fb7119" } } }, "cbfa6856b07360063ce643d5dc0c1d3cc2418e2639de759af00c6f665fc517e4": { "0140ef2e17d32f74a3b543e6327533884c8025b049e9fdc7af2a729378577a5e": { "ru": { - "updatedAt": 
"2025-12-02T22:57:44.657Z" + "updatedAt": "2025-12-04T20:16:57.057Z", + "postProcessHash": "14ca6cdc3fbc019800d1ab2b245d5f75cfa28484e93aeb91e9a82682a7f0f194" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.658Z" + "updatedAt": "2025-12-04T20:16:57.057Z", + "postProcessHash": "6aa71ec4bfb64ac6141da1d33875df04ac54b1fc3ac904c98504f035d296bc4d" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.661Z" + "updatedAt": "2025-12-04T20:16:57.059Z", + "postProcessHash": "08b00891741a87bd19db685cd391471622da0a3b2abbe958d09f345186bd2947" } } }, "c91f782ae583639337bdc49114576cfdd9c9355b699a68919bf1bd023713faef": { "bec2f91a18ab29d790a84a8d99cfc87824936240769c4e0889827b57e2472e09": { "zh": { - "updatedAt": "2025-12-02T22:57:12.857Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "96472858f38cb719004a2b08f4486e9d1249be74b467d9efbf04ff6eeac79ca5" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.845Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "60e3bb97e6840743363aa52c613a740f152ed9859f31336eec9841ab6a5f350c" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.842Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "bce25c2d42faafc7a5ddf46e0bc226db46499d2f2b49ff7faf8865925997ecf1" } } }, "abdc65a73d328d0f6587eba73db81db937a7f67106eeb840b67ebf52e35e6379": { "3d443c4abc73eddf8e334725cfa0abf5cbeb70f4475566a8d40953e253b629bc": { "zh": { - "updatedAt": "2025-12-02T22:57:44.636Z" + "updatedAt": "2025-12-04T20:16:57.043Z", + "postProcessHash": "6e8c387b0796e22e05587e69f740cff2b1340c55bc22aeab98aea2dfd82f39f9" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.636Z" + "updatedAt": "2025-12-04T20:16:57.060Z", + "postProcessHash": "2e7fb04929626238e46e7478f004ee07245271484452287ecc0a24fb33d66526" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.634Z" + "updatedAt": "2025-12-04T20:16:57.061Z", + "postProcessHash": "21ffa1a2a97ca3e11a0eeb8796e4f9af209867a8691e79d4055e79f3fcf17e4e" } } }, "6f7b91f9de26806b740498adc5e643a9126c17702c3c29691e1666087c366cf0": { 
"a1903aea52e9e31c6386a9cb6e37a8b774a6be1ff6724d1c7674a90cee7e9059": { "ru": { - "updatedAt": "2025-12-02T22:57:44.637Z" + "updatedAt": "2025-12-04T20:16:57.044Z", + "postProcessHash": "63e323b7a9cdc92f8061142ab94aecdf1ce358bc8e4e05e385256a1fda422a35" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.637Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "308f6f80a5c3e319de9fcc7671b550441f7ca5755c92e7238cc24bdcbd2c6eeb" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.633Z" + "updatedAt": "2025-12-04T20:16:57.038Z", + "postProcessHash": "1577fddb467308eec116f91adb6256ce793e2ebd7b92dc8129729789c7119677" } } }, "fae1576558dadb0c932329389ce8fbcbeee0d35379cb6c996673cd93aad35a13": { "3c3975cd182172060059f7637ba3d00c8b28a90dce27de128e912a0c986041da": { "ru": { - "updatedAt": "2025-12-02T22:57:28.545Z" + "updatedAt": "2025-12-04T20:16:57.072Z", + "postProcessHash": "614cb17c66181d2920e36c452dc405016e814644c75aacb604bf1c252b4301f8" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.546Z" + "updatedAt": "2025-12-04T20:16:57.073Z", + "postProcessHash": "93c9299c18f94784625059e2cb2f1ba46790078c405a3a2a63ab9069a9c9b633" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.537Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": "08591097cde9ae41f9e8b63b28285e0acc00a05857d8f8aae827612cb334958d" } } }, "3f14c9de32cc2309c896fed678c5b28a7dbf39af5a00bc45e0fd013b9c4d05d5": { "30c6636556ee6c7c353538457f6b3b57a9f5c21c15e651b2997b487922e38fc3": { "ru": { - "updatedAt": "2025-12-02T22:57:44.675Z" + "updatedAt": "2025-12-04T20:16:57.090Z", + "postProcessHash": "834ec5a0adcb1a1341efe7d9314b190b7a42687672519a15428962cc2960eec8" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.670Z" + "updatedAt": "2025-12-04T20:16:57.088Z", + "postProcessHash": "74c57d7e0b7dd734f111d295a1e656005038dbc7f20581049e3874dcc590218f" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.674Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": 
"4b2b33173acdf7ee3fed1eac99bba3cd097ed32b6ea6f75f77273ce38c1014a4" } } }, "6bed7e7a83ecb81ba1dd2bac10ae908f5dca2985a1372a02ea6f37edc19fb8d6": { "d69df1442a7aad94ba9096815aac2b779c3a23eed85dba10c8cf5e643215acf7": { "ru": { - "updatedAt": "2025-12-02T22:57:28.546Z" + "updatedAt": "2025-12-04T20:16:57.073Z", + "postProcessHash": "aab08b4a2418477525a978f4a46238f5406d7e7a0d8e64c1566339d1bf635516" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.547Z" + "updatedAt": "2025-12-04T20:16:57.073Z", + "postProcessHash": "c460555dbc9fe001deff35d0e77d1690c8c7bc974d77a2df3febb00afa194997" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.546Z" + "updatedAt": "2025-12-04T20:16:57.073Z", + "postProcessHash": "a22032f600c4ba2e5190be2b59d67387d0600901cdd6b7ac41c8bc301f3c5799" } } }, "3e0601c102f0cd71b8eb284da75b1cb579b66391d37fa681cf6d4bc5e1cc1d58": { "4eeb3b260eb5599be93bf2151af54a52820bc5b7145e432d1d16218f6b0c376b": { "zh": { - "updatedAt": "2025-12-02T22:57:44.609Z" + "updatedAt": "2025-12-04T20:16:57.045Z", + "postProcessHash": "8c9ca6ca37c11b59d146adcadcd4cb1f20ff8d52b39dc6ea83b7a256ea7d8303" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.610Z" + "updatedAt": "2025-12-04T20:16:57.046Z", + "postProcessHash": "1d4f02786441290e20258a1dfc1c1ec5510a202d8700ba77ddf00d579bafd78f" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.611Z" + "updatedAt": "2025-12-04T20:16:57.050Z", + "postProcessHash": "b30603cd557bf396848e02bf31809a26f493192fca1378ca4d6ae1b999ff6258" } } }, "8b38fc05c0c3883d9a4ec8bbf5caa1bbc4260e946b23ad31bf5c97563bd88229": { "58e3bcd0e949f466dc2d6e918d912d126143beea61afa2ee594bb6cb9d60e88d": { "zh": { - "updatedAt": "2025-12-02T22:57:44.610Z" + "updatedAt": "2025-12-04T20:16:57.049Z", + "postProcessHash": "1731093040ac25cb68ca0a1fdb6a57f29dc4a7911a56a30d7bb24b6b4a9f5c03" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.601Z" + "updatedAt": "2025-12-04T20:16:57.049Z", + "postProcessHash": "0b6b2804cca5aa642b3158e4015fb42946fcb775f91b8bb4926ba7dfc2555031" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:44.607Z" + "updatedAt": "2025-12-04T20:16:57.037Z", + "postProcessHash": "47395f8db473d16f7e3b9ec6d5b79df68df9f702f1fe26967b30fa7fd3d1c38f" } } }, "11f0d2e2fe5381cbdabf5c8d911e42f98d764106a83601de0c96203590ad4cc5": { "125142acfba42f104cc8a667d2cd001ded4684ba6896567aa756cbbcdfe1e975": { "zh": { - "updatedAt": "2025-12-02T22:57:44.639Z" + "updatedAt": "2025-12-04T20:16:57.060Z", + "postProcessHash": "2477a8a58578761413878829cc64b27377ca0fdfa28280b3cd6fa3d69e4c1274" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.639Z" + "updatedAt": "2025-12-04T20:16:57.061Z", + "postProcessHash": "86d52a4b9b355e03afe56b506a0b8529264225d06081c47a1260235ffef3f7a3" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.640Z" + "updatedAt": "2025-12-04T20:16:57.047Z", + "postProcessHash": "e3028efa7992da8a533449c0fdf05f89e0e7a409a1ba4e56b32efb4b0161866a" } } }, "6152b4089faf21cb920f0b0e0f015947f4aa6a6539cc24579a8054117329f175": { "58de10c3764c8ae20317dce26cff68631d85677a41b3f5dbd50c51245bb6c66d": { "zh": { - "updatedAt": "2025-12-02T22:57:44.579Z" + "updatedAt": "2025-12-04T20:16:57.004Z", + "postProcessHash": "c67d5eba05d200c6caa7c5a51bb8eaaecd050dcc26bb5669bb6b97be68e44b7f" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.580Z" + "updatedAt": "2025-12-04T20:16:57.010Z", + "postProcessHash": "585139921121661f916a0b00dba292134c5b6b5aff7ebac71bdc45a241972012" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.578Z" + "updatedAt": "2025-12-04T20:16:57.009Z", + "postProcessHash": "c8ca33bb49005fa7bedd89c3f3fac3dc8e599f2f4e4125fccfb49b8d392271e6" } } }, "bca14edd411fa9f3a8a9611aaacff6972d87258f38acd6410fdf5b4d4cdbaa55": { "6bdb09ec322273b515c242a0a196c778ff9876e649fa65392b8031cb787249d3": { "zh": { - "updatedAt": "2025-12-02T22:57:44.557Z" + "updatedAt": "2025-12-04T20:16:57.000Z", + "postProcessHash": "6bdb09ec322273b515c242a0a196c778ff9876e649fa65392b8031cb787249d3" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.558Z" + "updatedAt": "2025-12-04T20:16:57.000Z", + "postProcessHash": 
"6bdb09ec322273b515c242a0a196c778ff9876e649fa65392b8031cb787249d3" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.559Z" + "updatedAt": "2025-12-04T20:16:57.000Z", + "postProcessHash": "6bdb09ec322273b515c242a0a196c778ff9876e649fa65392b8031cb787249d3" } } }, "90b37c7973739db627d82b16799b1a59ebcb776db33ad8298491b0bbbed6c3de": { "73ba6fad372ebd5b4ddf82f283b3e7b1f303a8f02d8ddee4e4e8d3c0290b12ee": { "zh": { - "updatedAt": "2025-12-02T22:57:44.559Z" + "updatedAt": "2025-12-04T20:16:57.000Z", + "postProcessHash": "55e7685c978f9f7a6b36fb713a38304e4c08eb71cda0df1ebbfcc979e63fa6bf" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.562Z" + "updatedAt": "2025-12-04T20:16:57.002Z", + "postProcessHash": "8100e6f8d0ab8d8023c169f0486dbbd94353250f189a4614e5859b0534e0bcf4" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.560Z" + "updatedAt": "2025-12-04T20:16:57.001Z", + "postProcessHash": "b77ca66101de198fe0ac93e0ccb2bdc6a3497a2cc4460b833be2c6089bf9925c" } } }, "5dcc85853637a46f967236f293c74ce6629e743899ffb1d793ba5c7ffae90dbf": { "6777f02cb4aba6cf43d71fcfd0acc7ed50b7a116661de2ebd8193b82df093941": { "zh": { - "updatedAt": "2025-12-02T22:57:12.798Z" + "updatedAt": "2025-12-04T20:16:56.993Z", + "postProcessHash": "4f3e071ab43d54334a281547d68d413eb24ea409b21f7f47cc8f36dc277086a3" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.807Z" + "updatedAt": "2025-12-04T20:16:56.995Z", + "postProcessHash": "d702f68396baf04208f935336dd190abdea35259a8d6531ff9c6854f7d0cc043" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.799Z" + "updatedAt": "2025-12-04T20:16:56.994Z", + "postProcessHash": "9222eb2bf08a08673a1c668fdfe0cfaa7218f760bffe30e99c4918bc60b72cf2" } } }, "094593271a536e71ddc700495b0cf0f1048d6ab6c2dad60a929934f0798430ea": { "3dd2ef060c7a1cfaa56099a332e54eba203c50d4214e0f5bf98d281ff70e8d9e": { "ru": { - "updatedAt": "2025-12-02T22:57:12.799Z" + "updatedAt": "2025-12-04T20:16:56.993Z", + "postProcessHash": "6cc87da9c91a53af1b6b3ed64af0be54966751ffc0e92099d5616c86c01a18b8" }, "zh": { - "updatedAt": 
"2025-12-02T22:57:12.812Z" + "updatedAt": "2025-12-04T20:16:56.996Z", + "postProcessHash": "0ae3d2a2bc5fd527cc13604c36521dd771b97cc73861fb9abbaa076e80c20494" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.809Z" + "updatedAt": "2025-12-04T20:16:56.995Z", + "postProcessHash": "8bbb77c630dec564d3eb6b95ee25f257a110ff3dd150e119152ef7144ef6be08" } } }, "27e2bd6338f55fdbb9b18fcf67e0a0a67489a58d4e1c0e9ebb6902b05fc36aac": { "8929ff1edb2d47de0f53425237359fc7c4a1036ef99e001d0d30c2d13140051c": { "ru": { - "updatedAt": "2025-12-02T22:57:12.801Z" + "updatedAt": "2025-12-04T20:16:56.994Z", + "postProcessHash": "279d818ef873112952ca03b8c0de74d25684de610d95feaa94cd7de419dd8e99" }, "zh": { - "updatedAt": "2025-12-02T22:57:12.807Z" + "updatedAt": "2025-12-04T20:16:56.994Z", + "postProcessHash": "e7bd8465230bcc3452c8e715fe1035900c7623e68a13c5a066b9ce0ce3d9bf82" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.796Z" + "updatedAt": "2025-12-04T20:16:56.993Z", + "postProcessHash": "a4b7ae7f67d2c3df826d81472ef0867d05fb658bb4a3e2ad5944c88f4aa6e654" } } }, "19a3aba2f96aa29e64f1d6662e4c6e8d99c98fade3c4d0aa0badaed1632f4c7c": { "dc4c51508caf2bb72e5375d6abe27b369e6eacb14cc00c78c196a37458e79501": { "ru": { - "updatedAt": "2025-12-02T22:57:44.561Z" + "updatedAt": "2025-12-04T20:16:57.001Z", + "postProcessHash": "dc4c51508caf2bb72e5375d6abe27b369e6eacb14cc00c78c196a37458e79501" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.564Z" + "updatedAt": "2025-12-04T20:16:57.002Z", + "postProcessHash": "dc4c51508caf2bb72e5375d6abe27b369e6eacb14cc00c78c196a37458e79501" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.565Z" + "updatedAt": "2025-12-04T20:16:57.003Z", + "postProcessHash": "dc4c51508caf2bb72e5375d6abe27b369e6eacb14cc00c78c196a37458e79501" } } }, "dfad0bc3b6417f406a00ff9ef3820a57dfc8f664400a7ce0134d81da437d7e07": { "79123cc58b0a88edb3bafb181767cf704d4908d66876b9628ebccd1e31728887": { "zh": { - "updatedAt": "2025-12-02T22:57:12.806Z" + "updatedAt": "2025-12-04T20:16:56.994Z", + "postProcessHash": 
"0ae3d2a2bc5fd527cc13604c36521dd771b97cc73861fb9abbaa076e80c20494" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.809Z" + "updatedAt": "2025-12-04T20:16:56.995Z", + "postProcessHash": "6cc87da9c91a53af1b6b3ed64af0be54966751ffc0e92099d5616c86c01a18b8" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.795Z" + "updatedAt": "2025-12-04T20:16:56.992Z", + "postProcessHash": "dccec3e19c928e7694d31705709fa3b1a826da9b8f3d16f56bea9732a9d1d8e2" } } }, "4b87a5344a9b716648c77706eed8254331cf4a6ce21d8a43d267f67270734d1f": { "fb4dfb8f9e647f53e63097ab00045af768eb9222f514d424b3a57634d8f3681e": { "ru": { - "updatedAt": "2025-12-02T22:57:44.563Z" + "updatedAt": "2025-12-04T20:16:57.002Z", + "postProcessHash": "d30022e6dec69de3b3dd2260dc515847f66daef723cbef66d737105a9699b19d" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.563Z" + "updatedAt": "2025-12-04T20:16:57.002Z", + "postProcessHash": "e095122f42e952d7924c73930f58a3774753f564b3ff4a9c3edf2ade722fe2f4" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.559Z" + "updatedAt": "2025-12-04T20:16:57.001Z", + "postProcessHash": "7e380bb32746664f5dc46776c078cb16ddd0dfaec7c6ecba65e66c10f3be2e9f" } } }, "f0a5d6e46b2ddd583ab900563a42b7687a1b4924afd5d0cb5260268c8952f6d0": { "3a8f69d0d17e9065a46d4d7456a503262e2f2a05ac3d4b37f49520b5f716b1c3": { "zh": { - "updatedAt": "2025-12-02T22:57:28.559Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "cdd8fea899b5bdef0a7f4127a6d20bb75bdeca9b60bb966cbc36f23c8099a651" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.560Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "42b68726239b044c826e76f5b31713e1716ce44b0b6c19fc42f3235beae2d010" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.547Z" + "updatedAt": "2025-12-04T20:16:57.063Z", + "postProcessHash": "55e6605f35579b95042119d65466d978fd994860b70f049c16cc8c6d865c60e8" } } }, "9027438a5e9e30a2c6e8e4d197b479cebf29c05aaa3a716589f591c0ff697c0d": { "d5d6ea5e34429a4a6f22bad136f5d5eb712bbb922cae22a6c870b906c7befadf": { "zh": { - "updatedAt": 
"2025-12-02T22:57:44.668Z" + "updatedAt": "2025-12-04T20:16:57.079Z", + "postProcessHash": "d174d80b968e05d57ef6cb0f23d12dcd93b7cd927ba524a0b92b22ed0ee26840" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.669Z" + "updatedAt": "2025-12-04T20:16:57.088Z", + "postProcessHash": "e5da9ec1e2c5745ee867a1f854f35ca12ff74dab586cf48e577866d4996543cc" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.866Z" + "updatedAt": "2025-12-04T20:16:57.076Z", + "postProcessHash": "fe90b32749302d19c1c3f961b9638c72f90b399fe13e780c3233f08804a31105" } } }, "492b567700669f175480e40ecf1c553c463e77a0bb36e30e76eb3628c35e7db3": { "84c653bd2e6590cbd982437c2304ff4818581c1e60afb256437642c4a3dc66c5": { "ru": { - "updatedAt": "2025-12-02T22:57:44.666Z" + "updatedAt": "2025-12-04T20:16:57.074Z", + "postProcessHash": "605260c7c7fad78e50b94866aef89c35f6e2719e3fc655dd39756be33d5069c3" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.523Z" + "updatedAt": "2025-12-04T20:16:57.067Z", + "postProcessHash": "80378545aadfe422c53caee6b30dee36dd54199f88913764efd6964fd3143094" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.533Z" + "updatedAt": "2025-12-04T20:16:57.070Z", + "postProcessHash": "31aef518ed57ee8ab02fdf4902bd2e827828dacd2482f03a18bbdb29f5ee4a2f" } } }, "dbc3d877611d9d2c9a27f2ea076decc1afc5907f3c3c02044504a307308653af": { "79b34ec963ce2ab8bc60d33c073caf0fc42c9aed7f3b97c1ed638390938960de": { "zh": { - "updatedAt": "2025-12-02T22:57:44.643Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "8b8619fe48a942e76600cc1f22f4064e1c3b8f47823855c1b570c393516f4459" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.648Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "fc6661cb434f45d0af02152f3d424c222bfa7962ad80665b75c1533ad63083e3" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.652Z" + "updatedAt": "2025-12-04T20:16:57.062Z", + "postProcessHash": "59c70ca936d3311dfe57de8d905526e54a8302096c5faf9279e5eead416036fa" } } }, "e5b56f33a8458d42151bcbd638d4692704a7d1f97fb2c4ed94143ff1e460a418": { 
"7eab19fd44668e93c10760c5fe2d6a1421e507a9cec55dfd91ed0fcab85c27f1": { "ru": { - "updatedAt": "2025-12-02T22:57:44.643Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "858b36c1f2dc89f0040b3532f200a90fd64038d18ff1395750b0be71191849d0" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.646Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "f0154aba4a8879e6389f451b4027fe0f5a1b6d6e648fceadb4ba8171bf5d6081" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.642Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": "3aebc8161b6b784a161b6562957dfdda69ada72924dc083b391c262699d68eeb" } } }, "3f3b14a0c691ae2b5345864fd4ad20a184225db1e35ffcbd455da1aeec5f0d48": { "a9c8fa4f53951ce4026e170171a0517a80777e9037e5bb2f16eab83d3ffaa9cc": { "zh": { - "updatedAt": "2025-12-02T22:57:12.811Z" + "updatedAt": "2025-12-04T20:16:56.996Z", + "postProcessHash": "98be0adb514ab99e364abdbbab872ca95c8b60c312d3df36ed607421c9f38c2b" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.808Z" + "updatedAt": "2025-12-04T20:16:56.995Z", + "postProcessHash": "c6b1ffeb8a927241e2108dbeb02a8cbb166d5b270f1e7cdf770147d6ef83a7d2" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.800Z" + "updatedAt": "2025-12-04T20:16:56.994Z", + "postProcessHash": "484551e9ac0b82f75a73cf226db475edde9fbf9f15c772cb60460ca3bd544e55" } } }, "5b41c30593068b713e26045c49b89ef31bda4b2d25564fc71eeafadaa3a88b3b": { "ecb137fd1463f816c7efffc5bf4c604e7cfa7735755e22327018e286ec755267": { "zh": { - "updatedAt": "2025-12-02T22:57:12.844Z" + "updatedAt": "2025-12-04T20:16:57.085Z", + "postProcessHash": "71228041f3ec2db0eaea6df41f727fe8d9fb0c2be58156f7e40029f6cbfeaafd" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.848Z" + "updatedAt": "2025-12-04T20:16:57.080Z", + "postProcessHash": "935e2df6e42ef5eae97f3b0f34534f4ddba200017d97d3170e7db9c138c0cc8b" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.836Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": 
"8ee6a597195d2d62144b70385821d6d3faa909ba10aaaba5092fb6cbbe5f9d8e" } } }, "7c145e871f942571130b488686f2c93299c7784ad34d23a45c99e2947f75208c": { "193be2e12900fc92f5c6cf9d55c9d419bf67397ce7c166154cf4356eaee3bb11": { "zh": { - "updatedAt": "2025-12-02T22:57:12.845Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "84717246d459b05e668b44cda8f6d98373e4ad33dc1b061182c57ee0bd17bf6e" }, "ru": { - "updatedAt": "2025-12-02T22:57:12.848Z" + "updatedAt": "2025-12-04T20:16:57.084Z", + "postProcessHash": "e8092285f85de8c0fef7d0d689529703a23d3563750f97888238640fc9e81345" }, "jp": { - "updatedAt": "2025-12-02T22:57:12.837Z" + "updatedAt": "2025-12-04T20:16:57.077Z", + "postProcessHash": "c716efae67f791d006333fb0f57dbadc4ece9e0eaa66931176f3fa87c2382a19" } } }, "f5b83279dab37d495f5c4fd259883e2f59a812c65ccc8ed0c351f21a2028e710": { "caa363689f97df04d5bdb8cc80dfede581f616ede687804ff5915657268592d2": { "ru": { - "updatedAt": "2025-12-02T22:57:44.671Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": "6a2c0ad1bdac3f2b0003235f737b0f22c9ea609aa45a68ffb137059978476c05" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.671Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": "c36543e2adf44d48030e6c03e8f6e8b3839e46aec8b066e1356325acfdd1b129" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.674Z" + "updatedAt": "2025-12-04T20:16:57.089Z", + "postProcessHash": "0801238959e627da999d8278755a3887bbcbfc083f7a0870498f6e39dad17d97" } } }, "bdeb28bdbd403e8a7dbfd53a18daf2d16a5ec80e2b272afff63299b084ee54d4": { "8d2b2934162408394b787a0c9376fd5fc5d3b70e883799983cb40e9cd3caec2b": { "ru": { - "updatedAt": "2025-12-02T22:57:28.563Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "eb8a98ada416e3412051b09bd302a439a0f2447cbe3593464e701c38186d586d" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.561Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": "67c035438edce41a547ba289bea4d3ba574c382a7710ce710b315d1886a6d57b" }, "jp": { - "updatedAt": 
"2025-12-02T22:57:12.830Z" + "updatedAt": "2025-12-04T20:16:57.071Z", + "postProcessHash": "61d36899ec8eae83f9ef7362f78061dfcc8ae137ea5210f1806f0b63f5da2b87" } } }, "6d9be1cdfeaef3b95b6937fe4da26361e0723bbb44069a88774c3f6c426953ff": { "27c7a63e2afca841ae5e7d6fe8b9f6f3c513769116043f854361c07302afa76a": { "ru": { - "updatedAt": "2025-12-02T22:57:44.623Z" + "updatedAt": "2025-12-04T20:16:57.060Z", + "postProcessHash": "1006dd599af5606b93631939ee5182204dda210f40eaca9230c1a08979332500" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.623Z" + "updatedAt": "2025-12-04T20:16:57.060Z", + "postProcessHash": "66c062ee29acb9e54a87c9e3fc3c673fe6e290e74f2f50ff249f50be3046b6a4" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.622Z" + "updatedAt": "2025-12-04T20:16:57.061Z", + "postProcessHash": "1a8c1a420d54a13339e50585372228446f47e39f99df6bdede1a0c1db91df283" } } }, "08f3b123bce337ae576d91effb4c4e0aa8ce5818f4196baa0ba59915bd0d269e": { "a29ff4b6f7e821d9ae449a998417a11cc1c6705210186befa92aa45136de5da9": { "ru": { - "updatedAt": "2025-12-02T22:57:44.645Z" + "updatedAt": "2025-12-04T20:16:57.053Z", + "postProcessHash": "44d1a2ae998ad3cad0b11c2d2a5116bb4a071a5a82910874917afc080d13c0c0" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.648Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "7ddfedd4f1a84318fe59fed15148613e40a0cab80b5f240d51b2dcb0876442a7" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.655Z" + "updatedAt": "2025-12-04T20:16:57.056Z", + "postProcessHash": "7c296de0f54e7682022f49c1685b45c553b7b55f0435e812ffff18e2d9236460" } } }, "0e3c84ac0dcb64d166a9e4cad32d3420219fe50fe305e36aa358456c172e2cf7": { "318568dae18d539030ba9900a07a5c387e0ffd38a7b84468080ad1adcdccfc39": { "ru": { - "updatedAt": "2025-12-02T22:57:28.520Z" + "updatedAt": "2025-12-04T20:16:57.066Z", + "postProcessHash": "00c46ce01f15ad9fdeab2f72691d4fab5783030e67224ce2bc3767c72f58a420" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.524Z" + "updatedAt": "2025-12-04T20:16:57.068Z", + "postProcessHash": 
"2b9ed3b6e9abe2f5307e8e4356e714558f9d57c5e41f54eb4943c1e669090501" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.666Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": "30e8ebe13f8b17e8593ba3dd75e594be4c97930c33a5e77c1286bbfe51aeaa97" } } }, "808e737b87d86c00037ee9499555e8d37bc7fd2e51f5ef796a4a104d5f453b14": { "4719caa724ba0e2a9f5dae16a0fe1e64ccb82cd37762f0d2649a253c1acc65eb": { "zh": { - "updatedAt": "2025-12-02T22:57:44.646Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "dbd1e9397e889a9c7f66a1d040df799ad545fe0ff2974b8186c5a2a90b1bd100" }, "ru": { - "updatedAt": "2025-12-02T22:57:44.649Z" + "updatedAt": "2025-12-04T20:16:57.054Z", + "postProcessHash": "010fabb308c8285f625f3d634f1941a9e92cd0b5a1a170e9886d7c0caddc17c2" }, "jp": { - "updatedAt": "2025-12-02T22:57:44.642Z" + "updatedAt": "2025-12-04T20:16:57.052Z", + "postProcessHash": "4d677cfe542defa2ba654cec1ee86d16b46db800cef2f502ee7fbdb7f4dd8cdc" } } }, @@ -25239,52 +30790,64 @@ "66bbf0d8525a651b70357530fa66ca0f30073bb1460c57979838338b1c0d8749": { "9a8d534c4d4974d982e6c1d6d31787e905d1215b8eade3bf1524a2e15a6fa2c0": { "jp": { - "updatedAt": "2025-12-02T22:57:28.958Z" + "updatedAt": "2025-12-04T20:16:58.007Z", + "postProcessHash": "818e4633c64fa1a38d9ff6344004d822542d1c75f9ca903990b2dcf9352de3c1" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.959Z" + "updatedAt": "2025-12-04T20:16:58.007Z", + "postProcessHash": "3074f61c403e9ebcde3ca106438c8b0831b3ed985093f361160cce11270f7c5a" }, "zh": { - "updatedAt": "2025-12-02T22:57:28.959Z" + "updatedAt": "2025-12-04T20:16:58.007Z", + "postProcessHash": "d198a414e81c93bac339df3d0c993ae01ae0b8e4b92cda3d4cc695d15b0343b6" } } }, "004214d1284aa8fe518aa336fea065f912357ada315224cb8c2c05631ff36449": { "1f8a48298f98b2b8149a2007bd5ec17c019f567f61d697e95322a03baab80eba": { "ru": { - "updatedAt": "2025-12-02T22:57:44.813Z" + "updatedAt": "2025-12-04T20:16:57.265Z", + "postProcessHash": "8ab59d531a0f8bfeb4e02dae478702f215be910deadfdcf075371cf2a6557076" }, 
"jp": { - "updatedAt": "2025-12-02T22:57:44.813Z" + "updatedAt": "2025-12-04T20:16:57.232Z", + "postProcessHash": "121388e7b53fdf557c939937c2e0567b38ef9a19aef02c417160cdc13ca3e55a" }, "zh": { - "updatedAt": "2025-12-02T22:57:44.814Z" + "updatedAt": "2025-12-04T20:16:57.265Z", + "postProcessHash": "c5711362aadc931422448731f50ed222c7123e7cdcae547136368bae2efa9752" } } }, "0e53576e2d568d32de6d52366ca09d6d3d3b9244b9b950224874f63b7def8e0b": { "4f03aabba29c1d276fbd55f6443c7d1e281d6d6d68ae1d946877245547ca2157": { "zh": { - "updatedAt": "2025-12-02T22:57:28.747Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "6a351e20558b6425dae5c0dd4c9dd4d76e78135a678346264493846826c7795a" }, "jp": { - "updatedAt": "2025-12-02T22:57:28.748Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "9374f3df5c0b7a3c476f022124892fd637f67d6abece5bef9a553610072963f2" }, "ru": { - "updatedAt": "2025-12-02T22:57:28.748Z" + "updatedAt": "2025-12-04T20:16:57.724Z", + "postProcessHash": "da35376e1f288ce3c7c7dc29ccd29ae448afc5e42a2ff36e5d34b2b89f6cffd4" } } }, "2ed9dd905d29a21da9ab592836fca34a8ed19a4db4490a78058048382b846b88": { "48c110e866cf2a664e3dab6a338d16c6af206ae8ef6c018c3017a83854065ed6": { "zh": { - "updatedAt": "2025-12-02T22:57:53.564Z" + "updatedAt": "2025-12-04T20:16:58.024Z", + "postProcessHash": "85bbca18e559edf4f82155f736f8f834e720ba9578601b8ec7b68c8e1499d61a" }, "jp": { - "updatedAt": "2025-12-02T22:57:53.564Z" + "updatedAt": "2025-12-04T20:16:58.024Z", + "postProcessHash": "184917ddc8ce0e0945644630ea8c8d0bb18384a8874c3063419f1ac6952a267d" }, "ru": { - "updatedAt": "2025-12-02T22:57:53.567Z" + "updatedAt": "2025-12-04T20:16:58.025Z", + "postProcessHash": "85f233e1234be55a3c231a18a5bf5902315fb78d73a68fe43fca7826f3f601a3" } } }, @@ -25299,6 +30862,52 @@ "zh": { "updatedAt": "2025-12-02T22:57:28.587Z" } + }, + "555a83de7b1fedbbe92860951f89c33d800c65562d765e18c773348463b9d4b1": { + "ru": { + "updatedAt": "2025-12-04T20:16:57.347Z", + "postProcessHash": 
"066af3c924b8a1250e88d2a27f16f53b507e0e9617a80a2a2b39f5ae891cb83e" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:57.347Z", + "postProcessHash": "b4cb559387b02cea9e01ec9df786d8fc50eec0d6812133631e63900a442aa537" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.347Z", + "postProcessHash": "4e83cadff41e2a0c707e45df7b711b0e0e964bf60b1a4b03f5e71b666d40d8b1" + } + } + }, + "4512096e994b4597d26312dcc20aaa261a25d919afc1ca576d81943eb7dba5e8": { + "576c6b22e45a20fa5f3f50de6a9f620190ca1b7786a288fd05ea72e443dae788": { + "ru": { + "updatedAt": "2025-12-04T20:16:57.192Z", + "postProcessHash": "029d7772bdb14d0cc2ca4ecd00965c2dd784bf2737c76648f3b83e2759635ab2" + }, + "zh": { + "updatedAt": "2025-12-04T20:16:57.192Z", + "postProcessHash": "87b394d8ba23742c70b4892f614c9b2af257a3ed57ec33b408023a5a240c200b" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:57.192Z", + "postProcessHash": "89a6704010a470d3ca4894395bb7ba76be226d708320c4469a6d9e73d2719fac" + } + } + }, + "9b80672ce4026210dce9b9fc8dcdd3cc250a0a78eb37e87ce9abe3344154ce7e": { + "9f070809829dec1b3881cdd3a76e90a0533684952dfa123150bed063c8156ad3": { + "zh": { + "updatedAt": "2025-12-04T20:16:58.129Z", + "postProcessHash": "0994ddd9d6117cf05d60f206499af364ed1976ebcdf1aa459354f33559442e6a" + }, + "ru": { + "updatedAt": "2025-12-04T20:16:58.129Z", + "postProcessHash": "2c6541332e0756a46b93d6c3f0ad0489bedce7ac204bb4a42658148411433625" + }, + "jp": { + "updatedAt": "2025-12-04T20:16:58.129Z", + "postProcessHash": "1b9b45c62e0d849d1af07fdbdaf20aad3a08a4a6a8cd0c474fc00bc1ccf9732b" + } } } } diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md index 438b3eaf87d..eebc4c2cbc1 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_add_superset_detail.md @@ -13,7 +13,6 @@ Supersetは[Docker 
Composeを使用したローカルへのSupersetインスト 以下のコマンドは、GitHubリポジトリ`superset`のトップレベルディレクトリから実行してください。 ::: - ## ClickHouse Connect の公式ドライバー {#official-clickhouse-connect-driver} Superset のデプロイメントで ClickHouse Connect ドライバーを利用できるようにするには、ローカルの requirements ファイルに追加します。 @@ -22,7 +21,6 @@ Superset のデプロイメントで ClickHouse Connect ドライバーを利用 echo "clickhouse-connect" >> ./docker/requirements-local.txt ``` - ## Mapbox {#mapbox} これは任意です。Mapbox の API キーがなくても Superset で位置情報データをプロットできますが、キーを追加するよう促すメッセージが表示され、地図の背景画像は表示されません(データポイントのみが表示されます)。Mapbox には、必要に応じて利用できる無料プランがあります。 @@ -35,7 +33,6 @@ API キーを Superset で利用できるようにします: echo "MAPBOX_API_KEY=pk.SAMPLE-実際のキーに置き換えてください" >> docker/.env-non-dev ``` - ## Superset バージョン 2.0.0 のデプロイ {#deploy-superset-version-200} リリース 2.0.0 をデプロイするには、次のコマンドを実行します: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md index b74ac693fe0..076bcc502a2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_users-and-roles-common.md @@ -32,7 +32,6 @@ DROP TABLE db1.table1; DROP DATABASE db1; ``` - ## 管理者以外のユーザー {#non-admin-users} ユーザーには必要な権限のみを付与し、全員を管理者ユーザーにすべきではありません。本ドキュメントの以降では、シナリオ例と必要なロールについて説明します。 @@ -172,14 +171,12 @@ CREATE USER row_user IDENTIFIED BY 'password'; ON db1.table1 FOR SELECT USING 1 TO clickhouse_admin, column1_users; ``` - :::note テーブルにポリシーをアタッチすると、システムはそのポリシーを適用し、定義されたユーザーとロールのみがテーブルに対する操作を実行できます。その他のすべてのユーザーは、すべての操作が拒否されます。制限的な行ポリシーを他のユーザーに適用しないようにするには、他のユーザーとロールが通常のアクセスまたはその他のタイプのアクセスを持てるように、別のポリシーを定義する必要があります。 ::: - ## 検証 {#verification} ### 列制限ユーザーを使用したロール権限のテスト {#testing-role-privileges-with-column-restricted-user} @@ -293,8 +290,6 @@ CREATE USER row_user IDENTIFIED BY 'password'; - - ## ユーザーとロールの変更 {#modifying-users-and-roles} 
ユーザーには、必要な権限の組み合わせを実現するために複数のロールを割り当てることができます。複数のロールを使用する場合、システムはそれらのロールを組み合わせて権限を決定し、その結果、ロールの権限は累積されます。 @@ -365,8 +360,6 @@ CREATE USER row_user IDENTIFIED BY 'password'; ``` - - ## トラブルシューティング {#troubleshooting} 権限が重なり合ったり組み合わさったりして、予期しない結果を生む場合があります。そのようなときは、管理者アカウントを使用して次のコマンドを実行し、問題の原因を切り分けることができます。 @@ -429,7 +422,6 @@ Query id: 0d3b5846-95c7-4e62-9cdd-91d82b14b80b └─────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## ロール、ポリシー、ユーザーを管理するためのコマンド例 {#example-commands-to-manage-roles-policies-and-users} 次のコマンドは以下の目的で使用できます: @@ -474,7 +466,6 @@ DROP ROLE A_rows_users; DROP USER row_user; ``` - ## まとめ {#summary} この記事では、SQL ユーザーおよびロール作成の基本を説明し、ユーザーおよびロールに対する権限を設定・変更する手順を示しました。各トピックの詳細については、ユーザーガイドおよびリファレンスドキュメントを参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md index 79b3094f3e4..344f52224e1 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/beta-and-experimental-features.md @@ -46,7 +46,6 @@ ClickHouse はオープンソースであるため、ClickHouse の従業員だ {/*AUTOGENERATED_START*/ } - ## ベータ版設定 {#beta-settings} | 名前 | デフォルト | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md index 98be2bb6211..3f48a6e1031 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/cloud.md @@ -8,15 +8,11 @@ keywords: ['ClickHouse Cloud', 'クラウドデータベース', 'マネージ doc_type: 'reference' --- - - # ClickHouse Cloud {#clickhouse-cloud} ClickHouse Cloud は、人気の高いオープンソース OLAP データベースである ClickHouse の開発元によって提供されるクラウドサービスです。 [無料トライアルを開始](https://console.clickhouse.cloud/signUp)して ClickHouse Cloud を体験できます。 - - ## ClickHouse Cloud の利点 
{#clickhouse-cloud-benefits} ClickHouse Cloud を利用する主な利点は次のとおりです。 @@ -28,8 +24,6 @@ ClickHouse Cloud を利用する主な利点は次のとおりです。 - **総保有コスト (TCO)**: 価格対性能比に優れ、管理上のオーバーヘッドも最小限に抑えられます。 - **幅広いエコシステム**: 既存のデータコネクタ、可視化ツール、SQL クライアントや各種言語クライアントをそのまま利用できます。 - - {/* ## OSS と ClickHouse Cloud の比較 @@ -51,7 +45,6 @@ ClickHouse Cloud を利用する主な利点は次のとおりです。 | **マネージドサービス** | クラウドマネージドサービスを利用することで、チームは ClickHouse のサイズ設定、セットアップ、保守といった運用上のオーバーヘッドを気にすることなく、ビジネス成果に集中し、市場投入までの時間を短縮できます。 | ❌ | ✅ | */ } - ## ClickHouse Cloud はどのバージョンの ClickHouse を使用していますか? {#what-version-of-clickhouse-does-clickhouse-cloud-use} ClickHouse Cloud は、お使いのサービスを継続的に新しいバージョンへアップグレードします。コアとなるデータベースバージョンをオープンソースとして公開した後、クラウドのステージング環境で追加の検証を行い、通常は本番環境へのロールアウトまでに 6〜8 週間かかります。ロールアウトは、クラウドサービスプロバイダー、サービス種別、リージョンごとに段階的に行われます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md index f3b5ac5fd18..7bbd4655daa 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/about-us/distinctive-features.md @@ -8,12 +8,8 @@ keywords: ['compression', 'secondary-indexes','column-oriented'] doc_type: 'guide' --- - - # ClickHouse の特長 {#distinctive-features-of-clickhouse} - - ## 真のカラム指向データベース管理システム {#true-column-oriented-database-management-system} 真のカラム指向 DBMS では、値と一緒に余分なデータは一切保存されません。これは、値の長さを示す「数値」を値の隣に保存しないようにするためには、固定長の値をサポートしている必要があることを意味します。例えば、10 億個の `UInt8` 型の値は、非圧縮で約 1 GB を消費するのが本来であり、そうでない場合は CPU 使用率に大きく影響します。データを非圧縮であっても(いかなる「余分な情報」も含めずに)コンパクトに保存することは不可欠です。というのも、伸長処理の速度(CPU 使用率)は主に非圧縮データの量に依存するからです。 @@ -22,38 +18,28 @@ doc_type: 'guide' 最後に、ClickHouse は単一のデータベースではなく、データベース管理システムです。サーバーを再構成・再起動することなく、実行時にテーブルやデータベースを作成し、データをロードし、クエリを実行することができます。 - - ## データ圧縮 {#data-compression} 一部のカラム指向 DBMS ではデータ圧縮を使用していないものもあります。しかし、データ圧縮は優れたパフォーマンスを達成するうえで重要な役割を果たします。 ディスク容量と CPU 消費量のトレードオフが異なる効率的な汎用圧縮コーデックに加えて、ClickHouse 
は特定の種類のデータ向けの[専用コーデック](/sql-reference/statements/create/table.md#specialized-codecs)を提供しており、これにより ClickHouse は時系列データベースのような、よりニッチなデータベースと十分に競合し、さらにそれらを上回る性能を発揮できます。 - - ## データのディスク保存 {#disk-storage-of-data} データを主キーで物理的にソートした状態に保つことで、特定の値または値の範囲に基づくデータを、数十ミリ秒以内という低レイテンシで抽出することが可能になります。SAP HANA や Google PowerDrill のような一部のカラム指向 DBMS は、メモリ上でのみ動作します。このアプローチでは、リアルタイム分析に必要な以上のハードウェア予算の確保が必要になります。 ClickHouse は通常のハードディスク上で動作するように設計されているため、1 GB あたりのデータ保存コストは低く抑えられますが、SSD や追加の RAM が利用可能な場合にはそれらも十分に活用します。 - - ## 複数コアでの並列処理 {#parallel-processing-on-multiple-cores} 大規模なクエリは自然に並列実行され、現在のサーバーで利用可能な必要なリソースをすべて活用します。 - - ## 複数サーバーでの分散処理 {#distributed-processing-on-multiple-servers} 上で挙げた列指向 DBMS のほとんどは、分散クエリ処理をサポートしていません。 ClickHouse では、データは複数のシャードに分散して配置できます。各シャードは、フォールトトレランスのために使用されるレプリカのグループとすることができます。すべてのシャードが、ユーザーからは透過的に、クエリの並列実行に利用されます。 - - ## SQL サポート {#sql-support} ClickHouse は、ANSI SQL 標準と高い互換性を持つ SQL ベースの[宣言型クエリ言語](/sql-reference/)をサポートしています。 @@ -62,40 +48,28 @@ ClickHouse は、ANSI SQL 標準と高い互換性を持つ SQL ベースの[宣 相関(依存)サブクエリは現時点ではサポートされていませんが、将来的にサポートされる可能性があります。 - - ## ベクトル計算エンジン {#vector-engine} データはカラムごとに保存されるだけでなく、ベクトル(カラムの一部)単位で処理することで、CPU を高効率に活用できます。 - - ## リアルタイムなデータ挿入 {#real-time-data-updates} ClickHouse は主キーを持つテーブルをサポートしています。主キーの範囲に対してクエリを高速に実行するために、データは MergeTree を用いて段階的にソートされます。これにより、テーブルには継続的にデータを追加できます。新しいデータを取り込む際にもロックは取得されません。 - - ## プライマリインデックス {#primary-index} データをプライマリキーで物理的にソートしておくことで、特定の値や値の範囲に基づいてデータを抽出する際に、数十ミリ秒かからない低レイテンシで処理できるようになります。 - - ## セカンダリインデックス {#secondary-indexes} 他のデータベース管理システムとは異なり、ClickHouse のセカンダリインデックスは特定の行や行範囲を指すものではありません。代わりに、一部のデータパーツ内のすべての行がクエリのフィルタ条件に一致しないことを事前に判断し、それらを一切読み込まないようにします。このため、これらは[データスキップインデックス](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes)と呼ばれます。 - - ## オンラインクエリに適した設計 {#suitable-for-online-queries} 多くの OLAP 
データベース管理システムは、サブ秒レイテンシでのオンラインクエリ処理を目標としていません。他のシステムでは、レポート作成に数十秒、場合によっては数分かかることが許容されるケースもよくあります。さらに時間がかかることもあり、そのためにレポートをオフラインで準備しておく必要が生じます(事前に作成しておく、あるいは「後でもう一度アクセスしてください」と応答するなど)。 ClickHouse における「低レイテンシ」とは、ユーザーインターフェイスのページが読み込まれているまさにその瞬間に、事前に回答を準備しようとすることなく、遅延なしでクエリを処理できること、つまり *オンライン* で処理できることを意味します。 - - ## 近似計算のサポート {#support-for-approximated-calculations} ClickHouse は、精度とパフォーマンスをトレードオフするためのさまざまな方法を提供します。 @@ -104,28 +78,20 @@ ClickHouse は、精度とパフォーマンスをトレードオフするため 2. データの一部([SAMPLE](../sql-reference/statements/select/sample.md))に基づいてクエリを実行し、近似的な結果を取得する。この場合、ディスクから読み出すデータ量は比例して少なくなります。 3. すべてのキーではなく、ランダムに選ばれた限られた数のキーに対して集約を実行する。データ内でのキー分布に関して特定の条件が満たされている場合、より少ないリソースで十分に正確な結果を得ることができます。 - - ## アダプティブ結合アルゴリズム {#adaptive-join-algorithm} ClickHouse は複数テーブルを [JOIN](../sql-reference/statements/select/join.md) する際、基本的にハッシュ結合を優先しつつ、大きなテーブルが複数存在する場合にはマージ結合へフォールバックする形で、結合方法を適応的に選択します。 - - ## データレプリケーションとデータ整合性のサポート {#data-replication-and-data-integrity-support} ClickHouse は非同期マルチマスター型レプリケーションを使用します。利用可能な任意のレプリカに書き込まれた後、残りのすべてのレプリカはバックグラウンドでそのコピーを取得します。システムは、異なるレプリカ間でデータを同一に保ちます。ほとんどの障害からの復旧は自動的に行われ、複雑なケースでは半自動的に行われます。 詳細については、[Data replication](../engines/table-engines/mergetree-family/replication.md) セクションを参照してください。 - - ## ロールベースのアクセス制御 {#role-based-access-control} ClickHouse は SQL クエリを使用したユーザーアカウント管理機能を備えており、ANSI SQL 標準や一般的なリレーショナルデータベース管理システムで利用されているものと同様の [ロールベースアクセス制御の設定](/guides/sre/user-management/index.md) を行うことができます。 - - ## 欠点とみなされ得る機能 {#clickhouse-features-that-can-be-considered-disadvantages} 1. 
完全なトランザクション機能がない。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md index 046daab67db..2c6f50a66fa 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_async_inserts.md @@ -43,7 +43,6 @@ ClickHouse における非同期 INSERT は、クライアント側でバッチ 実際には、重複排除が有効で同一の INSERT がリトライされた場合(タイムアウトやネットワーク切断などが原因)、ClickHouse は重複を安全に無視できます。これにより冪等性が維持され、データの二重書き込みを回避できます。ただし、INSERT の検証やスキーマのパースはバッファフラッシュ時にのみ行われるため、型不一致のようなエラーはそのタイミングで初めて表面化する点には注意が必要です。 - ### 非同期インサートの有効化 {#enabling-asynchronous-inserts} 非同期インサートは、特定のユーザー単位、またはクエリ単位で有効化できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md index ae6a6618201..9c2c8142b0c 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/json_type.md @@ -11,8 +11,6 @@ doc_type: 'reference' ClickHouse には、半構造化データおよび動的なデータ向けに設計されたネイティブの JSON カラム型が用意されています。**これはデータ形式ではなくカラム型である**ことを明確にしておくことが重要です。JSON は文字列として、あるいは [JSONEachRow](/interfaces/formats/JSONEachRow) のようなサポートされているフォーマット経由で ClickHouse に挿入できますが、それは JSON カラム型を使用していることを意味しません。ユーザーは、単に JSON を保存しているというだけではなく、データ構造が動的な場合にのみ JSON 型を使用すべきです。 - - ## JSON 型を使用するタイミング {#when-to-use-the-json-type} 次のようなデータには JSON 型を使用します: @@ -29,8 +27,6 @@ ClickHouse には、半構造化データおよび動的なデータ向けに設 アプローチを組み合わせることもできます。たとえば、予測可能なトップレベルのフィールドには静的なカラムを使用し、ペイロード内の動的な部分については 1 つの JSON カラムを使用するといった構成です。 - - ## JSON を使用する際の考慮事項とヒント {#considerations-and-tips-for-using-json} `JSON` 型は、パスをサブカラムにフラット化することで効率的な列指向ストレージを実現します。しかし、柔軟性には責任が伴います。効果的に使用するには、次の点に留意してください。 @@ -43,8 +39,6 @@ ClickHouse には、半構造化データおよび動的なデータ向けに設 
型ヒントは、不要な型推論を避けるための仕組みにとどまりません。ストレージおよび処理における間接参照を完全に排除します。型ヒント付きの JSON パスは常に従来のカラムと同様に格納されるため、[**discriminator カラム**](https://clickhouse.com/blog/a-new-powerful-json-data-type-for-clickhouse#storage-extension-for-dynamically-changing-data)やクエリ実行時の動的な解決が不要になります。これは、適切に定義された型ヒントを用いれば、ネストされた JSON フィールドであっても、最初からトップレベルのフィールドとしてモデリングされていた場合と同等のパフォーマンスと効率を得られることを意味します。その結果、大部分が一貫しているものの JSON の柔軟性の恩恵も受けたいデータセットに対して、スキーマや取り込みパイプラインを再構成することなくパフォーマンスを維持する便利な方法として、型ヒントを利用できます。 ::: - - ## 高度な機能 {#advanced-features} * JSON カラムは、他のカラムと同様に **主キーとして使用できます**。サブカラムにはコーデックを指定できません。 @@ -54,8 +48,6 @@ ClickHouse には、半構造化データおよび動的なデータ向けに設 追加の詳細については [ClickHouse JSON ドキュメント](/sql-reference/data-types/newjson) を参照するか、ブログ記事 [A New Powerful JSON Data Type for ClickHouse](https://clickhouse.com/blog/a-new-powerful-json-data-type-for-clickhouse) をご覧ください。 - - ## 例 {#examples} 次の JSON サンプルは、[Python PyPI データセット](https://clickpy.clickhouse.com/) に含まれる 1 行を表しています。 @@ -159,7 +151,6 @@ ORDER BY update_date ここでも、データを JSON 形式で挿入できます: - ```sql INSERT INTO arxiv FORMAT JSONEachRow {"id":"2101.11408","submitter":"Daniel Lemire","authors":"Daniel Lemire","title":"Number Parsing at a Gigabyte per Second","comments":"Software at https://github.com/fastfloat/fast_float and\n https://github.com/lemire/simple_fastfloat_benchmark/","journal-ref":"Software: Practice and Experience 51 (8), 2021","doi":"10.1002/spe.2984","report-no":null,"categories":"cs.DS cs.MS","license":"http://creativecommons.org/licenses/by/4.0/","abstract":"With disks and networks providing gigabytes per second ....\n","versions":[{"created":"Mon, 11 Jan 2021 20:31:27 GMT","version":"v1"},{"created":"Sat, 30 Jan 2021 23:57:29 GMT","version":"v2"}],"update_date":"2022-11-07","authors_parsed":[["Lemire","Daniel",""]]} @@ -236,7 +227,6 @@ ORDER BY doc.update_date このテーブルにデータを挿入し、そこから自動推論されたスキーマを [`JSONAllPathsWithTypes`](/sql-reference/functions/json-functions#JSONAllPathsWithTypes) 関数と 
[`PrettyJSONEachRow`](/interfaces/formats/PrettyJSONEachRow) 出力フォーマットを使って確認できます。 - ```sql INSERT INTO arxiv FORMAT JSONAsObject {"id":"2101.11408","submitter":"Daniel Lemire","authors":"Daniel Lemire","title":"ギガバイト毎秒での数値パース","comments":"ソフトウェアは https://github.com/fastfloat/fast_float および\n https://github.com/lemire/simple_fastfloat_benchmark/ で入手可能","journal-ref":"Software: Practice and Experience 51 (8), 2021","doi":"10.1002/spe.2984","report-no":null,"categories":"cs.DS cs.MS","license":"http://creativecommons.org/licenses/by/4.0/","abstract":"ディスクとネットワークが毎秒ギガバイトを提供する環境において....\n","versions":[{"created":"Mon, 11 Jan 2021 20:31:27 GMT","version":"v1"},{"created":"Sat, 30 Jan 2021 23:57:29 GMT","version":"v2"}],"update_date":"2022-11-07","authors_parsed":[["Lemire","Daniel",""]],"tags":{"tag_1":{"name":"ClickHouseユーザー","score":"A+","comment":"良い読み物、ClickHouseに適用可能"},"28_03_2025":{"name":"professor X","score":10,"comment":"あまり学ぶことがなかった","updates":[{"name":"professor X","comment":"ウルヴァリンの方がより興味深かった"}]}}} @@ -305,7 +295,6 @@ INSERT INTO arxiv FORMAT JSONEachRow {"id":"2101.11408","submitter":"Daniel Lemire","authors":"Daniel Lemire","title":"ギガバイト毎秒の数値パース","comments":"ソフトウェアは https://github.com/fastfloat/fast_float および\n https://github.com/lemire/simple_fastfloat_benchmark/ で入手可能","journal-ref":"Software: Practice and Experience 51 (8), 2021","doi":"10.1002/spe.2984","report-no":null,"categories":"cs.DS cs.MS","license":"http://creativecommons.org/licenses/by/4.0/","abstract":"ディスクとネットワークがギガバイト毎秒を提供する環境において....\n","versions":[{"created":"Mon, 11 Jan 2021 20:31:27 GMT","version":"v1"},{"created":"Sat, 30 Jan 2021 23:57:29 GMT","version":"v2"}],"update_date":"2022-11-07","authors_parsed":[["Lemire","Daniel",""]],"tags":{"tag_1":{"name":"ClickHouseユーザー","score":"A+","comment":"良い読み物、ClickHouseに適用可能"},"28_03_2025":{"name":"professor X","score":10,"comment":"あまり学ぶことがなかった","updates":[{"name":"professor X","comment":"ウルヴァリンの方がより興味深かった"}]}}} ``` - これでサブカラム `tags` 
の型を推論できるようになりました。 ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md index b6d21213f28..8b76f8ae1de 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/sizing-and-hardware-recommendations.md @@ -8,8 +8,6 @@ doc_type: 'guide' keywords: ['サイジング', 'ハードウェア', 'キャパシティプランニング', 'ベストプラクティス', 'パフォーマンス'] --- - - # サイジングとハードウェアの推奨事項 {#sizing-and-hardware-recommendations} このガイドでは、オープンソースユーザー向けのハードウェア、コンピューティングリソース、メモリ、およびディスク構成に関する一般的な推奨事項について説明します。セットアップを簡素化したい場合は、[ClickHouse Cloud](https://clickhouse.com/cloud) の利用を推奨します。ClickHouse Cloud は、インフラ管理にかかるコストを最小限に抑えつつ、ワークロードに応じて自動的にスケールおよび調整を行います。 @@ -23,8 +21,6 @@ ClickHouse クラスターの構成は、アプリケーションのユースケ - ハードウェアコスト - 運用・保守コスト - - ## Disk {#disk} ClickHouse で使用するディスクの種類は、データ量、レイテンシ、スループット要件によって異なります。 @@ -39,8 +35,6 @@ ClickHouse で使用するディスクの種類は、データ量、レイテン また、SSD と HDD を組み合わせて [hot/warm/cold アーキテクチャ](/guides/developer/ttl#implementing-a-hotwarmcold-architecture) による階層型ストレージを実装することもできます。あるいは、コンピュートとストレージを分離するために、ストレージとして [AWS S3](https://aws.amazon.com/s3/) を利用することも可能です。コンピュートとストレージを分離したオープンソース版 ClickHouse の利用方法については、[こちら](/guides/separation-storage-compute)のガイドを参照してください。ClickHouse Cloud では、コンピュートとストレージの分離がデフォルトで利用可能です。 - - ## CPU {#cpu} ### どの CPU を使用すべきですか? 
{#which-cpu-should-i-use} @@ -75,8 +69,6 @@ ClickHouse に対して標準的な CPU 使用率の目標値は存在しませ 例えば、M 系 CPU を使用する場合、25 CPU コアあたり 100 GB のメモリをプロビジョニングすることを推奨します。アプリケーションに適切なメモリ量を決定するには、メモリ使用状況のプロファイリングが必要です。[メモリ問題のデバッグに関するこのガイド](/guides/developer/debugging-memory-issues) を参照するか、[組み込みのオブザーバビリティダッシュボード](/operations/monitoring) を使用して ClickHouse を監視してください。 - - ## メモリ {#memory} CPU の選択と同様に、メモリとストレージの比率、およびメモリと CPU の比率は、ユースケースに依存します。 @@ -96,8 +88,6 @@ CPU の選択と同様に、メモリとストレージの比率、およびメ 顧客向けワークロードなど頻繁にアクセスされるユースケースでは、1:30〜1:50 のメモリとストレージの比率とし、より多くのメモリを使用することを推奨します。 - - ## レプリカ {#replicas} 1シャードあたり少なくとも3つのレプリカ(または [Amazon EBS](https://aws.amazon.com/ebs/) を使用する場合は2つのレプリカ)を確保することを推奨します。さらに、レプリカを追加して水平方向にスケールする前に、まずはすべてのレプリカを垂直方向にスケールアップしておくことを推奨します。 @@ -106,8 +96,6 @@ ClickHouse は自動でシャーディングを行わず、データセットの 自動スケーリングに対応し、ユースケースに応じてレプリカ数を容易に制御できる [ClickHouse Cloud](https://clickhouse.com/cloud) の利用も検討してください。 - - ## 大規模ワークロード向けの構成例 {#example-configurations-for-large-workloads} ClickHouse の構成は、利用するアプリケーション固有の要件に大きく依存します。コストとパフォーマンスの両面で最適なアーキテクチャ設計の支援をご希望の場合は、[営業チームまでお問い合わせ](https://clickhouse.com/company/contact?loc=docs-sizing-and-hardware-recommendations)ください。 @@ -178,8 +166,6 @@ ClickHouse の構成は、利用するアプリケーション固有の要件に ### ログ用途向け Fortune 500 通信事業者の例 {#fortune-500-telecom-operator-for-a-logging-use-case} - - @@ -236,8 +222,6 @@ ClickHouse の構成は、利用するアプリケーション固有の要件に
ストレージ
- - ## 参考資料 {#further-reading} 以下は、オープンソースの ClickHouse を利用している企業のアーキテクチャについて解説した公開ブログ記事です: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md index 09f53caee6a..fff2682e527 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/using_data_skipping_indices.md @@ -50,7 +50,6 @@ ClickHouse は **データスキップインデックス** と呼ばれる強力 Data Skipping Indices についてのより詳細なガイドは[こちら](/sql-reference/statements/alter/skipping-index)を参照してください。 - ## 例 {#example} 次のように最適化されたテーブルを考えます。このテーブルには、Stack Overflow のデータが投稿 1 件につき 1 行で格納されています。 @@ -142,7 +141,6 @@ LIMIT 1 簡単な分析から、`ViewCount` は予想どおり `CreationDate`(主キー)と相関していることが分かります — 投稿が存在している期間が長いほど、閲覧される機会も増えるためです。 - ```sql SELECT toDate(CreationDate) AS day, avg(ViewCount) AS view_count FROM stackoverflow.posts WHERE day > '2009-01-01' GROUP BY day ``` @@ -217,7 +215,6 @@ FROM stackoverflow.posts WHERE (CreationDate > '2009-01-01') AND (ViewCount > 10000000) ``` - ┌─explain────────────────────────────────────────────────────────────┐ │ 式 ((Project names + Projection)) │ │ 集約処理 │ @@ -259,7 +256,6 @@ WHERE (CreationDate > '2009-01-01') AND (ViewCount > 10000000) スキッピングインデックスの使用 ``` - ## 関連ドキュメント {#related-docs} - [データスキッピングインデックスガイド](/optimize/skipping-indexes) - [データスキッピングインデックスの例](/optimize/skipping-indexes/examples) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md index ba60fe3d29d..b58a6552e52 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/getting-started.md @@ -7,16 +7,12 @@ keywords: ['chdb', 'embedded', 'clickhouse-lite', 'in-process', 'in process'] doc_type: 'guide' --- - - # chDB 
を使い始める {#getting-started-with-chdb} このガイドでは、Python 版の chDB を使って、すぐに使い始めるための手順を説明します。 まず S3 上の JSON ファイルに対してクエリを実行し、その JSON ファイルを元に chDB 内にテーブルを作成し、データに対していくつかクエリを実行します。 また、Apache Arrow や Pandas など、複数のフォーマットでクエリ結果を取得する方法を確認し、最後に Pandas の DataFrame をクエリする方法を学びます。 - - ## セットアップ {#setup} まずは仮想環境を作成します。 @@ -51,7 +47,6 @@ ipython pip install pandas pyarrow ``` - ## S3 内の JSON ファイルをクエリする {#querying-a-json-file-in-s3} ここでは、S3 バケットに保存されている JSON ファイルをどのようにクエリするかを見ていきます。 @@ -149,7 +144,6 @@ chdb.query( これはプログラム内で定義した変数に対して行う分には問題ありませんが、ユーザーからの入力に対しては決して行わないでください。そうしないと、クエリが SQL インジェクション攻撃に対して無防備になります。 ::: - ## 出力フォーマットの設定 {#configuring-the-output-format} デフォルトの出力フォーマットは `CSV` ですが、`output_format` パラメータで変更できます。 @@ -202,7 +196,6 @@ is_live_content: [[false,true]] count(): [[315746,20686]] ``` - ## JSON ファイルからテーブルを作成する {#creating-a-table-from-json-file} 次に、chDB でテーブルを作成する方法を見ていきます。 @@ -309,7 +302,6 @@ sess.query(f""" ) ``` - ## テーブルをクエリする {#querying-a-table} 最後に、そのテーブルに対してクエリを実行してみましょう。 @@ -348,7 +340,6 @@ df df["likeDislikeRatio"] = df["likeCount"] / df["dislikeCount"] ``` - ## Pandas データフレームをクエリする {#querying-a-pandas-dataframe} その後、chDB からその DataFrame に対してクエリを実行できます。 @@ -379,7 +370,6 @@ chdb.query( Pandas の DataFrame に対するクエリについては、[Querying Pandas developer guide](guides/querying-pandas.md) も参照してください。 - ## 次のステップ {#next-steps} このガイドを通じて、chDB の概要を把握していただけたかと思います。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md index 44398d6fb1d..edb4ee746f3 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/jupysql.md @@ -95,7 +95,7 @@ conn = dbapi.connect(path="atp.chdb") %config SqlMagic.displaylimit = None ``` -## CSV ファイルのデータをクエリする +## CSV ファイルのデータをクエリする {#querying-data-in-csv-files} `atp_rankings` というプレフィックスを持つファイルをいくつかダウンロードしました。 スキーマを理解するために `DESCRIBE` 句を使って確認してみましょう。 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md index 8161c2c7788..4c40057a184 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/guides/query-remote-clickhouse.md @@ -9,8 +9,6 @@ doc_type: 'guide' このガイドでは、chDB からリモート ClickHouse サーバーにクエリを実行する方法について説明します。 - - ## セットアップ {#setup} まずは仮想環境を作成します。 @@ -41,7 +39,6 @@ ipython このコードは、Python スクリプトやお使いのノートブック環境でも利用できます。 - ## ClickPy 入門 {#an-intro-to-clickpy} これからクエリを実行する対象となるリモート ClickHouse サーバーは [ClickPy](https://clickpy.clickhouse.com) です。 @@ -50,8 +47,6 @@ ClickPy は PyPI パッケージのすべてのダウンロードを記録し、 ClickPy について詳しくは、[GitHub リポジトリ](https://github.com/ClickHouse/clickpy)を参照してください。 - - ## ClickPy ClickHouse サービスにクエリを実行する {#querying-the-clickpy-clickhouse-service} chDB をインポートします: @@ -133,7 +128,6 @@ sklearn_df.sort_values(by=["x"], ascending=False).head(n=10) 2383 2024-09-23 1777554 ``` - ## Pandas の DataFrame を結合する {#merging-pandas-dataframes} これで 2 つの DataFrame が揃ったので、日付(`x` 列)をキーとして、次のように結合できます。 @@ -172,7 +166,6 @@ df.head(n=5) 4 2018-03-02 5 23842 0.000210 ``` - ## Pandas DataFrame をクエリする {#querying-pandas-dataframes} 次に、最も良い比率と最も悪い比率となっている日付を見つけたいとします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md index 67dde65da5a..ebde68e5098 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/index.md @@ -10,14 +10,11 @@ doc_type: 'guide' import Image from '@theme/IdealImage'; import dfBench from '@site/static/images/chdb/df_bench.png'; - # chDB {#chdb} chDB は、[ClickHouse](https://github.com/clickhouse/clickhouse) を基盤とした、高速なインプロセス SQL OLAP エンジンです。 ClickHouse サーバーに接続することなく、任意のプログラミング言語から ClickHouse のパワーを利用したい場合に使用できます。 - - ## 主な機能 {#key-features} 
- **インプロセス SQL OLAP エンジン** - ClickHouse を基盤としており、ClickHouse サーバーのインストールは不要 @@ -26,8 +23,6 @@ ClickHouse サーバーに接続することなく、任意のプログラミン - **豊富な Python エコシステムとの統合** - Pandas、Arrow、DB API 2.0 をネイティブサポートし、既存のデータサイエンスワークフローにシームレスに組み込める - **依存関係ゼロ** - 外部データベースのインストールは不要 - - ## chDB はどの言語をサポートしていますか? {#what-languages-are-supported-by-chdb} chDB では次の言語バインディングを利用できます。 @@ -39,8 +34,6 @@ chDB では次の言語バインディングを利用できます。 * [Bun](install/bun.md) * [C および C++](install/c.md) - - ## どのように始めればよいですか? {#how-do-i-get-started} * [Go](install/go.md)、[Rust](install/rust.md)、[NodeJS](install/nodejs.md)、[Bun](install/bun.md)、または [C および C++](install/c.md) を使用している場合は、対応する言語ページを参照してください。 @@ -53,8 +46,6 @@ chDB では次の言語バインディングを利用できます。 * [リモート ClickHouse をクエリする](guides/query-remote-clickhouse.md) * [clickhouse-local データベースの使用](guides/clickhouse-local.md) - - ## 紹介動画 {#an-introductory-video} ClickHouse のオリジナル開発者である Alexey Milovidov が chDB プロジェクトについて簡潔に紹介する動画をご覧いただけます。 @@ -63,24 +54,16 @@ ClickHouse のオリジナル開発者である Alexey Milovidov が chDB プロ - - ## パフォーマンスベンチマーク {#performance-benchmarks} chDB は、さまざまなユースケースにおいて卓越したパフォーマンスを発揮します。 - - * 
**[組み込みエンジンのClickBench](https://benchmark.clickhouse.com/#eyJzeXN0ZW0iOnsiQXRoZW5hIChwYXJ0aXRpb25lZCkiOnRydWUsIkF0aGVuYSAoc2luZ2xlKSI6dHJ1ZSwiQXVyb3JhIGZvciBNeVNRTCI6dHJ1ZSwiQXVyb3JhIGZvciBQb3N0Z3JlU1FMIjp0cnVlLCJCeXRlSG91c2UiOnRydWUsImNoREIiOnRydWUsIkNpdHVzIjp0cnVlLCJjbGlja2hvdXNlLWxvY2FsIChwYXJ0aXRpb25lZCkiOnRydWUsImNsaWNraG91c2UtbG9jYWwgKHNpbmdsZSkiOnRydWUsIkNsaWNrSG91c2UiOnRydWUsIkNsaWNrSG91c2UgKHR1bmVkKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoenN0ZCkiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQiOnRydWUsIkNsaWNrSG91c2UgKHdlYikiOnRydWUsIkNyYXRlREIiOnRydWUsIkRhdGFiZW5kIjp0cnVlLCJEYXRhRnVzaW9uIChzaW5nbGUpIjp0cnVlLCJBcGFjaGUgRG9yaXMiOnRydWUsIkRydWlkIjp0cnVlLCJEdWNrREIgKFBhcnF1ZXQpIjp0cnVlLCJEdWNrREIiOnRydWUsIkVsYXN0aWNzZWFyY2giOnRydWUsIkVsYXN0aWNzZWFyY2ggKHR1bmVkKSI6ZmFsc2UsIkdyZWVucGx1bSI6dHJ1ZSwiSGVhdnlBSSI6dHJ1ZSwiSHlkcmEiOnRydWUsIkluZm9icmlnaHQiOnRydWUsIktpbmV0aWNhIjp0cnVlLCJNYXJpYURCIENvbHVtblN0b3JlIjp0cnVlLCJNYXJpYURCIjpmYWxzZSwiTW9uZXREQiI6dHJ1ZSwiTW9uZ29EQiI6dHJ1ZSwiTXlTUUwgKE15SVNBTSkiOnRydWUsIk15U1FMIjp0cnVlLCJQaW5vdCI6dHJ1ZSwiUG9zdGdyZVNRTCI6dHJ1ZSwiUG9zdGdyZVNRTCAodHVuZWQpIjpmYWxzZSwiUXVlc3REQiAocGFydGl0aW9uZWQpIjp0cnVlLCJRdWVzdERCIjp0cnVlLCJSZWRzaGlmdCI6dHJ1ZSwiU2VsZWN0REIiOnRydWUsIlNpbmdsZVN0b3JlIjp0cnVlLCJTbm93Zmxha2UiOnRydWUsIlNRTGl0ZSI6dHJ1ZSwiU3RhclJvY2tzIjp0cnVlLCJUaW1lc2NhbGVEQiAoY29tcHJlc3Npb24pIjp0cnVlLCJUaW1lc2NhbGVEQiI6dHJ1ZX0sInR5cGUiOnsic3RhdGVsZXNzIjpmYWxzZSwibWFuYWdlZCI6ZmFsc2UsIkphdmEiOmZhbHNlLCJjb2x1bW4tb3JpZW50ZWQiOmZhbHNlLCJDKysiOmZhbHNlLCJNeVNRTCBjb21wYXRpYmxlIjpmYWxzZSwicm93LW9yaWVudGVkIjpmYWxzZSwiQyI6ZmFsc2UsIlBvc3RncmVTUUwgY29tcGF0aWJsZSI6ZmFsc2UsIkNsaWNrSG91c2UgZGVyaXZhdGl2ZSI6ZmFsc2UsImVtYmVkZGVkIjp0cnVlLCJzZXJ2ZXJsZXNzIjpmYWxzZSwiUnVzdCI6ZmFsc2UsInNlYXJjaCI6ZmFsc2UsImRvY3VtZW50IjpmYWxzZSwidGltZS1zZXJpZXMiOmZhbHNlfSwibWFjaGluZSI6eyJzZXJ2ZXJsZXNzIjp0cnVlLCIxNmFjdSI6dHJ1ZSwiTCI6dHJ1ZSwiTSI6dHJ1ZSwiUyI6dHJ1ZSwiWFMiOnRydWUsImM2YS5tZXRhbCwgNTAwZ2IgZ3AyIjp0cnVlLCJjNmEuNHhsYXJnZSwgNTAwZ2IgZ3AyIjp0cnVlLCJjNS40eGxhcmdlLCA1MDBnYiBncDIiOnRydWUsIjE2IHRocmVhZHMiOnRydW
UsIjIwIHRocmVhZHMiOnRydWUsIjI0IHRocmVhZHMiOnRydWUsIjI4IHRocmVhZHMiOnRydWUsIjMwIHRocmVhZHMiOnRydWUsIjQ4IHRocmVhZHMiOnRydWUsIjYwIHRocmVhZHMiOnRydWUsIm01ZC4yNHhsYXJnZSI6dHJ1ZSwiYzVuLjR4bGFyZ2UsIDIwMGdiIGdwMiI6dHJ1ZSwiYzZhLjR4bGFyZ2UsIDE1MDBnYiBncDIiOnRydWUsImRjMi44eGxhcmdlIjp0cnVlLCJyYTMuMTZ4bGFyZ2UiOnRydWUsInJhMy40eGxhcmdlIjp0cnVlLCJyYTMueGxwbHVzIjp0cnVlLCJTMjQiOnRydWUsIlMyIjp0cnVlLCIyWEwiOnRydWUsIjNYTCI6dHJ1ZSwiNFhMIjp0cnVlLCJYTCI6dHJ1ZX0sImNsdXN0ZXJfc2l6ZSI6eyIxIjp0cnVlLCIyIjp0cnVlLCI0Ijp0cnVlLCI4Ijp0cnVlLCIxNiI6dHJ1ZSwiMzIiOnRydWUsIjY0Ijp0cnVlLCIxMjgiOnRydWUsInNlcnZlcmxlc3MiOnRydWUsInVuZGVmaW5lZCI6dHJ1ZX0sIm1ldHJpYyI6ImhvdCIsInF1ZXJpZXMiOlt0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlXX0=)** - 総合的なパフォーマンス比較 * **[DataFrame 処理パフォーマンス](https://colab.research.google.com/drive/1FogLujJ_-ds7RGurDrUnK-U0IW8a8Qd0)** - 他の DataFrame ライブラリとの処理性能比較 * **[DataFrame 
Benchmark](https://benchmark.clickhouse.com/#eyJzeXN0ZW0iOnsiQWxsb3lEQiI6dHJ1ZSwiQWxsb3lEQiAodHVuZWQpIjp0cnVlLCJBdGhlbmEgKHBhcnRpdGlvbmVkKSI6dHJ1ZSwiQXRoZW5hIChzaW5nbGUpIjp0cnVlLCJBdXJvcmEgZm9yIE15U1FMIjp0cnVlLCJBdXJvcmEgZm9yIFBvc3RncmVTUUwiOnRydWUsIkJ5Q29uaXR5Ijp0cnVlLCJCeXRlSG91c2UiOnRydWUsImNoREIgKERhdGFGcmFtZSkiOnRydWUsImNoREIgKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiY2hEQiI6dHJ1ZSwiQ2l0dXMiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQgKGF3cykiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQgKGF6dXJlKSI6dHJ1ZSwiQ2xpY2tIb3VzZSBDbG91ZCAoZ2NwKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoZGF0YSBsYWtlLCBwYXJ0aXRpb25lZCkiOnRydWUsIkNsaWNrSG91c2UgKGRhdGEgbGFrZSwgc2luZ2xlKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoUGFycXVldCwgcGFydGl0aW9uZWQpIjp0cnVlLCJDbGlja0hvdXNlIChQYXJxdWV0LCBzaW5nbGUpIjp0cnVlLCJDbGlja0hvdXNlICh3ZWIpIjp0cnVlLCJDbGlja0hvdXNlIjp0cnVlLCJDbGlja0hvdXNlICh0dW5lZCkiOnRydWUsIkNsaWNrSG91c2UgKHR1bmVkLCBtZW1vcnkpIjp0cnVlLCJDbG91ZGJlcnJ5Ijp0cnVlLCJDcmF0ZURCIjp0cnVlLCJDcnVuY2h5IEJyaWRnZSBmb3IgQW5hbHl0aWNzIChQYXJxdWV0KSI6dHJ1ZSwiRGF0YWJlbmQiOnRydWUsIkRhdGFGdXNpb24gKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiRGF0YUZ1c2lvbiAoUGFycXVldCwgc2luZ2xlKSI6dHJ1ZSwiQXBhY2hlIERvcmlzIjp0cnVlLCJEcnVpZCI6dHJ1ZSwiRHVja0RCIChEYXRhRnJhbWUpIjp0cnVlLCJEdWNrREIgKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiRHVja0RCIjp0cnVlLCJFbGFzdGljc2VhcmNoIjp0cnVlLCJFbGFzdGljc2VhcmNoICh0dW5lZCkiOmZhbHNlLCJHbGFyZURCIjp0cnVlLCJHcmVlbnBsdW0iOnRydWUsIkhlYXZ5QUkiOnRydWUsIkh5ZHJhIjp0cnVlLCJJbmZvYnJpZ2h0Ijp0cnVlLCJLaW5ldGljYSI6dHJ1ZSwiTWFyaWFEQiBDb2x1bW5TdG9yZSI6dHJ1ZSwiTWFyaWFEQiI6ZmFsc2UsIk1vbmV0REIiOnRydWUsIk1vbmdvREIiOnRydWUsIk1vdGhlcmR1Y2siOnRydWUsIk15U1FMIChNeUlTQU0pIjp0cnVlLCJNeVNRTCI6dHJ1ZSwiT3hsYSI6dHJ1ZSwiUGFuZGFzIChEYXRhRnJhbWUpIjp0cnVlLCJQYXJhZGVEQiAoUGFycXVldCwgcGFydGl0aW9uZWQpIjp0cnVlLCJQYXJhZGVEQiAoUGFycXVldCwgc2luZ2xlKSI6dHJ1ZSwiUGlub3QiOnRydWUsIlBvbGFycyAoRGF0YUZyYW1lKSI6dHJ1ZSwiUG9zdGdyZVNRTCAodHVuZWQpIjpmYWxzZSwiUG9zdGdyZVNRTCI6dHJ1ZSwiUXVlc3REQiAocGFydGl0aW9uZWQpIjp0cnVlLCJRdWVzdERCIjp0cnVlLCJSZWRzaGlmdCI6dHJ1ZSwiU2luZ2xlU3RvcmUiOnRydWUsIlNub3dmbGFrZSI6dHJ1ZSw
iU1FMaXRlIjp0cnVlLCJTdGFyUm9ja3MiOnRydWUsIlRhYmxlc3BhY2UiOnRydWUsIlRlbWJvIE9MQVAgKGNvbHVtbmFyKSI6dHJ1ZSwiVGltZXNjYWxlREIgKGNvbXByZXNzaW9uKSI6dHJ1ZSwiVGltZXNjYWxlREIiOnRydWUsIlVtYnJhIjp0cnVlfSwidHlwZSI6eyJDIjpmYWxzZSwiY29sdW1uLW9yaWVudGVkIjpmYWxzZSwiUG9zdGdyZVNRTCBjb21wYXRpYmxlIjpmYWxzZSwibWFuYWdlZCI6ZmFsc2UsImdjcCI6ZmFsc2UsInN0YXRlbGVzcyI6ZmFsc2UsIkphdmEiOmZhbHNlLCJDKysiOmZhbHNlLCJNeVNRTCBjb21wYXRpYmxlIjpmYWxzZSwicm93LW9yaWVudGVkIjpmYWxzZSwiQ2xpY2tIb3VzZSBkZXJpdmF0aXZlIjpmYWxzZSwiZW1iZWRkZWQiOmZhbHNlLCJzZXJ2ZXJsZXNzIjpmYWxzZSwiZGF0YWZyYW1lIjp0cnVlLCJhd3MiOmZhbHNlLCJhenVyZSI6ZmFsc2UsImFuYWx5dGljYWwiOmZhbHNlLCJSdXN0IjpmYWxzZSwic2VhcmNoIjpmYWxzZSwiZG9jdW1lbnQiOmZhbHNlLCJzb21ld2hhdCBQb3N0Z3JlU1FMIGNvbXBhdGlibGUiOmZhbHNlLCJ0aW1lLXNlcmllcyI6ZmFsc2V9LCJtYWNoaW5lIjp7IjE2IHZDUFUgMTI4R0IiOnRydWUsIjggdkNQVSA2NEdCIjp0cnVlLCJzZXJ2ZXJsZXNzIjp0cnVlLCIxNmFjdSI6dHJ1ZSwiYzZhLjR4bGFyZ2UsIDUwMGdiIGdwMiI6dHJ1ZSwiTCI6dHJ1ZSwiTSI6dHJ1ZSwiUyI6dHJ1ZSwiWFMiOnRydWUsImM2YS5tZXRhbCwgNTAwZ2IgZ3AyIjp0cnVlLCIxOTJHQiI6dHJ1ZSwiMjRHQiI6dHJ1ZSwiMzYwR0IiOnRydWUsIjQ4R0IiOnRydWUsIjcyMEdCIjp0cnVlLCI5NkdCIjp0cnVlLCJkZXYiOnRydWUsIjcwOEdCIjp0cnVlLCJjNW4uNHhsYXJnZSwgNTAwZ2IgZ3AyIjp0cnVlLCJBbmFseXRpY3MtMjU2R0IgKDY0IHZDb3JlcywgMjU2IEdCKSI6dHJ1ZSwiYzUuNHhsYXJnZSwgNTAwZ2IgZ3AyIjp0cnVlLCJjNmEuNHhsYXJnZSwgMTUwMGdiIGdwMiI6dHJ1ZSwiY2xvdWQiOnRydWUsImRjMi44eGxhcmdlIjp0cnVlLCJyYTMuMTZ4bGFyZ2UiOnRydWUsInJhMy40eGxhcmdlIjp0cnVlLCJyYTMueGxwbHVzIjp0cnVlLCJTMiI6dHJ1ZSwiUzI0Ijp0cnVlLCIyWEwiOnRydWUsIjNYTCI6dHJ1ZSwiNFhMIjp0cnVlLCJYTCI6dHJ1ZSwiTDEgLSAxNkNQVSAzMkdCIjp0cnVlLCJjNmEuNHhsYXJnZSwgNTAwZ2IgZ3AzIjp0cnVlfSwiY2x1c3Rlcl9zaXplIjp7IjEiOnRydWUsIjIiOnRydWUsIjQiOnRydWUsIjgiOnRydWUsIjE2Ijp0cnVlLCIzMiI6dHJ1ZSwiNjQiOnRydWUsIjEyOCI6dHJ1ZSwic2VydmVybGVzcyI6dHJ1ZX0sIm1ldHJpYyI6ImhvdCIsInF1ZXJpZXMiOlt0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx
0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlXX0=)** - - DataFrame のベンチマーク結果 - - ## chDB について {#about-chdb} - chDB プロジェクト誕生の経緯については [ブログ記事](https://clickhouse.com/blog/chdb-embedded-clickhouse-rocket-engine-on-a-bicycle) を参照してください @@ -89,8 +72,6 @@ chDB は、さまざまなユースケースにおいて卓越したパフォー - ブラウザ上で [codapi のサンプル](https://antonz.org/trying-chdb/) を使って chDB を試してください - さらに多くのサンプルについては https://github.com/chdb-io/chdb/tree/main/examples を参照してください - - ## ライセンス {#license} chDB は Apache License 2.0 のもとで提供されています。詳細については [LICENSE](https://github.com/chdb-io/chdb/blob/main/LICENSE.txt) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md index efcb40ec8d0..65fa1fbba8a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/bun.md @@ -48,7 +48,7 @@ bun run build chDB-bun は、1 回限りの処理向けのエフェメラルクエリと、データベースの状態を保持する永続セッションという 2 つのクエリモードをサポートしています。 -### エフェメラルクエリ +### エフェメラルクエリ {#ephemeral-queries} 永続的な状態を保持する必要がない、単純な一度限りのクエリには次を使用します: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md index 6b7ea53e686..50acd078b1e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/c.md @@ -7,14 +7,10 @@ keywords: ['chdb', 'c', 'cpp', 'embedded', 'clickhouse', 'sql', 'olap', 'api'] doc_type: 'guide' --- - - # C および C++ 向け chDB {#chdb-for-c-and-c} chDB は、ClickHouse の機能をアプリケーションに直接組み込むためのネイティブ C/C++ API を提供します。この API は、簡単なクエリから、永続的な接続やクエリ結果のストリーミングなどの高度な機能までサポートします。 - - ## インストール {#installation} ### ステップ 1: libchdb をインストール {#install-libchdb} @@ -37,13 +33,11 @@ curl -sL https://lib.chdb.io | bash chDB ライブラリを用いてアプリケーションをコンパイルおよびリンクします: - ```bash # Cコンパイル {#c-compilation} gcc -o myapp myapp.c -lchdb ``` - # C++のコンパイル {#c-compilation} 
g++ -o myapp myapp.cpp -lchdb @@ -51,7 +45,6 @@ g++ -o myapp myapp.cpp -lchdb ``` ``` - ## C の例 {#c-examples} ### 基本的な接続とクエリ {#basic-connection-queries} @@ -194,7 +187,6 @@ int main() { chdb_destroy_query_result(json_result); ``` - // 整形表示 chdb_result* pretty_result = chdb_query(*conn, query, "Pretty"); printf("Pretty Result:\n%.*s\n\n", @@ -209,7 +201,6 @@ return 0; ``` ``` - ## C++ の例 {#cpp-example} ```cpp @@ -293,7 +284,6 @@ int main() { } ``` - ## エラー処理のベストプラクティス {#error-handling} ```c @@ -341,7 +331,6 @@ cleanup: } ``` - ## GitHub リポジトリ {#github-repository} - **メインリポジトリ**: [chdb-io/chdb](https://github.com/chdb-io/chdb) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md index 65d4c50fdb6..2240e8a7678 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/go.md @@ -7,14 +7,10 @@ keywords: ['chdb', 'go', 'golang', 'embedded', 'clickhouse', 'sql', 'olap'] doc_type: 'guide' --- - - # Go 向けの chDB {#chdb-for-go} chDB-go は chDB 向けの Go バインディングを提供し、外部への依存関係なしに Go アプリケーション内から直接 ClickHouse クエリを実行できるようにします。 - - ## インストール {#installation} ### ステップ 1: libchdb のインストール {#install-libchdb} @@ -39,26 +35,20 @@ go install github.com/chdb-io/chdb-go@latest go get github.com/chdb-io/chdb-go ``` - ## 使用方法 {#usage} ### コマンドラインインターフェース {#cli} chDB-go には、簡単なクエリをすばやく実行するための CLI が含まれています。 - - ```bash # シンプルなクエリ {#simple-query} ./chdb-go "SELECT 123" ``` - # インタラクティブモード {#interactive-mode} ./chdb-go - - # 永続ストレージを使用したインタラクティブモード {#interactive-mode-with-persistent-storage} ./chdb-go --path /tmp/chdb @@ -242,15 +232,12 @@ func main() { } ``` - **クエリストリーミングの利点:** - **メモリ効率が高い** - すべてをメモリに読み込まずに大規模データセットを処理できる - **リアルタイム処理** - 最初のチャンクが到着し次第、すぐに処理を開始できる - **キャンセルのサポート** - `Cancel()` を使って長時間実行中のクエリをキャンセルできる - **エラー処理** - ストリーミング中に `Error()` でエラーを確認できる - - ## API ドキュメント {#api-documentation} chDB-go は高レベル API と低レベル 
API の両方を提供します: @@ -258,8 +245,6 @@ chDB-go は高レベル API と低レベル API の両方を提供します: - **[高レベル API ドキュメント](https://github.com/chdb-io/chdb-go/blob/main/chdb.md)** - ほとんどのユースケースでの利用を推奨 - **[低レベル API ドキュメント](https://github.com/chdb-io/chdb-go/blob/main/lowApi.md)** - きめ細かな制御が必要な高度なユースケース向け - - ## システム要件 {#requirements} - Go 1.21 以降 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md index c7c44346142..7c69ffa15fb 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md @@ -7,21 +7,16 @@ keywords: ['chdb', 'nodejs', 'javascript', 'embedded', 'clickhouse', 'sql', 'ola doc_type: 'guide' --- - - # Node.js 向け chDB {#chdb-for-nodejs} chDB-node は chDB の Node.js バインディングを提供し、外部依存なしで Node.js アプリケーション内から直接 ClickHouse クエリを実行できるようにします。 - - ## インストール {#installation} ```bash npm install chdb ``` - ## 使用方法 {#usage} chDB-node は 2 つのクエリモードをサポートしています。単純な操作向けのスタンドアロン クエリと、データベース状態を維持するためのセッションベースのクエリです。 @@ -146,7 +141,6 @@ try { } ``` - ## エラー処理 {#error-handling} chDB を使用する際は、常にエラーを適切に処理してください。 @@ -192,7 +186,6 @@ function safeSessionQuery() { safeSessionQuery(); ``` - ## GitHub リポジトリ {#github-repository} - **GitHub リポジトリ**: [chdb-io/chdb-node](https://github.com/chdb-io/chdb-node) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md index a9a5ec68867..ccd91485a89 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/python.md @@ -18,7 +18,6 @@ doc_type: 'guide' pip install chdb ``` - ## 使用方法 {#usage} ### コマンドラインインターフェイス {#command-line-interface} @@ -33,7 +32,6 @@ python3 -m chdb "SELECT 1, 'abc'" Pretty python3 -m chdb "SELECT version()" JSON ``` - ### 基本的な Python の使い方 {#basic-python-usage} ```python 
@@ -49,7 +47,6 @@ print(f"読み取りバイト数: {result.bytes_read()}") print(f"実行時間: {result.elapsed()} 秒") ``` - ### 接続ベースの API(推奨) {#connection-based-api} リソース管理とパフォーマンスを改善するには、次の方法を使用します。 @@ -84,7 +81,6 @@ cur.close() conn.close() ``` - ## データの入力方法 {#data-input} ### ファイルベースのデータソース {#file-based-data-sources} @@ -118,7 +114,6 @@ result = chdb.query(""" """, 'Pretty') ``` - ### 出力フォーマットの例 {#output-format-examples} ```python @@ -139,7 +134,6 @@ pretty_result = chdb.query('SELECT * FROM system.numbers LIMIT 3', 'Pretty') print(pretty_result) ``` - ### DataFrame の操作 {#dataframe-operations} #### 従来の DataFrame API {#legacy-dataframe-api} @@ -164,7 +158,6 @@ summary = result_df.query('SELECT b, sum(a) FROM __table__ GROUP BY b') print(summary) ``` - #### Python テーブルエンジン(推奨) {#python-table-engine-recommended} ```python @@ -212,7 +205,6 @@ chdb.query(""" """).show() ``` - ### ステートフル セッション {#stateful-sessions} セッションは複数の操作にわたってクエリの状態を保持し、複雑なワークフローを可能にします。 @@ -267,7 +259,6 @@ print(result) sess.close() # オプション - オブジェクト削除時に自動クローズ ``` - ### 高度なセッション機能 {#advanced-session-features} ```python @@ -288,7 +279,6 @@ result = sess.query(""" こちらも参照してください: [test_stateful.py](https://github.com/chdb-io/chdb/blob/main/tests/test_stateful.py) - ### Python DB-API 2.0 インターフェイス {#python-db-api-20} 既存の Python アプリケーションとの互換性を確保するための標準的なデータベースインターフェイス。 @@ -337,7 +327,6 @@ cursor.executemany( ) ``` - ### ユーザー定義関数 (UDF) {#user-defined-functions} カスタム Python 関数で SQL を拡張できます。 @@ -378,7 +367,6 @@ result = query(""" print(result) ``` - #### カスタムの戻り値型を持つ高度な UDF {#advanced-udf-custom-return-types} ```python @@ -413,7 +401,6 @@ result = query(""" print(result) ``` - #### UDF のベストプラクティス {#udf-best-practices} 1. 
**ステートレス関数**: UDF は副作用のない純粋関数であることが望ましいです @@ -449,7 +436,6 @@ query(""" """) ``` - ### ストリーミングクエリ処理 {#streaming-queries} 一定のメモリ使用量で大規模データセットを処理できます: @@ -520,7 +506,6 @@ stream.close() sess.close() ``` - ### Python テーブルエンジン {#python-table-engine} #### Pandas DataFrame をクエリする {#query-pandas-dataframes} @@ -578,7 +563,6 @@ window_result = chdb.query(""" print(window_result) ``` - #### PyReader を使用したカスタムデータソース {#custom-data-sources-pyreader} 独自のデータソース向けにカスタムデータリーダーを実装します。 @@ -686,7 +670,6 @@ complex_json = chdb.query(""" print(complex_json) ```` - ## パフォーマンスと最適化 {#performance-optimization} ### ベンチマーク {#benchmarks} @@ -772,7 +755,6 @@ stream.close() sess.close() ``` - ## GitHub リポジトリ {#github-repository} - **メインリポジトリ**: [chdb-io/chdb](https://github.com/chdb-io/chdb) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md index 7161df3ab2e..d7bdc6c9759 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/chdb/install/rust.md @@ -7,14 +7,10 @@ keywords: ['chdb', 'embedded', 'clickhouse-lite', 'rust', 'install', 'ffi', 'bin doc_type: 'guide' --- - - # Rust 向け chDB {#chdb-for-rust} chDB-rust は chDB 向けの実験的な FFI(Foreign Function Interface)バインディングを提供し、外部への依存関係なしに Rust アプリケーション内から直接 ClickHouse クエリを実行できるようにします。 - - ## インストール {#installation} ### libchdb のインストール {#install-libchdb} @@ -25,7 +21,6 @@ chDB ライブラリをインストールします。 curl -sL https://lib.chdb.io | bash ``` - ## 使用方法 {#usage} chDB Rust は、ステートレスおよびステートフルの 2 種類のクエリ実行モードを提供します。 @@ -114,7 +109,6 @@ fn main() -> Result<(), Box> { } ``` - ## ビルドとテスト {#building-testing} ### プロジェクトをビルドする {#build-the-project} @@ -137,7 +131,6 @@ cargo test * `tempdir` (v0.3.7) - テスト用の一時ディレクトリ処理 * `thiserror` (v1) - エラー処理ユーティリティ - ## エラー処理 {#error-handling} chDB Rust は、`Error` 列挙型を通じて包括的なエラー処理機能を提供します。 @@ -164,7 +157,6 @@ match execute("SELECT 1", None) { } ``` - ## GitHub リポジトリ 
{#github-repository} このプロジェクトの GitHub リポジトリは [chdb-io/chdb-rust](https://github.com/chdb-io/chdb-rust) で公開されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md index f07ac4c7ae2..f3c6069b784 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md @@ -7,8 +7,6 @@ keywords: ['クラウドサービスティア', 'サービスプラン', 'クラ doc_type: 'reference' --- - - # ClickHouse Cloud のティア {#clickhouse-cloud-tiers} ClickHouse Cloud には複数のティアが用意されています。 @@ -17,8 +15,6 @@ ClickHouse Cloud には複数のティアが用意されています。 **クラウドティアの概要:** - - @@ -224,8 +220,6 @@ ClickHouse Cloud には複数のティアが用意されています。 - - ## Basic {#basic} - 単一レプリカのデプロイメントをサポートするコスト効率の高いオプションです。 @@ -236,8 +230,6 @@ Basic ティアのサービスは、あらかじめ固定されたサイズで スケーリングが必要な場合は、Scale または Enterprise ティアへアップグレードできます。 ::: - - ## Scale {#scale} 高い SLA(2 つ以上のレプリカを持つデプロイメント)、スケーラビリティ、高度なセキュリティを必要とするワークロード向けに設計されています。 @@ -248,8 +240,6 @@ Basic ティアのサービスは、あらかじめ固定されたサイズで - [柔軟なスケーリング](/manage/scaling) オプション(スケールアップ/ダウン、スケールイン/アウト) - [設定可能なバックアップ](/cloud/manage/backups/configurable-backups) - - ## エンタープライズ {#enterprise} 厳格なセキュリティおよびコンプライアンス要件を持つ、大規模でミッションクリティカルなデプロイメント向けのプランです。 @@ -268,8 +258,6 @@ Basic ティアのサービスは、あらかじめ固定されたサイズで 3 つすべてのティアにおいて、単一レプリカのサービスはサイズを固定(`8 GiB`、`12 GiB`)とするよう設計されています。 ::: - - ## 別のプランへのアップグレード {#upgrading-to-a-different-tier} Basic から Scale へ、または Scale から Enterprise へは、いつでもアップグレードできます。プランをダウングレードする場合は、プレミアム機能を無効にする必要があります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md index 5ebacf6cb53..a33b843e6f0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md @@ -14,29 +14,22 @@ import insights_recent from '@site/static/images/cloud/sqlconsole/insights_recen import insights_drilldown from '@site/static/images/cloud/sqlconsole/insights_drilldown.png'; import insights_query_info from '@site/static/images/cloud/sqlconsole/insights_query_info.png'; - # Query Insights {#query-insights} **Query Insights** 機能は、さまざまな可視化やテーブルを通じて、ClickHouse の組み込みクエリログをより簡単に活用できるようにします。ClickHouse の `system.query_log` テーブルは、クエリ最適化、デバッグ、クラスタ全体の健全性とパフォーマンスの監視にとって重要な情報源です。 - - ## クエリ概要 {#query-overview} サービスを選択すると、左サイドバーの **Monitoring** ナビゲーション項目が展開され、新たに **Query insights** というサブ項目が表示されます。このオプションをクリックすると、新しい Query insights ページが開きます。 - - ## トップレベルメトリクス {#top-level-metrics} 上部の統計ボックスは、選択した期間におけるいくつかの基本的なクエリのトップレベルメトリクスを表しています。その下には、クエリ種別(select、insert、other)ごとに分解されたクエリ数、レイテンシ、エラー率を、選択した時間範囲にわたって可視化する 3 つの時系列チャートが表示されます。レイテンシチャートではさらに、p50、p90、p99 のレイテンシを表示するように切り替えることができます。 - - ## 最近のクエリ {#recent-queries} トップレベルのメトリクスの下には、選択した時間範囲におけるクエリログのエントリ(正規化されたクエリハッシュとユーザーごとにグループ化)がテーブルで表示されます。 @@ -45,8 +38,6 @@ import insights_query_info from '@site/static/images/cloud/sqlconsole/insights_q 最近のクエリは、利用可能な任意のフィールドでフィルタおよびソートできます。テーブルでは、テーブル名や p90 / p99 レイテンシなどの追加フィールドを表示または非表示にするように設定することもできます。 - - ## クエリのドリルダウン {#query-drill-down} 最近のクエリテーブルからクエリを選択すると、選択したクエリに固有のメトリクスと情報を含むフライアウトが開きます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md index d03f6a06b76..398175e7c22 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md @@ -20,13 +20,10 @@ import dashboards_9 from '@site/static/images/cloud/dashboards/9_dashboards.png' 
import dashboards_10 from '@site/static/images/cloud/dashboards/10_dashboards.png'; import dashboards_11 from '@site/static/images/cloud/dashboards/11_dashboards.png'; - # ダッシュボード {#dashboards} SQL Console のダッシュボード機能を使用すると、保存したクエリから得られた可視化をまとめて共有できます。まずはクエリを保存して可視化し、その可視化結果をダッシュボードに追加し、クエリパラメータを使ってダッシュボードをインタラクティブにしてみてください。 - - ## コアコンセプト {#core-concepts} ### クエリの共有 {#query-sharing} @@ -39,14 +36,10 @@ SQL Console のダッシュボード機能を使用すると、保存したク 可視化の設定で「filter」タイプを選択すると、**Global** フィルタのサイドペインからクエリパラメータの入力欄を表示・非表示に切り替えられます。ダッシュボード上の別のオブジェクト(テーブルなど)にリンクすることで、クエリパラメータの入力欄を切り替えることもできます。詳しくは、以下のクイックスタートガイドの「[フィルタを構成する](/cloud/manage/dashboards#configure-a-filter)」セクションを参照してください。 - - ## クイックスタート {#quick-start} [query\_log](/operations/system-tables/query_log) システムテーブルを使用して、ClickHouse サービスを監視するためのダッシュボードを作成します。 - - ## クイックスタート {#quick-start-1} ### 保存済みクエリを作成する {#create-a-saved-query} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md index 941427c2af1..a57b9782f02 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md @@ -16,7 +16,6 @@ import scaling_configure from '@site/static/images/cloud/manage/scaling-configur import scaling_memory_allocation from '@site/static/images/cloud/manage/scaling-memory-allocation.png'; import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' - # 自動スケーリング {#automatic-scaling} スケーリングとは、クライアントからの需要に応じて利用可能なリソースを調整する機能を指します。Scale および Enterprise(標準 1:4 プロファイル)ティアのサービスは、API をプログラム経由で呼び出すか、UI 上の設定を変更してシステムリソースを調整することで、水平スケーリングが可能です。これらのサービスは、アプリケーションの需要に合わせて**垂直方向に自動スケーリング**することもできます。 @@ -27,8 +26,6 @@ import ScalePlanFeatureBadge from 
'@theme/badges/ScalePlanFeatureBadge' Scale および Enterprise ティアは単一レプリカとマルチレプリカの両方のサービスをサポートしますが、Basic ティアは単一レプリカのサービスのみをサポートします。単一レプリカのサービスはサイズが固定されており、垂直・水平どちらのスケーリングもできません。ユーザーは、サービスをスケーリングするために Scale または Enterprise ティアへアップグレードできます。 ::: - - ## ClickHouse Cloud におけるスケーリングの仕組み {#how-scaling-works-in-clickhouse-cloud} 現在、ClickHouse Cloud は Scale ティアのサービスに対して、垂直オートスケーリングと手動による水平スケーリングをサポートしています。 @@ -89,8 +86,6 @@ Enterprise ティアのサービスでは、標準の 1:4 プロファイルが ただし、サポートに連絡することで、これらのサービスも垂直方向にスケーリングできます。 ::: - - ## 手動による水平スケーリング {#manual-horizontal-scaling} @@ -129,8 +124,6 @@ UI からサービスを水平スケーリングするには、**Settings** ペ - - ## 自動アイドル化 {#automatic-idling} **Settings** ページでは、上の画像に示されているように、サービスが非アクティブなとき(つまり、サービスがユーザーが送信したクエリを一切実行していないとき)に自動的にアイドル状態にするかどうかも選択できます。自動アイドル化を有効にすると、サービスが一時停止している間はコンピュートリソースに対して課金されないため、コストを削減できます。 @@ -144,8 +137,6 @@ UI からサービスを水平スケーリングするには、**Settings** ペ クエリに応答するまでの遅延を許容できるユースケースにのみ自動アイドル化を使用してください。サービスが一時停止している間は、そのサービスへの接続はタイムアウトするためです。自動アイドル化は、利用頻度が低く、ある程度の遅延を許容できるサービスに最適です。頻繁に利用される顧客向け機能を支えるサービスには推奨されません。 ::: - - ## ワークロードのスパイクへの対応 {#handling-bursty-workloads} 近いうちにワークロードのスパイクが予想される場合は、 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md index 98a9f0f9bae..9eedcfb8214 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md @@ -6,15 +6,11 @@ keywords: ['自前クラウド', 'byoc', 'プライベート', '政府機関向 doc_type: 'reference' --- - - # ClickHouse デプロイメントオプション {#clickhouse-deployment-options} ClickHouse は、多様な顧客要件に対応するため、制御性、コンプライアンス、および運用負荷の観点から異なる、幅広いデプロイメントオプションを提供しています。 本ドキュメントでは、利用可能な各種デプロイメントタイプの特徴を整理し、利用者が自らのアーキテクチャ上の要件、規制上の義務、およびリソース管理戦略に最適に合致するソリューションを選択できるようにします。 - - ## ClickHouse Cloud {#clickhouse-cloud} ClickHouse Cloud 
は、自己管理に伴う運用上の複雑さなしに、ClickHouse の性能とスピードを提供する、フルマネージドなクラウドネイティブサービスです。 @@ -24,24 +20,18 @@ ClickHouse Cloud は、インフラストラクチャのプロビジョニング 詳しくは、[ClickHouse Cloud](/getting-started/quick-start/cloud) を参照してください。 - - ## 自前クラウド環境での利用(Bring Your Own Cloud) {#byoc} ClickHouse Bring Your Own Cloud (BYOC) は、組織が自社のクラウド環境内に ClickHouse をデプロイしつつ、マネージドサービスレイヤーの利点も活用できるようにするモデルです。このオプションは、ClickHouse Cloud によるフルマネージドな体験と、完全に自己管理されたデプロイメントとのギャップを埋めるものです。ClickHouse BYOC を利用すると、ユーザーはデータ、インフラストラクチャ、セキュリティポリシーを自ら管理して特定のコンプライアンスや規制要件を満たしながら、パッチ適用、監視、スケーリングといった運用タスクを ClickHouse にオフロードできます。このモデルは、マネージドサービスのメリットを享受しつつプライベートクラウドデプロイメントの柔軟性も備えており、厳格なセキュリティ、ガバナンス、データレジデンシー要件を持つエンタープライズでの大規模デプロイメントに適しています。 詳しくは、[Bring Your Own Cloud](/cloud/reference/byoc/overview) を参照してください。 - - ## ClickHouse Private {#clickhouse-private} ClickHouse Private は、ClickHouse をセルフホストして利用するためのバージョンであり、ClickHouse Cloud を支えるのと同じ独自技術を活用しています。このオプションはきわめて高いレベルの制御性を提供するため、厳格なコンプライアンス、ネットワークおよびセキュリティ要件を持つ組織や、自前のインフラストラクチャを運用できるだけの専門的な運用ノウハウを備えたチームに最適です。ClickHouse Cloud 環境で徹底的にテストされた定期的なアップデートおよびアップグレード、豊富な機能を備えたロードマップの恩恵を受けられ、さらに当社のエキスパートによるサポートチームによって支えられています。 [ClickHouse Private](/cloud/infrastructure/clickhouse-private) の詳細については、こちらをご覧ください。 - - ## ClickHouse Government {#clickhouse-government} ClickHouse Government は、分離された認定済み環境を必要とする政府機関や公共部門組織の、特有で厳格な要件を満たすように設計された、セルフホスト型の ClickHouse です。このデプロイメントオプションは、OpenSSL を利用した FIPS 140-3 準拠、追加のシステムハードニング、および脆弱性管理に重点を置きつつ、高度に安全で、各種コンプライアンス要件を満たした分離環境を提供します。ClickHouse Cloud の堅牢な機能を活用しながら、政府組織特有の運用要件とセキュリティ要件に対応するための専用機能および設定を統合しています。ClickHouse Government を利用することで、機関は、公共部門のニーズに合わせたエキスパートサポートを受けつつ、管理された認定インフラストラクチャ内で機微なデータに対する高性能な分析を実現できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md index 4ee1d641982..de7dac229be 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md @@ -8,7 +8,6 @@ doc_type: 'guide' import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; - # レプリカ認識ルーティング {#replica-aware-routing} @@ -27,8 +26,6 @@ Envoy がこのパターンに一致するホスト名を受信すると、そ 元のホスト名を使用した場合は、デフォルトのルーティングアルゴリズムである `LEAST_CONNECTION` ロードバランシングが引き続き使用されることに注意してください。 - - ## Replica-aware routing の制限事項 {#limitations-of-replica-aware-routing} ### Replica-aware routing はアイソレーションを保証しない {#replica-aware-routing-does-not-guarantee-isolation} @@ -39,8 +36,6 @@ Envoy がこのパターンに一致するホスト名を受信すると、そ お客様は、新しいホスト名パターンに対して名前解決が機能するように、DNS エントリを手動で追加する必要があります。誤って構成・使用すると、サーバー負荷に不均衡を生じさせる可能性があります。 - - ## レプリカ対応ルーティングの設定 {#configuring-replica-aware-routing} Replica-aware routing を有効にするには、弊社の[サポートチーム](https://clickhouse.com/support/program)までお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md index 2ece9a89c9f..7f99966a975 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md @@ -7,8 +7,6 @@ description: 'ClickHouse Cloud における Shared Catalog コンポーネント doc_type: 'reference' --- - - # 共有カタログと共有データベースエンジン {#shared-catalog-and-shared-database-engine} **ClickHouse Cloud(およびファーストパーティパートナークラウドサービス)でのみ利用可能です** @@ -24,8 +22,6 @@ Shared Catalog は**テーブル本体を複製するわけではなく**、DDL - MySQL - DataLakeCatalog - - ## アーキテクチャとメタデータストレージ {#architecture-and-metadata-storage} Shared Catalog 内のすべてのメタデータおよび DDL クエリ履歴は、ZooKeeper に集中管理されます。ローカルディスク上には一切永続化されません。このアーキテクチャにより、次の点が保証されます。 @@ -34,8 +30,6 @@ Shared Catalog 内のすべてのメタデータおよび DDL クエリ履歴は - コンピュートノードをステートレスに保てること 
- 高速かつ信頼性の高いレプリカのブートストラップ - - ## 共有データベースエンジン {#shared-database-engine} **共有データベースエンジン** は Shared Catalog と連携して、`SharedMergeTree` のような **ステートレスなテーブルエンジン** を使用するテーブルを持つデータベースを管理します。これらのテーブルエンジンは永続状態をディスクに書き込まず、動的なコンピュート環境と互換性があります。 @@ -69,8 +63,6 @@ Shared Catalog 内のすべてのメタデータおよび DDL クエリ履歴は - **集中管理されたバージョン付きメタデータ状態** Shared Catalog は ZooKeeper に唯一の信頼できる情報源を保持します。レプリカが起動すると、最新状態を取得し、その差分を適用して整合性を確保します。クエリ実行中は、正しさを保証するために、システムが他のレプリカが少なくとも必要なメタデータバージョンに到達するまで待機することができます。 - - ## ClickHouse Cloud での利用方法 {#usage-in-clickhouse-cloud} エンドユーザー側では、Shared Catalog と Shared データベースエンジンを利用するために特別な設定は必要ありません。データベースの作成方法はこれまでと同じです。 @@ -81,7 +73,6 @@ CREATE DATABASE my_database; ClickHouse Cloud では、データベースに Shared データベースエンジンが自動的に割り当てられます。そのようなデータベース内で stateless エンジンを使用して作成されたテーブルはすべて、自動的に Shared Catalog のレプリケーションおよびコーディネーション機能を利用できます。 - ## まとめ {#summary} Shared Catalog と Shared データベースエンジンは次の機能を提供します: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md index 287de554079..ba4bb36c2d9 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md @@ -11,7 +11,6 @@ import shared_merge_tree from '@site/static/images/cloud/reference/shared-merge- import shared_merge_tree_2 from '@site/static/images/cloud/reference/shared-merge-tree-2.png'; import Image from '@theme/IdealImage'; - # SharedMergeTree テーブルエンジン {#sharedmergetree-table-engine} SharedMergeTree テーブルエンジンファミリーは、共有ストレージ(例: Amazon S3、Google Cloud Storage、MinIO、Azure Blob Storage)上で動作するように最適化された、クラウドネイティブな ReplicatedMergeTree エンジンの代替です。あらゆる種類の MergeTree エンジンに対応する SharedMergeTree が用意されており、たとえば ReplacingSharedMergeTree は ReplacingReplicatedMergeTree の代わりとなります。 @@ -34,8 +33,6 @@ SharedMergeTree の大きな改善点の 1 
つは、ReplicatedMergeTree と比 ReplicatedMergeTree と異なり、SharedMergeTree ではレプリカ同士が直接通信する必要はありません。代わりに、すべての通信は共有ストレージと clickhouse-keeper を通じて行われます。SharedMergeTree は非同期のリーダーレスレプリケーションを実装し、clickhouse-keeper をコーディネーションおよびメタデータの保存に利用します。これは、サービスをスケールアップおよびスケールダウンしても、メタデータをレプリケートする必要がないことを意味します。その結果、レプリケーション、ミューテーション、マージ、およびスケールアップ操作が高速になります。SharedMergeTree はテーブルごとに数百のレプリカをサポートし、シャードを用いずに動的なスケーリングを可能にします。ClickHouse Cloud では、クエリに対してより多くのコンピュートリソースを活用するために、分散クエリ実行アプローチが採用されています。 - - ## 内部情報の確認 {#introspection} ReplicatedMergeTree の内部情報確認に利用されるほとんどの system テーブルは SharedMergeTree にも存在しますが、データおよびメタデータのレプリケーションが行われないため、`system.replication_queue` と `system.replicated_fetches` は存在しません。ただし、SharedMergeTree にはこれら 2 つのテーブルに対応する代替テーブルが用意されています。 @@ -48,8 +45,6 @@ ReplicatedMergeTree の内部情報確認に利用されるほとんどの syste このテーブルは、SharedMergeTree における `system.replicated_fetches` の代替です。プライマリキーおよびチェックサムをメモリにフェッチしている、進行中の取得処理に関する情報を保持します。 - - ## SharedMergeTree の有効化 {#enabling-sharedmergetree} `SharedMergeTree` はデフォルトで有効になっています。 @@ -103,7 +98,6 @@ ENGINE = SharedReplacingMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica ORDER BY key ``` - ## 設定 {#settings} 一部の設定の挙動が大きく変更されています。 @@ -112,8 +106,6 @@ ORDER BY key - `insert_quorum_parallel` -- SharedMergeTree へのすべての挿入はクォーラム挿入(共有ストレージへの書き込み)となるため、SharedMergeTree テーブルエンジンを使用する場合、この設定は不要です。 - `select_sequential_consistency` -- クォーラム挿入を必要とせず、`SELECT` クエリ実行時に clickhouse-keeper への追加負荷を発生させます。 - - ## 一貫性 {#consistency} SharedMergeTree は、ReplicatedMergeTree よりも軽量な一貫性モデルを提供します。SharedMergeTree に対して挿入を行う場合、`insert_quorum` や `insert_quorum_parallel` のような設定を指定する必要はありません。挿入はクォーラム挿入となり、メタデータは ClickHouse-Keeper に保存され、そのメタデータは少なくともクォーラムを満たす数の ClickHouse-Keeper にレプリケートされます。クラスタ内の各レプリカは、ClickHouse-Keeper から新しい情報を非同期に取得します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md index 0f040459484..31b77982882 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md @@ -15,11 +15,8 @@ import compute_7 from '@site/static/images/cloud/reference/compute-compute-7.png import compute_8 from '@site/static/images/cloud/reference/compute-compute-8.png'; import Image from '@theme/IdealImage'; - # ウェアハウス {#warehouses} - - ## コンピュートコンピュート分離とは何ですか? {#what-is-compute-compute-separation} コンピュートコンピュート分離は、Scale および Enterprise ティアで利用できます。 @@ -51,8 +48,6 @@ _図 2 - ClickHouse Cloud におけるコンピュート分離_ 既存のサービスと同じデータを共有する追加のサービスを作成したり、複数のサービスが同じデータを共有する、まったく新しいセットアップを構成したりすることが可能です。 - - ## ウェアハウスとは何ですか? {#what-is-a-warehouse} ClickHouse Cloud において、_ウェアハウス_ は同じデータを共有するサービスの集合です。 @@ -75,8 +70,6 @@ _図 3 - ウェアハウスの例_ サービスは、所属するウェアハウスごとに並べ替えることができます。 - - ## アクセス制御 {#access-controls} ### データベース認証情報 {#database-credentials} @@ -116,8 +109,6 @@ _図 6 - ウェアハウス内の読み書き可能サービスと読み取り 2. 現在、更新可能なマテリアライズドビューは、読み取り専用サービスを含むウェアハウス内のすべてのサービス上で実行されます。ただし、この動作は将来変更され、読み書き可能(Read-write)サービス上でのみ実行されるようになります。 ::: - - ## スケーリング {#scaling} ウェアハウス内の各サービスは、ワークロードに応じて次の点を調整できます: @@ -126,13 +117,9 @@ _図 6 - ウェアハウス内の読み書き可能サービスと読み取り - サービスを自動スケーリングするかどうか - サービスを非アクティブ時にアイドル状態にするかどうか(グループ内の最初のサービスには適用できません。**制限事項** セクションを参照してください) - - ## 動作の変更点 {#changes-in-behavior} あるサービスで compute-compute が有効になり(少なくとも 1 つのセカンダリサービスが作成され)ると、`default` クラスター名を指定した `clusterAllReplicas()` 関数呼び出しは、その関数が呼び出されたサービスのレプリカのみを使用します。つまり、同じデータセットに接続された 2 つのサービスがあり、サービス 1 から `clusterAllReplicas(default, system, processes)` を呼び出した場合、サービス 1 上で動作しているプロセスのみが表示されます。必要であれば、すべてのレプリカにアクセスするために、例えば `clusterAllReplicas('all_groups.default', system, processes)` を呼び出すことも可能です。 - - ## 制限事項 {#limitations} 1. 
**プライマリサービスは常に稼働しており、アイドル状態にはできません(この制限は GA 後しばらくして削除される予定です)。** プライベートプレビュー期間中および GA 後しばらくの間、プライマリサービス(通常は、他のサービスを追加して拡張したい既存のサービス)は常に稼働しており、アイドル設定は無効になっています。少なくとも 1 つのセカンダリサービスが存在する場合、プライマリサービスを停止したりアイドル状態にしたりすることはできません。すべてのセカンダリサービスを削除すると、元のサービスを再び停止またはアイドル状態にすることができます。 @@ -154,22 +141,17 @@ SETTINGS distributed_ddl_task_timeout=0 7. **現在、1つのウェアハウスあたりサービスは5つまでというソフトリミットがあります。** 1つのウェアハウス内で5つを超えるサービスが必要な場合は、サポートチームにお問い合わせください。 - ## 料金 {#pricing} コンピュート料金は、ウェアハウス内のすべてのサービス(プライマリおよびセカンダリ)で同一です。ストレージ料金は最初の(元の)サービスに含まれており、1 回だけ請求されます。 ワークロードの規模や選択したティアに基づいてコストを見積もるには、[料金](https://clickhouse.com/pricing) ページにある料金計算ツールを参照してください。 - - ## バックアップ {#backups} - 単一のウェアハウス内のすべてのサービスは同じストレージを共有するため、バックアップはプライマリ(最初の)サービスに対してのみ実行されます。このため、そのウェアハウス内のすべてのサービスのデータがバックアップされます。 - ウェアハウスのプライマリサービスのバックアップからリストアを行うと、そのバックアップは既存のウェアハウスとは関連付けられていない、完全に新しいサービスとしてリストアされます。リストアが完了した直後に、その新しいサービスに対して追加のサービスを作成できます。 - - ## ウェアハウスの使用 {#using-warehouses} ### ウェアハウスの作成 {#creating-a-warehouse} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md index 39ed3062da3..5fb3fc166d6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md @@ -8,20 +8,14 @@ doc_type: 'reference' keywords: ['ClickHouse Cloud', 'API 概要', 'クラウド API', 'REST API', 'プログラムによるアクセス'] --- - - # ClickHouse Cloud API {#clickhouse-cloud-api} - - ## 概要 {#overview} ClickHouse Cloud API は、開発者が ClickHouse Cloud 上の組織やサービスを簡単に管理できるように設計された REST API です。Cloud API を使用すると、サービスの作成および管理、API キーのプロビジョニング、組織メンバーの追加や削除などを行うことができます。 [最初の API キーを作成し、ClickHouse Cloud API の利用を開始する方法はこちら](/cloud/manage/openapi) - - ## Swagger (OpenAPI) エンドポイントと UI {#swagger-openapi-endpoint-and-ui} ClickHouse Cloud API はオープンソースの [OpenAPI 
仕様](https://www.openapis.org/) に基づいて構築されており、クライアント側から予測可能な形で利用できるようになっています。ClickHouse Cloud API のドキュメントをプログラムから利用する必要がある場合は、JSON ベースの Swagger エンドポイントを https://api.clickhouse.cloud/v1 で提供しています。API ドキュメントは [Swagger UI](https://clickhouse.com/docs/cloud/manage/api/swagger) からも参照できます。 @@ -33,14 +27,10 @@ ClickHouse Cloud API はオープンソースの [OpenAPI 仕様](https://www.op これは、`POST`、`GET`、`PATCH` の各サービスリクエストによって返されるオブジェクトに影響します。そのため、これらの API を利用するコードは、この変更に対応できるように調整が必要になる場合があります。 ::: - - ## レート制限 {#rate-limits} 開発者は、1 つの組織につき最大 100 個まで API キーを作成できます。各 API キーは、10 秒間に最大 10 件までリクエストを送信できます。組織ごとに許可される API キー数や 10 秒間あたりのリクエスト数の上限を引き上げたい場合は、support@clickhouse.com までお問い合わせください。 - - ## Terraform provider {#terraform-provider} 公式の ClickHouse Terraform Provider を使用すると、[Infrastructure as Code](https://www.redhat.com/en/topics/automation/what-is-infrastructure-as-code-iac) @@ -56,16 +46,12 @@ ClickHouse Terraform Provider にコントリビュートしたい場合は、[G また、サービスリソースのプロパティとして `num_replicas` フィールドを指定できるようになります。 ::: - - ## Terraform と OpenAPI の新料金: レプリカ設定の説明 {#terraform-and-openapi-new-pricing---replica-settings-explained} 各サービス作成時のデフォルトのレプリカ数は、Scale および Enterprise ティアでは 3、Basic ティアでは 1 です。 Scale および Enterprise ティアでは、サービス作成リクエストで `numReplicas` フィールドを指定することで、この値を調整できます。 `numReplicas` フィールドの値は、ウェアハウス内の最初のサービスについては 2 から 20 の範囲である必要があります。既存のウェアハウスに作成されるサービスについては、レプリカ数を 1 まで下げることができます。 - - ## サポート {#support} 迅速にサポートを受けるには、まずは [Slack チャンネル](https://clickhouse.com/slack) をご利用いただくことをおすすめします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md index 9cf8d713b1a..5739ca4c407 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md @@ -14,7 +14,6 @@ import image_04 from '@site/static/images/cloud/manage/openapi4.png'; 
import image_05 from '@site/static/images/cloud/manage/openapi5.png'; import Image from '@theme/IdealImage'; - # API キーの管理 {#managing-api-keys} ClickHouse Cloud は OpenAPI を利用した API を提供しており、アカウントおよびサービスをプログラムから管理できます。 @@ -68,7 +67,6 @@ API キーの削除は元に戻せない操作です。そのキーを使用し - ## エンドポイント {#endpoints} エンドポイントの詳細については、[API リファレンス](https://clickhouse.com/docs/cloud/manage/api/swagger) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md index 79baccfa72b..5a5b3bcfaef 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md @@ -90,7 +90,6 @@ Postman アプリケーションは Web ブラウザ上で利用できるほか - ## ClickHouse Cloud API 機能のテスト {#test-the-clickhouse-cloud-api-functionalities} ### 「GET list of available organizations」のテスト {#test-get-list-of-available-organizations} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md index 9f59f70a6b5..f8fbe27886a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md @@ -15,7 +15,6 @@ import enroll_fast_release from '@site/static/images/cloud/manage/enroll_fast_re import scheduled_upgrades from '@site/static/images/cloud/manage/scheduled_upgrades.png'; import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled_upgrade_window.png'; - # アップグレード {#upgrades} ClickHouse Cloud を利用すると、パッチ適用やアップグレードについて心配する必要はありません。修正、新機能、パフォーマンス向上を含むアップグレードを定期的に実施します。ClickHouse における新機能の一覧については、[Cloud changelog](/whats-new/cloud) を参照してください。 @@ -26,8 +25,6 @@ ClickHouse 
Cloud を利用すると、パッチ適用やアップグレードに この変更の一環として、システムテーブルの履歴データは、アップグレードイベントの一部として最大 30 日間保持されます。さらに、AWS または GCP 上のサービスについては 2024 年 12 月 19 日より前、Azure 上のサービスについては 2025 年 1 月 14 日より前のシステムテーブルデータは、新しい組織ティアへの移行の一部として保持されません。 ::: - - ## バージョン互換性 {#version-compatibility} サービスを作成すると、その時点で ClickHouse Cloud 上で提供されている最新の ClickHouse バージョンが、そのサービスの [`compatibility`](/operations/settings/settings#compatibility) 設定として指定されます。 @@ -36,16 +33,12 @@ ClickHouse Cloud を利用すると、パッチ適用やアップグレードに サービスレベルのデフォルト `compatibility` 設定を自分で変更することはできません。サービスのデフォルト `compatibility` 設定に指定されているバージョンを変更したい場合は、[サポートに連絡](https://clickhouse.com/support/program)する必要があります。ただし、ユーザー、ロール、プロファイル、クエリ、またはセッションのレベルでは、`SET compatibility = '22.3'` をセッション内で実行したり、クエリ内で `SETTINGS compatibility = '22.3'` を指定したりするなど、標準的な ClickHouse の設定メカニズムを使用して `compatibility` 設定を上書きできます。 - - ## メンテナンスモード {#maintenance-mode} サービスを更新する必要が生じる場合があり、その際にはスケーリングや自動休止などの一部機能を無効化する必要が生じることがあります。まれに、不具合が発生しているサービスに対して作業を行い、正常な状態に戻す必要が生じることもあります。このようなメンテナンス中は、サービスページに _"Maintenance in progress"_(メンテナンス中)というバナーが表示されます。この期間中でも、クエリの実行にはサービスを利用できる場合があります。 サービスがメンテナンス中の時間について、料金が発生することはありません。_メンテナンスモード_ はまれにしか行われず、通常のサービスアップグレードと混同しないでください。 - - ## リリースチャネル(アップグレードスケジュール) {#release-channels-upgrade-schedule} ユーザーは、特定のリリースチャネルに登録することで、自身の ClickHouse Cloud サービスのアップグレードスケジュールを指定できます。リリースチャネルは 3 種類あり、**スケジュール済みアップグレード**機能を使って、アップグレードを実行する曜日と時間を設定できます。 @@ -112,8 +105,6 @@ Basic ティアのサービスは、高速リリースチャネルの直後に - より低速なチャネルへ移行してもサービスがダウングレードされることはなく、そのチャネルで新しいバージョンが利用可能になるまで現在のバージョンのまま維持されます(例: 通常からスロー、高速から通常またはスロー)。 ::: - - ## 予定されたアップグレード {#scheduled-upgrades} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/06_security.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/06_security.md index f8d62393008..906a268cefe 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/06_security.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/06_security.md @@ -7,16 +7,12 @@ doc_type: 'reference' keywords: 
['セキュリティ', 'クラウドセキュリティ', 'アクセス制御', 'コンプライアンス', 'データ保護'] --- - - # ClickHouse Cloud のセキュリティ {#clickhouse-cloud-security} 本ドキュメントでは、ClickHouse Cloud の組織およびサービスを保護するために利用可能なセキュリティオプションとベストプラクティスについて詳述します。 ClickHouse は、安全な分析用データベースソリューションを提供することに注力しており、データおよびサービスの完全性を保護することを最優先事項としています。 ここでは、ユーザーが ClickHouse 環境を保護するのに役立つよう設計された、さまざまな方法について説明します。 - - ## クラウドコンソールの認証 {#cloud-console-auth} ### パスワード認証 {#password-auth} @@ -49,8 +45,6 @@ ClickHouse Cloud は、Google または Microsoft のソーシャル認証によ [API 認証](/cloud/manage/openapi)の詳細はこちらをご覧ください。 - - ## データベース認証 {#database-auth} ### データベースパスワード認証 {#db-password-auth} @@ -65,8 +59,6 @@ ClickHouse データベースユーザーは、SSH 認証を使用するよう [SSH 認証](/cloud/security/manage-database-users#database-ssh)の詳細をご覧ください。 - - ## アクセス制御 {#access-control} ### コンソールのロールベースアクセス制御 (RBAC) {#console-rbac} @@ -81,8 +73,6 @@ ClickHouse のデータベースは、ユーザーへの権限付与に基づく [データベースユーザーの権限付与](/cloud/security/manage-database-users#database-permissions) の詳細をご覧ください。 - - ## ネットワークセキュリティ {#network-security} ### IP フィルター {#ip-filters} @@ -97,8 +87,6 @@ ClickHouse サービスへの受信接続を制限するために IP フィル 詳しくは、[プライベート接続](/cloud/security/connectivity/private-networking)を参照してください。 - - ## 暗号化 {#encryption} ### ストレージレベルの暗号化 {#storage-encryption} @@ -119,8 +107,6 @@ ClickHouse Cloud Enterprise のお客様は、データベースレベルの暗 詳しくは、[お客様管理の暗号鍵](/cloud/security/cmek#customer-managed-encryption-keys-cmek)をご覧ください。 - - ## 監査とログ記録 {#auditing-logging} ### コンソール監査ログ {#console-audit-log} @@ -141,8 +127,6 @@ ClickHouse BYOC インスタンスを管理するセキュリティチーム向 [BYOC セキュリティプレイブック](/cloud/security/audit-logging/byoc-security-playbook)の詳細をご覧ください。 - - ## コンプライアンス {#compliance} ### セキュリティおよびコンプライアンスレポート {#compliance-reports} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md index d8c6be3be25..a6576615f5f 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md @@ -23,7 +23,6 @@ Advanced Dashboard は、ClickHouse システムとその周辺環境につい Advanced Dashboard は ClickHouse OSS(Open Source Software)と Cloud の両方で利用できます。本記事では、Cloud で Advanced Dashboard を使用する方法を説明します。 - ## 高度なダッシュボードへのアクセス {#accessing-the-advanced-dashboard} 高度なダッシュボードには、次の手順でアクセスできます。 @@ -33,8 +32,6 @@ Advanced Dashboard は ClickHouse OSS(Open Source Software)と Cloud の両 - - ## ネイティブの高度なダッシュボードへのアクセス {#accessing-the-native-advanced-dashboard} ネイティブの高度なダッシュボードには、次の手順でアクセスできます。 @@ -51,8 +48,6 @@ Advanced Dashboard は ClickHouse OSS(Open Source Software)と Cloud の両 - - ## すぐに使える可視化 {#out-of-box-visualizations} Advanced Dashboard のデフォルトチャートは、ClickHouse システムの状態をリアルタイムに @@ -88,8 +83,6 @@ Advanced Dashboard のデフォルトチャートは、ClickHouse システム | OS CPU Usage (Userspace) | ユーザー空間コード実行時の CPU 使用率を示します | | OS CPU Usage (Kernel) | カーネルコード実行時の CPU 使用率を示します | - - ## ClickHouse Cloud 固有 {#clickhouse-cloud-specific} ClickHouse Cloud はオブジェクトストレージ(S3 タイプ)を使ってデータを保存します。このインターフェイスを監視することで、問題の検知に役立ちます。 @@ -108,8 +101,6 @@ ClickHouse Cloud はオブジェクトストレージ(S3 タイプ)を使っ | Network receive bytes/sec | 受信ネットワークトラフィックの現在の速度 | | Concurrent network connections | 現在の同時ネットワーク接続数 | - - ## 高度なダッシュボードを使用した問題の特定 {#identifying-issues-with-the-advanced-dashboard} ClickHouse サービスのヘルスをリアルタイムで可視化することで、ビジネスに影響が出る前に問題を緩和したり、発生した問題の解決に大きく役立ちます。以下では、高度なダッシュボードを使って検知できる代表的な問題をいくつか紹介します。 @@ -194,7 +185,6 @@ read_rows: 150957260 tables: ['default.amazon_reviews_no_pk'] ``` - 2 行目: ────── type: QueryFinish diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md index ed7eff6371b..038a43a8db0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md @@ -15,7 +15,6 @@ import prometheus_grafana_metrics_explorer from '@site/static/images/integration import prometheus_datadog from '@site/static/images/integrations/prometheus-datadog.png'; import Image from '@theme/IdealImage'; - # Prometheus 連携 {#prometheus-integration} この機能では、[Prometheus](https://prometheus.io/) と連携させて ClickHouse Cloud サービスを監視できます。Prometheus メトリクスへのアクセスは [ClickHouse Cloud API](/cloud/manage/api/api-overview) エンドポイントを通じて提供されており、ユーザーはこのエンドポイントに安全に接続し、メトリクスを Prometheus のメトリクスコレクターへエクスポートできます。これらのメトリクスは、Grafana や Datadog などのダッシュボードツールと連携させて可視化できます。 @@ -59,7 +58,6 @@ export SERVICE_ID= curl --silent --user $KEY_ID:$KEY_SECRET https://api.clickhouse.cloud/v1/organizations/$ORG_ID/services/$SERVICE_ID/prometheus?filtered_metrics=true ``` - ### サンプルレスポンス {#sample-response} ```response @@ -187,7 +185,6 @@ scrape_configs: `honor_labels` 構成パラメータは、`instance` ラベルが正しく設定されるように `true` に設定する必要があります。さらに、上記の例では `filtered_metrics` が `true` に設定されていますが、これはユーザーの好みに応じて設定してください。 - ## Grafana との統合 {#integrating-with-grafana} ユーザーが Grafana と統合する主な方法は 2 つあります。 @@ -260,7 +257,6 @@ prometheus.remote_write "metrics_service" { `honor_labels` 設定パラメータは、インスタンスラベルが正しく設定されるように `true` に設定する必要があります。 - ### Alloy を使用した自己管理型 Grafana {#grafana-self-managed-with-alloy} 自己管理で Grafana を運用しているユーザーは、Alloy エージェントのインストール手順を [こちら](https://grafana.com/docs/alloy/latest/get-started/install/) で確認できます。ここでは、ユーザーが Alloy を構成して Prometheus メトリクスを任意の送信先に送信するようにしていることを前提とします。以下の `prometheus.scrape` コンポーネントにより、Alloy は ClickHouse Cloud エンドポイントをスクレイプします。スクレイプされたメトリクスは `prometheus.remote_write` が受信すると想定しています。これが存在しない場合、または別の送信先を利用する場合は、`forward_to` キーを対象の送信先に合わせて調整してください。 @@ -293,7 +289,6 @@ prometheus.scrape "clickhouse_cloud" { `instance` ラベルが正しく設定されるようにするには、`honor_labels` 設定パラメータを `true` に設定する必要がある点に注意してください。 - ## Datadog との統合 {#integrating-with-datadog} Datadog の [Agent](https://docs.datadoghq.com/agent/?tab=Linux) と 
[OpenMetrics インテグレーション](https://docs.datadoghq.com/integrations/openmetrics/) を使用して、ClickHouse Cloud のエンドポイントからメトリクスを収集できます。以下は、このエージェントおよびインテグレーション向けのシンプルなサンプル設定です。ただし、実際には特に重要なメトリクスのみに絞って収集することを推奨します。下記の網羅的なサンプルでは、何千ものメトリクスとインスタンスの組み合わせがエクスポートされ、Datadog によってカスタムメトリクスとして扱われます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md index 71017a54186..bebcfc7c193 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md @@ -17,7 +17,6 @@ import endpoints_monitoring from '@site/static/images/cloud/sqlconsole/endpoints import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # クエリ API エンドポイントのセットアップ {#setting-up-query-api-endpoints} **Query API Endpoints** 機能を使用すると、ClickHouse Cloud コンソールで任意の保存済み SQL クエリから、直接 API エンドポイントを作成できます。ClickHouse Cloud サービスにネイティブ ドライバーで接続する必要なく、HTTP 経由で API エンドポイントにアクセスして保存済みクエリを実行できるようになります。 @@ -135,7 +134,6 @@ GET /query-endpoints/{queryEndpointId}/run POST /query-endpoints/{queryEndpointId}/run ``` - ### HTTP メソッド {#http-methods} | メソッド | ユースケース | パラメータ | @@ -251,7 +249,6 @@ POST /query-endpoints/{queryEndpointId}/run SELECT database, name AS num_tables FROM system.tables LIMIT 3; ``` - #### バージョン 1 {#version-1} @@ -428,7 +425,6 @@ SELECT name, database FROM system.tables WHERE match(name, {tableNameRegex: Stri - ### クエリ変数に配列を含む、テーブルにデータを挿入するリクエスト {#request-with-array-in-the-query-variables-that-inserts-data-into-a-table} **テーブル定義の SQL:** @@ -492,7 +488,6 @@ INSERT INTO default.t_arr VALUES ({arr: Array(Array(Array(UInt32)))}); - ### ClickHouse の設定 `max_threads` を 8 にしたリクエスト {#request-with-clickhouse-settings-max_threads-set-to-8} **クエリ API エンドポイントの SQL:** @@ -539,7 +534,6 @@ SELECT * FROM system.tables; - ### レスポンスをストリームとしてリクエストしてパースする` 
{#request-and-parse-the-response-as-a-stream} **クエリ API エンドポイントの SQL:** @@ -610,7 +604,6 @@ SELECT name, database FROM system.tables; - ### ファイルからテーブルにストリーム挿入する {#insert-a-stream-from-a-file-into-a-table} 次の内容でファイル `./samples/my_first_table_2024-07-11.csv` を作成します: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md index 33d6d64211c..b5458a155eb 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md @@ -17,21 +17,16 @@ import backup_usage from '@site/static/images/cloud/manage/backup-usage.png'; import backup_restore from '@site/static/images/cloud/manage/backup-restore.png'; import backup_service_provisioning from '@site/static/images/cloud/manage/backup-service-provisioning.png'; - # バックアップの確認と復元 {#review-and-restore-backups} このガイドでは、ClickHouse Cloud におけるバックアップの仕組み、サービスのバックアップ設定に利用できるオプション、およびバックアップからの復元方法について説明します。 - - ## バックアップステータス一覧 {#backup-status-list} サービスは、デフォルトの毎日スケジュールまたは選択した[カスタムスケジュール](/cloud/manage/backups/configurable-backups)に従ってバックアップされます。利用可能なすべてのバックアップは、サービスの **Backups** タブから確認できます。ここでは、バックアップのステータス、所要時間、およびバックアップサイズを確認できます。また、**Actions** 列から特定のバックアップを復元することも可能です。 - - ## バックアップコストについて {#understanding-backup-cost} デフォルトポリシーでは、ClickHouse Cloud は 24 時間の保持期間で 1 日 1 回のバックアップ取得を必須としています。より多くのデータを保持するスケジュールを選択したり、バックアップ頻度を高くすると、バックアップ用ストレージに追加料金が発生する可能性があります。 @@ -50,8 +45,6 @@ import backup_service_provisioning from '@site/static/images/cloud/manage/backup サービス内のデータサイズは時間の経過とともに増加するため、バックアップの推定コストも変化する点に注意してください。 ::: - - ## バックアップの復元 {#restore-a-backup} バックアップは、バックアップを取得した既存のサービスではなく、新しい ClickHouse Cloud サービスに復元されます。 @@ -64,8 +57,6 @@ import backup_service_provisioning from '@site/static/images/cloud/manage/backup - - ## 
復元したサービスの操作 {#working-with-your-restored-service} バックアップを復元すると、2 つの類似したサービスが存在することになります。復元が必要だった **元のサービス** と、そのバックアップから復元された新しい **復元済みサービス** です。 @@ -147,7 +138,6 @@ FROM remoteSecure('source-hostname', db, table, 'exporter', 'password-here') 元のサービスへのデータ挿入が正常に完了したら、そのサービス上でデータを必ず検証してください。データの検証が完了したら、新しいサービスは削除してください。 - ## テーブルの削除取り消し(UNDROP) {#undeleting-or-undropping-tables} `UNDROP` コマンドは、[Shared Catalog](https://clickhouse.com/docs/cloud/reference/shared-catalog) を通じて ClickHouse Cloud でサポートされています。 @@ -169,13 +159,10 @@ SYNC SETTINGS max_table_size_to_drop=2000000000000 -- 制限を2TBに増やし レガシープラン: レガシープランをご利用のお客様の場合、24時間保持されるデフォルトの日次バックアップはストレージコストに含まれます。 ::: - ## 設定可能なバックアップ {#configurable-backups} デフォルトとは異なるバックアップスケジュールを設定する場合は、[設定可能なバックアップ](/cloud/manage/backups/configurable-backups)を参照してください。 - - ## 自分のクラウドアカウントへのバックアップのエクスポート {#export-backups-to-your-own-cloud-account} バックアップを自分のクラウドアカウントにエクスポートしたい場合は、[こちら](/cloud/manage/backups/export-backups-to-own-cloud-account)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md index 7465f956e29..a7aeb84fc72 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md @@ -9,7 +9,6 @@ doc_type: 'landing-page' import TableOfContents from '@site/i18n/jp/docusaurus-plugin-content-docs/current/best-practices/_snippets/_table_of_contents.md'; - # ClickHouse Cloud におけるベストプラクティス {#best-practices-in-clickhouse-cloud} このセクションでは、ClickHouse Cloud を最大限に活用するために従うべきベストプラクティスを紹介します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md index 4f1caa0fde9..30ad874a3de 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md @@ -91,7 +91,6 @@ GRANT user_role TO user_1 GRANT user_role TO user_2 ``` - これで `user_1` ユーザーとして接続し、簡単な `SELECT` を実行できます。最初のテナントの行だけが返されます。 ```sql @@ -108,7 +107,6 @@ FROM events └───────────┴──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## 個別テーブル {#separate-tables} このアプローチでは、各テナントのデータを同一データベース内の個別のテーブルに保存するため、テナントを識別するための専用フィールドは不要です。[GRANT 文](/sql-reference/statements/grant)を使用してユーザーアクセスを制御し、各ユーザーが自分のテナントのデータを含むテーブルのみにアクセスできるようにします。 @@ -201,7 +199,6 @@ FROM default.events_tenant_1 └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## 個別データベース {#separate-databases} 各テナントのデータは、同じ ClickHouse サービス内の個別のデータベースに保存されます。 @@ -286,7 +283,6 @@ GRANT SELECT ON tenant_1.events TO user_1 GRANT SELECT ON tenant_2.events TO user_2 ``` - これで、`user_1` として接続し、対象データベースの events テーブルに対して簡単な SELECT クエリを実行できます。最初のテナントの行だけが返されます。 ```sql @@ -303,7 +299,6 @@ FROM tenant_1.events └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## コンピュート間分離 {#compute-compute-separation} 上記で説明した 3 つのアプローチは、[Warehouses](/cloud/reference/warehouses#what-is-a-warehouse) を使用することで、さらに分離できます。データは共通のオブジェクトストレージを介して共有されますが、[compute-compute separation](/cloud/reference/warehouses#what-is-compute-compute-separation) により、各テナントは異なる CPU/メモリ比率を持つ専用のコンピュートサービスを利用できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md index 32825416dc1..51e0e22d803 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md @@ -7,14 +7,10 @@ keywords: ['ClickHouse Cloud', 'compatibility'] doc_type: 'guide' --- - - # ClickHouse Cloud 互換性ガイド {#clickhouse-cloud-compatibility-guide} このガイドでは、ClickHouse Cloud における機能面および運用面での挙動について概要を説明します。ClickHouse Cloud はオープンソース版 ClickHouse ディストリビューションをベースとしていますが、アーキテクチャや実装にはいくつか異なる点があります。背景情報として、[どのようにして ClickHouse Cloud を構築したか](https://clickhouse.com/blog/building-clickhouse-cloud-from-scratch-in-a-year) を説明しているこのブログ記事も、興味深く参考になるはずです。 - - ## ClickHouse Cloud のアーキテクチャ {#clickhouse-cloud-architecture} ClickHouse Cloud は、運用負荷を大幅に軽減し、大規模な ClickHouse の運用コストを削減します。デプロイメント規模を事前に見積もったり、高可用性のためのレプリケーションを構成したり、データを手動でシャーディングしたり、ワークロードの増加に応じてサーバーをスケールアップしたり、利用していないときにスケールダウンしたりする必要はありません — これらはすべて ClickHouse Cloud が代わりに行います。 @@ -26,8 +22,6 @@ ClickHouse Cloud は、運用負荷を大幅に軽減し、大規模な ClickHou - 断続的なワークロード向けのシームレスなハイバネーション機能はデフォルトで有効化されています。一定期間アイドル状態が続くとコンピュートリソースを自動的に一時停止し、新しいクエリが到着したときに透過的に再起動します。そのため、アイドルリソースに対して料金を支払う必要がありません。 - 高度なスケーリングコントロールにより、追加のコスト管理のためにオートスケーリングの上限を設定したり、特定の性能要件を持つアプリケーション向けにコンピュートリソースを確保するためのオートスケーリングの下限を設定したりできます。 - - ## 機能 {#capabilities} ClickHouse Cloud は、オープンソース版 ClickHouse に含まれる機能のうち厳選されたものへのアクセスを提供します。以下では、現時点で ClickHouse Cloud では無効化されている一部の機能について説明します。 @@ -107,8 +101,6 @@ SQLite、ODBC、JDBC、Redis、HDFS、Hive など一部の外部データベー [Named collections](/operations/named-collections) は、現在 ClickHouse Cloud ではサポートされていません。 - - ## 運用上のデフォルトと考慮事項 {#operational-defaults-and-considerations} 以下は、ClickHouse Cloud サービスのデフォルト設定です。サービスの正しい動作を保証するため、一部の設定は固定されていますが、それ以外の設定は調整可能です。 @@ -131,8 +123,6 @@ ClickHouse Cloud は変動するワークロードに対応するようチュー ### 高度なセキュリティ管理 {#advanced-security-administration} ClickHouse サービスの作成時に、デフォルトのデータベースと、このデータベースに対して広範な権限を持つデフォルトユーザーが作成されます。この初期ユーザーは、追加のユーザーを作成し、それらのユーザーに対してこのデータベースへの権限を付与できます。これ以外に、Kerberos、LDAP、または SSL X.509 証明書認証を使用してデータベース内で以下のセキュリティ機能を有効化することは、現時点ではサポートされていません。 - - ## ロードマップ {#roadmap} 現在、ClickHouse Cloud での実行可能な UDF 
のサポートを導入しており、その他多くの機能についてもニーズを評価しています。フィードバックや特定の機能のリクエストがある場合は、[こちらから送信してください](https://console.clickhouse.cloud/support)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md index 65a4d61e3a4..66e332630e2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md @@ -14,7 +14,6 @@ import s3_output from '@site/static/images/cloud/security/secures3_output.jpg'; この記事では、ClickHouse Cloud のお客様がロールベースのアクセス制御を利用して Amazon Simple Storage Service (S3) に認証し、データへ安全にアクセスする方法を示します。 - ## はじめに {#introduction} セキュアな S3 アクセスの設定に入る前に、その仕組みを理解しておくことが重要です。以下では、ClickHouse の各種サービスが、顧客の AWS アカウント内のロールを引き受けることで、プライベートな S3 バケットへアクセスできる仕組みの概要を示します。 @@ -130,7 +129,6 @@ IAM ポリシー(`{BUCKET_NAME}` をバケット名に置き換えてくださ 4 - 作成後に新しい **IAM Role Arn** をコピーします。これは S3 バケットにアクセスするために必要なものです。 - ## ClickHouseAccess ロールを使用して S3 バケットにアクセスする {#access-your-s3-bucket-with-the-clickhouseaccess-role} ClickHouse Cloud では、S3 テーブル関数の一部として `extra_credentials` を指定できる新機能が利用できます。以下は、上で作成した新しいロールを使用してクエリを実行する例です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md index f22c09f1b61..a0861ed4d96 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md @@ -16,7 +16,6 @@ import byoc_subnet_1 from '@site/static/images/cloud/reference/byoc-subnet-1.png import 
byoc_subnet_2 from '@site/static/images/cloud/reference/byoc-subnet-2.png'; import byoc_s3_endpoint from '@site/static/images/cloud/reference/byoc-s3-endpoint.png' - ## オンボーディングプロセス {#onboarding-process} お客様は、[こちら](https://clickhouse.com/cloud/bring-your-own-cloud) からお問い合わせいただくことで、オンボーディングプロセスを開始できます。専用の AWS アカウントと、利用予定のリージョンをあらかじめご用意いただく必要があります。現時点では、ClickHouse Cloud がサポートしているリージョンでのみ BYOC サービスを起動できます。 @@ -83,7 +82,6 @@ CloudFormation スタックを作成すると、クラウドコンソールか
-
@@ -169,8 +167,6 @@ ClickHouse にプライベートアクセスするために、ユーザーのピ 任意ですが、ピアリングが正常に機能していることを確認した後に、ClickHouse BYOC のパブリックロードバランサーの削除を依頼できます。 - - ## アップグレードプロセス {#upgrade-process} ClickHouse データベースバージョンのアップグレード、ClickHouse Operator、EKS などのコンポーネントを含め、ソフトウェアを定期的にアップグレードしています。 @@ -181,8 +177,6 @@ ClickHouse データベースバージョンのアップグレード、ClickHous メンテナンスウィンドウは、セキュリティおよび脆弱性修正には適用されません。これらは通常のスケジュール外のアップグレードとして対応し、運用への影響を最小限に抑えられるよう、適切な時間を調整するためのタイムリーなコミュニケーションを行います。 ::: - - ## CloudFormation IAM ロール {#cloudformation-iam-roles} ### ブートストラップ IAM ロール {#bootstrap-iam-role} @@ -217,8 +211,6 @@ CloudFormation で作成される `ClickHouseManagementRole` に加えて、コ 最後に、**`data-plane-mgmt`** は、ClickHouse Cloud コントロールプレーンコンポーネントが `ClickHouseCluster` や Istio Virtual Service/Gateway などの必要なカスタムリソースを調整できるようにします。 - - ## ネットワーク境界 {#network-boundaries} このセクションでは、顧客の BYOC VPC との間を流れるさまざまなネットワークトラフィックについて説明します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md index bca61d200de..e34042cebe9 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md @@ -7,8 +7,6 @@ keywords: ['本番運用準備', 'エンタープライズ', 'saml', 'sso', 'ter doc_type: 'guide' --- - - # ClickHouse Cloud 本番運用準備ガイド {#production-readiness} クイックスタートガイドを完了し、データが流れているアクティブなサービスをすでに運用している組織向けです。 @@ -23,8 +21,6 @@ doc_type: 'guide' - バックアップ手順を検証し、災害復旧プロセスを文書化する ::: - - ## はじめに {#introduction} ビジネス向けワークロードのために ClickHouse Cloud を本番環境で問題なく稼働させているとします。次の段階として、コンプライアンス監査の実施、テストされていないクエリが原因の本番インシデント、あるいは企業システムとの統合に関する IT 要件などをきっかけに、エンタープライズ本番環境の標準を満たすようにデプロイメントを成熟させる必要があります。 @@ -41,8 +37,6 @@ ClickHouse Cloud のマネージドプラットフォームは、インフラ運 本ガイドでは、これら各分野について順を追って解説し、稼働中の ClickHouse Cloud デプロイメントをエンタープライズ対応システムへ移行するのを支援します。 - - ## 環境戦略 {#environment-strategy} 
本番ワークロードに影響を与える前に、安全に変更をテストできるよう、環境を分離して用意します。多くの本番インシデントは、テストされていないクエリや構成変更を本番システムに直接デプロイしたことに起因します。 @@ -57,8 +51,6 @@ ClickHouse Cloud のマネージドプラットフォームは、インフラ運 **サイズ設定**: ステージングサービスは、本番の負荷特性に近づけるように規模を見積もります。著しく小さいインフラでテストしても、リソース競合やスケーリングの問題が顕在化しない可能性があります。定期的なデータリフレッシュや合成データ生成を通じて、本番を代表するデータセットを使用してください。ステージング環境のサイズ決定やサービスを適切にスケールさせる方法については、[サイズとハードウェアの推奨事項](/guides/sizing-and-hardware-recommendations) および [Scaling in ClickHouse Cloud](/manage/scaling) のドキュメントを参照してください。これらの資料では、メモリ、CPU、ストレージのサイズ決定に関する実践的なアドバイスや、垂直スケーリングおよび水平スケーリングの選択肢の詳細を提供しており、ステージング環境を本番ワークロードに適合させる際の助けになります。 - - ## プライベートネットワーキング {#private-networking} ClickHouse Cloud の[プライベートネットワーキング](/cloud/security/connectivity/private-networking)を使用すると、ClickHouse サービスをクラウドの仮想ネットワークに直接接続でき、データがパブリックインターネットを経由しないようにできます。これは、厳格なセキュリティやコンプライアンス要件を持つ組織や、プライベートサブネットでアプリケーションを実行している組織にとって不可欠です。 @@ -71,8 +63,6 @@ ClickHouse Cloud は、次の方法でプライベートネットワーキング より技術的な詳細やステップバイステップのセットアップ手順が必要な場合は、各プロバイダー向けにリンクされているドキュメントに包括的なガイドが記載されています。 - - ## エンタープライズ認証とユーザー管理 {#enterprise-authentication} コンソールベースのユーザー管理からエンタープライズ認証との統合に移行することは、本番運用に向けた準備として不可欠です。 @@ -103,8 +93,6 @@ ClickHouse Cloud は現在、SCIM や IdP を介した自動プロビジョニ [Cloud Access Management](/cloud/security/cloud_access_management) および [SAML SSO のセットアップ](/cloud/security/saml-setup) について、詳しくはそれぞれのドキュメントを参照してください。 - - ## Infrastructure as Code と自動化 {#infrastructure-as-code} Infrastructure as Code(IaC)のプラクティスと API による自動化で ClickHouse Cloud を管理すると、デプロイメント構成に一貫性、バージョン管理、再現性を持たせることができます。 @@ -148,7 +136,6 @@ Terraform プロバイダーは、サービスのプロビジョニング、IP API 認証は Terraform と同じトークンベース方式を使用します。完全な API リファレンスおよび連携例については、[ClickHouse Cloud API](/cloud/manage/api/api-overview) ドキュメントを参照してください。 - ## モニタリングと運用統合 {#monitoring-integration} 既存のモニタリング基盤に ClickHouse Cloud を接続することで、可視性を確保し、問題を事前に検知できます。 @@ -179,7 +166,6 @@ scrape_configs: 詳細な Prometheus/Grafana の構成や高度なアラート設定を含む包括的なセットアップについては、[ClickHouse Cloud Observability Guide](/use-cases/observability/cloud-monitoring#prometheus) を参照してください。 - ## 事業継続性とサポート連携 
{#business-continuity} バックアップ検証手順とサポート連携を確立することで、ClickHouse Cloud デプロイメントがインシデントから復旧し、必要なときにサポートを受けられるようにします。 @@ -206,8 +192,6 @@ ClickHouse Cloud は、保持期間を設定可能な自動バックアップを [ClickHouse Cloud のバックアップと復旧](/cloud/manage/backups/overview)および[サポートサービス](/about-us/support)について、詳しくは各ドキュメントを参照してください。 - - ## 次のステップ {#next-steps} このガイドで説明した連携と手順を実装したら、[監視](/cloud/get-started/cloud/resource-tour#monitoring)、[セキュリティ](/cloud/get-started/cloud/resource-tour#security)、[コスト最適化](/cloud/get-started/cloud/resource-tour#cost-optimization) に関するガイドについては、[Cloud リソースツアー](/cloud/get-started/cloud/resource-tour) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md index cd6d10f97eb..74fdaefff72 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md @@ -16,7 +16,6 @@ import step_5 from '@site/static/images/cloud/guides/sql_console/service_level_a import step_6 from '@site/static/images/cloud/guides/sql_console/service_level_access/6_service_settings.png' import step_7 from '@site/static/images/cloud/guides/sql_console/service_level_access/7_service_settings.png' - # SQL コンソールのロール割り当てを構成する {#configuring-sql-console-role-assignments} > このガイドでは、Cloud コンソール全体でのアクセス権限と、Cloud コンソール内でユーザーが利用できる機能を決定する SQL コンソールのロール割り当ての構成方法について説明します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md index 84d20d8db0c..f42a7952ee5 
100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md @@ -16,7 +16,6 @@ import user_grant_permissions_options from '@site/static/images/cloud/security/c SQL コンソールユーザーは各セッションごとに作成され、自動的に更新される X.509 証明書を使って認証されます。セッションが終了すると、そのユーザーは削除されます。監査目的のアクセスリストを作成する場合は、コンソールで対象サービスの Settings タブに移動し、データベース内に存在するデータベースユーザーに加えて、SQL コンソールからのアクセスも確認してください。カスタムロールが設定されている場合、ユーザーのアクセス権は、そのユーザー名で終わるロールに一覧表示されます。 - ## SQL コンソールのユーザーとロール {#sql-console-users-and-roles} 基本的な SQL コンソールのロールは、Service Read Only 権限および Service Admin 権限を持つユーザーに割り当てることができます。詳細は、[Manage SQL Console Role Assignments](/cloud/guides/sql-console/manage-sql-console-role-assignments) を参照してください。本ガイドでは、SQL コンソールユーザー向けにカスタムロールを作成する方法を説明します。 @@ -52,8 +51,6 @@ GRANT database_developer TO `sql-console-role:my.user@domain.com`; - - ## データベース認証 {#database-authentication} ### データベースユーザー ID とパスワード {#database-user-id--password} @@ -79,7 +76,6 @@ ClickHouse Cloud のデータベースユーザーに対して SSH 認証を設 詳細な手順と例については、ナレッジベース内の「[SSH キーを使用して ClickHouse Cloud に接続する方法](/knowledgebase/how-to-connect-to-ch-cloud-using-ssh-keys)」を参照してください。 - ## データベース権限 {#database-permissions} SQL の [GRANT](/sql-reference/statements/grant) ステートメントを使用して、サービスおよびデータベース内で次の設定を行います。 @@ -160,7 +156,6 @@ GRANT default_role to userID; e. 
データベースにアクセスできるユーザー数を示すリンク `There are # users with access to this service.` をクリックして、ユーザー一覧を表示します。 - ## ウェアハウスユーザー {#warehouse-users} ウェアハウスユーザーは、同じウェアハウス内のサービス間で共有されます。詳細については、[ウェアハウスのアクセス制御](/cloud/reference/warehouses#access-controls)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md index 5427a0335b7..35224d369cd 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md @@ -15,7 +15,6 @@ import samlAzureApp from '@site/static/images/cloud/security/saml-azure-app.png' import samlAzureClaims from '@site/static/images/cloud/security/saml-azure-claims.png'; import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' - # SAML SSO のセットアップ {#saml-sso-setup} @@ -24,16 +23,12 @@ ClickHouse Cloud は、Security Assertion Markup Language (SAML) を利用した 現在、サービスプロバイダー起点の SSO、個別の接続を用いた複数組織での利用、およびジャストインタイムプロビジョニングをサポートしています。現時点では、SCIM (System for Cross-domain Identity Management) や属性マッピングには対応していません。 - - ## はじめる前に {#before-you-begin} IdP での管理者権限と、ClickHouse Cloud 組織での **Admin** ロールが必要です。IdP 内で接続を設定したら、以下の手順で求められている情報を添えて当社までご連絡いただくことで、設定を完了できます。 ログインプロセスを簡素化するため、SAML 接続に加えて **組織への直接リンク** を設定することを推奨します。IdP ごとに扱い方が異なります。お使いの IdP での具体的な方法については、この先の説明を参照してください。 - - ## IdP を構成する方法 {#how-to-configure-your-idp} ### 手順 {#steps} @@ -148,8 +143,6 @@ IdP での管理者権限と、ClickHouse Cloud 組織での **Admin** ロール - -
{" "} @@ -260,7 +253,6 @@ IdP での管理者権限と、ClickHouse Cloud 組織での **Admin** ロール | App attributes | email | 12. **Finish** をクリックします。 - 14. アプリを有効にするには、全員に対して **OFF** をクリックし、設定を全員に対して **ON** に変更します。画面左側のオプションを選択することで、アクセスをグループまたは組織単位に制限することもできます。
@@ -345,7 +337,6 @@ Azure (Microsoft) SAML は、Azure Active Directory (AD) または Microsoft Ent - ## 仕組み {#how-it-works} ### SAML SSO を利用したユーザー管理 {#user-management-with-saml-sso} @@ -360,8 +351,6 @@ ClickHouse Cloud では、サービスプロバイダー開始型 SSO のみを ClickHouse Cloud は、組織ごとに個別の接続を提供することで、複数組織向け SSO をサポートします。各組織にログインするには、ダイレクトリンク (`https://console.clickhouse.cloud/?connection={organizationid}`) を使用してください。別の組織にログインする前に、現在ログインしている組織から必ずログアウトしてください。 - - ## 追加情報 {#additional-information} 認証に関しては、セキュリティを最優先としています。このため、SSO を実装するにあたり、いくつかの重要な設計上の判断を行っており、その点について事前にご理解いただく必要があります。 @@ -370,8 +359,6 @@ ClickHouse Cloud は、組織ごとに個別の接続を提供することで、 - **SSO アカウントと非 SSO アカウントは自動的にはリンクされません。** 同じメールアドレスを使用している場合でも、ClickHouse のユーザー一覧には、同一ユーザーに対して複数のアカウントが表示されることがあります。 - - ## よくある問題のトラブルシューティング {#troubleshooting-common-issues} | エラー | 原因 | 解決方法 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md index 2408be25bc4..af541c614e6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md @@ -11,7 +11,6 @@ import Image from '@theme/IdealImage'; import ip_filtering_after_provisioning from '@site/static/images/cloud/security/ip-filtering-after-provisioning.png'; import ip_filter_add_single_ip from '@site/static/images/cloud/security/ip-filter-add-single-ip.png'; - ## IP フィルターの設定 {#setting-ip-filters} IP アクセスリストは、どの送信元アドレスからの接続を許可するかを指定することで、ClickHouse の各種サービスまたは API キーへのトラフィックを制限します。これらのリストは、サービスごとおよび API キーごとに設定できます。リストは、サービスや API キーの作成時だけでなく、作成後にも設定・変更できます。 @@ -20,16 +19,12 @@ IP アクセスリストは、どの送信元アドレスからの接続を許 ClickHouse Cloud サービスに対して IP アクセスリストを作成しなかった場合、そのサービスには一切のトラフィックが許可されません。ClickHouse サービスの IP アクセスリストを `Allow from anywhere` に設定していると、パブリック IP 
を探索するインターネットクローラーやスキャナーによって、アイドル状態からアクティブ状態へサービスが定期的に移行させられる可能性があり、その結果として、少額ではあるものの想定外のコストが発生することがあります。 ::: - - ## 準備 {#prepare} 開始する前に、アクセスリストに追加すべき IP アドレスまたは IP アドレス範囲を整理しておいてください。リモートワーカー、オンコール時の待機場所、VPN なども考慮に入れてください。IP アクセスリストのユーザーインターフェイスは、単一のアドレスおよび CIDR 表記のいずれも受け付けます。 Classless Inter-domain Routing (CIDR) 表記を使用すると、従来の Class A、B、C (8、16、24) のサブネットマスクサイズよりも小さい IP アドレス範囲を指定できます。[ARIN](https://account.arin.net/public/cidrCalculator) をはじめとする複数の組織が CIDR 計算機を提供しており、必要に応じて利用できます。CIDR 表記の詳細については、[Classless Inter-domain Routing (CIDR)](https://www.rfc-editor.org/rfc/rfc4632.html) RFC を参照してください。 - - ## IP アクセスリストを作成または変更する {#create-or-modify-an-ip-access-list} :::note PrivateLink の外側からの接続にのみ適用 @@ -90,8 +85,6 @@ IP アクセスリストは、[PrivateLink](/cloud/security/connectivity/private 行った変更を適用するには、**Save** をクリックする必要があります。 - - ## 検証 {#verification} フィルターを作成したら、その範囲内からサービスへ接続できることを確認し、許可された範囲外からの接続が拒否されることも確認してください。 簡単な `curl` コマンドを使用して検証できます: @@ -118,7 +111,6 @@ curl https://.clickhouse.cloud:8443 Ok. 
``` - ## 制限事項 {#limitations} - 現在、IP アクセスリストは IPv4 のみをサポートしています diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md index 636805b6748..fd93abeace3 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md @@ -18,7 +18,6 @@ import pe_remove_private_endpoint from '@site/static/images/cloud/security/pe-re import aws_private_link_pe_filters from '@site/static/images/cloud/security/aws-privatelink-pe-filters.png'; import aws_private_link_ped_nsname from '@site/static/images/cloud/security/aws-privatelink-pe-dns-name.png'; - # AWS PrivateLink {#aws-privatelink} @@ -69,14 +68,10 @@ ClickHouse Cloud は、以下のリージョンからの [クロスリージョ Terraform のサンプルは[こちら](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/)を参照してください。 - - ## 重要な考慮事項 {#considerations} ClickHouse は、AWS リージョン内で同じ公開済みの [サービスエンドポイント](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html#endpoint-service-overview) を再利用できるよう、サービスをグループ化しようとします。ただし、このグループ化が常に保証されるわけではなく、特にサービスを複数の ClickHouse 組織に分散している場合には当てはまらないことがあります。 すでに同じ ClickHouse 組織内の他のサービス向けに PrivateLink を構成済みの場合は、そのグループ化により多くの手順を省略できることが多く、最終ステップである「ClickHouse のエンドポイント ID を ClickHouse サービスの許可リストに追加する」に直接進むことができます。 - - ## この手順の前提条件 {#prerequisites} 作業を開始する前に、次のものを用意してください。 @@ -84,8 +79,6 @@ ClickHouse は、AWS リージョン内で同じ公開済みの [サービスエ 1. 利用可能な AWS アカウント 1. 
ClickHouse 側でプライベートエンドポイントを作成および管理するために必要な権限を持つ [ClickHouse API キー](/cloud/manage/openapi) - - ## 手順 {#steps} 次の手順に従って、AWS PrivateLink 経由で ClickHouse Cloud サービスに接続します。 @@ -178,7 +171,6 @@ VPC エンドポイントを作成したら、`Endpoint ID` の値を控えて #### オプション 2: AWS CloudFormation {#option-2-aws-cloudformation} - 次に、[エンドポイントの「Service name」を取得](#obtain-endpoint-service-info) の手順で取得した `Service name`console または `endpointServiceId`API を使用して、VPC エンドポイントを作成する必要があります。 正しいサブネット ID、セキュリティグループ、および VPC ID を使用していることを確認してください。 @@ -281,7 +273,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ -d @pl_config.json | jq ``` - 許可リストからエンドポイント ID を削除するには、次の手順を実行します。 ```bash @@ -343,7 +334,6 @@ jq .result この例では、`privateDnsHostname` の値に対応するホスト名での接続は PrivateLink 経由でルーティングされますが、`endpointServiceId` に対応するホスト名での接続はインターネット経由でルーティングされます。 - ## トラブルシューティング {#troubleshooting} ### 1 つのリージョン内で複数の PrivateLink を利用する場合 {#multiple-privatelinks-in-one-region} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md index 1b24465c4c6..78c5e1e526f 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md @@ -21,7 +21,6 @@ import gcp_pe_remove_private_endpoint from '@site/static/images/cloud/security/g import gcp_privatelink_pe_filters from '@site/static/images/cloud/security/gcp-privatelink-pe-filters.png'; import gcp_privatelink_pe_dns from '@site/static/images/cloud/security/gcp-privatelink-pe-dns.png'; - # Private Service Connect {#private-service-connect} @@ -50,16 +49,12 @@ Private Service Connect (PSC) は、サービス利用者が自分の Virtual Pr 1. 
「Endpoint ID」を ClickHouse Cloud サービスに追加します。 1. 「Endpoint ID」を ClickHouse サービスの許可リストに追加します。 - - ## 注意 {#attention} ClickHouse は、GCP リージョン内で同じ公開されている [PSC エンドポイント](https://cloud.google.com/vpc/docs/private-service-connect) を再利用できるように、サービスをグループ化しようとします。ただし、このグループ化は保証されず、特に複数の ClickHouse 組織にサービスを分散している場合は当てはまらない可能性があります。 すでに同じ ClickHouse 組織内の他のサービス向けに PSC を構成している場合は、そのグループ化によりほとんどの手順を省略できることが多く、最終ステップである「["Endpoint ID" を ClickHouse サービス許可リストに追加する](#add-endpoint-id-to-services-allow-list)」に直接進むことができます。 Terraform の例は [こちら](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/) を参照してください。 - - ## 始める前に {#before-you-get-started} :::note @@ -98,7 +93,6 @@ jq ".result[] | select (.region==\"${REGION:?}\" and .provider==\"${PROVIDER:?}\ * [新しいキーを作成](/cloud/manage/openapi)するか、既存のキーを使用できます。 ::: - ## Private Service Connect 用の GCP サービス アタッチメントと DNS 名を取得する {#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect} ### オプション 1: ClickHouse Cloud コンソール {#option-1-clickhouse-cloud-console} @@ -125,7 +119,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud `endpointServiceId` と `privateDnsHostname` をメモしておいてください。次の手順で使用します。 - ## サービスエンドポイントの作成 {#create-service-endpoint} :::important @@ -218,7 +211,6 @@ output "psc_connection_id" { `endpointServiceId`API または `Service name`console には、[Private Service Connect 用の GCP サービスアタッチメントの取得](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect) の手順で取得した値を使用します。 ::: - ## エンドポイントのプライベート DNS 名を設定する {#set-private-dns-name-for-endpoint} :::note @@ -227,8 +219,6 @@ DNS の構成方法にはさまざまなものがあります。ユースケー [Obtain GCP service attachment for Private Service Connect](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect) の手順で取得した「DNS 名」を、GCP Private Service Connect エンドポイントの IP アドレスを指すように設定する必要があります。これにより、VPC/ネットワーク内のサービスやコンポーネントが正しく名前解決できるようになります。 - - ## Endpoint ID を ClickHouse Cloud 組織に追加する {#add-endpoint-id-to-clickhouse-cloud-organization} ### オプション 1: 
ClickHouse Cloud コンソール {#option-1-clickhouse-cloud-console-1} @@ -288,7 +278,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}" -d @pl_config_org.json ``` - ## ClickHouse サービスの許可リストに「Endpoint ID」を追加する {#add-endpoint-id-to-services-allow-list} Private Service Connect で利用可能にしたい各インスタンスについて、許可リストに Endpoint ID を追加する必要があります。 @@ -343,7 +332,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" -d @pl_config.json | jq ``` - ## Private Service Connect を使用してインスタンスにアクセスする {#accessing-instance-using-private-service-connect} Private Link を有効にした各サービスには、パブリックエンドポイントとプライベートエンドポイントがあります。Private Link を使用して接続するには、プライベートエンドポイントを使用する必要があります。これは、[Private Service Connect 用の GCP サービスアタッチメントと DNS 名の取得](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect) で取得した `privateDnsHostname` です。 @@ -371,7 +359,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud この例では、`xxxxxxx.yy-xxxxN.p.gcp.clickhouse.cloud` ホスト名への接続は Private Service Connect 経由になります。一方、`xxxxxxx.yy-xxxxN.gcp.clickhouse.cloud` ホスト名への接続はインターネット経由になります。 - ## トラブルシューティング {#troubleshooting} ### DNS セットアップのテスト {#test-dns-setup} @@ -404,7 +391,6 @@ DNS_NAME - [Private Service Connect 用の GCP サービスアタッチメ openssl s_client -connect ${DNS_NAME}:9440 ``` - ```response # highlight-next-line {#highlight-next-line} CONNECTED(00000003) @@ -447,7 +433,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X GET -H "Content-Type: appl これを行うには、GCP VPC のファイアウォールルールを構成し、ClickHouse Cloud から社内/プライベートなデータベースサービスへの接続を許可します。[ClickHouse Cloud リージョンのデフォルトの送信 (egress) IP アドレス](/manage/data-sources/cloud-endpoints-api) と、[利用可能な静的 IP アドレス](https://api.clickhouse.cloud/static-ips.json) を確認してください。 - ## 詳細情報 {#more-information} 
詳しくは、[cloud.google.com/vpc/docs/configure-private-service-connect-services](https://cloud.google.com/vpc/docs/configure-private-service-connect-services) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md index ab245abdaaf..d283a701bc1 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md @@ -27,7 +27,6 @@ import azure_pe_remove_private_endpoint from '@site/static/images/cloud/security import azure_privatelink_pe_filter from '@site/static/images/cloud/security/azure-privatelink-pe-filter.png'; import azure_privatelink_pe_dns from '@site/static/images/cloud/security/azure-privatelink-pe-dns.png'; - # Azure Private Link {#azure-private-link} @@ -54,16 +53,12 @@ Azure は Private Link 経由でリージョンをまたいだ接続をサポー ClickHouse Cloud の Azure PrivateLink は、`resourceGUID` から Resource ID フィルタの利用へ切り替わりました。後方互換性があるため、引き続き `resourceGUID` を使用できますが、Resource ID フィルタへの移行を推奨します。移行するには、Resource ID を使って新しいエンドポイントを作成し、それをサービスに関連付けてから、従来の `resourceGUID` ベースのエンドポイントを削除してください。 ::: - - ## 注意事項 {#attention} ClickHouse は、同じ Azure リージョン内で公開済みの [Private Link service](https://learn.microsoft.com/en-us/azure/private-link/private-link-service-overview) を再利用できるように、サービスをグループ化しようと試みます。ただし、このグループ化は保証されておらず、特にサービスを複数の ClickHouse 組織に分散している場合には、当てはまらないことがあります。 すでに同じ ClickHouse 組織内の他のサービス向けに Private Link を構成済みの場合は、そのグループ化により多くの手順を省略できることがあり、最終手順である [Private Endpoint Resource ID をサービスの許可リストに追加する](#add-private-endpoint-id-to-services-allow-list) に直接進める場合があります。 Terraform のサンプルは ClickHouse の [Terraform Provider 
リポジトリ](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/) を参照してください。 - - ## Private Link 用の Azure 接続エイリアスを取得する {#obtain-azure-connection-alias-for-private-link} ### オプション 1: ClickHouse Cloud コンソール {#option-1-clickhouse-cloud-console} @@ -109,7 +104,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud `endpointServiceId` をメモしておいてください。次の手順で使用します。 - ## Azure でプライベート エンドポイントを作成する {#create-private-endpoint-in-azure} :::important @@ -216,7 +210,6 @@ Private Link を使用するには、プライベート エンドポイント接 プライベート エンドポイントのリソース ID は Azure ポータルで確認できます。前の手順で作成したプライベート エンドポイントを開き、**JSON View** をクリックします。 - プロパティ内で `id` フィールドを探し、この値をコピーします。 @@ -229,8 +222,6 @@ Private Link を使用するには、プライベート エンドポイント接 - - ## Private Link 用の DNS の設定 {#setting-up-dns-for-private-link} Private Link 経由でリソースにアクセスするには、Private DNS ゾーン (`${location_code}.privatelink.azure.clickhouse.cloud`) を作成し、それを VNet に関連付ける必要があります。 @@ -310,7 +301,6 @@ Name: xxxxxxxxxx.westus3.privatelink.azure.clickhouse.cloud Address: 10.0.0.4 ``` - ## プライベートエンドポイントのリソース ID を ClickHouse Cloud 組織に追加する {#add-the-private-endpoint-id-to-your-clickhouse-cloud-organization} ### オプション 1: ClickHouse Cloud コンソール {#option-1-clickhouse-cloud-console-1} @@ -379,7 +369,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}" -d @pl_config_org.json ``` - ## プライベートエンドポイントの Resource ID をサービスの許可リストに追加する {#add-private-endpoint-id-to-services-allow-list} デフォルトでは、Private Link 接続が承認・確立されていても、ClickHouse Cloud サービスは Private Link 接続経由では利用できません。Private Link を使用して利用可能にする各サービスごとに、プライベートエンドポイントの Resource ID を明示的に追加する必要があります。 @@ -443,7 +432,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" -d @pl_config.json | jq ``` - ## Private Link を使用して ClickHouse Cloud サービスにアクセスする 
{#access-your-clickhouse-cloud-service-using-private-link} Private Link を有効にした各サービスには、パブリックエンドポイントとプライベートエンドポイントがあります。Private Link を使用して接続するには、[Private Link 用の Azure 接続エイリアスを取得する](#obtain-azure-connection-alias-for-private-link) で取得した `privateDnsHostname`API または `DNS name`console のプライベートエンドポイントを使用する必要があります。 @@ -486,7 +474,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud Private Link 経由で ClickHouse Cloud サービスに接続するには、`privateDnsHostname` を使用します。 - ## トラブルシューティング {#troubleshooting} ### DNS 設定のテスト {#test-dns-setup} @@ -525,7 +512,6 @@ OpenSSL で接続できる必要があります(出力に CONNECTED と表示 openssl s_client -connect abcd.westus3.privatelink.azure.clickhouse.cloud:9440 ``` - ```response # highlight-next-line {#highlight-next-line} CONNECTED(00000003) @@ -564,7 +550,6 @@ INSTANCE_ID=<インスタンスID> curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X GET -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" | jq .result.privateEndpointIds ``` - ## 詳細情報 {#more-information} Azure Private Link の詳細は、[azure.microsoft.com/en-us/products/private-link](https://azure.microsoft.com/en-us/products/private-link) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md index 28519a91a3d..a5d60e8afa2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md @@ -11,11 +11,8 @@ import Image from '@theme/IdealImage'; import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' import cmek_performance from '@site/static/images/_snippets/cmek-performance.png'; - # データの暗号化 {#data-encryption} - - ## ストレージレベルの暗号化 {#storage-encryption} ClickHouse Cloud では、クラウドプロバイダー管理の AES 256 キーを利用した保存データの暗号化 (encryption at rest) 
がデフォルトで構成されています。詳細については、次を参照してください。 @@ -23,8 +20,6 @@ ClickHouse Cloud では、クラウドプロバイダー管理の AES 256 キー - [GCP におけるデフォルトの保存データ暗号化](https://cloud.google.com/docs/security/encryption/default-encryption) - [保存データ向け Azure ストレージ暗号化](https://learn.microsoft.com/en-us/azure/storage/common/storage-service-encryption) - - ## データベースレベルの暗号化 {#database-encryption} @@ -111,16 +106,12 @@ CMEK をセットアップしたら、新しい KMS キーを作成して権限 #### KMS キーポーラー {#kms-key-poller} - - CMEK を使用している場合、指定された KMS キーが有効かどうかは 10 分ごとに検証されます。KMS キーへのアクセスができなくなった場合、ClickHouse サービスは停止します。サービスを再開するには、このガイドの手順に従って KMS キーへのアクセスを復旧し、その後サービスを再起動してください。 ### バックアップと復元 {#backup-and-restore} バックアップは、関連付けられたサービスと同じキーを使用して暗号化されます。暗号化されたバックアップを復元すると、元のインスタンスと同じ KMS キーを使用する暗号化されたインスタンスが作成されます。必要に応じて、復元後に KMS キーをローテーションすることもできます。詳細は [Key Rotation](#key-rotation) を参照してください。 - - ## パフォーマンス {#performance} データベース暗号化は、ClickHouse に組み込まれている [データ暗号化用仮想ファイルシステム機能](/operations/storing-data#encrypted-virtual-file-system) を利用して、データを暗号化し、保護します。この機能で使用されるアルゴリズムは `AES_256_CTR` であり、ワークロードに応じて 5~15% 程度のパフォーマンス低下(オーバーヘッド)が発生することが想定されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md index 1ae2f730d1f..df1c967ed7f 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md @@ -7,8 +7,6 @@ doc_type: 'guide' keywords: ['監査ログ', 'データベースログ', 'コンプライアンス', 'セキュリティ', '監視'] --- - - # データベース監査ログ {#database-audit-log} ClickHouse では、デフォルトでデータベース監査ログが有効になっています。このページでは、セキュリティに関連するログに焦点を当てます。システムによって記録されるデータの詳細については、[system tables](/operations/system-tables/overview) のドキュメントを参照してください。 @@ -17,8 +15,6 @@ ClickHouse では、デフォルトでデータベース監査ログが有効に 情報は system テーブルに直接記録され、デフォルトでは最大 30 
日間保持されます。この期間は、システム内でのマージ頻度の影響を受けて長くなったり短くなったりします。お客様は、ログをより長期間保存したり、長期保管のためにセキュリティ情報・イベント管理 (SIEM) システムへエクスポートしたりするために、追加の対策を講じることができます。詳細は後述します。 ::: - - ## セキュリティ関連のログ {#security-relevant-logs} ClickHouse は、主にセッションログとクエリログに、データベースにおけるセキュリティ関連イベントを記録します。 @@ -53,13 +49,10 @@ FROM clusterAllReplicas('default', system.query_log) WHERE user=’compromised_account’ ``` - ## サービス内でのログデータの保持 {#reatining-log-data-within-services} より長期間の保持やログの耐久性を必要とするお客様は、マテリアライズドビューを使用してこれらの目的を達成できます。マテリアライズドビューとは何か、その利点や実装方法の詳細については、[materialized views](/materialized-views) に関する動画およびドキュメントを参照してください。 - - ## ログのエクスポート {#exporting-logs} システムログは、SIEM システムと互換性のあるさまざまな形式で、任意のストレージ先に書き込みまたはエクスポートできます。詳細については、[テーブル関数](/sql-reference/table-functions)のドキュメントを参照してください。最も一般的な方法は次のとおりです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md index 42655267092..5f37e4f5656 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md @@ -7,20 +7,14 @@ doc_type: 'guide' keywords: ['byoc', 'セキュリティ', 'プレイブック', 'ベストプラクティス', 'コンプライアンス'] --- - - # BYOC セキュリティプレイブック {#byoc-security-playbook} ClickHouse は Trust Center (https://trust.clickhouse.com) からダウンロード可能なセキュリティ共有責任モデルに基づいて、Bring Your Own Cloud (BYOC) を運用しています。以下の情報は、潜在的なセキュリティイベントを識別する方法の例として、BYOC のお客様向けに提供されています。お客様は、自身のセキュリティプログラムの観点からこの情報を検討し、追加の検知やアラートが有用かどうかを判断してください。 - - ## ClickHouse の認証情報が漏洩した可能性がある場合 {#compromised-clickhouse-credentials} 認証情報を悪用した攻撃を検出するためのクエリや、悪意のあるアクティビティを調査するためのクエリについては、[database audit log](/cloud/security/audit-logging/database-audit-log) のドキュメントを参照してください。 - - ## アプリケーション層に対するサービス拒否攻撃 {#application-layer-dos-attack} 
サービス拒否(DoS)攻撃を実行する方法にはさまざまなものがあります。攻撃が特定のペイロードによって ClickHouse インスタンスをクラッシュさせることを目的としている場合は、システムを稼働状態に復旧するか、システムを再起動したうえでアクセスを制限し、制御を取り戻してください。攻撃に関する詳細情報を取得するには、次のクエリを使用して [system.crash_log](/operations/system-tables/crash_log) を確認します。 @@ -30,15 +24,12 @@ SELECT * FROM clusterAllReplicas('default',system.crash_log) ``` - ## 侵害された、ClickHouse によって作成された AWS ロール {#compromised-clickhouse-created-aws-roles} ClickHouse は、システム機能を有効にするためにあらかじめ作成されたロールを使用します。このセクションでは、お客様が CloudTrail を有効にした AWS を利用しており、CloudTrail ログにアクセスできることを前提としています。 インシデントがロールの侵害によるものである可能性がある場合は、ClickHouse の IAM ロールおよびアクションに関連する CloudTrail と CloudWatch 内のアクティビティを確認してください。IAM ロールの一覧については、セットアップの一部として提供される [CloudFormation](/cloud/reference/byoc/onboarding/aws#cloudformation-iam-roles) スタックまたは Terraform モジュールを参照してください。 - - ## EKS クラスターへの不正アクセス {#unauthorized-access-eks-cluster} ClickHouse BYOC は EKS 上で動作します。このセクションでは、AWS で CloudTrail と CloudWatch を使用しており、ログへアクセスできることを前提とします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md index e1a55420d03..ade9afd8f1e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md @@ -15,7 +15,6 @@ import rta_3 from '@site/static/images/cloud/onboard/discover/use_cases/3_rta.pn - - ## ClickHouse Cloud 宛先 {#clickhouse-cloud-destination} Fivetran の公式ドキュメントを参照してください: @@ -51,8 +46,6 @@ Fivetran の公式ドキュメントを参照してください: - [ClickHouse 宛先の概要](https://fivetran.com/docs/destinations/clickhouse) - [ClickHouse 宛先のセットアップガイド](https://fivetran.com/docs/destinations/clickhouse/setup-guide) - - ## お問い合わせ {#contact-us} ご不明な点や機能に関するご要望がある場合は、[サポートチケット](/about-us/support)を作成してください。 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md index bd4aff29010..e70900583ac 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md @@ -30,7 +30,6 @@ import nifi14 from '@site/static/images/integrations/data-ingestion/etl-tools/ni import nifi15 from '@site/static/images/integrations/data-ingestion/etl-tools/nifi_15.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Apache NiFiをClickHouseに接続する {#connect-apache-nifi-to-clickhouse} @@ -42,27 +41,20 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - ## 接続情報を収集する {#1-gather-your-connection-details} - - ## Apache NiFi のダウンロードと実行 {#2-download-and-run-apache-nifi} 新規セットアップを行う場合は、https://nifi.apache.org/download.html からバイナリをダウンロードし、`./bin/nifi.sh start` を実行して起動します。 - - ## ClickHouse JDBC ドライバーをダウンロードする {#3-download-the-clickhouse-jdbc-driver} 1. GitHub の ClickHouse JDBC ドライバーのリリースページ にアクセスし、最新の JDBC リリースバージョンを確認します 2. 対象のリリースバージョンで「Show all xx assets」をクリックし、「shaded」または「all」というキーワードを含む JAR ファイルを探します(例: `clickhouse-jdbc-0.5.0-all.jar`) 3. JAR ファイルを Apache NiFi からアクセス可能なフォルダに配置し、その絶対パスを控えておきます - - ## `DBCPConnectionPool` コントローラサービスを追加し、プロパティを設定する {#4-add-dbcpconnectionpool-controller-service-and-configure-its-properties} 1. Apache NiFi でコントローラサービスを設定するには、歯車アイコン("gear" ボタン)をクリックして NiFi Flow Configuration ページを開きます @@ -107,8 +99,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## `ExecuteSQL` プロセッサを使用してテーブルから読み取る {#5-read-from-a-table-using-the-executesql-processor} 1. 
適切なアップストリームおよびダウンストリームのプロセッサと共に、`ExecuteSQL` プロセッサを追加します @@ -134,8 +124,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## `MergeRecord`と`PutDatabaseRecord`プロセッサを使用してテーブルに書き込む {#6-write-to-a-table-using-mergerecord-and-putdatabaserecord-processor} 1. 単一のINSERT文で複数行を書き込むには、まず複数のレコードを1つのレコードにマージする必要があります。これは`MergeRecord`プロセッサを使用して実行できます diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md index 0e0d4eafddb..04d609f6cd7 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md @@ -13,7 +13,6 @@ import Image from '@theme/IdealImage'; import GCS_examine_bucket_1 from '@site/static/images/integrations/data-ingestion/s3/GCS-examine-bucket-1.png'; import GCS_examine_bucket_2 from '@site/static/images/integrations/data-ingestion/s3/GCS-examine-bucket-2.png'; - # Google Cloud Storage を ClickHouse と統合する {#integrate-google-cloud-storage-with-clickhouse} :::note @@ -22,8 +21,6 @@ import GCS_examine_bucket_2 from '@site/static/images/integrations/data-ingestio ClickHouse は、ストレージとコンピュートを分離したいユーザーにとって、GCS が魅力的なストレージソリューションであると認識しています。この要件を満たすために、MergeTree エンジンのストレージとして GCS を使用することをサポートしています。これにより、ユーザーは GCS のスケーラビリティとコスト面での利点に加え、MergeTree エンジンのデータ挿入およびクエリのパフォーマンスを活用できるようになります。 - - ## GCS バックエンドの MergeTree {#gcs-backed-mergetree} ### ディスクの作成 {#creating-a-disk} @@ -140,7 +137,6 @@ GCS バケットをディスクとして利用するには、まず `conf.d` 配 このディスク定義に関連するすべての設定項目の一覧は[こちら](/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3)にあります。 - ### テーブルの作成 {#creating-a-table} 書き込み権限のあるバケットを使用するようにディスクを設定してあると仮定すると、以下の例のようなテーブルを作成できるはずです。簡潔にするため、NYC タクシー データセットのカラムの一部のみを使用し、データを GCS をバックエンドとするテーブルに直接ストリーミングします。 @@ -189,7 +185,6 @@ GCS ディスクを用いたレプリケーションは、`ReplicatedMergeTree` 
スレッドのチューニングに関する詳細は、[パフォーマンスの最適化](../s3/index.md#s3-optimizing-performance) を参照してください。 - ## Google Cloud Storage (GCS) を使用する {#gcs-multi-region} :::tip @@ -257,8 +252,6 @@ ClickHouse Keeper ノードでデプロイメント手順を実行する際は - ファイルを各 Keeper サーバー上の `/etc/clickhouse-keeper/keeper_config.xml` に配置します - 各マシンで、そのマシンの `raft_configuration` 内でのエントリ番号に基づいて `server_id` を編集します - - ```xml title=/etc/clickhouse-keeper/keeper_config.xml @@ -352,7 +345,6 @@ ClickHouse Keeper ノードでデプロイメント手順を実行する際は * ファイルを編集してホスト名を設定し、それらが ClickHouse サーバーノードから名前解決できることを確認してください - ```xml title=/etc/clickhouse-server/config.d/remote-servers.xml @@ -452,7 +444,6 @@ sudo systemctl status clickhouse-keeper `netcat` を使って ClickHouse Keeper にコマンドを送信します。たとえば、`mntr` は ClickHouse Keeper クラスターの状態を返します。各 Keeper ノードでこのコマンドを実行すると、1 つがリーダーで、残りの 2 つがフォロワーであることがわかります。 - ```bash echo mntr | nc localhost 9181 ``` @@ -561,7 +552,6 @@ is_broken: 0 cache_path: ``` - 3 行が結果セットに含まれています。経過時間: 0.002 秒。 ```` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md index 170b5f065a5..c47911bfb30 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md @@ -10,7 +10,6 @@ keywords: ['Google Dataflow ClickHouse', 'Dataflow ClickHouse integration', 'Apa import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Google Dataflow と ClickHouse の統合 {#integrating-google-dataflow-with-clickhouse} @@ -22,8 +21,6 @@ Google Dataflow を ClickHouse と組み合わせて利用する主な方法は - [Java runner](#1-java-runner) - [Predefined templates](#2-predefined-templates) - - ## Java runner {#1-java-runner} [Java runner](./java-runner) を使用すると、Apache Beam SDK の `ClickHouseIO` 統合を用いて、カスタム Dataflow 
パイプラインを実装できます。このアプローチではパイプラインロジックを柔軟かつ詳細に制御できるため、ETL プロセスを特定の要件に合わせて最適化できます。 ただし、このオプションを利用するには、Java プログラミングの知識と Apache Beam フレームワークへの習熟が必要です。 @@ -33,8 +30,6 @@ Google Dataflow を ClickHouse と組み合わせて利用する主な方法は - 複雑または高度なユースケースに最適。 - コーディングおよび Beam API の理解が必要。 - - ## 事前定義済みテンプレート {#2-predefined-templates} ClickHouse は、BigQuery から ClickHouse へのデータインポートなど、特定のユースケース向けに設計された[事前定義済みテンプレート](./templates)を提供しています。これらのテンプレートはすぐに利用可能で、連携プロセスを簡素化するため、ノーコードソリューションを好むユーザーにとって最適な選択肢です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md index 9262e134ff7..e98927ca6b0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md @@ -10,15 +10,12 @@ keywords: ['Dataflow Java Runner', 'Google Dataflow ClickHouse', 'Apache Beam Ja import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Dataflow Java ランナー {#dataflow-java-runner} Dataflow Java Runner を使用すると、カスタム Apache Beam パイプラインを Google Cloud の Dataflow サービス上で実行できます。このアプローチは柔軟性が最大限に高く、高度な ETL ワークフローに適しています。 - - ## 仕組み {#how-it-works} 1. 
**パイプラインの実装** diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md index 45ccd449073..2e9cd2ce547 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md @@ -10,30 +10,23 @@ keywords: ['google dataflow', 'gcp', 'データパイプライン', 'テンプ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Google Dataflow テンプレート {#google-dataflow-templates} Google Dataflow テンプレートは、カスタムコードを記述することなく、事前構築済みのすぐに利用できるデータパイプラインを実行するための便利な手段を提供します。これらのテンプレートは、一般的なデータ処理タスクを簡素化するよう設計されており、`ClickHouseIO` などのコネクタを活用して ClickHouse データベースとシームレスに統合できる [Apache Beam](https://beam.apache.org/) を用いて構築されています。Google Dataflow 上でこれらのテンプレートを実行することで、最小限の労力で高いスケーラビリティを備えた分散データ処理を実現できます。 - - ## なぜ Dataflow テンプレートを使用するのか {#why-use-dataflow-templates} - **使いやすさ**: テンプレートを使えば、特定のユースケース向けに事前構成されたパイプラインを利用でき、コードを書く必要がありません。 - **スケーラビリティ**: Dataflow により、大量データを扱う分散処理でもパイプラインを効率的にスケールできます。 - **コスト効率**: 使用したリソース分だけ支払い、パイプラインの実行コストを最適化できます。 - - ## Dataflow テンプレートの実行方法 {#how-to-run-dataflow-templates} 現時点では、ClickHouse の公式テンプレートは Google Cloud コンソール、CLI、または Dataflow REST API を通じて利用できます。 詳しい手順については、[Google Dataflow Run Pipeline From a Template Guide](https://cloud.google.com/dataflow/docs/templates/provided-templates) を参照してください。 - - ## ClickHouse テンプレート一覧 {#list-of-clickhouse-templates} * [BigQuery To ClickHouse](./templates/bigquery-to-clickhouse) * [GCS To ClickHouse](https://github.com/ClickHouse/DataflowTemplates/issues/3)(近日公開予定!) 
diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md index c6f666db706..4b65877a01b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md @@ -18,7 +18,6 @@ import dataflow_extended_template_form from '@site/static/images/integrations/da import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # Dataflow BigQuery から ClickHouse へのテンプレート {#dataflow-bigquery-to-clickhouse-template} BigQuery から ClickHouse への Dataflow テンプレートは、BigQuery テーブルから ClickHouse テーブルへデータをバッチで取り込むパイプラインです。 @@ -26,16 +25,12 @@ BigQuery から ClickHouse への Dataflow テンプレートは、BigQuery テ - - ## パイプラインの要件 {#pipeline-requirements} * ソース BigQuery テーブルが存在している必要があります。 * ターゲット ClickHouse テーブルが存在している必要があります。 * ClickHouse ホストが Dataflow ワーカーマシンからアクセス可能である必要があります。 - - ## テンプレートパラメータ {#template-parameters}
@@ -60,14 +55,10 @@ BigQuery から ClickHouse への Dataflow テンプレートは、BigQuery テ | `queryTempDataset` | クエリ結果を保存する一時テーブルを作成するために使用する既存のデータセットを指定します。例: `temp_dataset`。 | | | | `KMSEncryptionKey` | クエリソースを使用して BigQuery から読み取る場合に、一時テーブルを暗号化するために使用する Cloud KMS キー。例: `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`。 | | | - - :::note すべての `ClickHouseIO` パラメータのデフォルト値は、[`ClickHouseIO` Apache Beam Connector](/integrations/apache-beam#clickhouseiowrite-parameters) で確認できます。 ::: - - ## ソースおよびターゲットテーブルのスキーマ {#source-and-target-tables-schema} BigQuery のデータセットを ClickHouse に効果的にロードするために、このパイプラインは次の段階からなる列推論プロセスを実行します。 @@ -81,8 +72,6 @@ BigQuery のデータセットを ClickHouse に効果的にロードするた ただし、BigQuery データセット(テーブルまたはクエリ)の列名は、ClickHouse のターゲットテーブルと完全に一致している必要があります。 ::: - - ## データ型のマッピング {#data-types-mapping} BigQuery の型は、ClickHouse テーブル定義に基づいて変換されます。したがって、上記の表では(特定の BigQuery テーブル/クエリに対して)ClickHouse 側のテーブルで使用することを推奨するマッピングを示しています。 @@ -97,8 +86,6 @@ BigQuery の型は、ClickHouse テーブル定義に基づいて変換されま | [**数値 - 整数型**](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric_types) | [**整数型**](../../../sql-reference/data-types/int-uint) | BigQuery では、すべての Int 型(`INT`、`SMALLINT`、`INTEGER`、`BIGINT`、`TINYINT`、`BYTEINT`)は `INT64` のエイリアスです。テンプレートは定義されたカラム型(`Int8`、`Int16`、`Int32`、`Int64`)に基づいてカラムを変換するため、ClickHouse では適切な整数サイズを設定することを推奨します。また、ClickHouse テーブルで符号なし整数型(`UInt8`、`UInt16`、`UInt32`、`UInt64`)が使用されている場合も、テンプレートはそれらにも変換します。 | | [**数値 - 浮動小数点型**](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric_types) | [**浮動小数点型**](../../../sql-reference/data-types/float) | サポートされる ClickHouse 型:`Float32` および `Float64` がサポートされています。 | - - ## テンプレートの実行 {#running-the-template} BigQuery から ClickHouse へのテンプレートは、Google Cloud CLI を通じて実行できます。 @@ -187,12 +174,8 @@ job: Google Cloud Console の [Dataflow Jobs タブ](https://console.cloud.google.com/dataflow/jobs) に移動し、 ジョブのステータスを監視します。進捗状況やエラーなどのジョブの詳細を確認できます。 - - - - ## トラブルシューティング {#troubleshooting} ### 
メモリ制限(合計)超過エラー(コード 241){#code-241-dbexception-memory-limit-total-exceeded} @@ -202,8 +185,6 @@ Google Cloud Console の [Dataflow Jobs タブ](https://console.cloud.google.com * インスタンスのリソースを増やす: データ処理負荷に対応できるよう、より多くのメモリを持つ大きなインスタンスに ClickHouse サーバーをアップグレードします。 * バッチサイズを減らす: Dataflow ジョブ設定でバッチサイズを調整し、より小さなデータチャンクを ClickHouse に送信することで、バッチごとのメモリ消費を抑えます。これらの変更により、データインジェスト時のリソース使用をバランスさせることができます。 - - ## テンプレートのソースコード {#template-source-code} このテンプレートのソースコードは、ClickHouseの [DataflowTemplates](https://github.com/ClickHouse/DataflowTemplates) フォーク先リポジトリで公開されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md index 99485d24e43..8d0b40cec7d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md @@ -15,7 +15,6 @@ integration: import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import Image from '@theme/IdealImage'; - # Confluent Cloud と ClickHouse との連携 {#integrating-confluent-cloud-with-clickhouse}
@@ -30,15 +29,11 @@ import Image from '@theme/IdealImage';
- - ## 前提条件 {#prerequisites} 以下の内容について理解していることを前提とします: * [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md) * Confluent Cloud - - ## Confluent Cloud 向け ClickHouse 公式 Kafka コネクタ {#the-official-kafka-connector-from-clickhouse-with-confluent-cloud} #### トピックを作成する {#create-a-topic} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md index e57a96e148e..dc13b34b06d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md @@ -12,7 +12,6 @@ import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import AddCustomConnectorPlugin from '@site/static/images/integrations/data-ingestion/kafka/confluent/AddCustomConnectorPlugin.png'; - # Confluent Platform と ClickHouse の連携 {#integrating-confluent-platform-with-clickhouse}
@@ -27,15 +26,11 @@ import AddCustomConnectorPlugin from '@site/static/images/integrations/data-inge
- - ## 前提条件 {#prerequisites} 以下の内容に精通していることを前提とします: * [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md) * Confluent Platform および [Custom Connectors](https://docs.confluent.io/cloud/current/connectors/bring-your-connector/overview.html)。 - - ## Confluent Platform 向け ClickHouse 公式 Kafka コネクタ {#the-official-kafka-connector-from-clickhouse-with-confluent-platform} ### Confluent Platform へのインストール {#installing-on-confluent-platform} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md index 0c8c5729138..772441792b6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md @@ -13,14 +13,10 @@ integration: - category: 'data_ingestion' --- - - # Kafka を ClickHouse と統合する {#integrating-kafka-with-clickhouse} [Apache Kafka](https://kafka.apache.org/) は、ハイパフォーマンスなデータパイプライン、ストリーミング分析、データ統合、およびミッションクリティカルなアプリケーションのために数千社で利用されている、オープンソースの分散イベントストリーミングプラットフォームです。ClickHouse は、Kafka およびその他の Kafka API 互換ブローカー(例: Redpanda、Amazon MSK)からの**読み取り**およびそれらへの**書き込み**を行うための複数のオプションを提供します。 - - ## 利用可能なオプション {#available-options} ユースケースに最適なオプションを選択するには、ClickHouse のデプロイメントタイプ、データフローの方向性、運用要件など、複数の要因を考慮する必要があります。 @@ -91,8 +87,6 @@ ClickHouse をセルフホストしており、**導入ハードルが低い** #### はじめに {#kafka-table-engine-getting-started} - - Kafka テーブルエンジンの利用を開始するには、[リファレンスドキュメント](./kafka-table-engine.md) を参照してください。 ### オプションの選択 {#choosing-an-option} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md index e21883f1419..967aa14ba73 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md @@ -10,7 +10,6 @@ keywords: ['ClickHouse Kafka Connect Sink', 'ClickHouse 用 Kafka コネクタ', import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # ClickHouse Kafka Connect Sink {#clickhouse-kafka-connect-sink} :::note @@ -92,7 +91,6 @@ ClickHouse Sink を ClickHouse サーバーに接続するには、次の情報 設定オプションの完全な一覧表: - | Property Name | Description | Default Value | |-------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| | `hostname` (Required) | サーバーのホスト名または IP アドレス | N/A | @@ -124,8 +122,6 @@ ClickHouse Sink を ClickHouse サーバーに接続するには、次の情報 ### 対象テーブル {#target-tables} - - ClickHouse Connect Sink は Kafka のトピックからメッセージを読み取り、適切なテーブルに書き込みます。ClickHouse Connect Sink が書き込むのは既存のテーブルのみです。データの挿入を開始する前に、対象テーブルが ClickHouse 上に適切なスキーマで作成済みであることを必ず確認してください。 各トピックごとに、ClickHouse 上に専用の対象テーブルが必要です。対象テーブル名は、元のトピック名と一致している必要があります。 @@ -205,7 +201,6 @@ ClickHouse Kafka Connect Sink に送信される前に送信メッセージを コネクタは複数のトピックからデータを読み取ることができます - ```json { "name": "clickhouse-connect", @@ -340,7 +335,6 @@ com.clickhouse:type=ClickHouseKafkaConnector,name=SinkTask{id} * `byte-rate`: 1 秒あたりに送信されたバイト数の平均レート * `compression-rate`: 達成された圧縮率 - **パーティションレベルのメトリクス:** - `records-sent-total`: パーティションに送信されたレコードの総数 - `bytes-sent-total`: パーティションに送信されたバイト数の総量 @@ -430,8 +424,6 @@ JMX メトリクスの詳細な定義および Prometheus との統合につい - デフォルトのコネクタ設定で既にスループット要件を満たしている場合 - ClickHouse クラスターが受信負荷を容易に処理できている場合 - - #### データフローの理解 {#understanding-the-data-flow} 
チューニングを行う前に、コネクタ内でデータがどのように流れるかを理解しておくことが重要です。 @@ -467,24 +459,17 @@ Kafka Connect(フレームワーク)は、コネクタとは独立してバ ClickHouse のパフォーマンスを最適化するには、より大きなバッチサイズを目標としてください。 - - ```properties # ポーリング1回あたりのレコード数を増やす {#increase-the-number-of-records-per-poll} consumer.max.poll.records=5000 ``` - # パーティションのフェッチサイズを増やす (5 MB) {#increase-the-partition-fetch-size-5-mb} consumer.max.partition.fetch.bytes=5242880 - - # 任意: より多くのデータが揃うまで待つように最小フェッチサイズを増やす (1 MB) {#optional-increase-minimum-fetch-size-to-wait-for-more-data-1-mb} consumer.fetch.min.bytes=1048576 - - # オプション: レイテンシがクリティカルな場合の待機時間を短縮する {#optional-reduce-wait-time-if-latency-is-critical} consumer.fetch.max.wait.ms=300 @@ -581,7 +566,6 @@ consumer.fetch.max.wait.ms=300 `exactlyOnce=true` を非同期インサートと併用する場合: - ```json { "config": { @@ -685,7 +669,6 @@ SETTINGS **一般的なパフォーマンス問題**: - | 症状 | 考えられる原因 | 解決策 | | ------------------- | -------------------- | ----------------------------------------------------- | | コンシューマーラグが大きい | バッチが小さすぎる | `max.poll.records` を増やし、async inserts を有効にする | @@ -783,7 +766,6 @@ SETTINGS * `UnknownHostException` - ホスト名が解決できないときにスローされます。 * `IOException` - ネットワークに問題がある場合にスローされます。 - #### 「すべてのデータが空/ゼロになっている」 {#all-my-data-is-blankzeroes} おそらく、データ内のフィールドがテーブル内のフィールドと一致していません。これは特に CDC(変更データキャプチャ)や Debezium フォーマットでよく発生します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md index e496326cdbb..db8b19cc0a2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md @@ -10,7 +10,6 @@ keywords: ['kafka', 'kafka connect', 'jdbc', 'integration', 'data pipeline'] import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # JDBC 
コネクタ {#jdbc-connector} :::note @@ -54,8 +53,6 @@ ClickHouse JDBC ドライバ `clickhouse-jdbc--shaded.jar` を[こち 以下のパラメータは、ClickHouse で JDBC コネクタを使用する際に関連するものです。パラメータの完全な一覧は[こちら](https://docs.confluent.io/kafka-connect-jdbc/current/sink-connector/index.html)で確認できます。 - - * `_connection.url_` - これは `jdbc:clickhouse://<clickhouse host>:<clickhouse http port>/<target database>` の形式にする必要があります * `connection.user` - 対象データベースへの書き込み権限を持つユーザー * `table.name.format`- データを挿入する ClickHouse テーブル。事前に作成されている必要があります。 @@ -84,8 +81,6 @@ GitHub サンプルデータ用の設定ファイル例は、Connect をスタ テーブルが作成されていることを確認し、以前の例で既に存在する場合は削除してください。縮小版 GitHub データセットと互換性のある例を以下に示します。現在サポートされていない Array 型や Map 型が存在しないことに注意してください。 - - ```sql CREATE TABLE github ( @@ -150,7 +145,6 @@ SELECT count() FROM default.github; ### おすすめの参考資料 {#recommended-further-reading} - * [Kafka Sink 構成パラメータ](https://docs.confluent.io/kafka-connect-jdbc/current/sink-connector/sink_config_options.html#sink-config-options) * [Kafka Connect Deep Dive – JDBC Source Connector](https://www.confluent.io/blog/kafka-connect-deep-dive-jdbc-source-connector) * [Kafka Connect JDBC Sink 詳解: プライマリキーの扱い](https://rmoff.net/2021/03/12/kafka-connect-jdbc-sink-deep-dive-working-with-primary-keys/) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md index b34eb5e3244..d8f3fc0d06e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md @@ -6,12 +6,8 @@ slug: /integrations/data-ingestion/kafka/kafka-table-engine-named-collections doc_type: 'guide' --- - - # 名前付きコレクションを用いた ClickHouse と Kafka の統合 {#integrating-clickhouse-with-kafka-using-named-collections} - - ## はじめに 
{#introduction} このガイドでは、named collection(名前付きコレクション)を使用して ClickHouse を Kafka に接続する方法を解説します。named collection 用の設定ファイルを使用することで、次のような利点があります。 @@ -21,8 +17,6 @@ doc_type: 'guide' このガイドは、Apache Kafka 3.4.1 と ClickHouse 24.5.1 で検証されています。 - - ## 前提条件 {#assumptions} このドキュメントでは、以下を前提としています。 @@ -30,8 +24,6 @@ doc_type: 'guide' 2. セットアップ済みで稼働中の ClickHouse クラスター。 3. SQL の基本的な知識と、ClickHouse および Kafka の設定に関する基本的な理解。 - - ## 前提条件 {#prerequisites} 名前付きコレクションを作成するユーザーに、必要なアクセス権限が付与されていることを確認してください。 @@ -45,7 +37,6 @@ doc_type: 'guide' アクセス制御を有効にする方法の詳細については、[User Management Guide](./../../../guides/sre/user-management/index.md) を参照してください。 - ## 設定 {#configuration} 次のセクションを ClickHouse の `config.xml` ファイルに追加してください。 @@ -106,7 +97,6 @@ doc_type: 'guide' 3. `` 内のセクションには、追加の Kafka 設定オプションが含まれます。利用可能なオプションの詳細については、[librdkafka configuration](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md) を参照してください。 4. この例では、`SASL_SSL` セキュリティプロトコルと `PLAIN` メカニズムを使用しています。これらの設定は、利用している Kafka クラスター構成に応じて調整してください。 - ## テーブルとデータベースの作成 {#creating-tables-and-databases} ClickHouse クラスター上に必要なデータベースとテーブルを作成します。ClickHouse をシングルノード構成で実行している場合は、SQL コマンド内のクラスター指定部分を省略し、`ReplicatedMergeTree` の代わりに別のエンジンを使用してください。 @@ -193,7 +183,6 @@ SELECT FROM second_kafka_table; ``` - ## セットアップの検証 {#verifying-the-setup} これで、Kafka クラスター上にそれぞれに対応するコンシューマグループが表示されているはずです: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md index 0118b63ee5e..6399799cb5e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md @@ -4,7 +4,7 @@ sidebar_position: 1 slug: /integrations/kafka/cloud/amazon-msk/ description: 'ClickHouse 公式 Kafka コネクタを使用した Amazon MSK との統合' keywords: ['統合', 'kafka', 'amazon msk', 'シンク', 'コネクタ'] -title: 'Amazon MSK 
と ClickHouse の統合' +title: 'Amazon MSK と ClickHouse の統合' doc_type: 'guide' integration: - support_level: 'community' @@ -28,16 +28,14 @@ import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/curr -> 注記: 動画内で示されているポリシーは権限が広く、クイックスタート用途のみを想定しています。以下の最小権限の IAM ガイダンスを参照してください。 - - +> 注意: この動画で示しているポリシーは権限設定が緩く、クイックスタート用にのみ意図されています。IAM の最小権限ガイドラインについては、以下を参照してください。 ## 前提条件 {#prerequisites} -次のことを前提とします: -* [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md)、Amazon MSK、および MSK Connectors について理解していること。Amazon MSK の [はじめにガイド](https://docs.aws.amazon.com/msk/latest/developerguide/getting-started.html) と [MSK Connect ガイド](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect.html) の参照を推奨します。 -* MSK ブローカーがパブリックにアクセス可能であること。Developer Guide の [Public Access](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html) セクションを参照してください。 +次のことを前提とします: +* [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md) について理解していること。 +* Amazon MSK および MSK Connectors について理解していること。Amazon MSK の [はじめにガイド](https://docs.aws.amazon.com/msk/latest/developerguide/getting-started.html) と [MSK Connect ガイド](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect.html) の参照を推奨します。 ## ClickHouse 公式 Kafka コネクタと Amazon MSK の連携 {#the-official-kafka-connector-from-clickhouse-with-amazon-msk} @@ -184,4 +182,4 @@ MSK Connect から ClickHouse に接続できるようにするには、MSK ク 1. **接続検証(簡易チェックリスト):** 1. コネクターの実行環境から MSK のブートストラップ DNS を解決し、ブローカーポートへ TLS で接続できること。 1. ClickHouse の 9440 ポート(または HTTPS 用の 8443)へ TLS 接続を確立できること。 - 1. AWS のサービス(Glue / Secrets Manager)を使用する場合、それらのエンドポイントへの送信(アウトバウンド)トラフィックが許可されていること。
AWS のサービス(Glue / Secrets Manager)を使用する場合、それらのエンドポイントへの送信(アウトバウンド)トラフィックが許可されていること。 \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md index 589f661e159..6e53c3640b7 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md @@ -25,7 +25,6 @@ import HardwareSize from '@site/static/images/integrations/data-ingestion/s3/har 挿入パフォーマンスを向上させるためにスレッド数やブロックサイズをチューニングする前に、まずは S3 への INSERT の仕組みを理解することをお勧めします。すでに INSERT の仕組みに慣れている場合や、すぐに役立つヒントだけを知りたい場合は、以下の[サンプルデータセット](/integrations/s3/performance#example-dataset)に進んでください。 - ## 挿入メカニズム(単一ノード) {#insert-mechanics-single-node} ハードウェアの規模に加えて、ClickHouse のデータ挿入メカニズム(単一ノード)のパフォーマンスとリソース使用量に影響を与える主な要因は 2 つあります。**挿入ブロックサイズ** と **挿入の並列度** です。 @@ -81,7 +80,6 @@ ClickHouse は、圧縮サイズが約 150 GiB に[到達](/operations/settings/ ② メモリにロードされたブロックをマージして、より大きなブロックにします。 ``` - ③ マージされたブロックをディスク上の新しいパートに書き込む。 ① に戻る @@ -118,13 +116,10 @@ s3 のようなテーブル関数では、グロブパターンを使って読 `s3` 関数およびテーブルの場合、個々のファイルの並列ダウンロードは [max_download_threads](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_download_threads) と [max_download_buffer_size](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_download_buffer_size) の値によって決まります。ファイルサイズが `2 * max_download_buffer_size` より大きい場合にのみ、ファイルは並列でダウンロードされます。デフォルトでは、`max_download_buffer_size` は 10MiB に設定されています。場合によっては、各ファイルが単一スレッドによってダウンロードされるようにすることを目的として、このバッファサイズを 50 MB(`max_download_buffer_size=52428800`)まで安全に増やすことができます。これにより、各スレッドが行う S3 呼び出しに要する時間を短縮でき、その結果として S3 の待ち時間も短縮されます。さらに、並列読み取りには小さすぎるファイルについてスループットを向上させるために、ClickHouse はそのようなファイルを非同期に先読みすることでデータを自動的にプリフェッチします。 - ## パフォーマンスの測定 {#measuring-performance} S3 
テーブル関数を使用するクエリのパフォーマンス最適化は、次の 2 つのケースで必要になります。1 つ目は、S3 上のデータをその場に置いたままクエリする場合、すなわちデータは元の形式のまま S3 に残し、ClickHouse の計算リソースのみを使用するアドホッククエリの場合、2 つ目は、S3 から ClickHouse の MergeTree テーブルエンジンにデータを挿入する場合です。特に明記がない限り、以下の推奨事項は両方のシナリオに適用されます。 - - ## ハードウェア規模の影響 {#impact-of-hardware-size} @@ -137,14 +132,10 @@ S3 テーブル関数を使用するクエリのパフォーマンス最適化 したがって、取り込み全体のスループットにも影響します。 - - ## リージョンのローカリティ {#region-locality} バケットが ClickHouse インスタンスと同じリージョンに存在することを確認してください。この簡単な最適化により、特に ClickHouse インスタンスを AWS のインフラストラクチャ上にデプロイしている場合、スループットが大幅に向上する可能性があります。 - - ## フォーマット {#formats} ClickHouse は、`s3` 関数および `S3` エンジンを使用して、S3 バケットに保存されたファイルを [サポートされているフォーマット](/interfaces/formats#formats-overview) で読み取ることができます。元のファイルを直接読み取る場合、これらのフォーマットにはいくつかの明確な利点があります。 @@ -155,8 +146,6 @@ ClickHouse は、`s3` 関数および `S3` エンジンを使用して、S3 バ * 各圧縮フォーマットには一長一短があり、多くの場合、圧縮率と速度、および圧縮・解凍それぞれの性能のバランスを取ります。CSV や TSV のような元のファイルを圧縮する場合、lz4 は圧縮率を犠牲にする代わりに、最速の解凍性能を提供します。Gzip は一般的に、読み取り速度がわずかに低下する代わりに、より高い圧縮率を実現します。Xz はこれをさらに推し進め、通常は最も高い圧縮率を提供しますが、圧縮および解凍の性能は最も低くなります。エクスポートする場合、Gz と lz4 は概ね同程度の圧縮速度を提供します。これを接続速度と比較して検討してください。解凍や圧縮が高速になることによる利点は、S3 バケットへの接続が遅いと簡単に相殺されてしまいます。 * Native や Parquet のようなフォーマットでは、通常、圧縮のオーバーヘッドを正当化できません。これらのフォーマットは本質的にコンパクトであるため、データサイズ削減の効果は小さいことが多いです。圧縮および解凍に費やす時間は、ネットワーク転送時間を相殺することはほとんどありません。特に S3 はグローバルに利用可能であり、高いネットワーク帯域幅を持つため、なおさらです。 - - ## 例となるデータセット {#example-dataset} さらなる最適化の可能性を示すために、ここでは [Stack Overflow データセットの投稿](/data-modeling/schema-design#stack-overflow-dataset) を利用し、このデータに対するクエリおよび書き込みパフォーマンスの両方を最適化していきます。 @@ -202,7 +191,6 @@ FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow クエリを実行して読み取りを行う際、同じクエリを繰り返し実行した場合と比べて、最初のクエリが遅く見えることがよくあります。これは、S3 側のキャッシュと [ClickHouse Schema Inference Cache](/operations/system-tables/schema_inference_cache) の両方によるものです。後者はファイルに対して推論されたスキーマを保存するため、後続のアクセスではスキーマ推論ステップを省略でき、その結果クエリ時間を短縮できます。 ::: - ## 読み取りにスレッドを使用する {#using-threads-for-reads} S3 上での読み取りパフォーマンスは、ネットワーク帯域幅やローカル I/O 
によって制限されない限り、コア数に比例してスケールします。スレッド数を増やすと追加のメモリオーバーヘッドも発生するため、ユーザーはこれを理解しておく必要があります。読み取りスループットを向上させるために、次の項目を調整できます。 @@ -249,7 +237,6 @@ SETTINGS max_threads = 64 Peak memory usage: 639.99 MiB. ``` - ## INSERT におけるスレッド数とブロックサイズのチューニング {#tuning-threads-and-block-size-for-inserts} インジェスト性能を最大化するには、(1) INSERT のブロックサイズ、(2) INSERT の並列度を、(3) 利用可能な CPU コア数と RAM 量に基づいて選択・設定する必要があります。まとめると次のとおりです。 @@ -287,7 +274,6 @@ FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow 示したように、これらの設定を調整することで、挿入パフォーマンスは `33%` 以上向上しました。単一ノードでのパフォーマンスをさらに向上できるかどうかは、読者の検証に委ねます。 - ## リソースおよびノードによるスケーリング {#scaling-with-resources-and-nodes} リソースおよびノードによるスケーリングは、読み取りクエリと挿入クエリの両方に適用されます。 @@ -365,7 +351,6 @@ FROM s3Cluster('default', 'https://datasets-documentation.s3.eu-west-3.amazonaws 0 rows in set. Elapsed: 171.202 sec. Processed 59.82 million rows, 24.03 GB (349.41 thousand rows/s., 140.37 MB/s.) ``` - ファイルの読み込みではクエリ性能は向上しますが、挿入性能は改善されていないことに気付くでしょう。デフォルトでは、読み取りは `s3Cluster` を使って分散されますが、挿入はイニシエーターノードに対して行われます。これは、読み取り自体は各ノードで行われる一方で、得られた行は分散処理のためにイニシエーターノードへルーティングされることを意味します。高スループットなシナリオでは、これがボトルネックとなる可能性があります。これに対処するには、`s3cluster` 関数に対してパラメータ `parallel_distributed_insert_select` を設定します。 これを `parallel_distributed_insert_select=2` に設定すると、各ノード上の分散エンジンの下位テーブルに対して、各シャードで `SELECT` および `INSERT` が実行されるようになります。 @@ -382,7 +367,6 @@ Peak memory usage: 11.75 GiB. 予想どおり、これにより挿入パフォーマンスは 3 分の 1 に低下します。 - ## さらなるチューニング {#further-tuning} ### 重複排除の無効化 {#disable-de-duplication} @@ -416,7 +400,6 @@ SETTINGS parallel_distributed_insert_select = 2, min_insert_block_size_rows = 0, 0 rows in set. Elapsed: 49.688 sec. Processed 59.82 million rows, 24.03 GB (1.20 million rows/s., 483.66 MB/s.) 
``` - ## その他の注意事項 {#misc-notes} * メモリが限られた環境では、S3 へのデータ挿入時に `max_insert_delayed_streams_for_parallel_write` の値を下げることを検討してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md index 4e24bba32b2..98d43042c11 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md @@ -25,15 +25,12 @@ import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Astrato を ClickHouse に接続する {#connecting-astrato-to-clickhouse} Astrato は Pushdown SQL を使用して、ClickHouse Cloud またはオンプレミスの ClickHouse 環境に直接クエリを実行します。つまり、業界をリードする ClickHouse のパフォーマンスによって支えられた、必要なすべてのデータにアクセスできます。 - - ## 接続に必要な情報 {#connection-data-required} データ接続を設定するには、以下の情報が必要です。 @@ -43,8 +40,6 @@ Astrato は Pushdown SQL を使用して、ClickHouse Cloud またはオンプ - - ## ClickHouse へのデータ接続の作成 {#creating-the-data-connection-to-clickhouse} - サイドバーで **Data** を選択し、**Data Connection** タブを選択します @@ -74,8 +69,6 @@ Astrato は Pushdown SQL を使用して、ClickHouse Cloud またはオンプ 重複が作成された場合、データソース名にタイムスタンプが追加されます。 ::: - - ## セマンティックモデル / データビューの作成 {#creating-a-semantic-model--data-view} Data View エディターでは、ClickHouse 上のすべてのテーブルとスキーマが表示されます。開始するには、そこからいくつかを選択します。 @@ -92,8 +85,6 @@ Data View エディターでは、ClickHouse 上のすべてのテーブルと - - ## ダッシュボードの作成 {#creating-a-dashboard} わずか数ステップで、Astrato で最初のチャートを作成できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md index d3a7ebf5549..16afdd8c949 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md @@ -22,15 +22,12 @@ import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/curr import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import Image from '@theme/IdealImage'; - # Chartbrew を ClickHouse に接続する {#connecting-chartbrew-to-clickhouse} [Chartbrew](https://chartbrew.com) は、ユーザーがダッシュボードを作成し、データをリアルタイムで監視できるデータ可視化プラットフォームです。複数のデータソース(ClickHouse を含む)をサポートしており、チャートやレポートをノーコードで作成できるインターフェースを提供します。 - - ## 目的 {#goal} このガイドでは、Chartbrew を ClickHouse に接続し、SQL クエリを実行して、可視化を作成します。最後には、ダッシュボードは次のような表示になるかもしれません。 @@ -41,14 +38,10 @@ import Image from '@theme/IdealImage'; 作業に使えるデータセットがない場合は、サンプルデータセットのいずれかを追加できます。このガイドでは [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md) データセットを使用します。 ::: - - ## 1. 接続情報を準備する {#1-gather-your-connection-details} - - ## 2. Chartbrew を ClickHouse に接続する {#2-connect-chartbrew-to-clickhouse} 1. [Chartbrew](https://chartbrew.com/login) にログインし、**Connections** タブを開きます。 @@ -72,8 +65,6 @@ import Image from '@theme/IdealImage'; - - ## 3. データセットを作成して SQL クエリを実行する {#3-create-a-dataset-and-run-a-sql-query} 1. **Create dataset** ボタンをクリックするか、**Datasets** タブを開いて新しいデータセットを作成します。 @@ -100,7 +91,6 @@ ORDER BY year; データが取得できたら、**Configure dataset** をクリックして、可視化のパラメータを設定します。 - ## 4. 可視化を作成する {#4-create-a-visualization} 1. 可視化用のメトリクス(数値)とディメンション(カテゴリ型の値)を定義します。 @@ -114,8 +104,6 @@ ORDER BY year; - - ## 5. 
データ更新の自動化 {#5-automate-data-updates} ダッシュボードを最新の状態に保つために、データの自動更新をスケジュールできます。 @@ -126,8 +114,6 @@ ORDER BY year; - - ## さらに詳しく {#learn-more} 詳しくは、[Chartbrew と ClickHouse](https://chartbrew.com/blog/visualizing-clickhouse-data-with-chartbrew-a-step-by-step-guide/) についてのブログ記事をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md index a0ca59f7949..7327448bd59 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md @@ -18,7 +18,6 @@ import databrain_06 from '@site/static/images/integrations/data-visualization/da import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Databrain を ClickHouse に接続する {#connecting-databrain-to-clickhouse} @@ -31,16 +30,12 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; このガイドでは、Databrain を ClickHouse インスタンスに接続する手順を説明します。 - - ## 前提条件 {#pre-requisites} - 独自のインフラストラクチャ上で、または [ClickHouse Cloud](https://clickhouse.com/) 上でホストされている ClickHouse データベース。 - [Databrain アカウント](https://app.usedatabrain.com/users/sign-up)。 - データソースを接続するための Databrain ワークスペース。 - - ## Databrain を ClickHouse に接続する手順 {#steps-to-connect-databrain-to-clickhouse} ### 1. 
接続情報を準備する {#1-gather-your-connection-details} @@ -102,7 +97,6 @@ GRANT SELECT ON your_database.* TO your_databrain_user; `your_databrain_user` と `your_database` を、実際に使用するユーザー名とデータベース名に置き換えてください。 - ## ClickHouse で Databrain を使用する {#using-databrain-with-clickhouse} ### データを探索する {#explore-your-data} @@ -152,8 +146,6 @@ Databrain は、ClickHouse を使用する際にいくつかの高度な機能 - **埋め込みアナリティクス**: ダッシュボードとメトリクスをアプリケーションに直接埋め込めます - **セマンティックレイヤー**: 再利用可能なデータモデルとビジネスロジックを作成できます - - ## トラブルシューティング {#troubleshooting} ### 接続に失敗する {#connection-fails} @@ -175,8 +167,6 @@ ClickHouse に接続できない場合は、次の点を確認してください 3. **適切なデータ型の使用**: ClickHouse のスキーマで最適なデータ型を使用していることを確認してください 4. **インデックスの最適化**: ClickHouse のプライマリキーとスキップインデックスを活用してください - - ## さらに詳しく {#learn-more} Databrain の機能や、強力な分析機能を構築する方法の詳細については、以下を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md index f5b6d3f2737..e82d33792e3 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md @@ -19,7 +19,6 @@ import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # ClickHouse を Deepnote に接続する {#connect-clickhouse-to-deepnote} @@ -28,15 +27,11 @@ import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/curr このガイドでは、Deepnote のアカウントと稼働中の ClickHouse インスタンスが既に用意されていることを前提とします。 - - ## インタラクティブな例 {#interactive-example} Deepnote のデータノートブックから ClickHouse に対してクエリを実行するインタラクティブな例を試したい場合は、下のボタンをクリックして、[ClickHouse playground](../../../getting-started/playground.md) 
と接続されたテンプレートプロジェクトを起動してください。 [](https://deepnote.com/launch?template=ClickHouse%20and%20Deepnote) - - ## ClickHouse に接続する {#connect-to-clickhouse} 1. Deepnote 内で「Integrations」画面を開き、ClickHouse のタイルをクリックします。 @@ -52,8 +47,6 @@ Deepnote のデータノートブックから ClickHouse に対してクエリ 3. 以上で、ClickHouse と Deepnote の連携が完了しました。 - - ## ClickHouse 連携を使用する {#using-clickhouse-integration} 1. まず、ノートブック右側の ClickHouse 連携に接続します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md index 5c037c5385e..f1ab5eb5b30 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md @@ -12,7 +12,6 @@ import dot_01 from '@site/static/images/integrations/data-visualization/dot_01.p import dot_02 from '@site/static/images/integrations/data-visualization/dot_02.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Dot {#dot} @@ -20,16 +19,12 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; [Dot](https://www.getdot.ai/) は、あなたの **AI データアナリスト** です。 ClickHouse に直接接続し、自然言語でデータに関する質問をしたり、データを探索したり、仮説を検証したり、「なぜ」に関する問いに答えたりできます。これらはすべて、Slack、Microsoft Teams、ChatGPT、あるいはネイティブな Web UI 上からそのまま実行できます。 - - ## 前提条件 {#pre-requisites} - セルフホスト型、または [ClickHouse Cloud](https://clickhouse.com/cloud) 上の ClickHouse データベース - [Dot](https://www.getdot.ai/) のアカウント - [Hashboard](https://www.hashboard.com/) のアカウントとプロジェクト - - ## Dot を ClickHouse に接続する {#connecting-dot-to-clickhouse} @@ -48,8 +43,6 @@ ClickHouse に直接接続し、自然言語でデータに関する質問をし Dot は **query-pushdown** を利用します。ClickHouse がスケール可能な大規模な数値処理を担当し、Dot はその結果に基づいて正確で信頼性の高い回答を提供します。 - - ## ハイライト {#highlights} Dot 
は、会話を通じてデータを活用できるようにします: @@ -60,8 +53,6 @@ Dot は、会話を通じてデータを活用できるようにします: - **信頼できる結果**:Dot がクエリをスキーマや定義と照合して検証し、エラーを最小限に抑えます。 - **スケーラブル**:query-pushdown を基盤とし、Dot のインテリジェンスと ClickHouse の高速性を組み合わせています。 - - ## セキュリティとガバナンス {#security} Dot はエンタープライズ対応です。 @@ -72,8 +63,6 @@ Dot はエンタープライズ対応です。 - **ガバナンスと検証**: トレーニング/検証用スペースにより、誤回答(ハルシネーション)の発生を抑制 - **コンプライアンス**: SOC 2 Type I 認証取得済み - - ## 追加リソース {#additional-resources} - Dot ウェブサイト: [https://www.getdot.ai/](https://www.getdot.ai/) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md index cdfdbdee52d..7bf8da86718 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md @@ -21,20 +21,15 @@ import draxlr_06 from '@site/static/images/integrations/data-visualization/draxl import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Draxlr を ClickHouse に接続する {#connecting-draxlr-to-clickhouse} Draxlr は、ClickHouse データベースに接続するための直感的なインターフェースを提供し、チームが数分でデータの探索、可視化、インサイトの公開を行えるようにします。本ガイドでは、問題なく接続を確立するための手順を順を追って説明します。 - - ## 1. ClickHouse の認証情報を取得する {#1-get-your-clickhouse-credentials} - - ## 2. Draxlr を ClickHouse に接続する {#2--connect-draxlr-to-clickhouse} 1. ナビゲーションバーの **Connect a Database** ボタンをクリックします。 @@ -51,8 +46,6 @@ Draxlr は、ClickHouse データベースに接続するための直感的な 6. **Next** ボタンをクリックし、接続が確立されるまで待ちます。接続に成功すると、テーブル一覧ページが表示されます。 - - ## 4. データを探索する {#4-explore-your-data} 1. 一覧からいずれかのテーブルをクリックします。 @@ -67,8 +60,6 @@ Draxlr は、ClickHouse データベースに接続するための直感的な - - ## 4. SQL クエリの使用 {#4-using-sql-queries} 1. 
ナビゲーションバーの「Explore」ボタンをクリックします。 @@ -79,8 +70,6 @@ Draxlr は、ClickHouse データベースに接続するための直感的な 3. 「**Execute Query**」ボタンをクリックして結果を表示します。 - - ## 4. クエリの保存 {#4-saving-you-query} 1. クエリを実行した後、**Save Query** ボタンをクリックします。 @@ -93,8 +82,6 @@ Draxlr は、ClickHouse データベースに接続するための直感的な 4. クエリを保存するには、**Save** ボタンをクリックします。 - - ## 5. ダッシュボードの作成 {#5-building-dashboards} 1. ナビゲーションバーの **Dashboards** ボタンをクリックします。 @@ -107,7 +94,5 @@ Draxlr は、ClickHouse データベースに接続するための直感的な 4. 保存済みクエリの一覧からクエリを選択し、可視化の種類を選んでから **Add Dashboard Item** ボタンをクリックします。 - - ## 詳細はこちら {#learn-more} Draxlr についてさらに詳しく知るには、[Draxlr ドキュメント](https://draxlr.notion.site/draxlr/Draxlr-Docs-d228b23383f64d00a70836ff9643a928) サイトを参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md index d9f78e1c2c1..cce3e0bbe12 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md @@ -10,7 +10,6 @@ doc_type: 'guide' import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Embeddable を ClickHouse に接続する {#connecting-embeddable-to-clickhouse} @@ -21,13 +20,9 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 組み込みの行レベルセキュリティ機能により、各ユーザーは自分に閲覧権限があるデータだけを常に正確に確認できます。さらに、完全に構成可能な 2 段階のキャッシュにより、スケールさせながら高速なリアルタイムアナリティクスを提供できます。 - - ## 1. 接続情報を確認する {#1-gather-your-connection-details} - - ## 2. 
ClickHouse 接続タイプを作成する {#2-create-a-clickhouse-connection-type} Embeddable API を使用してデータベース接続を追加します。この接続は ClickHouse サービスへの接続に利用されます。次の API コールを使用して接続を追加できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md index 3afa48db408..5108d9f0beb 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md @@ -31,15 +31,12 @@ import explo_15 from '@site/static/images/integrations/data-visualization/explo_ import explo_16 from '@site/static/images/integrations/data-visualization/explo_16.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Explo を ClickHouse に接続する {#connecting-explo-to-clickhouse} あらゆるプラットフォームで利用できる顧客向けアナリティクス。美しい可視化のために設計され、シンプルさを追求して実装されています。 - - ## 目標 {#goal} このガイドでは、ClickHouse のデータを Explo に接続して結果を可視化します。チャートは次のようになります。 @@ -51,13 +48,9 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; まだ扱うデータセットがない場合は、サンプルデータセットのいずれかを追加できます。このガイドでは [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md) データセットを使用しているので、それを選んでもよいでしょう。同じドキュメントカテゴリに、他にもいくつかのデータセットが掲載されています。 ::: - - ## 1. 接続情報を取得する {#1-gather-your-connection-details} - - ## 2. Explo を ClickHouse に接続する {#2--connect-explo-to-clickhouse} 1. Explo アカウントにサインアップします。 @@ -91,8 +84,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 54.211.43.19, 52.55.98.121, 3.214.169.94, and 54.156.141.148 ` - - ## 3. ダッシュボードを作成する {#3-create-a-dashboard} 1. 左側のナビゲーションバーから **Dashboard** タブを開きます。 @@ -107,8 +98,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 4. 
SQL クエリを実行する {#4-run-a-sql-query} 1. 右側のサイドバーで、スキーマ名の下に表示されているテーブル名を確認します。その後、データセットエディタに次のコマンドを入力します: @@ -123,8 +112,6 @@ LIMIT 100 - - ## 5. チャートを作成する {#5-build-a-chart} 1. 左側の棒グラフアイコンを画面上にドラッグ&ドロップします。 @@ -147,8 +134,6 @@ LIMIT 100 - - ## 詳細情報 {#learn-more} Explo の詳細やダッシュボードの作成方法については、Explo ドキュメントをご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md index d325f8d4a2a..04998f75dfa 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md @@ -15,7 +15,6 @@ import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # ClickHouse を Fabi.ai に接続する {#connecting-clickhouse-to-fabiai} @@ -24,14 +23,10 @@ import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/curr - - ## 接続情報を取得する {#gather-your-connection-details} - - ## Fabi.ai アカウントを作成し、ClickHouse に接続する {#connect-to-clickhouse} Fabi.ai にログインするか、アカウントを作成します: https://app.fabi.ai/ @@ -46,16 +41,12 @@ Fabi.ai にログインするか、アカウントを作成します: https://ap 3. 
これで完了です。ClickHouse が Fabi.ai に接続されました。 - - ## ClickHouse へのクエリ実行 {#querying-clickhouse} Fabi.ai を ClickHouse に接続したら、任意の [Smartbook](https://docs.fabi.ai/analysis_and_reporting/smartbooks) を開き、SQL セルを作成します。Fabi.ai インスタンスに接続されているデータソースが 1 つだけの場合、SQL セルでは自動的に ClickHouse がデフォルトとして選択されます。複数のデータソースがある場合は、ソースのドロップダウンからクエリ対象のソースを選択できます。 - - ## 参考資料 {#additional-resources} [Fabi.ai](https://www.fabi.ai) ドキュメント: https://docs.fabi.ai/introduction diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md index f708313b4a7..1034514a22e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md @@ -13,7 +13,6 @@ import hashboard_01 from '@site/static/images/integrations/data-visualization/ha import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # ClickHouse を Hashboard に接続する {#connecting-clickhouse-to-hashboard} @@ -26,15 +25,11 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; このガイドでは、Hashboard を ClickHouse インスタンスに接続する手順を順を追って説明します。この情報は Hashboard の [ClickHouse 連携ドキュメント](https://docs.hashboard.com/docs/database-connections/clickhouse) にも掲載されています。 - - ## 前提条件 {#pre-requisites} - 自前のインフラストラクチャ上、または [ClickHouse Cloud](https://clickhouse.com/) 上でホストされている ClickHouse データベース。 - [Hashboard のアカウント](https://hashboard.com/getAccess)とプロジェクト。 - - ## Hashboard を ClickHouse に接続する手順 {#steps-to-connect-hashboard-to-clickhouse} ### 1. 
接続情報を収集する {#1-gather-your-connection-details} @@ -53,8 +48,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; これで ClickHouse データベースが Hashboard に接続され、[Data Models](https://docs.hashboard.com/docs/data-modeling/add-data-model)、[Explorations](https://docs.hashboard.com/docs/visualizing-data/explorations)、[Metrics](https://docs.hashboard.com/docs/metrics)、[Dashboards](https://docs.hashboard.com/docs/dashboards) の作成に進むことができます。これらの機能の詳細については、対応する Hashboard ドキュメントを参照してください。 - - ## 詳細情報 {#learn-more} 高度な機能やトラブルシューティングの詳細については、[Hashboard のドキュメント](https://docs.hashboard.com/)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md index 3d733411d5a..e28be33999e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md @@ -18,13 +18,10 @@ import luzmo_02 from '@site/static/images/integrations/data-visualization/luzmo_ import luzmo_03 from '@site/static/images/integrations/data-visualization/luzmo_03.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Luzmo と ClickHouse の連携 {#integrating-luzmo-with-clickhouse} - - ## 1. ClickHouse 接続をセットアップする {#1-setup-a-clickhouse-connection} ClickHouse への接続を行うには、**Connections ページ**に移動し、**New Connection** を選択してから、New Connection モーダルで ClickHouse を選択します。 @@ -42,8 +39,6 @@ ClickHouse への接続を行うには、**Connections ページ**に移動し API 経由で[ClickHouse への接続を作成する](https://developer.luzmo.com/api/createAccount?exampleSection=AccountCreateClickhouseRequestBody)方法については、開発者ドキュメントのサンプルを参照してください。 - - ## 2. 
データセットを追加する {#2-add-datasets} ClickHouse への接続が完了したら、[こちら](https://academy.luzmo.com/article/ldx3iltg)で説明されている手順に従ってデータセットを追加できます。ClickHouse 上で利用可能な 1 つまたは複数のデータセットを選択し、それらを Luzmo 内で[リンク](https://academy.luzmo.com/article/gkrx48x5)して、ダッシュボード上で一緒に利用できるようにします。[分析向けのデータ準備](https://academy.luzmo.com/article/u492qov0)に関するこの記事も必ず参照してください。 @@ -54,8 +49,6 @@ API を使用してデータセットを追加する方法については、[開 - - ## 使用上の注意事項 {#usage-notes} 1. Luzmo ClickHouse コネクタは、接続に HTTP API インターフェイス(通常はポート 8123 で稼働)を使用します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md index 3341f425784..ba73492004d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md @@ -25,7 +25,6 @@ import mitzu_10 from '@site/static/images/integrations/data-visualization/mitzu_ import mitzu_11 from '@site/static/images/integrations/data-visualization/mitzu_11.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Mitzu を ClickHouse に接続する {#connecting-mitzu-to-clickhouse} @@ -34,8 +33,6 @@ Mitzu は、ノーコードで利用できる、データウェアハウスネ しかし、これらのプラットフォームとは異なり、Mitzu では企業のプロダクト利用データを複製しません。その代わりに、企業がすでに保有しているデータウェアハウスまたはデータレイク上のデータに対して、ネイティブな SQL クエリを直接生成します。 - - ## 目的 {#goal} このガイドでは、次の内容を取り上げます。 @@ -50,38 +47,28 @@ Mitzu で使用するデータセットをお持ちでない場合は、NYC Taxi このガイドは、Mitzu の使い方についての簡単な概要にとどまります。より詳細な情報は [Mitzu のドキュメント](https://docs.mitzu.io/)を参照してください。 - - ## 1. 接続情報を取得する {#1-gather-your-connection-details} - - ## 2. Mitzu にサインインまたは新規登録する {#2-sign-in-or-sign-up-to-mitzu} まずは [https://app.mitzu.io](https://app.mitzu.io) にアクセスして新規登録します。 - - ## 3. 
ワークスペースを設定する {#3-configure-your-workspace} 組織を作成したら、左サイドバーにある `Set up your workspace` のオンボーディングガイドに従います。次に、`Connect Mitzu with your data warehouse` リンクをクリックします。 - - ## 4. Mitzu を ClickHouse に接続する {#4-connect-mitzu-to-clickhouse} まず、接続種別として ClickHouse を選択し、接続情報を設定します。次に、`Test connection & Save` ボタンをクリックして設定を保存します。 - - ## 5. Configure event tables {#5-configure-event-tables} 接続を保存したら、`Event tables` タブを選択し、`Add table` ボタンをクリックします。モーダルで、Mitzu に追加したいデータベースとテーブルを選択します。 @@ -103,8 +90,6 @@ Mitzu で使用するデータセットをお持ちでない場合は、NYC Taxi
すべてのテーブルの設定が完了したら、`Save & update event catalog` ボタンをクリックします。Mitzu が、上で設定したテーブルからすべてのイベントとそのプロパティを取得します。このステップには、データセットのサイズに応じて数分かかる場合があります。 - - ## 4. セグメンテーションクエリを実行する {#4-run-segmentation-queries} Mitzu でのユーザーセグメンテーションは、Amplitude、Mixpanel、PostHog と同じくらい簡単です。 @@ -120,8 +105,6 @@ Explore ページの左側にはイベント選択エリアがあり、上部セ ブレイクダウンには任意のイベントプロパティまたはユーザープロパティを使用できます(ユーザープロパティを統合する方法については以下を参照してください)。 ::: - - ## 5. ファネルクエリを実行する {#5-run-funnel-queries} ファネルには最大 9 ステップまで指定できます。ユーザーがそのファネルを完了できる時間枠を設定します。 @@ -135,8 +118,6 @@ SQL を 1 行も書かずに、コンバージョン率に関するインサイ `Funnel trends` を選択して、時間の経過に伴うファネルのトレンドを可視化します。 ::: - - ## 6. Run retention queries {#6-run-retention-queries} リテンション率を計算するために、最大 2 つのステップを選択します。測定したい期間に合わせて、リテンションウィンドウ(繰り返しウィンドウ)を選択します。 @@ -150,8 +131,6 @@ SQL を 1 行も書かずに、コンバージョン率に関するインサイ リテンション率が時間とともにどのように変化するかを可視化するには、`Weekly cohort retention` を選択します。 ::: - - ## 7. ジャーニークエリを実行する {#7-run-journey-queries} ファネルのステップを最大 9 個まで選択します。ユーザーがジャーニーを完了できる時間ウィンドウを選択します。Mitzu のジャーニーチャートは、選択したイベント間でユーザーがたどるすべてのパスを可視化してマッピングします。 @@ -164,15 +143,11 @@ SQL を 1 行も書かずに、コンバージョン率に関するインサイ
- - ## 8. 収益クエリを実行する {#8-run-revenue-queries} 収益設定を行っている場合、Mitzu は支払いイベントに基づいて合計 MRR とサブスクリプション数を算出できます。 - - ## 9. SQL ネイティブ {#9-sql-native} Mitzu は SQL ネイティブで、Explore ページで選択した設定に基づいてネイティブな SQL コードを生成します。 @@ -185,16 +160,12 @@ Mitzu は SQL ネイティブで、Explore ページで選択した設定に基 Mitzu の UI で制約にぶつかった場合は、SQL コードをコピーして BI ツールで作業を続けてください。 ::: - - ## Mitzu サポート {#mitzu-support} ご不明な点がありましたら、[support@mitzu.io](email://support@mitzu.io) までお気軽にお問い合わせください。 または、[こちら](https://join.slack.com/t/mitzu-io/shared_invite/zt-1h1ykr93a-_VtVu0XshfspFjOg6sczKg) から Slack コミュニティにご参加ください。 - - ## さらに詳しく {#learn-more} Mitzu の詳細は [mitzu.io](https://mitzu.io) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md index e8250b24b82..bca0063d6a7 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md @@ -30,7 +30,6 @@ import rocketbi_17 from '@site/static/images/integrations/data-visualization/roc import rocketbi_18 from '@site/static/images/integrations/data-visualization/rocketbi_18.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 目標: Rocket.BI で最初のダッシュボードを構築する {#goal-build-your-first-dashboard-with-rocketbi} @@ -43,8 +42,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; [こちらのリンクからダッシュボードを確認できます。](https://demo.rocket.bi/dashboard/sales-dashboard-7?token=7eecf750-cbde-4c53-8fa8-8b905fec667e) - - ## インストール {#install} あらかじめ用意されている Docker イメージで RocketBI を起動します。 @@ -64,7 +61,6 @@ wget https://raw.githubusercontent.com/datainsider-co/rocket-bi/main/docker/.cli ソースコードからのビルドや高度な設定については、[Rocket.BI 
README](https://github.com/datainsider-co/rocket-bi/blob/main/README.md) を参照してください。 - ## ダッシュボードを作成しましょう {#lets-build-the-dashboard} Dashboard では、作成したレポートを確認でき、**+New** をクリックして可視化を開始します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md index 0b520921e0b..7d0ccb54815 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md @@ -22,15 +22,12 @@ import zing_08 from '@site/static/images/integrations/data-visualization/zing_08 import zing_09 from '@site/static/images/integrations/data-visualization/zing_09.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Zing Data を ClickHouse に接続する {#connect-zing-data-to-clickhouse} Zing Data は、データ探索および可視化のためのプラットフォームです。Zing Data は、ClickHouse が提供する JS ドライバーを使用して ClickHouse に接続します。 - - ## 接続方法 {#how-to-connect} 1. 接続情報を収集します。 @@ -62,8 +59,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 6. ClickHouse データソースが追加されると、Zing の組織内の全ユーザーが **Data Sources** / **Sources** タブから利用できるようになります。 - - ## Zing Data でチャートとダッシュボードを作成する {#creating-charts-and-dashboards-in-zing-data} 1. ClickHouse データソースを追加したら、Web では **Zing App** をクリックし、モバイルではそのデータソースをタップしてチャートの作成を開始します。 @@ -93,8 +88,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained';
- - ## 関連コンテンツ {#related-content} - [ドキュメント](https://docs.getzingdata.com/docs/) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md index ab1344d9d85..1d9598fd56b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md @@ -19,7 +19,6 @@ import alias_table_config_example from '@site/static/images/integrations/data-vi import alias_table_select_example from '@site/static/images/integrations/data-visualization/grafana/alias_table_select_example.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Grafana での ClickHouse データソースの設定 {#configuring-clickhouse-data-source-in-grafana} @@ -62,7 +61,6 @@ secureJsonData: UI から構成を保存すると、`version` プロパティが追加されることに注意してください。これは、その構成を保存したプラグインのバージョンを示します。 - ### HTTP プロトコル {#http-protocol} HTTP プロトコル経由で接続する場合、追加の設定項目が表示されます。 @@ -79,7 +77,6 @@ jsonData: path: additional/path/example ``` - #### カスタム HTTP ヘッダー {#custom-http-headers} サーバーに送信されるリクエストにカスタムヘッダーを追加できます。 @@ -106,7 +103,6 @@ secureJsonData: secureHttpHeaders.X-Example-Secure-Header: セキュアヘッダー値 ``` - ## 追加設定 {#additional-settings} これらの追加設定は必須ではありません。 @@ -125,7 +121,6 @@ jsonData: validateSql: false # trueに設定すると、SQLエディタでSQLを検証します。 ``` - ### OpenTelemetry {#opentelemetry} OpenTelemetry (OTel) は、このプラグインに深く統合されています。 @@ -164,7 +159,6 @@ jsonData: messageColumn: # ログのメッセージ/内容 ``` - ### トレース {#traces} [トレース用のクエリビルダー](./query-builder.md#traces)でのクエリ作成を高速化するために、トレースクエリ用のデフォルトのデータベース/テーブルおよびカラムを設定できます。これにより、クエリビルダーに実行可能なトレース検索クエリがあらかじめ読み込まれ、Explore ページ上でのオブザーバビリティ向けのブラウジングが高速化されます。 @@ -201,7 +195,6 @@ jsonData: serviceTagsColumn: # サービスタグカラム。マップ型であることが想定されます。 ``` - ### カラムエイリアス {#column-aliases} カラムエイリアスは、データを別名や別の型として扱ってクエリするための便利な方法です。 @@ -232,7 +225,6 
@@ CREATE TABLE alias_example ( 詳細については、[ALIAS](/sql-reference/statements/create/table#alias) カラム型のドキュメントを参照してください。 - #### カラムエイリアステーブル {#column-alias-tables} デフォルトでは、Grafana は `DESC table` のレスポンスに基づいてカラム候補を提示します。 @@ -277,7 +269,6 @@ INSERT INTO example_table_aliases (`alias`, `select`, `type`) VALUES これら 2 種類のエイリアスは、複雑な型変換や JSON フィールドの抽出を行うために利用できます。 - ## すべての YAML オプション {#all-yaml-options} 以下は、プラグインで利用可能なすべての YAML 設定オプションです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md index 571c07a74b4..8435a41f6a7 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md @@ -22,7 +22,6 @@ import valid_ds from '@site/static/images/integrations/data-visualization/grafan import Image from '@theme/IdealImage'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Grafana 向け ClickHouse データソースプラグイン {#clickhouse-data-source-plugin-for-grafana} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md index d253b6e728d..977a177c699 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md @@ -20,7 +20,6 @@ import trace_id_in_logs from '@site/static/images/integrations/data-visualizatio import demo_data_links from '@site/static/images/integrations/data-visualization/grafana/demo_data_links.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # クエリビルダー {#query-builder} @@ -37,8 +36,6 @@ import 
ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - [Traces](#traces): トレースの検索・閲覧に最適化されています。[デフォルトを設定](./config.md#traces)した Explore ビューで使用すると最も効果的です。 - [SQL Editor](#sql-editor): クエリを完全に制御したい場合に使用できます。このモードでは、任意の SQL クエリを実行できます。 - - ## クエリタイプ {#query-types} *クエリタイプ* 設定を変更すると、作成するクエリの種類に合わせてクエリビルダーのレイアウトが変わります。 @@ -110,8 +107,6 @@ OpenTelemetry を有効にすると、スキーマバージョンに応じてカ (データセットが許容する場合は)`LIMIT` 句を `0` に設定して削除してみてください。 ::: - - | Field | Description | |----|----| | Builder Mode | Simple クエリでは Aggregate と Group By を除外し、Aggregate クエリではこれらのオプションを含めます。 | @@ -164,8 +159,6 @@ Trace クエリタイプは [data links](#data-links) をサポートします このクエリタイプでは、Trace Search モードではテーブルビューでデータがレンダリングされ、Trace ID モードではトレースパネルでレンダリングされます。 - - ## SQL エディタ {#sql-editor} クエリビルダーでは扱いきれないような複雑なクエリには、SQL エディタを使用できます。 @@ -180,8 +173,6 @@ SQL エディタは、クエリエディタ上部の「SQL Editor」を選択し - - ## データリンク {#data-links} Grafana の [data links](https://grafana.com/docs/grafana/latest/panels-visualizations/configure-data-links) @@ -220,8 +211,6 @@ Grafana の [data links](https://grafana.com/docs/grafana/latest/panels-visualiz - - ## マクロ {#macros} マクロは、クエリに動的な SQL を追加するための簡単な方法です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md index c82f58a0056..6dff4e18954 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md @@ -8,8 +8,6 @@ description: 'ClickHouse におけるデータ可視化について学ぶ' doc_type: 'guide' --- - - # ClickHouse でのデータの可視化 {#visualizing-data-in-clickhouse}
@@ -49,8 +47,6 @@ doc_type: 'guide' - [Tableau](./tableau/tableau-and-clickhouse.md) - [Zing Data](./community_integrations/zingdata-and-clickhouse.md) - - ## ClickHouse Cloud とデータ可視化ツールの互換性 {#clickhouse-cloud-compatibility-with-data-visualization-tools} | Tool | サポート方法 | テスト済み | ドキュメント | コメント | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md index 0c390d25a56..a13a721ff48 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md @@ -21,7 +21,6 @@ import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Lightdash {#lightdash} @@ -32,8 +31,6 @@ Lightdash は、dbt のオープン性と ClickHouse のパフォーマンスを このパートナーシップにより、**ClickHouse の高速性** と **Lightdash の開発者エクスペリエンス** が組み合わさり、AI を活用してインサイトを探索・可視化・自動化することがこれまでになく容易になります。 - - ## Lightdash と ClickHouse でインタラクティブなダッシュボードを構築する {#build-an-interactive-dashboard} このガイドでは、**Lightdash** を **ClickHouse** に接続して dbt モデルを探索し、インタラクティブなダッシュボードを構築する方法を説明します。 @@ -128,7 +125,6 @@ dbt プロジェクトが Lightdash に接続されて同期されると、**テ **Explore** ページは、5 つの主要な領域で構成されています。 - 1. **Dimensions and Metrics** — 選択したテーブルで利用可能なすべてのフィールドです 2. **Filters** — クエリで返されるデータを制限します 3. 
**Chart** — クエリ結果を可視化します @@ -196,7 +192,6 @@ AI Agents の詳細については、次を参照してください: [AI Agents - ## 詳細はこちら {#learn-more} dbt プロジェクトを Lightdash に接続する方法の詳細については、[Lightdash ドキュメント「ClickHouse セットアップ」](https://docs.lightdash.com/get-started/setup-lightdash/connect-project#clickhouse?utm_source=clickhouse&utm_medium=partner&utm_campaign=integration_docs)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md index 4531f3ce176..17c2d74a10b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md @@ -18,20 +18,15 @@ import looker_03 from '@site/static/images/integrations/data-visualization/looke import looker_04 from '@site/static/images/integrations/data-visualization/looker_04.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Looker {#looker} Looker は公式の ClickHouse データソースを通じて、ClickHouse Cloud またはオンプレミス環境の ClickHouse デプロイメントに接続できます。 - - ## 1. 接続情報を確認する {#1-gather-your-connection-details} - - ## 2. ClickHouse データソースを作成する {#2-create-a-clickhouse-data-source} Admin -> Database -> Connections ページに移動し、右上の「Add Connection」ボタンをクリックします。 @@ -56,8 +51,6 @@ ClickHouse Cloud を使用している場合、またはデプロイメントで これで、Looker プロジェクトに ClickHouse データソースを関連付けられるようになります。 - - ## 3. 既知の制限事項 {#3-known-limitations} 1. 
次のデータ型は、デフォルトで文字列として扱われます: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md index 724c2147ea9..af13d37a969 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md @@ -23,25 +23,18 @@ import looker_studio_enable_mysql from '@site/static/images/integrations/data-vi import looker_studio_mysql_cloud from '@site/static/images/integrations/data-visualization/looker_studio_mysql_cloud.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Looker Studio {#looker-studio} Looker Studio は、公式の Google MySQL データソースを使用して、MySQL インターフェース経由で ClickHouse に接続できます。 - - ## ClickHouse Cloud のセットアップ {#clickhouse-cloud-setup} - - ## オンプレミス環境での ClickHouse サーバーのセットアップ {#on-premise-clickhouse-server-setup} - - ## Looker Studio を ClickHouse に接続する {#connecting-looker-studio-to-clickhouse} まず、Google アカウントで https://lookerstudio.google.com にログインし、新しい Data Source を作成します: @@ -77,8 +70,6 @@ Google が提供する公式の MySQL コネクタ(名前は **MySQL**)を これで、データの探索や新しいレポートの作成に進むことができます。 - - ## ClickHouse Cloud で Looker Studio を使用する {#using-looker-studio-with-clickhouse-cloud} ClickHouse Cloud を使用する場合は、まず MySQL インターフェイスを有効にする必要があります。接続ダイアログの「MySQL」タブで有効にできます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md index 4c1f058435e..7b430f3e752 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md @@ -24,15 
+24,12 @@ import metabase_07 from '@site/static/images/integrations/data-visualization/met import metabase_08 from '@site/static/images/integrations/data-visualization/metabase_08.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Metabase を ClickHouse に接続する {#connecting-metabase-to-clickhouse} Metabase は、データに関する疑問を解消するための、使いやすいオープンソースの UI ツールです。Metabase は Java アプリケーションであり、JAR ファイルをダウンロードして `java -jar metabase.jar` で実行するだけで起動できます。Metabase は、JDBC ドライバーを使用して ClickHouse に接続します。このドライバーをダウンロードして `plugins` フォルダに配置します。 - - ## 目標 {#goal} このガイドでは、Metabase を使って ClickHouse のデータに対していくつかクエリを実行し、その結果を可視化します。可視化結果の 1 つは次のようになります: @@ -44,13 +41,9 @@ Metabase は、データに関する疑問を解消するための、使いや 作業に使えるデータセットがない場合は、サンプルデータセットのいずれかを追加できます。このガイドでは [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md) データセットを使用しているので、それを選んでもよいでしょう。同じドキュメントカテゴリ内に、他にもいくつか利用できるデータセットがあります。 ::: - - ## 1. 接続情報を確認する {#1-gather-your-connection-details} - - ## 2. Metabase 用の ClickHouse プラグインをダウンロードする {#2--download-the-clickhouse-plugin-for-metabase} 1. `plugins` フォルダがない場合は、`metabase.jar` を保存している場所のサブフォルダとして `plugins` フォルダを作成します。 @@ -63,8 +56,6 @@ Metabase は、データに関する疑問を解消するための、使いや 5. http://hostname:3000 で Metabase にアクセスします。初回起動時にはウェルカム画面が表示され、いくつかの質問に順に回答する必要があります。データベースの選択を求められた場合は、「**I'll add my data later**」を選択します。 - - ## 3. Metabase を ClickHouse に接続する {#3--connect-metabase-to-clickhouse} 1. 右上の歯車アイコンをクリックし、**Admin Settings** を選択して、Metabase 管理ページに移動します。 @@ -83,8 +74,6 @@ Metabase は、データに関する疑問を解消するための、使いや 6. **Save** ボタンをクリックすると、Metabase がデータベース内のテーブルをスキャンします。 - - ## 4. SQL クエリを実行する {#4-run-a-sql-query} 1. 右上隅にある **Exit admin** ボタンをクリックして、**Admin settings** を閉じます。 @@ -97,8 +86,6 @@ Metabase は、データに関する疑問を解消するための、使いや - - ## 5. 質問を作成する {#5-ask-a-question} 1. 
**+ New** をクリックして、**Question** を選択します。データベースとテーブルを基点として質問を作成できることに注目してください。たとえば、次の質問は `default` データベース内の `uk_price_paid` というテーブルに対して作成されています。以下は、Greater Manchester 郡内の町ごとの平均価格を計算するシンプルな質問です。 @@ -113,8 +100,6 @@ Metabase は、データに関する疑問を解消するための、使いや - - ## 詳細はこちら {#learn-more} Metabase の詳細やダッシュボードの作成方法については、Metabase ドキュメントをご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md index 18f3be7895c..ca1f741391d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md @@ -13,21 +13,16 @@ import omni_01 from '@site/static/images/integrations/data-visualization/omni_01 import omni_02 from '@site/static/images/integrations/data-visualization/omni_02.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Omni {#omni} Omniは、公式の ClickHouse データソースを介して ClickHouse Cloud またはオンプレミス環境に接続できます。 - - ## 1. 接続情報を準備する {#1-gather-your-connection-details} - - ## 2. 
ClickHouse データソースを作成する {#2-create-a-clickhouse-data-source} Admin -> Connections に移動し、画面右上の「Add Connection」ボタンをクリックします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md index 8b81d557a22..0f283a23cc2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md @@ -29,7 +29,6 @@ import powerbi_dsn_credentials from '@site/static/images/integrations/data-visua import powerbi_16 from '@site/static/images/integrations/data-visualization/powerbi_16.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Power BI {#power-bi} @@ -50,8 +49,6 @@ Power BI では、Desktop 版でダッシュボードを作成し、それを Po * [Power BI Desktop での可視化のために ClickHouse からデータをクエリする](#query-and-visualise-data) * [Power BI Service 用のオンプレミス データ ゲートウェイをセットアップする](#power-bi-service) - - ## 前提条件 {#prerequisites} ### Power BI のインストール {#power-bi-installation} @@ -69,8 +66,6 @@ ClickHouse インスタンスに接続するには、次の情報が必要です * Password - ユーザーのパスワード * Database - 接続対象インスタンス上のデータベース名 - - ## Power BI desktop {#power-bi-desktop} Power BI Desktop でデータのクエリを開始するには、次の手順を実行します。 @@ -157,16 +152,12 @@ ClickHouse からデータをインポートします。 インポートが完了すると、ClickHouse のデータは他のデータと同様に Power BI から利用できるようになります。
- - ## Power BI サービス {#power-bi-service} Microsoft Power BI サービスを使用するには、[オンプレミス データ ゲートウェイ](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-onprem) を作成する必要があります。 カスタム コネクタの設定方法の詳細については、Microsoft のドキュメント「[オンプレミス データ ゲートウェイでカスタム データ コネクタを使用する](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-custom-connectors)」を参照してください。 - - ## ODBC ドライバー(インポート専用) {#odbc-driver-import-only} DirectQuery を使用する ClickHouse Connector の使用を推奨します。 @@ -235,8 +226,6 @@ Power BI Desktop の開始画面で「データの取得」をクリックしま インポートが完了すると、通常どおり Power BI から ClickHouse のデータにアクセスできるようになります。 - - ## 既知の制限事項 {#known-limitations} ### UInt64 {#uint64} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md index 16189d1e1a9..e66688557eb 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md @@ -21,15 +21,12 @@ import quicksight_06 from '@site/static/images/integrations/data-visualization/q import quicksight_07 from '@site/static/images/integrations/data-visualization/quicksight_07.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # QuickSight {#quicksight} QuickSight は、公式の MySQL データソースと Direct Query モードを使用することで、MySQL インターフェイス経由でオンプレミス環境の ClickHouse(23.11 以降)に接続できます。 - - ## オンプレミス ClickHouse サーバーのセットアップ {#on-premise-clickhouse-server-setup} MySQL インターフェイスを有効にした ClickHouse サーバーのセットアップ方法については、[公式ドキュメント](/interfaces/mysql) を参照してください。 @@ -122,7 +119,6 @@ mysql> show databases; Read 4 rows, 603.00 B in 0.00156 sec., 2564 rows/sec., 377.48 KiB/sec. 
``` - ## QuickSight を ClickHouse に接続する {#connecting-quicksight-to-clickhouse} まず [https://quicksight.aws.amazon.com](https://quicksight.aws.amazon.com) にアクセスし、Datasets セクションに移動して「New dataset」をクリックします。 @@ -163,8 +159,6 @@ UI の左下隅で「Direct Query」モードが選択されていることを これで、データセットを公開して新しいビジュアライゼーションを作成できます。 - - ## 既知の制限事項 {#known-limitations} - SPICE インポートは期待どおりに動作しません。代わりに Direct Query モードを使用してください。[#58553](https://github.com/ClickHouse/ClickHouse/issues/58553) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md index f0594684c0d..572292de687 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md @@ -21,7 +21,6 @@ import splunk_9 from '@site/static/images/integrations/splunk/splunk-9.png'; import splunk_10 from '@site/static/images/integrations/splunk/splunk-10.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Splunk を ClickHouse に接続する {#connecting-splunk-to-clickhouse} @@ -36,8 +35,6 @@ ClickHouse 向けには、[Splunk DB Connect App](https://splunkbase.splunk.com/ この連携の代表的なユースケースは、NetFlow、Avro や Protobuf のバイナリデータ、DNS、VPC フローログ、その他の OTel ログなどの大規模データソースに ClickHouse を利用し、それらを Splunk 上でチームと共有して検索やダッシュボード作成を行う場合です。このアプローチでは、データは Splunk のインデックス層に取り込まれず、[Metabase](https://www.metabase.com/) や [Superset](https://superset.apache.org/) などの他の可視化ツール連携と同様に、ClickHouse から直接クエリされます。 - - ## 目的​ {#goal} このガイドでは、ClickHouse JDBC ドライバーを使用して ClickHouse を Splunk に接続します。ローカル環境に Splunk Enterprise をインストールしますが、データのインデックス作成は行いません。その代わりに、DB Connect のクエリエンジン経由で検索機能を使用します。 @@ -50,8 +47,6 @@ ClickHouse 向けには、[Splunk DB Connect App](https://splunkbase.splunk.com/ このガイドでは [New York City Taxi 
データセット](/getting-started/example-datasets/nyc-taxi) を使用します。[ドキュメント](http://localhost:3000/docs/getting-started/example-datasets)には、利用できる他の多くのデータセットもあります。 ::: - - ## 前提条件 {#prerequisites} 開始する前に、次のものが必要です: @@ -61,8 +56,6 @@ ClickHouse 向けには、[Splunk DB Connect App](https://splunkbase.splunk.com/ - Splunk Enterprise を実行している OS インスタンスへの管理者権限または SSH アクセス - ClickHouse の接続情報(ClickHouse Cloud を使用している場合は[こちら](/integrations/metabase#1-gather-your-connection-details)を参照) - - ## Splunk Enterprise で DB Connect をインストールして設定する {#install-and-configure-db-connect-on-splunk-enterprise} まず、Splunk Enterprise インスタンスに Java Runtime Environment をインストールする必要があります。Docker を使用している場合は、`microdnf install java-11-openjdk` コマンドを実行します。 @@ -81,8 +74,6 @@ DB Connect App がインストールされていることを確認したら、[C - - ## ClickHouse 向けに JDBC を設定する {#configure-jdbc-for-clickhouse} [ClickHouse JDBC driver](https://github.com/ClickHouse/clickhouse-java) をダウンロードし、次のような DB Connect Drivers フォルダに配置します: @@ -111,7 +102,6 @@ DB Connect App に戻り、Configuration > Settings > Drivers に移動し - ## Splunk の検索を ClickHouse に接続する {#connect-splunk-search-to-clickhouse} DB Connect App の Configuration から Databases -> Identities に移動し、ClickHouse 用の Identity を作成します。 @@ -132,8 +122,6 @@ ClickHouse ホストの情報を入力し、"Enable SSL" にチェックが入 エラーが発生した場合は、Splunk インスタンスの IP アドレスを ClickHouse Cloud の IP Access List に追加しているか確認してください。詳細は [ドキュメント](/cloud/security/setting-ip-filters) を参照してください。 ::: - - ## SQL クエリを実行する {#run-a-sql-query} ここでは、すべてが正しく動作していることを確認するために SQL クエリを実行します。 @@ -148,8 +136,6 @@ DB Connect App の DataLab セクションにある SQL Explorer で、接続先 クエリが成功すると、結果が表示されます。 - - ## ダッシュボードを作成する {#create-a-dashboard} SQL と強力な Splunk Processing Language (SPL) を組み合わせて活用するダッシュボードを作成します。 @@ -194,7 +180,6 @@ ORDER BY year, count(*) DESC; " connection="chc" - ## 時系列データ {#time-series-data} Splunk には、ダッシュボードで時系列データの可視化や表示に利用できる組み込み関数が数百用意されています。ここでは、SQL と SPL を組み合わせて、Splunk で時系列データを扱えるクエリを作成する例を示します。 @@ -209,7 +194,6 @@ FROM "demo"."conn" WHERE time >= now() - interval 1 HOURS" 
connection="chc" | sort - duration: ``` - ## さらに詳しく知る {#learn-more} Splunk DB Connect およびダッシュボードの作成方法の詳細については、[Splunk ドキュメント](https://docs.splunk.com/Documentation)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md index b00bf1d0acb..1fa6f0c990f 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md @@ -28,15 +28,12 @@ import superset_11 from '@site/static/images/integrations/data-visualization/sup import superset_12 from '@site/static/images/integrations/data-visualization/superset_12.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Superset を ClickHouse に接続する {#connect-superset-to-clickhouse} Apache Superset は、Python で構築されたオープンソースのデータ探索および可視化プラットフォームです。Superset は、ClickHouse が提供する Python ドライバーを使用して ClickHouse に接続します。ここでは、その仕組みを見ていきましょう。 - - ## 目標 {#goal} このガイドでは、ClickHouse データベースのデータを用いて、Superset でダッシュボードを作成します。ダッシュボードは次のようになります。 @@ -48,13 +45,9 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; まだ扱うデータセットを持っていない場合は、サンプルデータセットのいずれかを追加できます。このガイドでは [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md) データセットを使用しているので、それを選択するとよいでしょう。同じドキュメントカテゴリには、他にもいくつかのデータセットが用意されています。 ::: - - ## 1. 接続情報を準備する {#1-gather-your-connection-details} - - ## 2. ドライバーをインストールする {#2-install-the-driver} 1. Superset は ClickHouse に接続するために `clickhouse-connect` ドライバーを使用します。`clickhouse-connect` の詳細は https://pypi.org/project/clickhouse-connect/ に記載されており、次のコマンドでインストールできます: @@ -65,8 +58,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 2. Superset を起動(または再起動)します。 - - ## 3. Superset を ClickHouse に接続する {#3-connect-superset-to-clickhouse} 1. 
Superset で、上部メニューから **Data** を選択し、ドロップダウンメニューから **Databases** を選択します。**+ Database** ボタンをクリックして新しいデータベースを追加します: @@ -89,8 +80,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 4. **CONNECT** ボタンをクリックし、続けて **FINISH** ボタンをクリックしてセットアップウィザードを完了します。完了後、データベース一覧に対象のデータベースが表示されます。 - - ## 4. データセットを追加する {#4-add-a-dataset} 1. Superset で ClickHouse のデータを操作するには、**_dataset_** を定義する必要があります。Superset の上部メニューから **Data** を選択し、ドロップダウンメニューで **Datasets** を選択します。 @@ -102,8 +91,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 3. ダイアログウィンドウの下部にある **ADD** ボタンをクリックすると、テーブルがデータセット一覧に表示されます。これでダッシュボードを作成し、ClickHouse のデータを分析する準備が整いました。 - - ## 5. Superset でチャートとダッシュボードを作成する {#5--creating-charts-and-a-dashboard-in-superset} Superset に慣れている場合は、このセクションもすぐに馴染めるはずです。Superset を初めて使う場合でも、世の中にある他の多くの優れた可視化ツールと同様に、使い始めるのに時間はかからず、細かな設定やちょっとしたコツは、ツールを使い込む中で徐々に身についていきます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md index 9940113529b..5138f41ffce 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md @@ -29,7 +29,6 @@ import tableau_workbook6 from '@site/static/images/integrations/data-visualizati import tableau_workbook7 from '@site/static/images/integrations/data-visualization/tableau_workbook7.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Tableau を ClickHouse に接続する {#connecting-tableau-to-clickhouse} @@ -43,8 +42,6 @@ ClickHouse は公式の Tableau コネクタを提供しており、 - - ## 利用開始前のセットアップ {#setup-required-prior-usage} 1. 接続情報を準備します @@ -64,8 +61,6 @@ ClickHouse は公式の Tableau コネクタを提供しており、 - Windows: `C:\Program Files\Tableau\Drivers` 5. 
Tableau で ClickHouse のデータソースを設定し、データの可視化を開始します。 - - ## Tableau で ClickHouse データソースを構成する {#configure-a-clickhouse-data-source-in-tableau} `clickhouse-jdbc` ドライバーのインストールと設定が完了したので、ClickHouse の **TPCD** データベースに接続する @@ -127,8 +122,6 @@ ClickHouse Cloud を利用する場合は、安全な接続のために SSL の これで Tableau で可視化を作成する準備が整いました。 - - ## Tableau での可視化の作成 {#building-visualizations-in-tableau} Tableau で ClickHouse のデータソースを構成できたので、さっそくデータを可視化してみましょう。 @@ -187,8 +180,6 @@ TCPD データのシミュレートされた注文には大きな変動がない これで完了です。Tableau を ClickHouse に正常に接続できました。これにより、ClickHouse データを分析および可視化するための 可能性が大きく広がりました。 - - ## コネクタを手動でインストールする {#install-the-connector-manually} デフォルトでコネクタが含まれていない古いバージョンの Tableau Desktop を使用している場合は、次の手順で手動インストールできます。 @@ -199,18 +190,12 @@ TCPD データのシミュレートされた注文には大きな変動がない * Windows: `C:\Users\[Windows User]\Documents\My Tableau Repository\Connectors` 3. Tableau Desktop を再起動します。インストールが正常に完了していれば、「New Data Source」セクションにコネクタが表示されます。 - - ## 接続と分析に関するヒント {#connection-and-analysis-tips} Tableau と ClickHouse の統合を最適化するための、より詳しい説明やベストプラクティスについては、[接続のヒント](/integrations/tableau/connection-tips) および [分析のヒント](/integrations/tableau/analysis-tips) を参照してください。 - - ## テスト {#tests} このコネクタは [TDVT フレームワーク](https://tableau.github.io/connector-plugin-sdk/docs/tdvt) でテストされており、現在テストカバレッジ 97% を維持しています。 - - ## 概要 {#summary} Tableau を ClickHouse に接続するには、汎用の ODBC/JDBC 用 ClickHouse ドライバーを使用できます。ただし、このコネクターを使用すると、接続設定の手順を簡略化できます。コネクターに関して問題が発生した場合は、GitHub でお気軽にお問い合わせください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md index 03328e469da..7b5bfcf0d97 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md @@ -11,13 +11,10 @@ doc_type: 'guide' import 
Image from '@theme/IdealImage'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 接続に関するヒント {#connection-tips} - - ## 初期 SQL タブ {#initial-sql-tab} [詳細設定] タブで *Set Session ID* チェックボックスが有効になっている場合(デフォルト)、次を使用してセッションレベルの [設定](/operations/settings/settings/) を行うことができます。 @@ -26,7 +23,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; SET my_setting=value; ``` - ## 詳細タブ {#advanced-tab} 99% のケースでは詳細タブを使用する必要はありませんが、残りの 1% では次の設定を使用できます: @@ -38,8 +34,6 @@ SET my_setting=value; ``` マッピングについての詳細は、該当するセクションを参照してください。 - - * **JDBC Driver URL Parameters**。このフィールドには、`jdbcCompliance` などの残りの[ドライバーパラメータ](https://github.com/ClickHouse/clickhouse-jdbc#configuration)を指定できます。パラメータ値は URL エンコード形式で渡す必要がある点に注意してください。また、このフィールドと Advanced タブ内の前のフィールドの両方で `custom_http_params` や `typeMappings` を指定した場合は、Advanced タブ側の前の 2 つのフィールドで指定した値が優先されます。 * **Set Session ID** チェックボックス。Initial SQL タブでセッションレベルの設定を行うために必要であり、`"tableau-jdbc-connector-*{timestamp}*-*{number}*"` という形式で、タイムスタンプと疑似乱数を含む `session_id` を生成します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md index c80cd34a224..a2cac24c0d8 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md @@ -21,23 +21,16 @@ import tableau_desktop_03 from '@site/static/images/integrations/data-visualizat import tableau_desktop_04 from '@site/static/images/integrations/data-visualization/tableau_desktop_04.png'; import tableau_desktop_05 from '@site/static/images/integrations/data-visualization/tableau_desktop_05.png'; - # Tableau Online {#tableau-online} Tableau Online は、公式の MySQL データソースを利用し、MySQL 
インターフェース経由で ClickHouse Cloud またはオンプレミス環境の ClickHouse に接続できます。 - - ## ClickHouse Cloud のセットアップ {#clickhouse-cloud-setup} - - ## オンプレミスの ClickHouse サーバーのセットアップ {#on-premise-clickhouse-server-setup} - - ## Tableau Online から ClickHouse(オンプレミス・SSL なし)への接続 {#connecting-tableau-online-to-clickhouse-on-premise-without-ssl} Tableau Cloud サイトにログインし、新しい Published Data Source(公開データソース)を追加します。 @@ -64,8 +57,6 @@ Tableau Online がデータベースをスキャンし、利用可能なテー 注: Tableau Online と Tableau Desktop を併用し、それらの間で ClickHouse のデータセットを共有したい場合は、Tableau Desktop でもデフォルトの MySQL コネクタを使用してください。Data Source ドロップダウンから MySQL を選択した際に表示されるセットアップガイドに従って構成します(ガイドは [こちら](https://www.tableau.com/support/drivers) にあります)。M1 Mac を使用している場合は、ドライバーインストールの回避策について、この [トラブルシューティングスレッド](https://community.tableau.com/s/question/0D58b0000Ar6OhvCQE/unable-to-install-mysql-driver-for-m1-mac) を参照してください。 - - ## Tableau Online を ClickHouse に接続する(SSL を用いたクラウドまたはオンプレミス環境) {#connecting-tableau-online-to-clickhouse-cloud-or-on-premise-setup-with-ssl} Tableau Online の MySQL 接続セットアップウィザードでは SSL 証明書を指定できないため、 @@ -108,8 +99,6 @@ ClickHouse Cloud インスタンス用の MySQL ユーザー認証情報と、 最後に「Publish」をクリックすると、認証情報が埋め込まれたデータソースが自動的に Tableau Online で開かれます。 - - ## 既知の制限事項(ClickHouse 23.11) {#known-limitations-clickhouse-2311} 既知の制限事項はすべて ClickHouse `23.11` で修正されています。その他の非互換性が発生した場合は、[お問い合わせ](https://clickhouse.com/company/contact)いただくか、[新しい issue](https://github.com/ClickHouse/ClickHouse/issues) を作成してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md index 887b3ae0530..ea7229f80f1 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md @@ -55,7 +55,6 @@ Install-Package ClickHouse.Driver *** - ## クイックスタート {#quick-start} ```csharp @@ -83,7 +82,6 @@ using (var connection = new 
ClickHouseConnection("Host=my.clickhouse")) *** - ## 使用方法 {#usage} ### 接続文字列パラメータ {#connection-string} @@ -154,7 +152,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### データの挿入 {#inserting-data} パラメータ化されたクエリを使用してデータを挿入します。 @@ -178,7 +175,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### 一括挿入 {#bulk-insert} `ClickHouseBulkCopy` を使用するには、次のものが必要です: @@ -221,7 +217,6 @@ Console.WriteLine($"Rows written: {bulkCopy.RowsWritten}"); *** - ### SELECT クエリの実行 {#performing-select-queries} SELECT クエリを実行して結果を処理します。 @@ -249,7 +244,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### 生データストリーミング {#raw-streaming} ```csharp @@ -263,7 +257,6 @@ var json = reader.ReadToEnd(); *** - ### ネストされたカラムのサポート {#nested-columns} ClickHouse のネスト型(`Nested(...)`)は、配列と同様のセマンティクスで読み書きできます。 @@ -289,7 +282,6 @@ await bulkCopy.WriteToServerAsync(new[] { row1, row2 }); *** - ### AggregateFunction 列 {#aggregatefunction-columns} `AggregateFunction(...)` 型の列は、直接クエリしたりデータを挿入したりすることはできません。 @@ -308,7 +300,6 @@ SELECT uniqMerge(c) FROM t; *** - ### SQL パラメータ {#sql-parameters} クエリにパラメータを渡すには、次の形式で ClickHouse のパラメータ書式を使用する必要があります。 @@ -339,7 +330,6 @@ INSERT INTO table VALUES ({val1:Int32}, {val2:Array(UInt8)}) *** - ## サポートされているデータ型 {#supported-data-types} `ClickHouse.Driver` は、次の ClickHouse のデータ型を、それぞれ対応する .NET 型にマッピングしてサポートします。 @@ -455,7 +445,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - #### appsettings.json の使用 {#logging-appsettings-config} 標準的な .NET の構成機能を使用してログレベルを設定できます。 @@ -486,7 +475,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - #### インメモリ設定を使用する {#logging-inmemory-config} コード内でカテゴリごとにログ出力の詳細度を設定することもできます。 @@ -523,7 +511,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - ### カテゴリと出力元 {#logging-categories} 
このドライバーは専用のカテゴリを使用しており、コンポーネントごとにログレベルをきめ細かく調整できます。 @@ -558,7 +545,6 @@ await connection.OpenAsync(); * 接続のオープン/クローズ イベント * セッション ID の追跡 - ### デバッグモード: ネットワークトレースと診断 {#logging-debugmode} ネットワークに関する問題の診断を支援するために、ドライバーライブラリには .NET のネットワーク内部処理を低レベルでトレースできるヘルパー機能が含まれています。これを有効にするには、ログレベルを Trace に設定した LoggerFactory を渡し、EnableDebugMode を true に設定する必要があります(または `ClickHouse.Driver.Diagnostic.TraceHelper` クラスを使用して手動で有効化します)。警告: これは非常に冗長なログを大量に生成し、パフォーマンスに影響します。本番環境でデバッグモードを有効にすることは推奨されません。 @@ -580,7 +566,6 @@ var settings = new ClickHouseClientSettings() *** - ### ORM & Dapper サポート {#orm-support} `ClickHouse.Driver` は Dapper(いくつかの制限付きで)をサポートします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md index b39e5da02b3..c908d7cc42c 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md @@ -13,7 +13,6 @@ integration: import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md'; - # ClickHouse Go {#clickhouse-go} ## 簡単な例 {#a-simple-example} @@ -32,7 +31,6 @@ cd clickhouse-golang-example go mod init clickhouse-golang-example ``` - ### サンプルコードをコピーする {#copy-in-some-sample-code} このコードを `clickhouse-golang-example` ディレクトリに `main.go` として保存します。 @@ -113,14 +111,12 @@ func connect() (driver.Conn, error) { } ``` - ### go mod tidy を実行する {#run-go-mod-tidy} ```bash go mod tidy ``` - ### 接続情報を設定する {#set-your-connection-details} 先ほど接続情報を確認しました。その値を `main.go` の `connect()` 関数内で設定します。 @@ -141,7 +137,6 @@ func connect() (driver.Conn, error) { }, ``` - ### サンプルを実行する {#run-the-example} ```bash @@ -156,7 +151,6 @@ go run . 
2023/03/06 14:18:33 name: hourly_data, uuid: a4e36bd4-1e82-45b3-be77-74a0fe65c52b ``` - ### さらに詳しく {#learn-more} このカテゴリの他のドキュメントでは、ClickHouse Go クライアントの詳細について説明します。 @@ -248,7 +242,6 @@ go run main.go ``` - ### バージョニングと互換性 {#versioning--compatibility} このクライアントは ClickHouse とは独立してリリースされます。2.x は現在開発中のメジャーバージョンを表します。2.x 系のすべてのバージョンは互いに互換性があるように設計されています。 @@ -297,7 +290,6 @@ fmt.Println(v) **以降のすべてのサンプルでは、特に明記がない限り、ClickHouse の `conn` 変数はすでに作成済みで利用可能であるものとします。** - #### 接続設定 {#connection-settings} 接続を開く際、`Options` 構造体を使用してクライアントの動作を制御できます。利用可能な設定は次のとおりです。 @@ -355,7 +347,6 @@ if err != nil { [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/connect_settings.go) - #### コネクションプーリング {#connection-pooling} クライアントはコネクションプールを保持し、必要に応じてクエリ間でコネクションを再利用します。任意の時点で使用されるコネクション数は最大で `MaxOpenConns` までであり、プールの最大サイズは `MaxIdleConns` によって制御されます。クライアントは各クエリ実行時にプールからコネクションを取得し、実行後は再利用のためにプールへ戻します。1つのバッチの存続期間中は同じコネクションが使用され、`Send()` の呼び出し時に解放されます。 @@ -433,7 +424,6 @@ v, err := conn.ServerVersion() 追加の TLS パラメータが必要な場合は、アプリケーションコード側で `tls.Config` 構造体の該当フィールドを設定する必要があります。これには、特定の暗号スイートの指定、特定の TLS バージョン (1.2 や 1.3 など) の強制、内部 CA 証明書チェーンの追加、ClickHouse サーバーによって要求される場合のクライアント証明書 (および秘密鍵) の追加など、より高度なセキュリティ構成で利用されるほとんどのオプションが含まれます。 - ### 認証 {#authentication} 接続設定で `Auth` 構造体を指定し、ユーザー名とパスワードを設定します。 @@ -456,7 +446,6 @@ v, err := conn.ServerVersion() [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/auth.go) - ### 複数ノードへの接続 {#connecting-to-multiple-nodes} 複数のアドレスを `Addr` 構造体で指定できます。 @@ -510,7 +499,6 @@ if err != nil { [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/1c0d81d0b1388dbb9e09209e535667df212f4ae4/examples/clickhouse_api/multi_host.go#L50-L67) - ### 実行 {#execution} 任意のステートメントは `Exec` メソッドで実行できます。これは DDL や簡単なステートメントを実行する場合に便利です。大量データの挿入やクエリの反復実行には使用すべきではありません。 @@ -533,7 +521,6 @@ conn.Exec(context.Background(), "INSERT INTO example VALUES (1, 'test-1')") クエリに `Context` 
を渡せることに注意してください。これは、クエリごとの特定の設定を渡すために使用できます。詳しくは [Using Context](#using-context) を参照してください。 - ### バッチ挿入 {#batch-insert} 多数の行を挿入するには、クライアントはバッチ挿入用のセマンティクスを提供します。そのためには、行を追加していくためのバッチを事前に用意する必要があります。最終的にこのバッチは `Send()` メソッド経由で送信されます。バッチは `Send` が実行されるまでメモリ上に保持されます。 @@ -626,7 +613,6 @@ return batch.Send() 各カラム型ごとにサポートされる Go 型の一覧については、[型変換](#type-conversions) を参照してください。 - ### 行のクエリ実行 {#querying-rows} ユーザーは、`QueryRow` メソッドを使用して 1 行だけを取得するか、`Query` を使用して結果セットを反復処理するためのカーソルを取得できます。前者はシリアライズ結果の格納先を引数として受け取りますが、後者では各行に対して `Scan` を呼び出す必要があります。 @@ -677,7 +663,6 @@ return rows.Err() 最後に、`Query` および `QueryRow` メソッドに `Context` を渡すことができる点に注意してください。これはクエリレベルの設定に利用できます。詳細については [Using Context](#using-context) を参照してください。 - ### 非同期挿入 {#async-insert} 非同期挿入は Async メソッドで利用できます。これにより、クライアントがサーバー側で挿入処理の完了を待機するか、データが受信された時点で応答を返すかを指定できます。これは実質的にパラメータ [wait_for_async_insert](/operations/settings/settings#wait_for_async_insert) の挙動を制御します。 @@ -717,7 +702,6 @@ for i := 0; i < 100; i++ { [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/async.go) - ### カラム単位の挿入 {#columnar-insert} データはカラム形式で挿入できます。データがすでにこの構造で用意されている場合、行形式への変換が不要になるため、パフォーマンス上の利点が得られることがあります。 @@ -759,7 +743,6 @@ return batch.Send() [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/columnar_insert.go) - ### struct の使用 {#using-structs} ユーザーにとって、Go 言語の struct は ClickHouse における 1 行分のデータを論理的に表現する手段となります。これを支援するために、ネイティブインターフェイスはいくつかの便利な関数を提供しています。 @@ -786,7 +769,6 @@ for _, v := range result { [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/select_struct.go) - #### 構造体のスキャン {#scan-struct} `ScanStruct` を使用すると、クエリ結果の単一の Row を構造体にマッピングできます。 @@ -803,7 +785,6 @@ if err := conn.QueryRow(context.Background(), "SELECT Col1, COUNT() AS count FRO [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/scan_struct.go) - #### Append struct {#append-struct} `AppendStruct` 
を使用すると、既存の[バッチ](#batch-insert)に構造体を追加し、それを 1 行分の完全なレコードとして解釈できます。これには、構造体の列がテーブルの列と名前・型の両方で一致している必要があります。すべての列に対応する構造体フィールドが存在している必要がありますが、一部の構造体フィールドには対応する列が存在しない場合があります。そのようなフィールドは単に無視されます。 @@ -831,7 +812,6 @@ for i := 0; i < 1_000; i++ { [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/append_struct.go) - ### 型変換 {#type-conversions} このクライアントは、挿入およびレスポンスのマーシャリングの両方において、受け付ける変数の型に関して可能な限り柔軟であることを目指しています。ほとんどの場合、ClickHouse のカラム型には対応する Go 言語の型が存在します。例えば、[UInt64](/sql-reference/data-types/int-uint/) に対する [uint64](https://pkg.go.dev/builtin#uint64) などです。これらの論理的なマッピングは常にサポートされるべきです。ユーザーは、変数または受信データの変換が先に行われるのであれば、カラムへの挿入やレスポンスの受け取りに利用できる別の型を使用したい場合があります。クライアントは、ユーザーが挿入前にデータを厳密に変換する必要がないように、またクエリ実行時に柔軟なマーシャリングを提供できるように、これらの変換を透過的にサポートすることを目指しています。この透過的な変換では精度の損失は許可されません。例えば、`uint32` を使用して `UInt64` カラムからデータを受け取ることはできません。一方、フォーマット要件を満たしている限り、`string` を `datetime64` フィールドに挿入することは可能です。 @@ -900,7 +880,6 @@ rows.Close() [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/array.go) - #### Map {#map} Map 型の値は、キーと値が[前述](#type-conversions)の型ルールに従う Golang の map として挿入します。 @@ -946,7 +925,6 @@ rows.Close() [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/map.go) - #### Tuples {#tuples} Tuple は任意の長さのカラムのグループを表します。各カラムは明示的に名前を付けることも、型だけを指定することもできます(例: )。 @@ -1008,7 +986,6 @@ fmt.Printf("row: col1=%v, col2=%v, col3=%v\n", col1, col2, col3) 注意: 名前付きタプル内のサブカラムがすべて同じ型である場合、型付きスライスおよびマップがサポートされます。 - #### Nested {#nested} Nested フィールドは、名前付き Tuple の配列に相当します。利用方法は、ユーザーが [flatten_nested](/operations/settings/settings#flatten_nested) を 1 にしているか 0 にしているかによって異なります。 @@ -1117,7 +1094,6 @@ rows.Close() `flatten_nested` のデフォルト値 1 を使用すると、ネストされたカラムは個別の配列に展開されます。これには、挿入および取得時にネストしたスライスを使用する必要があります。任意の深さのネストも動作する可能性はありますが、これは公式にはサポートされていません。 - ```go conn, err := GetNativeConnection(nil, nil, nil) if err != nil { @@ -1187,7 +1163,6 @@ if err := batch.Send(); err != nil { 
よりシンプルなインターフェースとネストに対する公式なサポートがあるため、`flatten_nested=0` の使用を推奨します。 - #### Geo 型 {#geo-types} クライアントは Geo 型である Point、Ring、Polygon、Multi Polygon をサポートしています。これらのフィールドは、Go 言語ではパッケージ [github.com/paulmach/orb](https://github.com/paulmach/orb) を使用して表現されます。 @@ -1271,7 +1246,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&point, &ring, &polygo [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/geo.go) - #### UUID {#uuid} UUID 型は [github.com/google/uuid](https://github.com/google/uuid) パッケージでサポートされています。また、UUID は文字列、または `sql.Scanner` もしくは `Stringify` を実装する任意の型として送信およびマーシャリングすることもできます。 @@ -1317,7 +1291,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2); err != [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/uuid.go) - #### Decimal {#decimal} Decimal 型は [github.com/shopspring/decimal](https://github.com/shopspring/decimal) パッケージによってサポートされています。 @@ -1371,7 +1344,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v\n", col1, col2, col3, co [完全なサンプルコード](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/decimal.go) - #### Nullable {#nullable} `Nil` の Go 値は ClickHouse の `NULL` を表します。これはフィールドが `Nullable` として宣言されている場合に使用できます。挿入時には、非 Nullable のカラムと Nullable のカラムの両方に対して `Nil` を渡すことができます。前者の場合、その型のデフォルト値が永続化されます(例: `string` 型であれば空文字列)。後者の Nullable カラムの場合は、ClickHouse に `NULL` 値が保存されます。 @@ -1426,7 +1398,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2, &col3, & クライアントはこれに加えて、`sql.Null*` 型(例: `sql.NullInt64`)もサポートしています。これらは対応する ClickHouse の型と互換性があります。 - #### ビッグ整数 - Int128, Int256, UInt128, UInt256 {#big-ints---int128-int256-uint128-uint256} 64 ビットを超える数値型は、Go 標準の [big](https://pkg.go.dev/math/big) パッケージで表現されます。 @@ -1497,7 +1468,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v, col6=%v, col7=%v\n", co [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/big_int.go) - ### 圧縮 
{#compression} サポートされる圧縮方式は、使用する下位プロトコルに依存します。ネイティブプロトコルの場合、クライアントは `LZ4` と `ZSTD` 圧縮をサポートします。圧縮はブロックレベルでのみ行われます。接続に `Compression` 設定を含めることで圧縮を有効にできます。 @@ -1547,7 +1517,6 @@ if err := batch.Send(); err != nil { 標準インターフェースを HTTP 経由で使用する場合は、追加の圧縮方式を利用できます。詳細は [database/sql API - Compression](#compression) を参照してください。 - ### パラメータバインディング {#parameter-binding} クライアントは `Exec`、`Query`、`QueryRow` メソッドに対してパラメータバインディングをサポートします。次の例のように、名前付きパラメータ、番号付きパラメータ、位置パラメータを利用できます。以下でそれぞれの例を示します。 @@ -1576,7 +1545,6 @@ fmt.Printf("名前付きバインドのカウント: %d\n", count) [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/bind.go) - #### 特殊なケース {#special-cases} デフォルトでは、スライスをクエリのパラメータとして渡した場合、値のカンマ区切りリストに展開されます。角括弧 `[ ]` で囲まれた値の集合として埋め込みたい場合は、`ArraySet` を使用する必要があります。 @@ -1616,7 +1584,6 @@ fmt.Printf("NamedDate のカウント: %d\n", count) [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/bind_special.go) - ### コンテキストの利用 {#using-context} Go の context は、期限(デッドライン)、キャンセルシグナル、その他のリクエストスコープの値を API 境界をまたいで受け渡す手段を提供します。コネクションのすべてのメソッドは、最初の引数として context を受け取ります。前の例では `context.Background()` を使用していましたが、この仕組みを利用して設定やデッドラインを渡したり、クエリをキャンセルしたりできます。 @@ -1717,7 +1684,6 @@ for i := 1; i <= 6; i++ { [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/context.go) - ### 進捗 / プロファイル / ログ情報 {#progressprofilelog-information} クエリに対して、Progress、Profile、Log の情報を要求できます。Progress 情報は、ClickHouse 内で読み取りおよび処理された行数とバイト数に関する統計を報告します。一方、Profile 情報はクライアントに返されたデータの概要を提供し、(非圧縮の)バイト数、行数、およびブロック数の合計を含みます。最後に、Log 情報は、メモリ使用量やデータ処理速度などのスレッドに関する統計を提供します。 @@ -1749,7 +1715,6 @@ rows.Close() [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/progress.go) - ### 動的スキャン {#dynamic-scanning} 返されるフィールドのスキーマや型が分からないテーブルを読み取る必要がある場合があります。これは、アドホックなデータ分析を行う場合や、汎用的なツールを作成する場合によくあります。そのため、クエリのレスポンスには列の型情報が含まれています。これを Go のリフレクションと組み合わせることで、実行時に正しい型の変数インスタンスを生成し、それらを Scan に渡すことができます。 @@ -1788,7 +1753,6 @@ for rows.Next() { 
[完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/dynamic_scan_types.go) - ### 外部テーブル {#external-tables} [外部テーブル](/engines/table-engines/special/external-data/) を使用すると、クライアントは SELECT クエリとともにデータを ClickHouse に送信できます。このデータは一時テーブルに格納され、評価のためにクエリ自体の中で使用できます。 @@ -1855,7 +1819,6 @@ fmt.Printf("external_table_1 UNION external_table_2: %d\n", count) [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/external_data.go) - ### OpenTelemetry {#open-telemetry} ClickHouse では、ネイティブプロトコルの一部として [トレースコンテキスト](/operations/opentelemetry/) を渡せます。クライアントは、関数 `clickhouse.withSpan` を使用して Span を作成し、これを Context 経由で渡すことで、この機能を利用できます。 @@ -1878,7 +1841,6 @@ fmt.Printf("count: %d\n", count) トレーシングの活用方法の詳細については、[OpenTelemetry サポート](/operations/opentelemetry/)をご覧ください。 - ## Database/SQL API {#databasesql-api} `database/sql` や「標準」API は、標準インターフェイスに従うことで、アプリケーションコードを基盤となるデータベースに依存させずにクライアントを利用できるようにします。これは、追加の抽象化レイヤーや間接参照、さらに ClickHouse と必ずしも整合しないプリミティブを導入するというコストを伴います。しかし、ツールが複数のデータベースへ接続する必要があるようなシナリオでは、これらのコストは通常は許容可能です。 @@ -1927,7 +1889,6 @@ func ConnectDSN() error { **以降のすべてのサンプルでは、特に断りのない限り、ClickHouse の `conn` 変数はすでに作成されており、利用可能であるものとします。** - #### 接続設定 {#connection-settings-1} 以下のパラメータを DSN 文字列で指定できます: @@ -1966,7 +1927,6 @@ func ConnectSettings() error { [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect_settings.go) - #### 接続プーリング {#connection-pooling-1} ユーザーは、[複数ノードへの接続](#connecting-to-multiple-nodes)で説明されているように、提供されたノードアドレス一覧の使われ方を制御できます。ただし、接続管理およびプーリングは、設計上 `sql.DB` に委任されています。 @@ -2008,7 +1968,6 @@ func ConnectDSNHTTP() error { [完全なサンプルコード](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect_http.go) - #### 複数ノードへの接続 {#connecting-to-multiple-nodes-1} `OpenDB` を使用する場合は、ClickHouse API と同じオプション指定方法で複数のホストに接続し、必要に応じて `ConnOpenStrategy` を指定します。 @@ -2056,7 +2015,6 @@ func MultiStdHostDSN() error { 
[完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/multi_host.go) - ### TLS の使用 {#using-tls-1} DSN 接続文字列を使用する場合は、パラメータ `secure=true` によって SSL を有効化できます。`OpenDB` メソッドは、非 nil の TLS struct を指定するという点で、[TLS 用のネイティブ API](#using-tls) と同じアプローチを取ります。DSN 接続文字列では SSL 検証をスキップするためのパラメータ `skip_verify` がサポートされていますが、より高度な TLS 設定を行うには、設定を渡すことができる `OpenDB` メソッドを使用する必要があります。 @@ -2110,7 +2068,6 @@ func ConnectDSNSSL() error { [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/ssl.go) - ### 認証 {#authentication-1} `OpenDB` を使用する場合は、通常どおりオプションで認証情報を渡すことができます。DSN ベースの接続の場合、ユーザー名とパスワードは接続文字列内で指定できます。パラメーターとして渡すか、アドレスに資格情報としてエンコードして含めることができます。 @@ -2151,7 +2108,6 @@ func ConnectDSNAuth() error { [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/auth.go) - ### 実行 {#execution-1} 接続を取得したら、ユーザーは Exec メソッドで `sql` ステートメントを実行できます。 @@ -2174,7 +2130,6 @@ _, err = conn.Exec("INSERT INTO example VALUES (1, 'test-1')") このメソッドは context の受け取りをサポートしていません。デフォルトでは background context で実行されます。必要な場合は `ExecContext` を使用してください。詳しくは [Using Context](#using-context) を参照してください。 - ### バッチ挿入 {#batch-insert-1} バッチ挿入は、`Being` メソッドで `sql.Tx` を作成することで実現できます。そこから、`INSERT` 文を指定して `Prepare` メソッドを呼び出すことでバッチを取得できます。これにより `sql.Stmt` が返され、`Exec` メソッドを使って行を追加していくことができます。バッチは、元の `sql.Tx` に対して `Commit` が実行されるまでメモリ上に蓄積されます。 @@ -2209,7 +2164,6 @@ return scope.Commit() [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/batch.go) - ### 行のクエリ実行 {#querying-rows-1} 単一行のクエリは `QueryRow` メソッドを使って実行できます。これは *sql.Row を返し、その上で `Scan` をポインタを渡した変数に対して呼び出すことで、列の値をそれらの変数に詰め替えることができます。`QueryRowContext` バリアントを使用すると、バックグラウンド以外の context を渡すことができます。詳しくは [Using Context](#using-context) を参照してください。 @@ -2256,7 +2210,6 @@ for rows.Next() { [完全なコード例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/query_rows.go) - ### 非同期 Insert {#async-insert-1} 非同期 insert は、`ExecContext` メソッドで insert を実行することで実現できます。その際、以下の例のように非同期モードを有効にした context 
を渡す必要があります。これにより、クライアントがサーバーによる insert の完了を待つか、データが受信された時点で応答するかをユーザーが指定できるようになります。これは実質的にパラメータ [wait_for_async_insert](/operations/settings/settings#wait_for_async_insert) を制御します。 @@ -2288,7 +2241,6 @@ ctx := clickhouse.Context(context.Background(), clickhouse.WithStdAsync(false)) [完全な例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/async.go) - ### 列指向挿入 {#columnar-insert-1} 標準インターフェースではサポートされていません。 @@ -2352,7 +2304,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v", col1, col2, col3, col4 Insert の動作は ClickHouse API と同じです。 - ### 圧縮 {#compression-1} 標準 API は、ネイティブの [ClickHouse API](#compression) と同じ圧縮アルゴリズムをサポートしており、ブロック単位の `lz4` および `zstd` 圧縮を利用できます。さらに、HTTP 接続では gzip、deflate、br 圧縮もサポートされます。これらのいずれかが有効になっている場合、挿入時およびクエリレスポンスに対してブロック単位で圧縮が行われます。その他のリクエスト(例:ping リクエストやクエリの送信)は非圧縮のままです。これは `lz4` および `zstd` オプションの挙動と一貫しています。 @@ -2390,7 +2341,6 @@ conn, err := sql.Open("clickhouse", fmt.Sprintf("http://%s:%d?username=%s&passwo * `br` - `0` (最高速) から `11` (最高圧縮) * `zstd`, `lz4` - 無視されます - ### パラメータバインディング {#parameter-binding-1} 標準 API では [ClickHouse API](#parameter-binding) と同じパラメータバインディング機能がサポートされており、`Exec`、`Query`、`QueryRow` メソッド(およびそれらに対応する [Context](#using-context) 版)にパラメータを渡すことができます。位置指定パラメータ、名前付きパラメータ、および番号付きパラメータがサポートされています。 @@ -2421,7 +2371,6 @@ fmt.Printf("名前付きバインドのカウント: %d\n", count) なお、[特殊ケース](#special-cases) は引き続き有効です。 - ### コンテキストの使用 {#using-context-1} 標準 API は、[ClickHouse API](#using-context) と同様に、コンテキストを介して期限、キャンセルシグナル、その他のリクエストスコープの値を渡す機能をサポートします。ClickHouse API と異なり、これはメソッドの `Context` 付きバリアントを使用することで実現されます。つまり、デフォルトではバックグラウンドコンテキストを使用する `Exec` のようなメソッドには、最初の引数としてコンテキストを渡せる `ExecContext` というバリアントが用意されています。これにより、アプリケーションフローの任意の段階でコンテキストを渡すことができます。例えば、`ConnContext` を使用して接続を確立する際や、`QueryRowContext` を使用してクエリの行を取得する際にコンテキストを渡すことができます。利用可能なすべてのメソッドの例を以下に示します。 @@ -2509,7 +2458,6 @@ for rows.Next() { [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/context.go) - ### セッション {#sessions} 
ネイティブ接続では暗黙的にセッションが存在しますが、HTTP 経由の接続では、コンテキストを設定として渡すためにユーザーがセッション ID を作成する必要があります。これにより、セッションに紐づく一時テーブルなどの機能を利用できるようになります。 @@ -2571,7 +2519,6 @@ for rows.Next() { [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/session.go) - ### 動的スキャン {#dynamic-scanning-1} [ClickHouse API](#dynamic-scanning) と同様に、カラム型の情報を利用できるため、ユーザーは実行時に正しい型の変数のインスタンスを作成し、それを Scan に渡すことができます。これにより、事前に型が分からないカラムでも読み取ることが可能になります。 @@ -2611,7 +2558,6 @@ for rows.Next() { [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/dynamic_scan_types.go) - ### 外部テーブル {#external-tables-1} [外部テーブル](/engines/table-engines/special/external-data/) を使用すると、クライアントは `SELECT` クエリと一緒にデータを ClickHouse に送信できます。このデータは一時テーブルに格納され、クエリ内で評価に利用できます。 @@ -2678,7 +2624,6 @@ fmt.Printf("external_table_1 UNION external_table_2: %d\n", count) [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/external_data.go) - ### OpenTelemetry {#open-telemetry-1} ClickHouse では、ネイティブプロトコルの一部として [trace context](/operations/opentelemetry/) を渡すことが可能です。クライアントは、関数 `clickhouse.withSpan` を使用して Span を作成し、Context 経由で渡すことでこれを実現できます。この機能は、HTTP をトランスポートとして使用している場合にはサポートされません。 @@ -2699,7 +2644,6 @@ fmt.Printf("count: %d\n", count) [完全なサンプル](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/open_telemetry.go) - ## パフォーマンスのヒント {#performance-tips} * 可能な限り、特にプリミティブ型については ClickHouse API を利用してください。これにより、大きなオーバーヘッドを伴うリフレクションや間接参照を避けられます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx index 5b1a5dc04f8..5298dd622a6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx @@ -128,50 +128,50 @@ SSL 
認証は、本番環境ではトラブルシューティングが難しい | `setKeepAliveTimeout(long timeout, ChronoUnit unit)` | * `timeout` - ある時間単位で指定されたタイムアウト値。
- `unit` - `timeout` の時間単位 | HTTP 接続における Keep-Alive のタイムアウトを設定します。タイムアウトを `0` に設定することで、Keep-Alive を無効化することができます。

デフォルト: -
列挙型: `ClientConfigProperties.HTTP_KEEP_ALIVE_TIMEOUT`
キー: `http_keep_alive_timeout` | | `setConnectionReuseStrategy(ConnectionReuseStrategy strategy)` | - `strategy` - enum `com.clickhouse.client.api.ConnectionReuseStrategy` の定数値 | 接続プールが使用する再利用戦略を選択します。`LIFO` を選択すると、接続がプールに返却されるとすぐに再利用されます。`FIFO` を選択すると、利用可能になった順番で接続を使用します(返却された接続は直ちには再利用されません)。

デフォルト: `FIFO`
Enum: `ClientConfigProperties.CONNECTION_REUSE_STRATEGY`
Key: `connection_reuse_strategy` | | `setSocketTimeout(long timeout, ChronoUnit unit`)` | *`timeout`- タイムアウト値。ある時間単位で指定します。
-`unit`-`timeout`の時間単位 | ソケットの読み取りおよび書き込み処理に影響するタイムアウトを設定します

デフォルト:`0`
列挙型:`ClientConfigProperties.SOCKET_OPERATION_TIMEOUT`
キー:`socket_timeout` | - |`setSocketRcvbuf(long size)` | -`size`- サイズ(バイト単位) | TCP ソケットの受信バッファを設定します。このバッファは JVM が管理するメモリ領域の外側に確保されます。

デフォルト:`8196`
Enum:`ClientConfigProperties.SOCKET_RCVBUF_OPT`
Key:`socket_rcvbuf` | - |`setSocketSndbuf(long size)` | *`size`- バイト数 | TCP ソケットの受信バッファを設定します。このバッファは JVM メモリの外側に確保されます。

デフォルト:`8196`
Enum:`ClientConfigProperties.SOCKET_SNDBUF_OPT`
Key:`socket_sndbuf` | - |`setSocketKeepAlive(boolean value)` | -`value`- オプションを有効化するかどうかを示すフラグ。 | クライアントによって作成されるすべての TCP ソケットに対して、オプション`SO_KEEPALIVE`を設定します。TCP Keep-Alive は接続の生存状態を確認するメカニズムを有効にし、予期せず切断された接続を検出するのに役立ちます。

デフォルト: -
Enum:`ClientConfigProperties.SOCKET_KEEPALIVE_OPT`
Key:`socket_keepalive` | - |`setSocketTcpNodelay(boolean value)` | *`value`- オプションを有効にするかどうかを示すフラグ。 | クライアントによって作成されるすべての TCP ソケットに対してオプション`SO_NODELAY`を設定します。この TCP オプションにより、ソケットは可能な限り早くデータを送信します。

デフォルト: -
Enum:`ClientConfigProperties.SOCKET_TCP_NO_DELAY_OPT`
Key:`socket_tcp_nodelay` | - |`setSocketLinger(int secondsToWait)` | -`secondsToWait`- 待機する秒数。 | クライアントが作成するすべての TCP ソケットに対して linger 時間を設定します。

デフォルト: -
Enum:`ClientConfigProperties.SOCKET_LINGER_OPT`
Key:`socket_linger` | - |`compressServerResponse(boolean enabled)` | *`enabled`- オプションを有効化するかどうかを示すフラグ | サーバーがレスポンスを圧縮するかどうかを設定します。

デフォルト:`true`
列挙値:`ClientConfigProperties.COMPRESS_SERVER_RESPONSE`
キー:`compress` | - |`compressClientRequest(boolean enabled)` | -`enabled`- このオプションを有効化するかどうかを示すフラグ | クライアント側が送信リクエストを圧縮するかどうかを設定します。

デフォルト:`false`
Enum:`ClientConfigProperties.COMPRESS_CLIENT_REQUEST`
Key:`decompress` | - |`useHttpCompression(boolean enabled)` | *`enabled`- オプションを有効化するかどうかを示すフラグ | 対応するオプションが有効になっている場合に、クライアント/サーバー間の通信で HTTP 圧縮を使用するかどうかを設定します | - |`appCompressedData(boolean enabled)` | -`enabled`- オプションを有効化するかどうかを示すフラグ | クライアントに、圧縮はアプリケーション側で処理されることを通知します。

デフォルト:`false`
Enum:`ClientConfigProperties.APP_COMPRESSED_DATA`
Key:`app_compressed_data` | - |`setLZ4UncompressedBufferSize(int size)` | *`size`- サイズ(バイト単位) | 非圧縮データストリームの一部を受信するバッファのサイズを設定します。バッファサイズが不足している場合は新しいバッファが作成され、その旨の警告がログに出力されます。

Default:`65536`
Enum:`ClientConfigProperties.COMPRESSION_LZ4_UNCOMPRESSED_BUF_SIZE`
Key:`compression.lz4.uncompressed_buffer_size` | - |`disableNativeCompression` | -`disable` - オプションを無効にするかどうかを示すフラグ | ネイティブ圧縮を無効にします。`true`に設定すると、ネイティブ圧縮が無効になります。

デフォルト:`false`
Enum:`ClientConfigProperties.DISABLE_NATIVE_COMPRESSION`
Key:`disable_native_compression` | - |`setDefaultDatabase(String database)` | *`database`- データベースの名前 | 既定のデータベースを設定します。

既定値:`default`
列挙値:`ClientConfigProperties.DATABASE`
キー:`database` | - |`addProxy(ProxyType type, String host, int port)` | -`type`- プロキシの種類。
-`host`- プロキシのホスト名または IP アドレス。
-`port`- プロキシのポート。 | サーバーとの通信に使用するプロキシを設定します。プロキシで認証が必要な場合は、このプロキシ設定が必要です。

デフォルト: -
列挙型:`ClientConfigProperties.PROXY_TYPE`
キー:`proxy_type`

デフォルト: -
列挙型:`ClientConfigProperties.PROXY_HOST`
キー:`proxy_host`

デフォルト: -
列挙型:`ClientConfigProperties.PROXY_PORT`
キー:`proxy_port` | - |`setProxyCredentials(String user, String pass)` | *`user`- プロキシユーザー名。
-`pass`- パスワード | プロキシでの認証に使用するユーザー資格情報を設定します。

Default: -
Enum:`ClientConfigProperties.PROXY_USER`
Key:`proxy_user`

Default: -
Enum:`ClientConfigProperties.PROXY_PASSWORD`
Key:`proxy_password` | - |`setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | -`timeout`- タイムアウト値。
-`timeUnit`-`timeout`の時間単位 | クエリの最大実行時間を設定します

デフォルト:`0`
列挙型:`ClientConfigProperties.MAX_EXECUTION_TIME`
キー:`max_execution_time` | - |`setHttpCookiesEnabled(boolean enabled)` |`enabled`- オプションを有効にするかどうかを示すフラグ | HTTP クッキーを保持し、サーバーに再送信するかどうかを設定します。 | - |`setSSLTrustStore(String path)` |`path`- ローカル(クライアント側)システム上のファイルパス | サーバーホストの検証にクライアントが SSL トラストストアを使用するかどうかを設定します。

デフォルト: -
列挙値:`ClientConfigProperties.SSL_TRUST_STORE`
キー:`trust_store` | - |`setSSLTrustStorePassword(String password)` |`password`- シークレット値 |`setSSLTrustStore(String path)`で指定された SSL トラストストアのロック解除に使用するパスワードを設定します。

デフォルト: -
Enum:`ClientConfigProperties.SSL_KEY_STORE_PASSWORD`
Key:`key_store_password` | - |`setSSLTrustStoreType(String type)` |`type`- トラストストアの種類名 |`setSSLTrustStore(String path)`で指定されたトラストストアの型を設定します。

デフォルト値: -
列挙型:`ClientConfigProperties.SSL_KEYSTORE_TYPE`
キー:`key_store_type` | - |`setRootCertificate(String path)` |`path`- ローカル(クライアント側)システム上のファイルのパス | クライアントがサーバーホストを検証するために指定したルート (CA) 証明書を使用するかどうかを設定します。

デフォルト: -
列挙型:`ClientConfigProperties.CA_CERTIFICATE`
キー:`sslrootcert` | - |`setClientCertificate(String path)` |`path`- ローカル(クライアント側)システム上のファイルパス | SSL 接続の確立時および SSL 認証で使用するクライアント証明書のパスを設定します。

デフォルト: -
列挙型:`ClientConfigProperties.SSL_CERTIFICATE`
キー:`sslcert` | - |`setClientKey(String path)` |`path`- ローカル(クライアント側)システム上のファイルのパス | サーバーとの SSL 通信を暗号化するために使用するクライアントの秘密鍵を設定します。

デフォルト値: -
Enum:`ClientConfigProperties.SSL_KEY`
Key:`ssl_key` | - |`useServerTimeZone(boolean useServerTimeZone)` |`useServerTimeZone`- この設定を有効にするかどうかを示すフラグ | クライアントが DateTime および Date 列の値をデコードする際にサーバーのタイムゾーンを使用するかどうかを指定します。有効にした場合、サーバーのタイムゾーンは`setServerTimeZone(String timeZone)`で設定する必要があります。

デフォルト:`true`
Enum:`ClientConfigProperties.USE_SERVER_TIMEZONE`
Key:`use_server_time_zone` | - |`useTimeZone(String timeZone)` |`timeZone` - Java で有効なタイムゾーン ID を表す文字列値(`java.time.ZoneId`を参照) | 指定したタイムゾーンを、DateTime および Date 列の値をデコードする際に使用するかどうかを設定します。サーバー側のタイムゾーン設定を上書きします。

デフォルト: -
Enum:`ClientConfigProperties.USE_TIMEZONE`
Key:`use_time_zone` | - |`setServerTimeZone(String timeZone)` |`timeZone` - Java の有効なタイムゾーン ID を表す文字列値(`java.time.ZoneId`を参照) | サーバー側のタイムゾーンを設定します。デフォルトでは UTC タイムゾーンが使用されます。

デフォルト:`UTC`
列挙:`ClientConfigProperties.SERVER_TIMEZONE`
キー:`server_time_zone` | - |`useAsyncRequests(boolean async)` |`async`- オプションを有効化するかどうかを示すフラグ。 | クライアントがリクエストを別スレッドで実行するかどうかを設定します。アプリケーション側の方がマルチスレッド処理の設計を適切に行えること、またタスクを別スレッドで実行してもパフォーマンス向上には寄与しないことから、デフォルトでは無効になっています。

デフォルト:`false`
Enum:`ClientConfigProperties.ASYNC_OPERATIONS`
Key:`async` | - |`setSharedOperationExecutor(ExecutorService executorService)` |`executorService`- ExecutorService のインスタンス。 | 操作タスク用のexecutor serviceを設定します。

デフォルト:`none`
Enum:`none`
Key:`none` | - |`setClientNetworkBufferSize(int size)` | *`size`- バイト数 | ソケットとアプリケーション間でデータを往復コピーするために使用される、アプリケーションメモリ空間内のバッファのサイズを設定します。値を大きくすると TCP スタックへのシステムコールは減少しますが、接続ごとに消費されるメモリ量に影響します。接続が短時間で終了するため、このバッファも GC(ガーベジコレクション)の対象になります。また、連続した大きなメモリブロックの割り当てが問題になる可能性がある点にも注意してください。

Default:`300000`
Enum:`ClientConfigProperties.CLIENT_NETWORK_BUFFER_SIZE`
Key:`client_network_buffer_size`| - |`retryOnFailures(ClientFaultCause ...causes)` | -`causes`-`com.clickhouse.client.api.ClientFaultCause`の列挙型定数 | 再試行対象とする障害タイプを設定します。

既定値:`NoHttpResponse,ConnectTimeout,ConnectionRequestTimeout`
列挙定数:`ClientConfigProperties.CLIENT_RETRY_ON_FAILURE`
キー:`client_retry_on_failures` | - |`setMaxRetries(int maxRetries)` | *`maxRetries`- 再試行回数 |`retryOnFailures(ClientFaultCause ...causes)`で定義された失敗に対する最大再試行回数を設定します

デフォルト:`3`
列挙型:`ClientConfigProperties.RETRY_ON_FAILURE`
キー:`retry` | - |`allowBinaryReaderToReuseBuffers(boolean reuse)` | -`reuse`- このオプションを有効にするかどうかを示すフラグ | ほとんどのデータセットには、小さなバイト列としてエンコードされた数値データが含まれます。デフォルトでは、リーダーは必要なバッファーを割り当て、その中にデータを読み込み、その後ターゲットの`Number`クラスへ変換します。これは、多数の小さなオブジェクトが割り当ておよび解放されるため、GC に大きな負荷をかける可能性があります。このオプションを有効にすると、リーダーは事前に割り当てられたバッファーを使用して数値のトランスコードを行います。各リーダーは独自のバッファーセットを持ち、かつ各リーダーは単一スレッドからのみ使用されるため、安全に利用できます。 | - |`httpHeader(String key, String value)` | *`key`- HTTP ヘッダーのキー。
-`value`- ヘッダー値の文字列。 | 単一の HTTP ヘッダーに値を設定します。既存の値は上書きされます。

Default:`none`
Enum:`none`
Key:`none` | - |`httpHeader(String key, Collection values)` | -`key`- HTTP ヘッダーキー。
-`values`- 文字列の値のリスト。 | 1 つの HTTP ヘッダーの値を設定します。既存の値は上書きされます。

デフォルト:`none`
列挙型:`none`
キー:`none` | - |`httpHeaders(Map headers)` | *`header`- HTTP ヘッダーとその値のマップ。 | 複数の HTTP ヘッダー値をまとめて設定します。

Default:`none`
Enum:`none`
Key:`none` | - |`serverSetting(String name, String value)` | -`name`- クエリレベルの設定名。
-`value`- 設定値(文字列)。 | 各クエリとともにサーバーに渡す設定を指定します。個々の操作で設定された値によって上書きされる場合があります。設定の[一覧](/operations/settings/query-level)

Default:`none`
Enum:`none`
Key:`none` | - |`serverSetting(String name, Collection values)` | *`name`- クエリレベル設定の名前。
-`values`- 設定の文字列値。 | 各クエリに対してサーバーへ渡す設定を指定します。個々の操作ごとの設定によって上書きされる場合があります。設定の一覧は [List of settings](/operations/settings/query-level) を参照してください。このメソッドは、たとえば [roles](/interfaces/http#setting-role-with-query-parameters) のように複数の値を持つ設定を指定する場合に便利です。

Default:`none`
Enum:`none`
Key:`none` | - |`columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)`| -`strategy`- カラムとフィールドの対応付け戦略の実装 | DTO を登録する際に、DTO クラスのフィールドと DB カラムのマッピングに使用するカスタム戦略を設定します。

デフォルト:`none`
列挙:`none`
キー:`none` | - |`useHTTPBasicAuth(boolean useBasicAuth)` | *`useBasicAuth`- オプションを有効化するかどうかを示すフラグ | ユーザー名とパスワードによる認証に Basic HTTP 認証を使用するかどうかを設定します。デフォルトでは有効です。この認証方式を使用すると、HTTP ヘッダー経由では正しく送信できない特殊文字を含むパスワードに関する問題を解消できます。

Default:`true`
Enum:`ClientConfigProperties.HTTP_USE_BASIC_AUTH`
Key:`http_use_basic_auth` | - |`setClientName(String clientName)` | -`clientName` - アプリケーション名を表す文字列 | 呼び出し元アプリケーションに関する追加情報を設定します。この文字列はクライアント名としてサーバーに渡されます。HTTP プロトコルを使用する場合は、`User-Agent`ヘッダーとして送信されます。

デフォルト: -
列挙型:`ClientConfigProperties.CLIENT_NAME`
キー:`client_name` | - |`useBearerTokenAuth(String bearerToken)` | *`bearerToken`- エンコード済みのベアラートークン | Bearer 認証を使用するかどうかと、使用するトークンを指定します。トークンはそのまま送信されるため、このメソッドに渡す前にエンコードしておく必要があります。

デフォルト: -
Enum:`ClientConfigProperties.BEARERTOKEN_AUTH`
Key:`bearer_token` | - |`registerClientMetrics(Object registry, String name)` | -`registry`- Micrometer のレジストリインスタンス
-`name`- メトリクスグループの名前 | Micrometer ([https://micrometer.io/](https://micrometer.io/)) のレジストリインスタンスにセンサーを登録します。 | - |`setServerVersion(String version)` | *`version`- サーバーのバージョンを表す文字列値 | バージョン検出を行わせないためにサーバーのバージョンを設定します。

デフォルト: -
列挙型:`ClientConfigProperties.SERVER_VERSION`
キー:`server_version` | - |`typeHintMapping(Map typeHintMapping)` | -`typeHintMapping`- 型ヒントのマッピング | ClickHouse の型に対する型ヒントのマッピングを設定します。たとえば、多次元配列を独自の Array オブジェクトではなく、Java のコンテナー型として表現できるようにします。

デフォルト: -
Enum:`ClientConfigProperties.TYPE_HINT_MAPPING`
Key:`type_hint_mapping` | - |`sslSocketSNI(String sni)` | *`sni`- サーバー名を表す文字列値 | SSL/TLS 接続において SNI(Server Name Indication)として使用するサーバー名を設定します。

デフォルト: -
Enum:`ClientConfigProperties.SSL_SOCKET_SNI`
キー:`ssl_socket_sni` | + |`setSocketRcvbuf(long size)` | -`size`- サイズ(バイト単位) | TCP ソケットの受信バッファを設定します。このバッファは JVM が管理するメモリ領域の外側に確保されます。

デフォルト:`8196`
Enum:`ClientConfigProperties.SOCKET_RCVBUF_OPT`
Key:`socket_rcvbuf` | + |`setSocketSndbuf(long size)` | *`size`- バイト数 | TCP ソケットの送信バッファを設定します。このバッファは JVM メモリの外側に確保されます。

デフォルト:`8196`
Enum:`ClientConfigProperties.SOCKET_SNDBUF_OPT`
Key:`socket_sndbuf` | + |`setSocketKeepAlive(boolean value)` | -`value`- オプションを有効化するかどうかを示すフラグ。 | クライアントによって作成されるすべての TCP ソケットに対して、オプション`SO_KEEPALIVE`を設定します。TCP Keep-Alive は接続の生存状態を確認するメカニズムを有効にし、予期せず切断された接続を検出するのに役立ちます。

デフォルト: -
Enum:`ClientConfigProperties.SOCKET_KEEPALIVE_OPT`
Key:`socket_keepalive` | + |`setSocketTcpNodelay(boolean value)` | *`value`- オプションを有効にするかどうかを示すフラグ。 | クライアントによって作成されるすべての TCP ソケットに対してオプション`SO_NODELAY`を設定します。この TCP オプションにより、ソケットは可能な限り早くデータを送信します。

デフォルト: -
Enum:`ClientConfigProperties.SOCKET_TCP_NO_DELAY_OPT`
Key:`socket_tcp_nodelay` | + |`setSocketLinger(int secondsToWait)` | -`secondsToWait`- 待機する秒数。 | クライアントが作成するすべての TCP ソケットに対して linger 時間を設定します。

デフォルト: -
Enum:`ClientConfigProperties.SOCKET_LINGER_OPT`
Key:`socket_linger` | + |`compressServerResponse(boolean enabled)` | *`enabled`- オプションを有効化するかどうかを示すフラグ | サーバーがレスポンスを圧縮するかどうかを設定します。

デフォルト:`true`
列挙値:`ClientConfigProperties.COMPRESS_SERVER_RESPONSE`
キー:`compress` | + |`compressClientRequest(boolean enabled)` | -`enabled`- このオプションを有効化するかどうかを示すフラグ | クライアント側が送信リクエストを圧縮するかどうかを設定します。

デフォルト:`false`
Enum:`ClientConfigProperties.COMPRESS_CLIENT_REQUEST`
Key:`decompress` | + |`useHttpCompression(boolean enabled)` | *`enabled`- オプションを有効化するかどうかを示すフラグ | 対応するオプションが有効になっている場合に、クライアント/サーバー間の通信で HTTP 圧縮を使用するかどうかを設定します | + |`appCompressedData(boolean enabled)` | -`enabled`- オプションを有効化するかどうかを示すフラグ | クライアントに、圧縮はアプリケーション側で処理されることを通知します。

デフォルト:`false`
Enum:`ClientConfigProperties.APP_COMPRESSED_DATA`
Key:`app_compressed_data` | + |`setLZ4UncompressedBufferSize(int size)` | *`size`- サイズ(バイト単位) | 非圧縮データストリームの一部を受信するバッファのサイズを設定します。バッファサイズが不足している場合は新しいバッファが作成され、その旨の警告がログに出力されます。

Default:`65536`
Enum:`ClientConfigProperties.COMPRESSION_LZ4_UNCOMPRESSED_BUF_SIZE`
Key:`compression.lz4.uncompressed_buffer_size` | + |`disableNativeCompression` | -`disable` - オプションを無効にするかどうかを示すフラグ | ネイティブ圧縮を無効にします。`true`に設定すると、ネイティブ圧縮が無効になります。

デフォルト:`false`
Enum:`ClientConfigProperties.DISABLE_NATIVE_COMPRESSION`
Key:`disable_native_compression` | + |`setDefaultDatabase(String database)` | *`database`- データベースの名前 | 既定のデータベースを設定します。

既定値:`default`
列挙値:`ClientConfigProperties.DATABASE`
キー:`database` | + |`addProxy(ProxyType type, String host, int port)` | -`type`- プロキシの種類。
-`host`- プロキシのホスト名または IP アドレス。
-`port`- プロキシのポート。 | サーバーとの通信に使用するプロキシを設定します。プロキシで認証が必要な場合は、このプロキシ設定が必要です。

デフォルト: -
列挙型:`ClientConfigProperties.PROXY_TYPE`
キー:`proxy_type`

デフォルト: -
列挙型:`ClientConfigProperties.PROXY_HOST`
キー:`proxy_host`

デフォルト: -
列挙型:`ClientConfigProperties.PROXY_PORT`
キー:`proxy_port` | + |`setProxyCredentials(String user, String pass)` | *`user`- プロキシユーザー名。
-`pass`- パスワード | プロキシでの認証に使用するユーザー資格情報を設定します。

Default: -
Enum:`ClientConfigProperties.PROXY_USER`
Key:`proxy_user`

Default: -
Enum:`ClientConfigProperties.PROXY_PASSWORD`
Key:`proxy_password` | + |`setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | -`timeout`- タイムアウト値。
-`timeUnit`-`timeout`の時間単位 | クエリの最大実行時間を設定します

デフォルト:`0`
列挙型:`ClientConfigProperties.MAX_EXECUTION_TIME`
キー:`max_execution_time` | + |`setHttpCookiesEnabled(boolean enabled)` |`enabled`- オプションを有効にするかどうかを示すフラグ | HTTP クッキーを保持し、サーバーに再送信するかどうかを設定します。 | + |`setSSLTrustStore(String path)` |`path`- ローカル(クライアント側)システム上のファイルパス | サーバーホストの検証にクライアントが SSL トラストストアを使用するかどうかを設定します。

デフォルト: -
列挙値:`ClientConfigProperties.SSL_TRUST_STORE`
キー:`trust_store` | + |`setSSLTrustStorePassword(String password)` |`password`- シークレット値 |`setSSLTrustStore(String path)`で指定された SSL トラストストアのロック解除に使用するパスワードを設定します。

デフォルト: -
Enum:`ClientConfigProperties.SSL_KEY_STORE_PASSWORD`
Key:`key_store_password` | + |`setSSLTrustStoreType(String type)` |`type`- トラストストアの種類名 |`setSSLTrustStore(String path)`で指定されたトラストストアの型を設定します。

デフォルト値: -
列挙型:`ClientConfigProperties.SSL_KEYSTORE_TYPE`
キー:`key_store_type` | + |`setRootCertificate(String path)` |`path`- ローカル(クライアント側)システム上のファイルのパス | クライアントがサーバーホストを検証するために指定したルート (CA) 証明書を使用するかどうかを設定します。

デフォルト: -
列挙型:`ClientConfigProperties.CA_CERTIFICATE`
キー:`sslrootcert` | + |`setClientCertificate(String path)` |`path`- ローカル(クライアント側)システム上のファイルパス | SSL 接続の確立時および SSL 認証で使用するクライアント証明書のパスを設定します。

デフォルト: -
列挙型:`ClientConfigProperties.SSL_CERTIFICATE`
キー:`sslcert` | + |`setClientKey(String path)` |`path`- ローカル(クライアント側)システム上のファイルのパス | サーバーとの SSL 通信を暗号化するために使用するクライアントの秘密鍵を設定します。

デフォルト値: -
Enum:`ClientConfigProperties.SSL_KEY`
Key:`ssl_key` | + |`useServerTimeZone(boolean useServerTimeZone)` |`useServerTimeZone`- この設定を有効にするかどうかを示すフラグ | クライアントが DateTime および Date 列の値をデコードする際にサーバーのタイムゾーンを使用するかどうかを指定します。有効にした場合、サーバーのタイムゾーンは`setServerTimeZone(String timeZone)`で設定する必要があります。

デフォルト:`true`
Enum:`ClientConfigProperties.USE_SERVER_TIMEZONE`
Key:`use_server_time_zone` | + |`useTimeZone(String timeZone)` |`timeZone` - Java で有効なタイムゾーン ID を表す文字列値(`java.time.ZoneId`を参照) | 指定したタイムゾーンを、DateTime および Date 列の値をデコードする際に使用するかどうかを設定します。サーバー側のタイムゾーン設定を上書きします。

デフォルト: -
Enum:`ClientConfigProperties.USE_TIMEZONE`
Key:`use_time_zone` | + |`setServerTimeZone(String timeZone)` |`timeZone` - Java の有効なタイムゾーン ID を表す文字列値(`java.time.ZoneId`を参照) | サーバー側のタイムゾーンを設定します。デフォルトでは UTC タイムゾーンが使用されます。

デフォルト:`UTC`
列挙:`ClientConfigProperties.SERVER_TIMEZONE`
キー:`server_time_zone` | + |`useAsyncRequests(boolean async)` |`async`- オプションを有効化するかどうかを示すフラグ。 | クライアントがリクエストを別スレッドで実行するかどうかを設定します。アプリケーション側の方がマルチスレッド処理の設計を適切に行えること、またタスクを別スレッドで実行してもパフォーマンス向上には寄与しないことから、デフォルトでは無効になっています。

デフォルト:`false`
Enum:`ClientConfigProperties.ASYNC_OPERATIONS`
Key:`async` | + |`setSharedOperationExecutor(ExecutorService executorService)` |`executorService`- ExecutorService のインスタンス。 | 操作タスク用のexecutor serviceを設定します。

デフォルト:`none`
Enum:`none`
Key:`none` | + |`setClientNetworkBufferSize(int size)` | *`size`- バイト数 | ソケットとアプリケーション間でデータを往復コピーするために使用される、アプリケーションメモリ空間内のバッファのサイズを設定します。値を大きくすると TCP スタックへのシステムコールは減少しますが、接続ごとに消費されるメモリ量に影響します。接続が短時間で終了するため、このバッファも GC(ガーベジコレクション)の対象になります。また、連続した大きなメモリブロックの割り当てが問題になる可能性がある点にも注意してください。

Default:`300000`
Enum:`ClientConfigProperties.CLIENT_NETWORK_BUFFER_SIZE`
Key:`client_network_buffer_size`| + |`retryOnFailures(ClientFaultCause ...causes)` | -`causes`-`com.clickhouse.client.api.ClientFaultCause`の列挙型定数 | 再試行対象とする障害タイプを設定します。

既定値:`NoHttpResponse,ConnectTimeout,ConnectionRequestTimeout`
列挙定数:`ClientConfigProperties.CLIENT_RETRY_ON_FAILURE`
キー:`client_retry_on_failures` | + |`setMaxRetries(int maxRetries)` | *`maxRetries`- 再試行回数 |`retryOnFailures(ClientFaultCause ...causes)`で定義された失敗に対する最大再試行回数を設定します

デフォルト:`3`
列挙型:`ClientConfigProperties.RETRY_ON_FAILURE`
キー:`retry` | + |`allowBinaryReaderToReuseBuffers(boolean reuse)` | -`reuse`- このオプションを有効にするかどうかを示すフラグ | ほとんどのデータセットには、小さなバイト列としてエンコードされた数値データが含まれます。デフォルトでは、リーダーは必要なバッファーを割り当て、その中にデータを読み込み、その後ターゲットの`Number`クラスへ変換します。これは、多数の小さなオブジェクトが割り当ておよび解放されるため、GC に大きな負荷をかける可能性があります。このオプションを有効にすると、リーダーは事前に割り当てられたバッファーを使用して数値のトランスコードを行います。各リーダーは独自のバッファーセットを持ち、かつ各リーダーは単一スレッドからのみ使用されるため、安全に利用できます。 | + |`httpHeader(String key, String value)` | *`key`- HTTP ヘッダーのキー。
-`value`- ヘッダー値の文字列。 | 単一の HTTP ヘッダーに値を設定します。既存の値は上書きされます。

Default:`none`
Enum:`none`
Key:`none` | + |`httpHeader(String key, Collection values)` | -`key`- HTTP ヘッダーキー。
-`values`- 文字列の値のリスト。 | 1 つの HTTP ヘッダーの値を設定します。既存の値は上書きされます。

デフォルト:`none`
列挙型:`none`
キー:`none` | + |`httpHeaders(Map headers)` | *`headers`- HTTP ヘッダーとその値のマップ。 | 複数の HTTP ヘッダー値をまとめて設定します。

Default:`none`
Enum:`none`
Key:`none` | + |`serverSetting(String name, String value)` | -`name`- クエリレベルの設定名。
-`value`- 設定値(文字列)。 | 各クエリとともにサーバーに渡す設定を指定します。個々の操作で設定された値によって上書きされる場合があります。設定の[一覧](/operations/settings/query-level)

Default:`none`
Enum:`none`
Key:`none` | + |`serverSetting(String name, Collection values)` | *`name`- クエリレベル設定の名前。
-`values`- 設定の文字列値。 | 各クエリに対してサーバーへ渡す設定を指定します。個々の操作ごとの設定によって上書きされる場合があります。設定の一覧は [List of settings](/operations/settings/query-level) を参照してください。このメソッドは、たとえば [roles](/interfaces/http#setting-role-with-query-parameters) のように複数の値を持つ設定を指定する場合に便利です。

Default:`none`
Enum:`none`
Key:`none` | + |`columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)`| -`strategy`- カラムとフィールドの対応付け戦略の実装 | DTO を登録する際に、DTO クラスのフィールドと DB カラムのマッピングに使用するカスタム戦略を設定します。

デフォルト:`none`
列挙型:`none`
キー:`none` | + |`useHTTPBasicAuth(boolean useBasicAuth)` | *`useBasicAuth`- オプションを有効化するかどうかを示すフラグ | ユーザー名とパスワードによる認証に Basic HTTP 認証を使用するかどうかを設定します。デフォルトでは有効です。この認証方式を使用すると、HTTP ヘッダー経由では正しく送信できない特殊文字を含むパスワードに関する問題を解消できます。

Default:`true`
Enum:`ClientConfigProperties.HTTP_USE_BASIC_AUTH`
Key:`http_use_basic_auth` | + |`setClientName(String clientName)` | -`clientName` - アプリケーション名を表す文字列 | 呼び出し元アプリケーションに関する追加情報を設定します。この文字列はクライアント名としてサーバーに渡されます。HTTP プロトコルを使用する場合は、`User-Agent`ヘッダーとして送信されます。

デフォルト: -
列挙型:`ClientConfigProperties.CLIENT_NAME`
キー:`client_name` | + |`useBearerTokenAuth(String bearerToken)` | *`bearerToken`- エンコード済みのベアラートークン | Bearer 認証を使用するかどうかと、使用するトークンを指定します。トークンはそのまま送信されるため、このメソッドに渡す前にエンコードしておく必要があります。

デフォルト: -
Enum:`ClientConfigProperties.BEARERTOKEN_AUTH`
Key:`bearer_token` | + |`registerClientMetrics(Object registry, String name)` | -`registry`- Micrometer のレジストリインスタンス
-`name`- メトリクスグループの名前 | Micrometer ([https://micrometer.io/](https://micrometer.io/)) のレジストリインスタンスにセンサーを登録します。 | + |`setServerVersion(String version)` | *`version`- サーバーのバージョンを表す文字列値 | バージョン検出を行わせないためにサーバーのバージョンを設定します。

デフォルト: -
列挙型:`ClientConfigProperties.SERVER_VERSION`
キー:`server_version` | + |`typeHintMapping(Map typeHintMapping)` | -`typeHintMapping`- 型ヒントのマッピング | ClickHouse の型に対する型ヒントのマッピングを設定します。たとえば、多次元配列を独自の Array オブジェクトではなく、Java のコンテナー型として表現できるようにします。

デフォルト: -
Enum:`ClientConfigProperties.TYPE_HINT_MAPPING`
Key:`type_hint_mapping` | + |`sslSocketSNI(String sni)` | *`sni`- サーバー名を表す文字列値 | SSL/TLS 接続において SNI(Server Name Indication)として使用するサーバー名を設定します。

デフォルト: -
Enum:`ClientConfigProperties.SSL_SOCKET_SNI`
キー:`ssl_socket_sni` | ### サーバー設定 \{#server-settings\} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md index 2f15d1f0e1b..9aac6e9bf34 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md @@ -10,7 +10,6 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import CodeBlock from '@theme/CodeBlock'; - # Java クライアントの概要 {#java-clients-overview} - [Client 0.8+](./client/client.mdx) @@ -152,7 +151,6 @@ Java クライアントはロギングに [SLF4J](https://www.slf4j.org/) を使 ``` - #### ロギングの設定 {#configuring-logging} ロギングの設定方法は、使用しているロギングフレームワークによって異なります。たとえば `Logback` を使用している場合は、`logback.xml` という名前のファイルでロギングを設定できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md index d2105d53a60..b93b25eb5b6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md @@ -12,7 +12,6 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import CodeBlock from '@theme/CodeBlock'; - # R2DBC ドライバ {#r2dbc-driver} ## R2DBC ドライバー {#r2dbc-driver} @@ -42,7 +41,6 @@ ClickHouse 向け非同期 Java クライアントの [R2DBC](https://r2dbc.io/) ``` - ### ClickHouse に接続する {#connect-to-clickhouse} ```java showLineNumbers @@ -53,7 +51,6 @@ ConnectionFactory connectionFactory = ConnectionFactories .flatMapMany(connection -> connection ``` - ### クエリ {#query} ```java showLineNumbers @@ -71,7 +68,6 @@ connection .subscribe(); ``` - ### 挿入 {#insert} ```java showLineNumbers diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md index 4116340e78e..48c1dbd06e8 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md @@ -15,7 +15,6 @@ integration: import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # ClickHouse JS {#clickhouse-js} ClickHouse へ接続するための公式の JS クライアントです。 @@ -66,7 +65,6 @@ Web 版のインストール: npm i @clickhouse/client-web ``` - ## ClickHouse との互換性 {#compatibility-with-clickhouse} | クライアントのバージョン | ClickHouse | @@ -111,7 +109,6 @@ const client = createClient({ クライアントインスタンスは、生成時に[あらかじめ構成](./js.md#configuration)できます。 - #### 設定 {#configuration} クライアントインスタンスを作成する際、次の接続設定を調整できます: @@ -190,7 +187,6 @@ createClient({ }) ``` - ### 接続 {#connecting} #### 接続情報を確認する {#gather-your-connection-details} @@ -217,7 +213,6 @@ const client = createClient({ クライアントリポジトリには、[ClickHouse Cloud にテーブルを作成する](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/create_table_cloud.ts)、[非同期インサートを使用する](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/async_insert.ts) など、環境変数を使用するサンプルが複数含まれており、そのほかにも多数の例があります。 - #### 接続プール(Node.js のみ) {#connection-pool-nodejs-only} 各リクエストごとに接続を確立するオーバーヘッドを回避するため、クライアントは ClickHouse への接続を再利用するための接続プールを作成し、Keep-Alive メカニズムを利用します。デフォルトでは Keep-Alive は有効になっており、接続プールのサイズは `10` に設定されていますが、`max_open_connections` [設定オプション](./js.md#configuration)で変更できます。 @@ -257,7 +252,6 @@ interface BaseQueryParams { } ``` - ### クエリメソッド {#query-method} これは、`SELECT` のようなレスポンスを返すほとんどのステートメントや、`CREATE TABLE` のような DDL を送信する際に使用し、`await` して結果を受け取る必要があります。返された結果セットは、アプリケーション側で利用されることを前提としています。 @@ -285,7 +279,6 @@ interface ClickHouseClient { `query` 内で FORMAT 
句は指定せず、代わりに `format` パラメータを使用してください。 ::: - #### 結果セットおよび行の抽象化 {#result-set-and-row-abstractions} `ResultSet` は、アプリケーション内でのデータ処理を容易にするための、いくつかの便利なメソッドを提供します。 @@ -370,7 +363,6 @@ await new Promise((resolve, reject) => { **例:** (`Node.js` のみ) 従来の `on('data')` アプローチを使用して、クエリ結果を `CSV` 形式でストリーミングします。これは `for await const` 構文と置き換えて使用できます。 [ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_streaming_text_line_by_line.ts) - ```ts const resultSet = await client.query({ query: 'SELECT number FROM system.numbers_mt LIMIT 5', @@ -429,7 +421,6 @@ while (true) { } ``` - ### Insert メソッド {#insert-method} これはデータを挿入するための基本的なメソッドです。 @@ -451,7 +442,6 @@ interface ClickHouseClient { insert 文がサーバーに送信された場合、`executed` フラグは `true` になります。 - #### Node.js における insert メソッドとストリーミング {#insert-method-and-streaming-in-nodejs} `insert` メソッドに指定された [データ形式](./js.md#supported-data-formats) に応じて、`Stream.Readable` と通常の `Array` のいずれにも対応します。あわせて、[ファイルストリーミング](./js.md#streaming-files-nodejs-only) に関するセクションも参照してください。 @@ -554,7 +544,6 @@ await client.insert({ 詳細については[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_exclude_columns.ts)を参照してください。 - **例**: クライアントインスタンスで指定されたものとは異なるデータベースに `INSERT` する。[ソースコード](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_into_different_db.ts)。 ```ts @@ -565,7 +554,6 @@ await client.insert({ }) ``` - #### Web バージョンの制限事項 {#web-version-limitations} 現在、`@clickhouse/client-web` での insert 処理は `Array` と `JSON*` フォーマットでのみ動作します。 @@ -593,7 +581,6 @@ interface InsertParams extends BaseQueryParams { これは今後変更される可能性があります。あわせてこちらも参照してください: [すべてのクライアントメソッドに共通の基本パラメーター](./js.md#base-parameters-for-all-client-methods)。 - ### Command メソッド {#command-method} 出力を伴わないステートメント、`FORMAT` 句が適用できないステートメント、あるいはレスポンスにまったく関心がない場合に使用できます。このようなステートメントの例としては、`CREATE TABLE` や `ALTER TABLE` があります。 @@ -664,7 +651,6 @@ await client.command({ `abort_signal` によってリクエストがキャンセルされても、そのステートメントがサーバー側で実行されなかったことが保証されるわけではありません。 ::: - ### Exec 
メソッド {#exec-method} `query`/`insert` に収まらないカスタムクエリがあり、 @@ -705,7 +691,6 @@ export interface QueryResult { } ``` - ### Ping {#ping} 接続状態を確認するために用意されている `ping` メソッドは、サーバーに到達可能な場合は `true` を返します。 @@ -763,7 +748,6 @@ const result = await client.ping({ select: true, /* query_id、abort_signal、ht `ping` メソッドでは、標準的な `query` メソッドのパラメータのほとんどを指定できます。詳細は `PingParamsWithSelectQuery` の型定義を参照してください。 - ### Close(Node.js のみ) {#close-nodejs-only} 開いているすべての接続を閉じ、リソースを解放します。Web 版では何も行われません。 @@ -772,7 +756,6 @@ const result = await client.ping({ select: true, /* query_id、abort_signal、ht await client.close() ``` - ## ファイルのストリーミング(Node.js のみ) {#streaming-files-nodejs-only} クライアントのリポジトリには、一般的なデータ形式(NDJSON、CSV、Parquet)を用いたファイルストリーミングのサンプルコードがいくつか用意されています。 @@ -901,7 +884,6 @@ await client.insert({ ただし、`DateTime` や `DateTime64` の列を使用している場合は、文字列と JS Date オブジェクトの両方を利用できます。JS Date オブジェクトは、`date_time_input_format` を `best_effort` に設定した状態で、そのまま `insert` に渡すことができます。詳細については、この[サンプル](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_js_dates.ts)を参照してください。 - ### Decimal* 型の注意事項 {#decimal-types-caveats} `JSON*` 系のフォーマットを使用して Decimal 型の値を挿入できます。次のようにテーブルが定義されているとします: @@ -952,7 +934,6 @@ await client.query({ 詳しくは[この例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_decimals.ts)を参照してください。 - ### 整数型: Int64, Int128, Int256, UInt64, UInt128, UInt256 {#integral-types-int64-int128-int256-uint64-uint128-uint256} サーバーはこれらの値を数値として受け取ることができますが、これらの型の最大値は `Number.MAX_SAFE_INTEGER` よりも大きいため、整数オーバーフローを避ける目的で、`JSON*` ファミリーの出力フォーマットでは文字列として返されます。 @@ -982,7 +963,6 @@ const resultSet = await client.query({ expect(await resultSet.json()).toEqual([ { number: 0 } ]) ``` - ## ClickHouse 設定 {#clickhouse-settings} クライアントは [settings](/operations/settings/settings/) メカニズムを通じて ClickHouse の動作を調整できます。 @@ -1009,7 +989,6 @@ client.query({ クエリを実行するユーザーが、設定を変更するのに十分な権限を持っていることを確認してください。 ::: - ## 高度なトピック {#advanced-topics} ### パラメータ付きクエリ {#queries-with-parameters} @@ -1044,7 +1023,6 @@ await 
client.query({ 詳細については、[https://clickhouse.com/docs/interfaces/cli#cli-queries-with-parameters-syntax](https://clickhouse.com/docs/interfaces/cli#cli-queries-with-parameters-syntax) を参照してください。 - ### 圧縮 {#compression} 注意: リクエスト圧縮は現在 Web 版では利用できません。レスポンス圧縮は通常どおり動作します。Node.js 版は両方をサポートしています。 @@ -1065,7 +1043,6 @@ createClient({ * `response: true` は、ClickHouse サーバーに圧縮されたレスポンスボディで応答するよう指示します。デフォルト値: `response: false` * `request: true` は、クライアントから送信されるリクエストボディの圧縮を有効にします。デフォルト値: `request: false` - ### ロギング(Node.js のみ) {#logging-nodejs-only} :::important @@ -1123,7 +1100,6 @@ const client = createClient({ デフォルトの Logger 実装は[こちら](https://github.com/ClickHouse/clickhouse-js/blob/main/packages/client-common/src/logger.ts)で確認できます。 - ### TLS 証明書(Node.js のみ) {#tls-certificates-nodejs-only} Node.js クライアントは、オプションで基本(認証局のみ)および相互(認証局とクライアント証明書の両方)の TLS をサポートします。 @@ -1157,7 +1133,6 @@ const client = createClient({ リポジトリ内の [basic](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/basic_tls.ts) および [mutual](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/mutual_tls.ts) TLS の完全なサンプルコードを参照してください。 - ### Keep-alive configuration (Node.js only) {#keep-alive-configuration-nodejs-only} このクライアントは、基盤となる HTTP エージェントで Keep-Alive をデフォルトで有効化しており、これにより確立済みのソケットが後続のリクエストで再利用され、`Connection: keep-alive` ヘッダーが送信されます。アイドル状態のソケットは、デフォルトでは 2500 ミリ秒間接続プール内に保持されます(このオプションの調整に関する[注意事項](./js.md#adjusting-idle_socket_ttl)を参照してください)。 @@ -1189,7 +1164,6 @@ curl -v --data-binary "SELECT 1" この場合、`keep_alive_timeout` は 10 秒なので、アイドル中のソケットをデフォルトより少し長く開いたままにしておくために、`keep_alive.idle_socket_ttl` を 9000 や 9500 ミリ秒まで増やしてみることができます。「Socket hang-up」エラーが発生しないか注意して監視し、このエラーが、クライアントより先にサーバー側が接続を切断していることを示すので、エラーが出なくなるまで値を下げて調整してください。 - #### トラブルシューティング {#troubleshooting} 最新バージョンのクライアントを使用していても `socket hang up` エラーが発生する場合、この問題を解決するためには次のような選択肢があります。 @@ -1249,7 +1223,6 @@ const client = createClient({ `readonly=1` ユーザーの制限事項についてさらに詳しく説明している 
[例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/read_only_user.ts) を参照してください。 - ### パス名付きプロキシ {#proxy-with-a-pathname} ClickHouse インスタンスがプロキシの背後にあり、たとえば [http://proxy:8123/clickhouse_server](http://proxy:8123/clickhouse_server) のように URL にパス名が含まれている場合は、`pathname` 設定オプションとして `clickhouse_server` を指定してください(先頭のスラッシュの有無は問いません)。そうせずに `url` に直接含めた場合は、それが `database` オプションとして解釈されます。`/my_proxy/db` のように複数セグメントを含めることもできます。 @@ -1261,7 +1234,6 @@ const client = createClient({ }) ``` - ### 認証付きリバースプロキシ {#reverse-proxy-with-authentication} ClickHouse デプロイメントの前段に認証付きのリバースプロキシがある場合は、`http_headers` 設定を使用して、そのプロキシ側で必要なヘッダーを付与できます。 @@ -1274,7 +1246,6 @@ const client = createClient({ }) ``` - ### カスタム HTTP/HTTPS エージェント(実験的、Node.js のみ) {#custom-httphttps-agent-experimental-nodejs-only} :::warning @@ -1356,7 +1327,6 @@ const client = createClient({ 証明書とカスタムの *HTTPS* Agent を併用する場合、TLS ヘッダーと競合するため、`set_basic_auth_header` 設定(1.2.0 で導入)でデフォルトの Authorization ヘッダーを無効化する必要がある可能性があります。TLS 関連のヘッダーはすべて手動で指定する必要があります。 - ## 既知の制限事項 (Node.js/web) {#known-limitations-nodejsweb} - 結果セット用のデータマッパーは用意されておらず、言語のプリミティブ型のみが使用されます。特定のデータ型マッパーについては、[RowBinary フォーマットのサポート](https://github.com/ClickHouse/clickhouse-js/issues/216)を伴って追加が予定されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md index f4ec590bb50..3247c57d569 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md @@ -10,7 +10,6 @@ doc_type: 'guide' import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Moose OLAP を使用した ClickHouse 上での開発 {#developing-on-clickhouse-with-moose-olap} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md index 5aaa05bc898..829cdecee06 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md @@ -45,7 +45,6 @@ common.get_setting('invalid_setting_action') | http_buffer_size | 10MB | | HTTP ストリーミングクエリに使用されるインメモリバッファのサイズ(バイト単位)です。 | | preserve_pandas_datetime_resolution | False | True, False | True かつ pandas 2.x を使用している場合、datetime64/timedelta64 の dtype 解像度(例: 's', 'ms', 'us', 'ns')を保持します。False(または pandas <2.x の場合)は、互換性のためにナノ秒('ns')解像度に変換します。 | - ## 圧縮 {#compression} ClickHouse Connect は、クエリ結果および挿入の両方に対して lz4、zstd、brotli、gzip 圧縮をサポートします。圧縮を使用する場合、一般的にネットワーク帯域幅/転送速度と CPU 使用率(クライアントおよびサーバー双方)の間のトレードオフが発生することを常に念頭に置いてください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md index d99d0353230..e2c2f3c1ba4 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md @@ -31,7 +31,6 @@ assert qr[0][0] == 4 `InsertContext` には挿入処理中に更新される可変な状態が含まれるため、スレッドセーフではありません。 - ### 書き込みフォーマット {#write-formats} 書き込みフォーマットは、現在は限られた数の型に対してのみ実装されています。ほとんどの場合、ClickHouse Connect は、最初の(null でない)データ値の型を確認することで、その列に対して適切な書き込みフォーマットを自動的に判定しようとします。たとえば、`DateTime` 列に挿入する際に、その列の最初の挿入値が Python の整数であれば、ClickHouse Connect はそれが実際にはエポック秒であるとみなし、その整数値をそのまま挿入します。 @@ -97,7 +96,6 @@ df = pd.DataFrame({ client.insert_df("users", df) ``` - #### PyArrow テーブルへの挿入 {#pyarrow-table-insert} ```python @@ -115,7 +113,6 @@ arrow_table = pa.table({ client.insert_arrow("users", arrow_table) ``` - #### Arrow 
バックエンドを利用した DataFrame 挿入(pandas 2.x) {#arrow-backed-dataframe-insert-pandas-2} ```python @@ -134,7 +131,6 @@ df = pd.DataFrame({ client.insert_df_arrow("users", df) ``` - ### タイムゾーン {#time-zones} Python の `datetime.datetime` オブジェクトを ClickHouse の `DateTime` または `DateTime64` カラムに挿入する際、ClickHouse Connect はタイムゾーン情報を自動的に処理します。ClickHouse はすべての DateTime 値を内部的にはタイムゾーン情報を持たない Unix タイムスタンプ(エポックからの秒または小数秒)として保存するため、タイムゾーン変換は挿入時にクライアント側で自動的に行われます。 @@ -176,7 +172,6 @@ print(*results.result_rows, sep="\n") pytz を使用する場合、タイムゾーン情報のない(naive な)datetime にタイムゾーン情報を付与するには、`localize()` メソッドを使用する必要があります。`tzinfo=` を直接 datetime コンストラクタに渡すと、過去のオフセットが誤った値になります。UTC の場合は、`tzinfo=pytz.UTC` は正しく動作します。詳細は [pytz docs](https://pythonhosted.org/pytz/#localized-times-and-date-arithmetic) を参照してください。 ::: - #### タイムゾーン情報を持たない datetime オブジェクト {#timezone-naive-datetime-objects} タイムゾーン情報を持たない Python の `datetime.datetime` オブジェクト(`tzinfo` が設定されていないもの)を挿入すると、`.timestamp()` メソッドはそれをシステムのローカルタイムゾーンとして解釈します。曖昧さを避けるため、次のいずれかを推奨します。 @@ -202,7 +197,6 @@ epoch_timestamp = int(naive_time.replace(tzinfo=pytz.UTC).timestamp()) client.insert('events', [[epoch_timestamp]], column_names=['event_time']) ``` - #### タイムゾーンメタデータを持つ DateTime カラム {#datetime-columns-with-timezone-metadata} ClickHouse のカラムはタイムゾーンメタデータ付きで定義できます(例: `DateTime('America/Denver')` や `DateTime64(3, 'Asia/Tokyo')`)。このメタデータはデータの保存方法には影響せず(データは引き続き UTC タイムスタンプとして保存されます)、ClickHouse からデータをクエリする際に使用されるタイムゾーンを制御します。 @@ -232,7 +226,6 @@ print(*results.result_rows, sep="\n") # (datetime.datetime(2023, 6, 15, 7, 30, tzinfo=),) {#datetimedatetime2023-6-15-7-30-tzinfodsttzinfo-americalos_angeles-pdt-1-day-170000-dst} ``` - ## ファイルからの挿入 {#file-inserts} `clickhouse_connect.driver.tools` パッケージには、既存の ClickHouse テーブルへファイルシステムから直接データを挿入できる `insert_file` メソッドが含まれています。パース処理は ClickHouse サーバー側で行われます。`insert_file` は次のパラメータを受け取ります: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md index 7961cadbfbf..b66f3bc0531 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md @@ -31,7 +31,6 @@ assert result.result_set[1][0] == 'first_value2' `QueryContext` はスレッドセーフではありませんが、マルチスレッド環境で使用する場合は、`QueryContext.updated_copy` メソッドを呼び出してコピーを取得できます。 - ## ストリーミングクエリ {#streaming-queries} ClickHouse Connect Client は、ストリームとしてデータを取得するための複数のメソッド(Python のジェネレーターとして実装されています)を提供します。 @@ -76,7 +75,6 @@ with client.query_row_block_stream('SELECT pickup, dropoff, pickup_longitude, pi `StreamContext` の `source` プロパティを使用して、親の `QueryResult` オブジェクトにアクセスできます。`QueryResult` には、列名やデータ型が含まれています。 - ### ストリームの種類 {#stream-types} `query_column_block_stream` メソッドは、ブロックをネイティブな Python データ型として保存されたカラムデータのシーケンスとして返します。上記の `taxi_trips` クエリを使用した場合、返されるデータはリストになり、そのリストの各要素は、対応するカラムのすべてのデータを含む別のリスト(またはタプル)になります。したがって `block[0]` は文字列だけを含むタプルになります。カラム指向フォーマットは、合計料金の合算のように、そのカラム内のすべての値に対する集約処理を行う用途で最もよく使用されます。 @@ -101,7 +99,6 @@ with df_stream: 最後に、`query_arrow_stream` メソッドは、ClickHouse の `ArrowStream` 形式の結果を、`StreamContext` でラップされた `pyarrow.ipc.RecordBatchStreamReader` として返します。ストリームの各イテレーションでは、PyArrow の RecordBlock が返されます。 - ### ストリーミングの例 {#streaming-examples} #### 行のストリーミング {#stream-rows} @@ -122,7 +119,6 @@ with client.query_rows_stream("SELECT number, number * 2 as doubled FROM system. # .... ``` - #### 行ブロックのストリーミング {#stream-row-blocks} ```python @@ -139,7 +135,6 @@ with client.query_row_block_stream("SELECT number, number * 2 FROM system.number # Received block with 34591 rows ``` - #### Pandas の DataFrame をストリーミングする {#stream-pandas-dataframes} ```python @@ -166,7 +161,6 @@ with client.query_df_stream("SELECT number, toString(number) AS str FROM system. 
# 2 65411 65411 ``` - #### Arrow バッチのストリーミング {#stream-arrow-batches} ```python @@ -184,7 +178,6 @@ with client.query_arrow_stream("SELECT * FROM large_table") as stream: # Received Arrow batch with 34591 rows ``` - ## NumPy、Pandas、Arrow クエリ {#numpy-pandas-and-arrow-queries} ClickHouse Connect は、NumPy、Pandas、Arrow のデータ構造を扱うための専用クエリメソッドを提供します。これらのメソッドを使用すると、手動で変換することなく、クエリ結果をこれらの広く利用されているデータ形式で直接取得できます。 @@ -214,7 +207,6 @@ print(np_array) # [4 8]] {#4-8} ``` - ### Pandas クエリ {#pandas-queries} `query_df` メソッドは、ClickHouse Connect の `QueryResult` ではなく、Pandas の DataFrame としてクエリ結果を返します。 @@ -239,7 +231,6 @@ print(df) # 4 4 8 {#4-4-8} ``` - ### PyArrow クエリ {#pyarrow-queries} `query_arrow` メソッドは、クエリ結果を PyArrow テーブルとして返します。ClickHouse の `Arrow` フォーマットを直接利用するため、メインの `query` メソッドと共通する引数は `query`、`parameters`、`settings` の 3 つのみです。さらに、`use_strings` という追加の引数があり、Arrow テーブルが ClickHouse の String 型を文字列(True の場合)として扱うか、バイト列(False の場合)として扱うかを制御します。 @@ -266,7 +257,6 @@ print(arrow_table) # str: [["0","1","2"]] {#str-012} ``` - ### Arrow バックエンド DataFrame {#arrow-backed-dataframes} ClickHouse Connect は、`query_df_arrow` メソッドと `query_df_arrow_stream` メソッドを通じて、Arrow の結果から高速かつメモリ効率の高い DataFrame の作成をサポートします。これらは Arrow クエリメソッドの薄いラッパーであり、可能な場合にはゼロコピーで DataFrame に変換します。 @@ -316,7 +306,6 @@ with client.query_df_arrow_stream( # バッチを受信: 34591 行、データ型: [UInt64, String] ``` - #### 注意事項と補足 {#notes-and-caveats} - Arrow 型のマッピング: データを Arrow フォーマットで返す際、ClickHouse は型をサポートされている最も近い Arrow 型にマッピングします。一部の ClickHouse 型にはネイティブな Arrow の対応型がなく、その場合は Arrow フィールド内で生のバイト列として返されます(通常は `BINARY` または `FIXED_SIZE_BINARY`)。 @@ -366,7 +355,6 @@ print([int.from_bytes(n, byteorder="little") for n in df["int_128_col"].to_list( 重要なポイントは、アプリケーションコードは、選択した DataFrame ライブラリの機能と許容可能なパフォーマンス上のトレードオフに基づいて、これらの変換を処理しなければならないということです。DataFrame ネイティブな変換が利用できない場合は、純粋な Python ベースのアプローチが引き続き選択肢として残ります。 - ## 読み取りフォーマット {#read-formats} 読み取りフォーマットは、クライアントの `query`、`query_np`、`query_df` メソッドから返される値のデータ型を制御します(`raw_query` と `query_arrow` は ClickHouse 
から受信したデータを変更しないため、フォーマット制御は適用されません)。たとえば、UUID の読み取りフォーマットをデフォルトの `native` フォーマットから代替の `string` フォーマットに変更すると、UUID 型カラムに対する ClickHouse のクエリ結果は、Python の UUID オブジェクトではなく、(標準的な 8-4-4-4-12 の RFC 1422 形式を使用した)文字列値として返されます。 @@ -401,7 +389,6 @@ client.query('SELECT user_id, user_uuid, device_uuid from users', query_formats= client.query('SELECT device_id, dev_address, gw_address from devices', column_formats={'dev_address':'string'}) ``` - ### 読み取りフォーマットオプション(Python 型) {#read-format-options-python-types} | ClickHouse Type | Native Python Type | Read Formats | Comments | @@ -462,7 +449,6 @@ result = client.query('SELECT name, avg(rating) FROM directors INNER JOIN movies 追加の外部データファイルは、コンストラクタと同じパラメータを受け取る `add_file` メソッドを使用して、最初に作成した `ExternalData` オブジェクトに追加できます。HTTPの場合、すべての外部データは `multipart/form-data` によるファイルアップロードの一部として送信されます。 - ## タイムゾーン {#time-zones} ClickHouse の DateTime および DateTime64 値にタイムゾーンを適用する方法はいくつかあります。内部的には、ClickHouse サーバーはすべての DateTime および `DateTime64` オブジェクトを、「エポック (1970-01-01 00:00:00 UTC) からの経過秒数」を表すタイムゾーン情報を持たない数値として常に保存します。`DateTime64` 値の場合、その表現は精度に応じて、エポックからのミリ秒数、マイクロ秒数、またはナノ秒数になります。その結果、タイムゾーン情報の適用は常にクライアント側で行われます。これは無視できない追加計算を伴うため、性能が重要なアプリケーションでは、ユーザーへの表示や変換の場合を除き、DateTime 型はエポックタイムスタンプとして扱うことを推奨します (たとえば Pandas の Timestamps は、性能向上のため常にエポックナノ秒を表す 64 ビット整数です)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md index a2594f5f662..bb6764975c6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md @@ -77,7 +77,6 @@ if __name__ == '__main__': 同様に、データを [TabSeparated](/interfaces/formats/TabSeparated) やその他の形式で保存することもできます。利用可能なすべてのフォーマットの概要については、[入力および出力データのフォーマット](/interfaces/formats) を参照してください。 - ## 
マルチスレッド、マルチプロセス、および非同期/イベント駆動のユースケース {#multithreaded-multiprocess-and-asyncevent-driven-use-cases} ClickHouse Connect は、マルチスレッド、マルチプロセス、さらにイベントループ駆動/非同期アプリケーションでも良好に動作します。すべてのクエリおよび INSERT の処理は単一スレッド内で実行されるため、操作は一般的にスレッドセーフです。(一部の処理を低レベルで並列化し、単一スレッドであることに起因する性能上のペナルティを解消する将来的な拡張の可能性はありますが、その場合でもスレッドセーフであることは維持されます。) @@ -116,7 +115,6 @@ asyncio.run(main()) 関連項目: [run_async の例](https://github.com/ClickHouse/clickhouse-connect/blob/main/examples/run_async.py)。 - ## ClickHouse セッション ID の管理 {#managing-clickhouse-session-ids} 各 ClickHouse クエリは、ClickHouse の「セッション」コンテキスト内で実行されます。セッションは現在、次の 2 つの目的で使用されています。 @@ -142,7 +140,6 @@ client = clickhouse_connect.get_client(host='somehost.com', user='dbuser', passw この場合、ClickHouse Connect は `session_id` を送信せず、サーバーは個々のリクエストを同じセッションに属するものとして扱いません。一時テーブルおよびセッションレベルの設定は、リクエスト間で保持されません。 - ## HTTP コネクションプールのカスタマイズ {#customizing-the-http-connection-pool} ClickHouse Connect は、サーバーへの下位レベルの HTTP 接続を処理するために `urllib3` のコネクションプールを使用します。デフォルトでは、すべてのクライアントインスタンスが同じコネクションプールを共有しており、これはほとんどのユースケースに対して十分です。このデフォルトプールは、アプリケーションで使用される各 ClickHouse サーバーごとに最大 8 個の HTTP Keep-Alive 接続を維持します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md index 06f0503a0bc..46ecde6b685 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md @@ -118,7 +118,6 @@ print(client.database) # 出力: 'github' {#output-github} ``` - ## クライアントのライフサイクルとベストプラクティス {#client-lifecycle-and-best-practices} ClickHouse Connect クライアントの作成は、接続の確立、サーバーのメタデータの取得、設定の初期化などを伴う負荷の高い処理です。最適なパフォーマンスを得るために、次のベストプラクティスに従ってください。 @@ -158,7 +157,6 @@ for i in range(1000): client.close() ``` - ### マルチスレッドアプリケーション {#multi-threaded-applications} :::warning @@ -216,7 +214,6 @@ def worker(thread_id): 
client.close() ``` - ### 適切なクリーンアップ {#proper-cleanup} シャットダウン時には必ずクライアントを閉じてください。`client.close()` は、クライアントが自身のプールマネージャーを所有している場合(たとえばカスタム TLS/プロキシオプションを指定して作成した場合)にのみ、クライアントを解放し、プールされた HTTP 接続を閉じます。デフォルトの共有プールを使用している場合は、`client.close_connections()` を使用してソケットを明示的に閉じてください。そうしない場合でも、接続はアイドル時間の経過およびプロセス終了時に自動的にクリーンアップされます。 @@ -236,7 +233,6 @@ with clickhouse_connect.get_client(host='my-host', username='default', password= result = client.query('SELECT 1') ``` - ### 複数のクライアントを使用すべき場合 {#when-to-use-multiple-clients} 複数のクライアントが適切となるケース: @@ -283,7 +279,6 @@ WHERE date >= '2022-10-01 15:20:05' サーバー側バインディングは、ClickHouse サーバーでは `SELECT` クエリでのみサポートされています。`ALTER`、`DELETE`、`INSERT`、およびその他の種類のクエリでは動作しません。将来的に変更される可能性があります。詳細については [https://github.com/ClickHouse/ClickHouse/issues/42092](https://github.com/ClickHouse/ClickHouse/issues/42092) を参照してください。 ::: - #### クライアントサイドバインディング {#client-side-binding} ClickHouse Connect はクライアントサイドでのパラメータバインディングにも対応しており、テンプレート化された SQL クエリを生成する際に、より柔軟にクエリを生成できます。クライアントサイドバインディングでは、`parameters` 引数は辞書またはシーケンスである必要があります。クライアントサイドバインディングでは、パラメータ置換に Python の ["printf" 形式](https://docs.python.org/3/library/stdtypes.html#old-string-formatting)の文字列フォーマットを使用します。 @@ -348,7 +343,6 @@ DateTime64 引数(サブ秒精度を持つ ClickHouse の型)をバインド ::: - ### Settings 引数 {#settings-argument-1} すべての主要な ClickHouse Connect Client の「insert」および「select」メソッドは、指定した SQL ステートメントに対して ClickHouse サーバーの [ユーザー設定](/operations/settings/settings.md) を渡すための、省略可能な `settings` キーワード引数を受け取ります。`settings` 引数は辞書型である必要があります。各要素は ClickHouse の設定名と、その設定に対応する値です。値は、サーバーにクエリパラメータとして送信される際に文字列へ変換される点に注意してください。 @@ -364,7 +358,6 @@ settings = {'merge_tree_min_rows_for_concurrent_read': 65535, client.query("SELECT event_type, sum(timeout) FROM event_errors WHERE event_time > '2022-08-01'", settings=settings) ``` - ## Client `command` メソッド {#client-command-method} `Client.command` メソッドを使用して、通常はデータを返さない SQL クエリ、または完全なデータセットではなく単一のプリミティブ値もしくは配列値を返す SQL クエリを ClickHouse サーバーに送信します。このメソッドは次のパラメータを受け取ります: @@ -408,7 +401,6 @@ 
print(result) client.command("DROP TABLE test_command") ``` - #### 単一値を返すシンプルなクエリ {#simple-queries-returning-single-values} ```python @@ -427,7 +419,6 @@ print(version) # 出力: "25.8.2.29" {#output-258229} ``` - #### パラメーター付きコマンド {#commands-with-parameters} ```python @@ -449,7 +440,6 @@ result = client.command( ) ``` - #### 設定付きのコマンド {#commands-with-settings} ```python @@ -464,7 +454,6 @@ result = client.command( ) ``` - ## Client `query` Method {#client-query-method} `Client.query` メソッドは、ClickHouse サーバーから単一の「バッチ」データセットを取得するための主な手段です。HTTP 経由で Native ClickHouse フォーマットを利用して、大規模なデータセット(最大およそ 100 万行)を効率的に転送します。このメソッドは次のパラメータを受け取ります。 @@ -512,7 +501,6 @@ print([col_type.name for col_type in result.column_types]) # 出力: ['String', 'String'] {#output-string-string} ``` - #### クエリ結果へのアクセス {#accessing-query-results} ```python @@ -547,7 +535,6 @@ print(result.first_row) # 出力: (0, "0") {#output-0-0} ``` - #### クライアントサイドパラメータを使用したクエリ {#query-with-client-side-parameters} ```python @@ -566,7 +553,6 @@ parameters = ("system", 5) result = client.query(query, parameters=parameters) ``` - #### サーバー側パラメータを使ったクエリ {#query-with-server-side-parameters} ```python @@ -581,7 +567,6 @@ parameters = {"db": "system", "tbl": "query_log"} result = client.query(query, parameters=parameters) ``` - #### 設定付きクエリの実行 {#query-with-settings} ```python @@ -599,7 +584,6 @@ result = client.query( ) ``` - ### `QueryResult` オブジェクト {#the-queryresult-object} 基本的な `query` メソッドは、次の公開プロパティを持つ `QueryResult` オブジェクトを返します: @@ -675,7 +659,6 @@ data = [ client.insert("users", data, column_names=["id", "name", "age"]) ``` - #### カラム指向の挿入 {#column-oriented-insert} ```python @@ -693,7 +676,6 @@ data = [ client.insert("users", data, column_names=["id", "name", "age"], column_oriented=True) ``` - #### 明示的な列型指定による INSERT {#insert-with-explicit-column-types} ```python @@ -716,7 +698,6 @@ client.insert( ) ``` - #### 特定のデータベースに挿入する {#insert-into-specific-database} ```python @@ -738,7 +719,6 @@ client.insert( ) ``` - ## ファイルからの挿入 
{#file-inserts} ファイルから ClickHouse のテーブルに直接データを挿入する方法については、[高度な挿入(ファイルからの挿入)](advanced-inserting.md#file-inserts) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md index e42a13a66bb..c3f68a2d302 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md @@ -16,7 +16,6 @@ import CodeBlock from '@theme/CodeBlock'; import ConnectionDetails from '@site/i18n/jp/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # Introduction {#introduction} ClickHouse Connect は、幅広い Python アプリケーションとの相互運用性を提供する中核となるデータベースドライバーです。 @@ -88,7 +87,6 @@ import clickhouse_connect client = clickhouse_connect.get_client(host='localhost', username='default', password='password') ``` - #### ClickHouse Connect クライアントインスタンスを使用して ClickHouse Cloud サービスに接続します: {#use-a-clickhouse-connect-client-instance-to-connect-to-a-clickhouse-cloud-service} :::tip @@ -101,7 +99,6 @@ import clickhouse_connect client = clickhouse_connect.get_client(host='HOSTNAME.clickhouse.cloud', port=8443, username='default', password='your password') ``` - ### データベースを操作する {#interact-with-your-database} ClickHouse の SQL コマンドを実行するには、クライアントの `command` メソッドを使用します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md index 19fd3e4cea7..2241d398047 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md @@ -34,7 +34,6 @@ URL/クエリパラメータに関する注意: サポートされているオプションの全一覧については、以下のセクションにある [Connection 
arguments and Settings](driver-api.md#connection-arguments) を参照してください。これらは SQLAlchemy の DSN で指定することもできます。 - ## コアクエリ {#sqlalchemy-core-queries} このダイアレクトは、結合、フィルタリング、並べ替え、LIMIT/OFFSET、`DISTINCT` を伴う SQLAlchemy Core の `SELECT` クエリをサポートします。 @@ -68,7 +67,6 @@ with engine.begin() as conn: conn.execute(delete(users).where(users.c.name.like("%temp%"))) ``` - ## DDL とリフレクション {#sqlalchemy-ddl-reflection} 提供されている DDL ヘルパーと型/エンジンの構成要素を使用して、データベースおよびテーブルを作成できます。テーブルのリフレクション(カラム型やエンジンを含む)にも対応しています。 @@ -103,7 +101,6 @@ with engine.begin() as conn: 反映された列には、サーバー上に存在する場合、`clickhousedb_default_type`、`clickhousedb_codec_expression`、`clickhousedb_ttl_expression` などのダイアレクト固有の属性が含まれます。 - ## INSERT(Core と基本的な ORM) {#sqlalchemy-inserts} INSERT は、SQLAlchemy Core 経由だけでなく、利便性のためにシンプルな ORM モデルを使っても実行できます。 @@ -132,7 +129,6 @@ with Session(engine) as session: session.commit() ``` - ## 対象範囲と制限事項 {#scope-and-limitations} - 主な対象範囲: `SELECT` と `JOIN`(`INNER`、`LEFT OUTER`、`FULL OUTER`、`CROSS`)、`WHERE`、`ORDER BY`、`LIMIT`/`OFFSET`、`DISTINCT` などの SQLAlchemy Core 機能を利用できるようにすること。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md index 59e3fcd439c..1108c4a52af 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md @@ -37,7 +37,6 @@ clickhouse = { version = "0.12.2", features = ["test-util"] } あわせて [crates.io のページ](https://crates.io/crates/clickhouse) も参照してください。 - ## Cargo features {#cargo-features} * `lz4`(デフォルトで有効) — `Compression::Lz4` と `Compression::Lz4Hc(_)` バリアントを有効にします。有効な場合、`Compression::Lz4` は `WATCH` を除くすべてのクエリでデフォルトとして使用されます。 @@ -90,7 +89,6 @@ let client = Client::default() .with_database("test"); ``` - ### HTTPS または ClickHouse Cloud への接続 {#https-or-clickhouse-cloud-connection} HTTPS 接続は、`rustls-tls` または `native-tls` のいずれかの Cargo 
機能で動作します。 @@ -116,7 +114,6 @@ let client = Client::default() * クライアントリポジトリにある [ClickHouse Cloud を利用した HTTPS のサンプル](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/clickhouse_cloud.rs)。これはオンプレミス環境での HTTPS 接続にも利用できます。 - ### 行を選択する {#selecting-rows} ```rust @@ -152,7 +149,6 @@ while let Some(row) = cursor.next().await? { .. } 行を選択する際に `wait_end_of_query` を使用する場合は注意してください。サーバー側でのメモリ使用量が増加し、全体的なパフォーマンスが低下する可能性が高くなります。 ::: - ### 行を挿入する {#inserting-rows} ```rust @@ -175,7 +171,6 @@ insert.end().await?; * 行はネットワーク負荷を分散するために、ストリームとして順次送信されます。 * ClickHouse は、すべての行が同じパーティションに収まり、かつ行数が [`max_insert_block_size`](https://clickhouse.tech/docs/operations/settings/settings/#settings-max_insert_block_size) 未満である場合にのみ、バッチをアトミックに挿入します。 - ### 非同期挿入(サーバー側バッチ処理) {#async-insert-server-side-batching} 受信データをクライアント側でバッチ処理しないようにするには、[ClickHouse asynchronous inserts](/optimize/asynchronous-inserts) を利用できます。これは、`insert` メソッドに `async_insert` オプションを指定する(あるいは `Client` インスタンス自体に指定して、すべての `insert` 呼び出しに適用する)だけで実現できます。 @@ -191,7 +186,6 @@ let client = Client::default() * クライアントリポジトリの [非同期インサートの例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/async_insert.rs)。 - ### Inserter 機能(クライアント側バッチ処理) {#inserter-feature-client-side-batching} `inserter` Cargo フィーチャが必要です。 @@ -233,7 +227,6 @@ inserter.end().await?; ::: - ### DDL の実行 {#executing-ddls} シングルノードデプロイメント環境では、DDL は次のように実行するだけで十分です。 @@ -252,7 +245,6 @@ client .await?; ``` - ### ClickHouse の設定 {#clickhouse-settings} `with_option` メソッドを使用して、さまざまな [ClickHouse の設定](/operations/settings/settings) を適用できます。例: @@ -269,7 +261,6 @@ let numbers = client `query` だけでなく、`insert` および `inserter` メソッドでも同様に動作します。さらに、同じメソッドを `Client` インスタンスに対して呼び出すことで、すべてのクエリに適用されるグローバル設定を行うことができます。 - ### クエリ ID {#query-id} `.with_option` を使用すると、ClickHouse のクエリログでクエリを識別するための `query_id` オプションを設定できます。 @@ -290,7 +281,6 @@ let numbers = client 参考: クライアントリポジトリ内の [query_id のサンプル](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/query_id.rs) も参照してください。 
- ### セッション ID {#session-id} `query_id` と同様に、同じセッションでステートメントを実行するために `session_id` を設定できます。`session_id` はクライアントレベルでグローバルに設定することも、`query`、`insert`、`inserter` の各呼び出しごとに個別に設定することもできます。 @@ -307,7 +297,6 @@ let client = Client::default() 関連項目: クライアントリポジトリ内の [session_id の例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/session_id.rs) を参照してください。 - ### カスタム HTTP ヘッダー {#custom-http-headers} プロキシ認証を使用している場合やカスタムヘッダーを渡す必要がある場合は、次のように指定できます。 @@ -320,7 +309,6 @@ let client = Client::default() 参考: クライアントリポジトリ内の [カスタム HTTP ヘッダーの例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/custom_http_headers.rs) も参照してください。 - ### カスタム HTTP クライアント {#custom-http-client} これは、内部の HTTP 接続プールの設定を調整する際に役立ちます。 @@ -349,7 +337,6 @@ let client = Client::with_http_client(hyper_client).with_url("http://localhost:8 あわせて、クライアントリポジトリ内の [custom HTTP client example](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/custom_http_client.rs) も参照してください。 - ## データ型 {#data-types} :::info @@ -456,7 +443,6 @@ struct MyRow { } ``` - * `DateTime` は `u32` またはそれを包む newtype との間でマッピングされ、UNIX エポックからの経過秒数を表します。加えて、[`time::OffsetDateTime`](https://docs.rs/time/latest/time/struct.OffsetDateTime.html) も、`time` feature を必要とする `serde::time::datetime` を使用することでサポートされます。 ```rust @@ -535,7 +521,6 @@ struct MyRow { * `Variant`、`Dynamic`、(新しい)`JSON` データ型は現在まだサポートされていません。 - ## モック機能 {#mocking} このクレートは、ClickHouse サーバーのモックや DDL、`SELECT`、`INSERT`、`WATCH` クエリのテスト用ユーティリティを提供します。この機能は `test-util` フィーチャーを有効にすると利用できます。**開発時の依存関係(dev-dependency)としてのみ**使用してください。 @@ -580,7 +565,6 @@ struct EventLog { } ``` - ## 既知の制限事項 {#known-limitations} * `Variant`、`Dynamic`、(新しい)`JSON` データ型にはまだ対応していません。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md index 90d90530710..129658b35a6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md @@ -19,24 +19,17 @@ import datagrip_6 from '@site/static/images/integrations/sql-clients/datagrip-6. import datagrip_7 from '@site/static/images/integrations/sql-clients/datagrip-7.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # DataGrip から ClickHouse へ接続する {#connecting-datagrip-to-clickhouse} - - ## DataGrip の起動またはダウンロード {#start-or-download-datagrip} DataGrip は https://www.jetbrains.com/datagrip/ からダウンロードできます。 - - ## 1. 接続情報を確認する {#1-gather-your-connection-details} - - ## 2. ClickHouse ドライバを読み込む {#2-load-the-clickhouse-driver} 1. DataGrip を起動し、**Data Sources and Drivers** ダイアログの **Data Sources** タブで **+** アイコンをクリックします。 @@ -58,8 +51,6 @@ DataGrip は https://www.jetbrains.com/datagrip/ からダウンロードでき - - ## 3. ClickHouse に接続する {#3-connect-to-clickhouse} - データベース接続情報を入力し、**Test Connection** をクリックします。 @@ -79,8 +70,6 @@ JDBC URL の設定の詳細については、[ClickHouse JDBC driver](https://gi - - ## さらに詳しく {#learn-more} 詳しくは DataGrip のドキュメントを参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md index 914dcb17c41..19dc4af2f4d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md @@ -21,7 +21,6 @@ import dbeaver_sql_editor from '@site/static/images/integrations/sql-clients/dbe import dbeaver_query_log_select from '@site/static/images/integrations/sql-clients/dbeaver-query-log-select.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # DBeaver を ClickHouse に接続する {#connect-dbeaver-to-clickhouse} @@ -32,8 +31,6 @@ DBeaver には複数のエディションがあります。このガイドでは ClickHouse の `Nullable` 列に対するサポートが改善されているため、DBeaver バージョン 23.1.0 以降を使用してください。 ::: - - ## 1. 
ClickHouse の情報を確認する {#1-gather-your-clickhouse-details} DBeaver は JDBC を HTTP(S) 経由で使用して ClickHouse に接続します。接続には次の情報が必要です。 @@ -43,14 +40,10 @@ DBeaver は JDBC を HTTP(S) 経由で使用して ClickHouse に接続します - ユーザー名 - パスワード - - ## 2. DBeaver をダウンロードする {#2-download-dbeaver} DBeaver は https://dbeaver.io/download/ からダウンロードできます。 - - ## 3. データベースを追加する {#3-add-a-database} - **Database > New Database Connection** メニュー、または **Database Navigator** 内の **New Database Connection** アイコンを使用して、**Connect to a database** ダイアログを表示します。 @@ -79,8 +72,6 @@ DBeaver が ClickHouse ドライバーがインストールされていないこ - - ## 4. ClickHouse をクエリする {#4-query-clickhouse} クエリ エディタを開いてクエリを実行します。 @@ -93,8 +84,6 @@ DBeaver が ClickHouse ドライバーがインストールされていないこ - - ## 次のステップ {#next-steps} DBeaver の機能については [DBeaver wiki](https://github.com/dbeaver/dbeaver/wiki) を、ClickHouse の機能については [ClickHouse documentation](https://clickhouse.com/docs) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md index 1abbf5b2418..d156316b415 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md @@ -18,7 +18,6 @@ import dropdown_cell_chart from '@site/static/images/integrations/sql-clients/ma import run_app_view from '@site/static/images/integrations/sql-clients/marimo/run-app-view.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # ClickHouse で marimo を使う {#using-marimo-with-clickhouse} @@ -27,8 +26,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 1. SQL サポート付きの marimo をインストールする {#install-marimo-sql} ```shell @@ -38,7 +35,6 @@ marimo edit clickhouse_demo.py これにより、localhost を開いた Web ブラウザが起動します。 - ## 2. 
ClickHouse への接続 {#connect-to-clickhouse} marimo エディタ左側のデータソースパネルを開き、「Add database」をクリックします。 @@ -53,8 +49,6 @@ marimo エディタ左側のデータソースパネルを開き、「Add databa - - ## 3. SQL を実行する {#run-sql} 接続を設定したら、新しい SQL セルを作成し、ClickHouse エンジンを選択できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md index 2a07627e8f4..b1c5cbecac4 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md @@ -47,7 +47,6 @@ import adjust_axis_scale from '@site/static/images/cloud/sqlconsole/adjust-axis- import give_a_query_a_name from '@site/static/images/cloud/sqlconsole/give-a-query-a-name.png' import save_the_query from '@site/static/images/cloud/sqlconsole/save-the-query.png' - # SQL コンソール {#sql-console} SQL コンソールは、ClickHouse Cloud 上のデータベースを調査し、クエリを実行するための最速かつ最も簡単な方法です。SQL コンソールを使用すると、次のことができます。 @@ -57,8 +56,6 @@ SQL コンソールは、ClickHouse Cloud 上のデータベースを調査し - クエリを実行し、数回のクリックで結果データを可視化する - クエリをチームメンバーと共有し、より効率的に共同作業を行う - - ## テーブルの探索 {#exploring-tables} ### テーブル一覧とスキーマ情報の表示 {#viewing-table-list-and-schema-info} @@ -83,8 +80,6 @@ Cell Inspector ツールを使用すると、単一セル内に含まれる大 - - ## テーブルのフィルタリングとソート {#filtering-and-sorting-tables} ### テーブルをソートする {#sorting-a-table} @@ -125,8 +120,6 @@ SQL Console は、ソートとフィルタの設定をワンクリックでク SQL Console でのクエリの実行については、(link) のクエリに関するドキュメントを参照してください。 - - ## クエリの作成と実行 {#creating-and-running-a-query} ### クエリの作成 {#creating-a-query} @@ -182,8 +175,6 @@ SQL コンソールで新しいクエリを作成する方法は 2 つありま - - ## GenAI を使用したクエリ管理 {#using-genai-to-manage-queries} この機能を使用すると、ユーザーはクエリを自然言語の質問として記述でき、それに基づいてクエリコンソールが、利用可能なテーブルのコンテキストに沿った SQL クエリを生成します。GenAI はクエリのデバッグにも役立ちます。 @@ -294,8 +285,6 @@ UK Price Paid のサンプルデータセットをインポートし、それを 1. 
_+_ アイコンをクリックして新しいクエリを作成し、次のコードを貼り付けます: - - ```sql -- uk_price_paid の全トランザクションについて、年ごとの合計金額とトランザクション総数を表示する。 SELECT year(date), sum(pricee) as total_price, Count(*) as total_transactions @@ -310,7 +299,6 @@ UK Price Paid のサンプルデータセットをインポートし、それを GenAI は実験的な機能であることに注意してください。GenAI によって生成されたクエリをいかなるデータセットに対して実行する場合も、十分注意して実行してください。 - ## 高度なクエリ機能 {#advanced-querying-features} ### クエリ結果の検索 {#searching-query-results} @@ -339,8 +327,6 @@ GenAI は実験的な機能であることに注意してください。GenAI - - ## クエリデータの可視化 {#visualizing-query-data} 一部のデータは、チャート形式にするとより理解しやすくなります。SQL コンソールからクエリ結果データを直接利用し、数回クリックするだけで素早く可視化を作成できます。例として、NYC タクシー乗車の週次統計を計算するクエリを使用します。 @@ -401,7 +387,6 @@ SQL コンソールでは 10 種類のチャートタイプをサポートして - ## クエリの共有 {#sharing-queries} SQL コンソールでは、クエリをチームと共有できます。クエリを共有すると、チームの全メンバーがそのクエリを閲覧および編集できるようになります。共有クエリは、チームで共同作業を行うための有効な手段です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md index bd34fbfe9c6..2c907e4d4ec 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md @@ -17,29 +17,22 @@ import tablum_ch_2 from '@site/static/images/integrations/sql-clients/tablum-ch- import tablum_ch_3 from '@site/static/images/integrations/sql-clients/tablum-ch-3.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # TABLUM.IO を ClickHouse に接続する {#connecting-tablumio-to-clickhouse} - - ## TABLUM.IO のスタートページを開く {#open-the-tablumio-startup-page} :::note Linux サーバー上で Docker を使って、TABLUM.IO の自己ホスト版をインストールできます。 ::: - - ## 1. サービスにサインアップまたはログインする {#1-sign-up-or-sign-in-to-the-service} まず、メールアドレスで TABLUM.IO にサインアップするか、Google または Facebook アカウントを使ったクイックログインを行ってください。 - - ## 2. 
Add a ClickHouse connector {#2-add-a-clickhouse-connector} ClickHouse の接続情報を用意し、**Connector** タブに移動して、ホスト URL、ポート、ユーザー名、パスワード、データベース名、およびコネクタ名を入力します。すべての項目を入力したら、**Test connection** ボタンをクリックして接続情報を検証し、その後 **Save connector for me** をクリックしてコネクタ設定を保存します。 @@ -54,16 +47,12 @@ ClickHouse の接続情報を用意し、**Connector** タブに移動して、 - - ## 3. コネクタを選択する {#3-select-the-connector} **Dataset** タブに移動します。ドロップダウンメニューから、直前に作成した ClickHouse コネクタを選択します。右側のパネルには、利用可能なテーブルとスキーマの一覧が表示されます。 - - ## 4. SQL クエリを入力して実行する {#4-input-a-sql-query-and-run-it} SQL Console にクエリを入力し、**Run Query** をクリックします。結果はスプレッドシート形式で表示されます。 @@ -81,8 +70,6 @@ TABLUM.IO を使用すると、次のことができます。 * 結果を新しい ClickHouse データベースとして共有する ::: - - ## 詳細情報 {#learn-more} TABLUM.IO の詳細については https://tablum.io をご覧ください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md index 9451562a874..25cfae865e0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md @@ -9,7 +9,6 @@ doc_type: 'guide' import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Easypanel への ClickHouse のデプロイ {#deploying-clickhouse-on-easypanel} @@ -18,8 +17,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; [![Deploy to Easypanel](https://easypanel.io/img/deploy-on-easypanel-40.svg)](https://easypanel.io/docs/templates/clickhouse) - - ## 手順 {#instructions} 1. 
クラウドプロバイダー上で Ubuntu が動作する VM を作成します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md index 7ee249f1b0f..18465a5deb3 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md @@ -19,18 +19,13 @@ import retool_04 from '@site/static/images/integrations/tools/data-integration/r import retool_05 from '@site/static/images/integrations/tools/data-integration/retool/retool_05.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Retool を ClickHouse に接続する {#connecting-retool-to-clickhouse} - - ## 1. 接続情報を準備する {#1-gather-your-connection-details} - - ## 2. ClickHouse リソースを作成する {#2-create-a-clickhouse-resource} Retool アカウントにログインし、_Resources_ タブに移動します。「Create New」 -> 「Resource」を選択します: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md index 73548cc43df..537a84eae6d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md @@ -22,7 +22,6 @@ import splunk_011 from '@site/static/images/integrations/tools/data-integration/ import splunk_012 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_012.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # ClickHouse Cloud の監査ログを Splunk に保存する {#storing-clickhouse-cloud-audit-logs-into-splunk} @@ -33,12 +32,8 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; このアドオンにはモジュラー入力のみが含まれており、追加の UI は提供されません。 - - # インストール {#installation} - - ## Splunk Enterprise 向け 
{#for-splunk-enterprise} [Splunkbase](https://splunkbase.splunk.com/app/7709) から ClickHouse Cloud Audit Add-on for Splunk をダウンロードします。 @@ -55,8 +50,6 @@ Splunkbase からダウンロードしたアーカイブファイルを選択し インストールが正常に完了すると、ClickHouse Audit logs アプリケーションが表示されます。表示されない場合は、エラーがないか splunkd のログを確認してください。 - - # モジュラー入力の設定 {#modular-input-configuration} モジュラー入力を設定するには、まず ClickHouse Cloud デプロイメントから次の情報を取得する必要があります。 @@ -64,8 +57,6 @@ Splunkbase からダウンロードしたアーカイブファイルを選択し - 組織 ID - 管理者権限を持つ [API Key](/cloud/manage/openapi) - - ## ClickHouse Cloud から情報を取得する {#getting-information-from-clickhouse-cloud} [ClickHouse Cloud console](https://console.clickhouse.cloud/) にログインします。 @@ -86,8 +77,6 @@ API Key と secret を安全な場所に保存します。 - - ## Splunk でデータ入力を構成する {#configure-data-input-in-splunk} Splunk に戻り、Settings -> Data inputs に移動します。 @@ -108,8 +97,6 @@ ClickHouse Cloud Audit Logs のデータ入力を選択します。 データ入力の構成が完了したので、監査ログの参照を開始できます。 - - # 使用方法 {#usage} モジュラー入力はデータを Splunk に保存します。データを確認するには、Splunk の通常の検索ビューを使用できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md index b19957946e2..a89a6bd926c 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md @@ -7,16 +7,12 @@ title: 'Arrow Flight インターフェイス' doc_type: 'reference' --- - - # Apache Arrow Flight インターフェイス {#apache-arrow-flight-interface} ClickHouse は、Arrow IPC フォーマットを gRPC 上で利用して効率的なカラム型データ転送を行う、高性能な RPC フレームワークである [Apache Arrow Flight](https://arrow.apache.org/docs/format/Flight.html) プロトコルとの連携をサポートしています。 このインターフェイスにより、Flight SQL クライアントは ClickHouse に対してクエリを実行し、結果を Arrow フォーマットで取得できます。これにより、分析ワークロード向けに高スループットかつ低レイテンシなクエリ処理が可能になります。 - - ## 機能 {#features} * Arrow Flight SQL プロトコル経由で SQL クエリを実行 @@ -24,8 +20,6 @@ ClickHouse は、Arrow IPC フォーマットを gRPC 上で利用して効率 * Arrow Flight をサポートする BI ツールや独自のデータアプリケーションとの統合 * gRPC を用いた軽量かつ高性能な通信 - - ## 制限事項 {#limitations} 
Arrow Flight インターフェイスは現在、実験的な段階であり、活発に開発が進められています。既知の制限事項には次のようなものがあります。 @@ -36,8 +30,6 @@ Arrow Flight インターフェイスは現在、実験的な段階であり、 互換性の問題が発生した場合やコントリビュートを希望される場合は、ClickHouse リポジトリで[issue を作成](https://github.com/ClickHouse/ClickHouse/issues)してください。 - - ## Arrow Flight サーバーの実行 {#running-server} 自己管理の ClickHouse インスタンスで Arrow Flight サーバーを有効化するには、サーバー設定に次の構成を追加します。 @@ -54,7 +46,6 @@ ClickHouse サーバーを再起動します。起動に成功すると、次の {} Application: Arrow Flight互換プロトコル: 0.0.0.0:9005 ``` - ## Arrow Flight SQL を使用して ClickHouse に接続する {#connecting-to-clickhouse} Arrow Flight SQL をサポートする任意のクライアントを利用できます。たとえば、`pyarrow` を使う場合は次のとおりです。 @@ -70,7 +61,6 @@ for batch in reader: print(batch.to_pandas()) ``` - ## 互換性 {#compatibility} Arrow Flight インターフェースは、次のような技術スタックで構築されたカスタムアプリケーションを含め、Arrow Flight SQL をサポートするツールと互換性があります。 @@ -81,8 +71,6 @@ Arrow Flight インターフェースは、次のような技術スタックで 利用しているツール向けにネイティブな ClickHouse コネクタ(例: JDBC、ODBC)が利用可能な場合、パフォーマンスやフォーマット互換性の理由で Arrow Flight が明示的に必要な場合を除き、そちらを優先して使用してください。 - - ## クエリのキャンセル {#query-cancellation} 長時間実行中のクエリは、クライアント側で gRPC 接続を閉じることでキャンセルできます。より高度なキャンセル機能のサポートの追加が計画されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md index 743e873d7e3..80e56f5a86d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/cli.md @@ -20,7 +20,6 @@ ClickHouse は、ClickHouse サーバーに対して直接 SQL クエリを実 このクライアントでは、プログレスバーや読み取られた行数、処理されたバイト数、クエリの実行時間などを通じて、クエリの実行状況をリアルタイムに確認できます。 [コマンドラインオプション](#command-line-options)と[設定ファイル](#configuration_files)の両方をサポートします。 - ## インストール {#install} ClickHouse をダウンロードするには、次のコマンドを実行します。 @@ -39,7 +38,6 @@ sudo ./clickhouse install クライアントとサーバーのバージョンが異なっていても互換性はありますが、古いクライアントでは一部の機能が利用できない場合があります。クライアントとサーバーには同じバージョンを使用することを推奨します。 - ## 実行 {#run} :::note @@ -71,7 +69,6 @@ Connected to ClickHouse server version 24.12.2. 
コマンドラインオプションの完全な一覧については、[Command Line Options](#command-line-options) を参照してください。 - ### ClickHouse Cloud への接続 {#connecting-cloud} ClickHouse Cloud サービスの詳細は、ClickHouse Cloud コンソールで確認できます。接続したいサービスを選択し、**Connect** をクリックします。 @@ -123,7 +120,6 @@ ClickHouse Cloud サービスの詳細は、ClickHouse Cloud コンソールで クエリ構文に焦点を当てるため、以降の例では接続情報(`--host`、`--port` など)を省略しています。実際にコマンドを使用する際は、必ずこれらを指定してください。 ::: - ## インタラクティブモード {#interactive-mode} ### インタラクティブモードを使用する {#using-interactive-mode} @@ -168,7 +164,6 @@ ClickHouse Client は `replxx`(`readline` に類似)をベースとしてい * `q`、`Q` または `:q` * `logout` または `logout;` - ### クエリ処理情報 {#processing-info} クエリを処理するとき、クライアントは次の情報を表示します。 @@ -247,7 +242,6 @@ $ echo "Hello\nGoodbye" | clickhouse-client --query "INSERT INTO messages FORMAT `--query` が指定されている場合、入力された内容は改行文字の後にリクエストへ追加されます。 - ### リモート ClickHouse サービスに CSV ファイルを挿入する {#cloud-example} この例では、サンプルデータセットの CSV ファイル `cell_towers.csv` を、`default` データベース内の既存のテーブル `cell_towers` に挿入します。 @@ -261,7 +255,6 @@ clickhouse-client --host HOSTNAME.clickhouse.cloud \ < cell_towers.csv ``` - ### コマンドラインからデータを挿入する例 {#more-examples} コマンドラインからデータを挿入する方法はいくつかあります。 @@ -290,7 +283,6 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA バッチモードでは、デフォルトのデータ[フォーマット](formats.md)は `TabSeparated` です。 上の例に示したように、クエリの `FORMAT` 句でフォーマットを指定できます。 - ## パラメーター付きクエリ {#cli-queries-with-parameters} クエリ内でパラメーターを指定し、コマンドラインオプションを使って値を渡すことができます。 @@ -333,7 +325,6 @@ Query id: 0358a729-7bbe-4191-bb48-29b063c548a7 1 row in set. Elapsed: 0.006 sec. ``` - ### クエリ構文 {#cli-queries-with-parameters-syntax} クエリ内で、コマンドライン引数で指定したい値は、次の形式で中かっこで囲んで記述します。 @@ -347,7 +338,6 @@ Query id: 0358a729-7bbe-4191-bb48-29b063c548a7 | `name` | プレースホルダー用の識別子。対応するコマンドラインオプションは `--param_=value` です。 | | `data type` | パラメータの[データ型](../sql-reference/data-types/index.md)。

たとえば、`(integer, ('string', integer))` のようなデータ構造は、`Tuple(UInt8, Tuple(String, UInt8))` 型を持つことができます(他の[整数](../sql-reference/data-types/int-uint.md)型も使用できます)。

テーブル名、データベース名、カラム名をパラメータとして渡すことも可能であり、その場合はデータ型として `Identifier` を使用する必要があります。 | - ### 使用例 {#cli-queries-with-parameters-examples} ```bash @@ -358,7 +348,6 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe --query "SELECT {col:Identifier} as {alias:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10" ``` - ## AI を活用した SQL 生成 {#ai-sql-generation} ClickHouse クライアントには、自然言語による説明から SQL クエリを生成するための AI 支援機能が組み込まれています。この機能により、ユーザーは高度な SQL の知識がなくても複雑なクエリを作成できます。 @@ -379,7 +368,6 @@ AI は次のことを行います: 2. 把握したテーブルやカラムに基づいて、適切な SQL を生成します 3. 生成したクエリを直ちに実行します - ### 例 {#ai-sql-generation-example} ```bash @@ -413,7 +401,6 @@ GROUP BY c.name ORDER BY order_count DESC ``` - ### 設定 {#ai-sql-generation-configuration} AI による SQL 生成を行うには、ClickHouse Client の設定ファイルで AI プロバイダーを構成する必要があります。OpenAI、Anthropic、または OpenAI 互換の API サービスを使用できます。 @@ -438,7 +425,6 @@ export ANTHROPIC_API_KEY=your-anthropic-key clickhouse-client ``` - #### 設定ファイル {#ai-sql-generation-configuration-file} AI 設定をより細かく制御するには、次の場所にある ClickHouse Client の設定ファイルで設定します: @@ -543,7 +529,6 @@ ai: model: gpt-3.5-turbo ``` - ### パラメーター {#ai-sql-generation-parameters}
@@ -650,7 +635,6 @@ clickhouse:[//[user[:password]@][hosts_and_ports]][/database][?query_parameters] | `database` | データベース名。 | `default` | | `query_parameters` | キーと値のペアのリスト `param1=value1[,¶m2=value2], ...`。一部のパラメータでは値を指定する必要はありません。パラメータ名と値は大文字・小文字が区別されます。 | - | - ### 注意事項 {#connection-string-notes} ユーザー名、パスワード、またはデータベースを接続文字列で指定している場合、`--user`、`--password`、`--database` で再度指定することはできません(その逆も同様です)。 @@ -685,7 +669,6 @@ ClickHouse クライアントは、これらのホストに左から右の順番 * `database` * `query parameters` - ### 例 {#connection_string_examples} `localhost` のポート 9000 に接続し、クエリ `SELECT 1` を実行します。 @@ -766,7 +749,6 @@ clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000 clickhouse-client clickhouse://192.168.1.15,192.168.1.25 ``` - ## クエリ ID の形式 {#query-id-format} インタラクティブモードでは、ClickHouse Client は各クエリに対してクエリ ID を表示します。既定では、ID は次のような形式です。 @@ -794,7 +776,6 @@ clickhouse-client clickhouse://192.168.1.15,192.168.1.25 speedscope:http://speedscope-host/#profileURL=qp%3Fid%3Dc8ecc783-e753-4b38-97f1-42cddfb98b7d ``` - ## 設定ファイル {#configuration_files} ClickHouse Client は、次のうち最初に存在するファイルを使用します。 @@ -895,7 +876,6 @@ $ clickhouse-client --max_threads 1 設定の一覧は [Settings](../operations/settings/settings.md) を参照してください。 - ### フォーマットオプション {#command-line-options-formatting} | オプション | 説明 | デフォルト | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md index 67ed5a8c56a..06926a4aab8 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md @@ -72,7 +72,6 @@ ClickHouse テーブル列のデータ型は、対応する Arrow データフ $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow" ``` - ### データの選択 {#selecting-data} 次のコマンドを使用して、ClickHouse のテーブルからデータを抽出し、Arrow 形式のファイルに保存できます。 @@ -81,7 +80,6 @@ $ cat filename.arrow | clickhouse-client --query="INSERT INTO 
some_table FORMAT $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filename.arrow} ``` - ## フォーマット設定 {#format-settings} | 設定 | 説明 | デフォルト | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md index 18add860eda..3805c15f6ef 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md @@ -15,7 +15,6 @@ import DataTypeMapping from './_snippets/data-types-matching.md' | -- | -- | ----- | | ✔ | ✔ | | - ## 説明 {#description} [Apache Avro](https://avro.apache.org/) は、効率的なデータ処理のためにバイナリエンコーディングを使用する行指向のシリアル化フォーマットです。`Avro` フォーマットは、[Avro データファイル](https://avro.apache.org/docs/++version++/specification/#object-container-files) の読み書きをサポートします。このフォーマットは、スキーマを埋め込んだ自己記述型のメッセージを前提としています。Avro をスキーマレジストリと併用している場合は、[`AvroConfluent`](./AvroConfluent.md) フォーマットを参照してください。 @@ -54,7 +53,6 @@ ClickHouse テーブルのカラムのデータ型は、挿入される Avro デ データをインポートする際、スキーマ内でフィールドが見つからず、設定 [`input_format_avro_allow_missing_fields`](/operations/settings/settings-formats.md/#input_format_avro_allow_missing_fields) が有効になっている場合は、エラーを発生させる代わりにデフォルト値が使用されます。 - ### Avro データの書き込み {#writing-avro-data} ClickHouse テーブルのデータを Avro ファイルに書き出すには、次のようにします。 @@ -70,7 +68,6 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro Avro ファイルの出力圧縮と同期間隔は、それぞれ [`output_format_avro_codec`](/operations/settings/settings-formats.md/#output_format_avro_codec) および [`output_format_avro_sync_interval`](/operations/settings/settings-formats.md/#output_format_avro_sync_interval) 設定を使用して構成できます。 - ### Avro スキーマの推論 {#inferring-the-avro-schema} ClickHouse の [`DESCRIBE`](/sql-reference/statements/describe-table) 関数を使用すると、次の例のように Avro ファイルの推論されたスキーマをすばやく確認できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md index cb2a879932a..3a924717c6f 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md @@ -15,7 +15,6 @@ import DataTypesMatching from './_snippets/data-types-matching.md' | -- | -- | ----- | | ✔ | ✗ | | - ## 説明 {#description} [Apache Avro](https://avro.apache.org/) は、効率的なデータ処理のためにバイナリエンコードを使用する行指向のシリアル化フォーマットです。`AvroConfluent` フォーマットは、[Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html)(またはその API 互換サービス)を用いてシリアル化された、単一オブジェクト形式の Avro でエンコードされた Kafka メッセージのデコードをサポートします。 @@ -61,7 +60,6 @@ format_avro_schema_registry_url = 'http://schema-registry-url'; SELECT * FROM topic1_stream; ``` - #### Basic 認証の使用 {#using-basic-authentication} スキーマレジストリが Basic 認証を必要とする場合(例:Confluent Cloud を使用している場合)、`format_avro_schema_registry_url` 設定に URL エンコード済みの認証情報を指定できます。 @@ -81,7 +79,6 @@ kafka_format = 'AvroConfluent', format_avro_schema_registry_url = 'https://:@schema-registry-url'; ``` - ## トラブルシューティング {#troubleshooting} インジェスト処理の進行状況を監視し、Kafka コンシューマーで発生するエラーをデバッグするには、[`system.kafka_consumers` システムテーブル](../../../operations/system-tables/kafka_consumers.md)に対してクエリを実行できます。デプロイメントに複数のレプリカがある場合(例:ClickHouse Cloud)、[`clusterAllReplicas`](../../../sql-reference/table-functions/cluster.md) テーブル関数を使用する必要があります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md index bac4c7cc95b..bf2df7a5630 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md @@ -114,7 +114,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.bson' FORMAT BSONEachRow; ``` - ### データの読み込み 
{#reading-data} `BSONEachRow` 形式を使用してデータを読み込みます。 @@ -129,7 +128,6 @@ FORMAT BSONEachRow BSON はバイナリ形式のデータであり、ターミナル上では人間が読める形では表示されません。`INTO OUTFILE` を使用して BSON ファイルとして出力してください。 ::: - ## フォーマット設定 {#format-settings} | 設定 | 説明 | 既定値 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md index 298df0a5995..b382386070d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md @@ -46,7 +46,6 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR 照合に失敗し、かつ入力値が数値である場合は、この数値を ENUM ID と照合しようとします。 入力データに ENUM ID のみが含まれている場合は、`ENUM` のパースを最適化するために、設定 [input_format_csv_enum_as_number](/operations/settings/settings-formats.md/#input_format_csv_enum_as_number) を有効にすることを推奨します。 - ## 使用例 {#example-usage} ## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md index 02278c79c52..dbf188819ab 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md @@ -70,7 +70,6 @@ ORDER BY (date, home_team); INSERT INTO football FROM INFILE 'football.csv' FORMAT CSVWithNames; ``` - ### データの読み込み {#reading-data} `CSVWithNames` 形式を使用してデータを読み込みます。 @@ -104,7 +103,6 @@ FORMAT CSVWithNames "2022-05-07",2021,"Walsall","Swindon Town",0,3 ``` - ## フォーマット設定 {#format-settings} :::note diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md index e6d359a6719..271d0c02dd7 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md @@ -71,7 +71,6 @@ ORDER BY (date, home_team); INSERT INTO football FROM INFILE 'football_types.csv' FORMAT CSVWithNamesAndTypes; ``` - ### データの読み込み {#reading-data} `CSVWithNamesAndTypes` 形式を使用してデータを読み込みます。 @@ -106,7 +105,6 @@ FORMAT CSVWithNamesAndTypes "2022-05-07",2021,"Walsall","Swindon Town",0,3 ``` - ## フォーマット設定 {#format-settings} :::note diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md index 4f6570392f1..d6d17a0422c 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md @@ -17,7 +17,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; | -- | -- | ----- | | ✔ | ✔ | | - ## 説明 {#description} `CapnProto` フォーマットは、[`Protocol Buffers`](https://developers.google.com/protocol-buffers/) フォーマットや [Thrift](https://en.wikipedia.org/wiki/Apache_Thrift) に似たバイナリメッセージフォーマットであり、[JSON](./JSON/JSON.md) や [MessagePack](https://msgpack.org/) とは異なります。 @@ -81,7 +80,6 @@ struct Message { $ clickhouse-client --query = "SELECT * FROM test.hits FORMAT CapnProto SETTINGS format_schema = 'schema:Message'" ``` - ### 自動生成スキーマの使用 {#using-autogenerated-capn-proto-schema} データ用の外部 `CapnProto` スキーマがない場合でも、自動生成されたスキーマを使用して、`CapnProto` 形式でデータを出力および入力できます。 @@ -102,7 +100,6 @@ SETTINGS format_capn_proto_use_autogenerated_schema=1 $ cat hits.bin | clickhouse-client --query "INSERT INTO test.hits SETTINGS format_capn_proto_use_autogenerated_schema=1 FORMAT CapnProto" ``` - ## フォーマット設定 {#format-settings} 設定 [`format_capn_proto_use_autogenerated_schema`](../../operations/settings/settings-formats.md/#format_capn_proto_use_autogenerated_schema) 
はデフォルトで有効になっており、[`format_schema`](/interfaces/formats#formatschema) が設定されていない場合に適用されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md index 1f9d5f8813f..0d7eb5f9ab4 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md @@ -56,7 +56,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparated; ``` - ### データの読み込み {#reading-data} カスタム区切り文字を設定します。 @@ -83,7 +82,6 @@ FORMAT CustomSeparated row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## フォーマット設定 {#format-settings} 追加の設定: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md index a5e4ac512cd..0bf24e1b6c6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md @@ -38,5 +38,4 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpaces; ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md index 73806b02e2a..9ef309f933b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md @@ -38,5 +38,4 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpacesWithNames; ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md index 34276cc9efb..a7284e34162 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md @@ -38,5 +38,4 @@ SET 
format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpacesWithNamesAndTypes; ``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md index 7c69dad0b94..9ab963e3a91 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md @@ -43,7 +43,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedWithNames; ``` - ### データの読み取り {#reading-data} カスタム区切り文字の設定を行います。 @@ -70,7 +69,6 @@ FORMAT CustomSeparatedWithNames row('date';'season';'home_team';'away_team';'home_team_goals';'away_team_goals'),row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford 
City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## フォーマット設定 {#format-settings} :::note diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md index f1b89d25c67..885e4a28b78 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md @@ -43,7 +43,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedWithNamesAndTypes; ``` - ### データの読み込み {#reading-data} カスタム区切り文字を設定します。 @@ -70,7 +69,6 @@ FORMAT CustomSeparatedWithNamesAndTypes row('date';'season';'home_team';'away_team';'home_team_goals';'away_team_goals'),row('Date';'Int16';'LowCardinality(String)';'LowCardinality(String)';'Int8';'Int8'),row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley 
Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## フォーマット設定 {#format-settings} :::note diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md index 3429b0c80d8..94237bb30af 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md @@ -82,5 +82,4 @@ LIMIT 3 ピークメモリ使用量: 271.92 MiB。 ``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md index 202772e221e..607a1e4d2d5 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md @@ -40,5 +40,4 @@ rt.start: navigation rt.bmr: 390,11,10 ``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md index def14744f15..0d7a7c54b11 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md @@ -62,5 +62,4 @@ df2ec2f0669b000edff6adee264e7d68 1 rows in set. Elapsed: 0.154 sec. 
``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md index f3983564ce1..04200beaab0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md @@ -99,7 +99,6 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA } ``` - ## フォーマット設定 {#format-settings} JSON 入力フォーマットの場合、[`input_format_json_validate_types_from_metadata`](/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) 設定が `1` に設定されていると、入力データ内のメタデータに含まれる型が、テーブル内の対応する列の型と照合されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md index 8fba51853b2..267bc63c14f 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md @@ -31,7 +31,6 @@ SELECT * FROM json_as_object FORMAT JSONEachRow; {"json":{"any json stucture":"1"}} ``` - ### JSON オブジェクトの配列 {#an-array-of-json-objects} ```sql title="Query" @@ -45,7 +44,6 @@ SELECT * FROM json_square_brackets FORMAT JSONEachRow; {"field":{"id":"2","name":"name2"}} ``` - ### デフォルト値を持つ列 {#columns-with-default-values} ```sql title="Query" @@ -62,5 +60,4 @@ SELECT time, json FROM json_as_object FORMAT JSONEachRow {"time":"2024-09-16 12:18:08","json":{"foo":{"bar":{"x":"y"},"baz":"1"}}} ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md index 
dce25c787ab..ed5f84dc680 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md @@ -46,7 +46,6 @@ SELECT * FROM json_as_string; └───────────────────────────────────┘ ``` - ### JSON オブジェクトの配列 {#an-array-of-json-objects} ```sql title="Query" @@ -63,5 +62,4 @@ SELECT * FROM json_square_brackets; └────────────────────────────┘ ``` - ## フォーマットの設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md index ca280b3cf7e..233da9b74e5 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONColumns; ``` - ### データの読み込み {#reading-data} `JSONColumns` 形式を使用してデータを読み込みます。 @@ -73,7 +72,6 @@ FORMAT JSONColumns } ``` - ## フォーマット設定 {#format-settings} インポート時に、名前が不明な列は、設定 [`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) が `1` に設定されている場合はスキップされます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md index a8cab2ae3ab..3c56dc03d10 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md @@ -67,5 +67,4 @@ doc_type: 'reference' `JSONColumnsWithMetadata` 
入力形式では、[`input_format_json_validate_types_from_metadata`](/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) の設定が `1` の場合、 入力データ内のメタデータに含まれる型が、テーブル内の対応するカラムの型と比較されます。 - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md index 21a6c69c23c..3bfd741ebd2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md @@ -81,7 +81,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompact; ``` - ### データの読み込み {#reading-data} `JSONCompact` 形式を使用してデータを読み込みます。 @@ -156,5 +155,4 @@ FORMAT JSONCompact } ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md index 5f07d1832f7..30ed8d4d96a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md @@ -44,7 +44,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactColumns; ``` - ### データの読み込み {#reading-data} `JSONCompactColumns` 形式を使用してデータを読み込みます。 @@ -70,5 +69,4 @@ FORMAT JSONCompactColumns ブロック内に存在しないカラムにはデフォルト値が補われます(ここでは [`input_format_defaults_for_omitted_fields`](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) 設定を使用できます) - ## フォーマットの設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md index 24c2ee5d02e..72f1d427c6e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactEachRow; ``` - ### データの読み込み {#reading-data} `JSONCompactEachRow` フォーマットを使用してデータを読み込みます。 @@ -82,5 +81,4 @@ FORMAT JSONCompactEachRow ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md index b18be220b9d..76f533958a7 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md @@ -50,7 +50,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactEachRowWithNames; ``` - ### データの読み込み {#reading-data} `JSONCompactEachRowWithNames` フォーマットを使用してデータを読み込みます。 @@ -84,7 +83,6 @@ FORMAT JSONCompactEachRowWithNames ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## フォーマット設定 {#format-settings} :::note diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md index 2ae3da830a7..97f444ff90a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md @@ -51,7 +51,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactEachRowWithNamesAndTypes; ``` - ### データの読み込み {#reading-data} `JSONCompactEachRowWithNamesAndTypes` 形式を使用してデータを読み込みます。 @@ -86,7 +85,6 @@ FORMAT JSONCompactEachRowWithNamesAndTypes ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## フォーマット設定 {#format-settings} :::note diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md index 81df59f10d8..4bbd665e9cd 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md @@ -46,5 +46,4 @@ FORMAT JSONCompactEachRowWithProgress {"rows_before_limit_at_least":5} ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md index 74ee5ef6389..7125516eaf4 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md @@ -93,5 +93,4 @@ FORMAT JSONCompactStrings } ``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md index e192c242453..a4b12ac1fcf 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactStringsEachRow; ``` - ### データの読み込み {#reading-data} `JSONCompactStringsEachRow` 形式を使用してデータを読み込みます。 @@ -82,5 +81,4 @@ FORMAT JSONCompactStringsEachRow ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## 形式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md index c84fa4fc158..e741557ff63 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md @@ -50,7 +50,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactStringsEachRowWithNames; ``` - ### データの読み込み {#reading-data} `JSONCompactStringsEachRowWithNames` フォーマットを使用してデータを読み込みます。 @@ -84,7 +83,6 @@ FORMAT JSONCompactStringsEachRowWithNames ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## フォーマット設定 {#format-settings} :::note diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md index 5404c63a740..4be3786fe01 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md @@ -48,7 +48,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactStringsEachRowWithNamesAndTypes; ``` - ### データの読み込み {#reading-data} `JSONCompactStringsEachRowWithNamesAndTypes` 形式を使用してデータを読み込みます。 @@ -83,7 +82,6 @@ FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## フォーマット設定 {#format-settings} :::note diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md index 52998ed4866..8d65c732ba5 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md @@ -47,5 +47,4 @@ FORMAT JSONCompactStringsEachRowWithProgress {"rows_before_limit_at_least":5} ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md index 562d9f91a18..4d25b5f5b92 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md @@ -47,7 +47,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONEachRow; ``` - ### データの読み取り {#reading-data} `JSONEachRow` 形式を使ってデータを読み込みます。 @@ -82,5 +81,4 @@ FORMAT JSONEachRow [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) 設定が 1 に設定されている場合、不明な名前のデータ列のインポートはスキップされます。 - ## 書式設定 
{#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md index f29536916ab..9a2fd253a02 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md @@ -26,5 +26,4 @@ doc_type: 'reference' {"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} ``` - ## 形式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md index 72bab9bb104..f65c18cadec 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONLines; ``` - ### データの読み取り {#reading-data} `JSONLines` フォーマットを使用してデータを読み込みます。 @@ -84,5 +83,4 @@ FORMAT JSONLines 列名が不明な列のインポートは、[input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) 設定が 1 に設定されている場合、スキップされます。 - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md index 600843b8af9..896cc800741 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md @@ -13,14 +13,10 @@ 
doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 説明 {#description} この形式では、すべてのデータは 1 つの JSON オブジェクトとして表され、そのオブジェクト内で各行が個別のフィールドとして表現されます。これは [`JSONEachRow`](./JSONEachRow.md) 形式と同様です。 - - ## 使用例 {#example-usage} ### 基本的な例 {#basic-example} @@ -133,7 +129,6 @@ CREATE TABLE IF NOT EXISTS example_table 例として、`UserActivity` テーブルを使用します。 - ```response ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ │ 4324182021466249494 │ 5 │ 146 │ -1 │ @@ -213,11 +208,8 @@ SELECT * FROM json_each_row_nested └───────────────┴────────┘ ``` - ## フォーマット設定 {#format-settings} - - | 設定 | 概要 | デフォルト | 注記 | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [`input_format_import_nested_json`](/operations/settings/settings-formats.md/#input_format_import_nested_json) | ネストされた JSON データをネストされたテーブルにマッピングします(JSONEachRow フォーマットで動作します)。 | `false` | | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md index caa4ae546c5..57b9ee78559 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md @@ -200,7 +200,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONStrings; ``` - ### データの読み込み {#reading-data} `JSONStrings` 形式を使用してデータを読み込みます。 @@ -213,7 +212,6 @@ FORMAT JSONStrings 出力は JSON 形式で表示されます。 - ```json { "meta": diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md index ac2230fd114..75eb5026f98 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 説明 {#description} [`JSONEachRow`](./JSONEachRow.md) との違いは、データフィールドが型付きのJSON値ではなく文字列として出力される点だけです。 - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -65,7 +61,6 @@ FORMAT JSONStringsEachRow 出力は JSON 形式です: - ```json {"date":"2022-04-30","season":"2021","home_team":"Sutton United","away_team":"Bradford City","home_team_goals":"1","away_team_goals":"4"} {"date":"2022-04-30","season":"2021","home_team":"Swindon Town","away_team":"Barrow","home_team_goals":"2","away_team_goals":"1"} @@ -86,5 +81,4 @@ FORMAT JSONStringsEachRow {"date":"2022-05-07","season":"2021","home_team":"Walsall","away_team":"Swindon Town","home_team_goals":"0","away_team_goals":"3"} ``` - ## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md index 0c48982985b..b53fc1ec915 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md @@ -6,14 +6,10 @@ title: 'JSONStringsEachRowWithProgress' doc_type: 'reference' --- - - ## 説明 {#description} `JSONEachRow`/`JSONStringsEachRow` と異なり、ClickHouse は進捗情報も JSON 形式で出力します。 - - ## 使用例 {#example-usage} ```json @@ -23,5 +19,4 @@ doc_type: 'reference' 
{"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} ``` - ## 書式設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md index 130f9ece07f..0c49f240d91 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md @@ -13,21 +13,15 @@ doc_type: 'guide' |------|------|-----------------------------------| | ✗ | ✔ | `PrettyJSONLines`, `PrettyNDJSON` | - - ## 説明 {#description} [JSONEachRow](./JSONEachRow.md) と異なる点は、JSON が改行区切りおよび 4 つのスペースによるインデントを付けて整形されていることだけです。 - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} 次のデータを含む JSON ファイル `football.json` を用意します。 - - ```json { "date": "2022-04-30", @@ -185,7 +179,6 @@ FORMAT PrettyJSONEachRow 出力は JSON 形式です。 - ```json { "date": "2022-04-30", @@ -327,6 +320,4 @@ FORMAT PrettyJSONEachRow - - ## 形式設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md index a3ed834bfe5..0d67d4b1360 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md @@ -13,16 +13,12 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 説明 {#description} `LineAsString` フォーマットは、入力データの各行を 1 つの文字列値として解釈します。 このフォーマットは、[String](/sql-reference/data-types/string.md) 型の単一フィールドだけを持つテーブルでのみ使用できます。 残りのカラムは、[`DEFAULT`](/sql-reference/statements/create/table.md/#default)、[`MATERIALIZED`](/sql-reference/statements/create/view#materialized-view) に設定するか、省略する必要があります。 - - ## 使用例 
{#example-usage} ```sql title="Query" @@ -38,5 +34,4 @@ SELECT * FROM line_as_string; └───────────────────────────────────────────────────┘ ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md index 2575cd5c39b..1aae82d3fc1 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 説明 {#description} `LineAsStringWithNames` フォーマットは、[`LineAsString`](./LineAsString.md) フォーマットに似ていますが、列名を含むヘッダー行を出力します。 - - ## 使用例 {#example-usage} ```sql title="Query" @@ -42,5 +38,4 @@ Jane 25 Peter 35 ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md index c6a776acb80..ae8a9a37383 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md @@ -13,15 +13,11 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 説明 {#description} `LineAsStringWithNames` フォーマットは [`LineAsString`](./LineAsString.md) フォーマットに似ていますが、 2 行のヘッダー行を出力します。1 行目は列名、2 行目は型です。 - - ## 使用例 {#example-usage} ```sql @@ -44,5 +40,4 @@ Jane 25 Peter 35 ``` - ## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md index 5e7e48c3b70..ebd4329f9b0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md @@ -11,16 +11,12 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | `MD` | - - ## 説明 {#description} 結果を [Markdown](https://en.wikipedia.org/wiki/Markdown) 形式でエクスポートし、`.md` ファイルにそのまま貼り付けられる出力を生成できます。 Markdown 形式のテーブルは自動的に生成され、GitHub などの Markdown 対応プラットフォームで利用できます。この形式は出力専用です。 - - ## 使用例 {#example-usage} ```sql @@ -41,5 +37,4 @@ FORMAT Markdown | 4 | 8 | ``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md index 4caebd65c13..30dccf1a03c 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 説明 {#description} ClickHouse は [MessagePack](https://msgpack.org/) データファイルの読み書きをサポートしています。 - - ## データ型の対応 {#data-types-matching} | MessagePack データ型(`INSERT`) | ClickHouse のデータ型 | MessagePack データ型(`SELECT`) | @@ -46,8 +42,6 @@ ClickHouse は [MessagePack](https://msgpack.org/) データファイルの読 | `int 64` | [`Decimal64`](/sql-reference/data-types/decimal.md) | `int 64` | | `bin 8` | [`Decimal128`/`Decimal256`](/sql-reference/data-types/decimal.md) | `bin 8` | - - ## 使用例 {#example-usage} 「.msgpk」ファイルへの書き込み: @@ -58,7 +52,6 @@ $ clickhouse-client --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 2 $ clickhouse-client --query="SELECT * FROM msgpack FORMAT MsgPack" > tmp_msgpack.msgpk; ``` - ## フォーマット設定 {#format-settings} | 設定 | 説明 | 既定値 | diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md index feea1452ead..c8d9707dd4d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|---------|-------| | ✔ | ✗ | | - - ## 説明 {#description} ClickHouse は MySQL の [ダンプ](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html) の読み取りをサポートしています。 @@ -26,8 +24,6 @@ ClickHouse は MySQL の [ダンプ](https://dev.mysql.com/doc/refman/8.0/en/mys この形式はスキーマ推論をサポートします。ダンプに指定されたテーブルに対する `CREATE` クエリが含まれている場合は、そのクエリからテーブル構造を推論し、含まれていない場合は `INSERT` クエリのデータからスキーマを推論します。 ::: - - ## 使用例 {#example-usage} 次の SQL ダンプファイルがあるとします。 @@ -84,7 +80,6 @@ SETTINGS input_format_mysql_dump_table_name = 'test2' └───┘ ``` - ## フォーマット設定 {#format-settings} [`input_format_mysql_dump_table_name`](/operations/settings/settings-formats.md/#input_format_mysql_dump_table_name) 設定を使用して、データの読み取り元となるテーブル名を指定できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md index b9de218c80a..cf6467c7a75 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 説明 {#description} `Npy` 形式は、`.npy` ファイルから NumPy 配列を ClickHouse に読み込むために設計されています。 @@ -23,8 +21,6 @@ NumPy のファイル形式は、数値データの配列を効率的に保存 下表は、サポートされている Npy データ型と、それに対応する ClickHouse の型を示します。 - - ## データ型の対応 {#data_types-matching} | Npy データ型(`INSERT`) | ClickHouse データ型 | Npy データ型(`SELECT`) | @@ -42,8 +38,6 @@ NumPy のファイル形式は、数値データの配列を効率的に保存 | `S`, `U` | [String](/sql-reference/data-types/string.md) | `S` | | | 
[FixedString](/sql-reference/data-types/fixedstring.md) | `S` | - - ## 使用例 {#example-usage} ### Python を使って配列を .npy 形式で保存する {#saving-an-array-in-npy-format-using-python} @@ -76,5 +70,4 @@ FROM file('example_array.npy', Npy) $ clickhouse-client --query="SELECT {column} FROM {some_table} FORMAT Npy" > {filename.npy} ``` - ## 書式設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md index dba8e60b819..5c7bacb0503 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 説明 {#description} `Null` フォーマットでは、何も出力されません。 @@ -25,8 +23,6 @@ doc_type: 'reference' `Null` フォーマットは、パフォーマンス測定や性能テストに役立ちます。 ::: - - ## 使用例 {#example-usage} ### データの読み取り {#reading-data} @@ -69,5 +65,4 @@ FORMAT Null 0行のセット。経過時間: 0.154秒 ``` - ## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md index 6d905be6edc..1282284ee44 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 説明 {#description} [Apache ORC](https://orc.apache.org/) は、[Hadoop](https://hadoop.apache.org/) エコシステムで広く使用されている列指向ストレージ形式です。 - - ## データ型の対応関係 {#data-types-matching-orc} 次の表は、`INSERT` および `SELECT` クエリにおいてサポートされる ORC データ型と、それに対応する ClickHouse の [データ型](/sql-reference/data-types/index.md) を比較したものです。 @@ -50,8 +46,6 @@ doc_type: 'reference' - 配列はネスト可能であり、要素として `Nullable` 型の値を取ることができます。`Tuple` および `Map` 型もネスト可能です。 - ClickHouse テーブルの列のデータ型は、対応する ORC 
データフィールドと一致している必要はありません。データを挿入する際、ClickHouse は上記の表に従ってデータ型を解釈し、その後 ClickHouse テーブルの列に設定されているデータ型へデータを[キャスト](/sql-reference/functions/type-conversion-functions#cast)します。 - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -101,7 +95,6 @@ FORMAT ORC ORC はバイナリ形式のため、ターミナル上で人間が読める形で表示することはできません。`INTO OUTFILE` 句を使用して ORC ファイルとして出力してください。 ::: - ## フォーマット設定 {#format-settings} | 設定 | 説明 | デフォルト | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md index 0e8f1400194..eac1533131b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/One.md @@ -13,15 +13,11 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## 説明 {#description} `One` フォーマットは、ファイルから一切データを読み込まず、[`UInt8`](../../sql-reference/data-types/int-uint.md) 型の `dummy` という名前のカラムを 1 列だけ持つ 1 行(値は `0`)だけを返す、特別な入力フォーマットです(`system.one` テーブルと同様)。 仮想カラム `_file/_path` と組み合わせることで、実際のデータを読み込まずにすべてのファイルを一覧表示するために使用できます。 - - ## 使用例 {#example-usage} 例: @@ -45,5 +41,4 @@ SELECT _file FROM file('path/to/files/data*', One); └──────────────┘ ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md index bbad1b51cf2..c7b06d4648a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md @@ -13,13 +13,9 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 説明 {#description} -[Apache Parquet](https://parquet.apache.org/) は、Hadoop エコシステムで広く利用されているカラムナストレージフォーマットです。ClickHouse は、このフォーマットの読み書きをサポートしています。 - - +[Apache Parquet](https://parquet.apache.org/) は、Hadoop 
エコシステムで広く利用されている列指向ストレージ形式です。ClickHouse は、この形式の読み書きをサポートしています。 ## データ型の対応 {#data-types-matching-parquet} @@ -66,15 +62,12 @@ Parquet ファイルを書き出す際、対応する Parquet 型が存在しな ClickHouse テーブルのカラムのデータ型は、挿入される Parquet データ内の対応するフィールドの型と異なる場合があります。データ挿入時、ClickHouse は上記の表に従ってデータ型を解釈し、その後 ClickHouse テーブルのカラムに設定されているデータ型へ[キャスト](/sql-reference/functions/type-conversion-functions#cast)します。たとえば、`UINT_32` の Parquet カラムは [IPv4](/sql-reference/data-types/ipv4.md) 型の ClickHouse カラムとして読み取ることができます。 +一部の Parquet 型には、近い ClickHouse 型が存在しません。これらは次のように読み取ります。 - -一部の Parquet 型には、対応する ClickHouse 型が存在しません。それらは次のように読み取られます。 * `TIME`(時刻)は `timestamp` として読み取られます。例: `10:23:13.000` は `1970-01-01 10:23:13.000` になります。 * `isAdjustedToUTC=false` の `TIMESTAMP`/`TIME` はローカルのウォールクロック時刻(どのタイムゾーンをローカルとみなすかにかかわらず、ローカルタイムゾーンにおける年・月・日・時・分・秒およびサブ秒フィールド)であり、SQL の `TIMESTAMP WITHOUT TIME ZONE` と同じです。ClickHouse はこれを、あたかも UTC の `timestamp` であるかのように読み取ります。例: `2025-09-29 18:42:13.000`(ローカルの時計の読み値を表す)は `2025-09-29 18:42:13.000`(ある時点を表す `DateTime64(3, 'UTC')`)になります。String に変換すると、年・月・日・時・分・秒およびサブ秒は正しい値として表示され、それを UTC ではなく何らかのローカルタイムゾーンの時刻として解釈できます。直感に反して、型を `DateTime64(3, 'UTC')` から `DateTime64(3)` に変更しても状況は改善しません。どちらの型も時計の読み値ではなくある時点を表すためですが、`DateTime64(3)` はローカルタイムゾーンを用いて誤ってフォーマットされてしまいます。 * `INTERVAL` は現在、Parquet ファイル内でエンコードされているとおりの時間間隔の生のバイナリ表現を持つ `FixedString(12)` として読み取られます。 - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -103,12 +96,13 @@ ClickHouse テーブルのカラムのデータ型は、挿入される Parquet └────────────┴────────┴───────────────────────┴─────────────────────┴─────────────────┴─────────────────┘ ``` -データを挿入する: +データを挿入します: ```sql INSERT INTO football FROM INFILE 'football.parquet' FORMAT Parquet; ``` + ### データの読み込み {#reading-data} `Parquet` 形式でデータを読み込みます。 @@ -129,33 +123,30 @@ Hadoop とデータを交換するには、[`HDFS table engine`](/engines/table- ## フォーマット設定 {#format-settings} - - -| 設定 | 概要 | デフォルト | -| ------------------------------------------------------------------------------ | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input_format_parquet_case_insensitive_column_matching` | Parquet の列と CH の列を照合する際に、大文字と小文字を区別しないようにします。 | `0` | -| `input_format_parquet_preserve_order` | Parquet ファイルを読み取る際に行の並べ替えは行わないでください。通常、処理が大幅に遅くなります。 | `0` | -| `input_format_parquet_filter_push_down` | Parquet ファイルを読み込む際、WHERE/PREWHERE 句と Parquet メタデータ内の最小値/最大値の統計量に基づいて、行グループ全体をスキップします。 | `1` | -| `input_format_parquet_bloom_filter_push_down` | Parquet ファイルを読み取る際、WHERE 句の条件式と Parquet メタデータ内のブルームフィルターに基づいて、行グループ全体をスキップします。 | `0` | -| `input_format_parquet_use_native_reader` | Parquet ファイルの読み込み時に、Arrow リーダーではなくネイティブリーダーを使用します。 | `0` | -| `input_format_parquet_allow_missing_columns` | Parquet 入力フォーマット読み込み時に欠落列を許可する | `1` | -| `input_format_parquet_local_file_min_bytes_for_seek` | Parquet 入力形式で、無視しながら読み進めるのではなくシークを行うために必要なローカル読み取り(ファイル)の最小バイト数 | `8192` | -| `input_format_parquet_enable_row_group_prefetch` | Parquet のパース中に行グループのプリフェッチを有効にします。現在は、単一スレッドでのパース時にのみプリフェッチが可能です。 | `1` | -| `input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference` | Parquet 形式でのスキーマ推論時に、未サポートの型の列をスキップする | `0` | -| `input_format_parquet_max_block_size` | Parquet リーダーにおけるブロックサイズの最大値。 | `65409` | -| `input_format_parquet_prefer_block_bytes` | Parquet リーダーの出力ブロックの平均バイト数 | `16744704` | -| `input_format_parquet_enable_json_parsing` | Parquet ファイルを読み込む際は、JSON 列を ClickHouse の JSON カラムとしてパースします。 | `1` | -| `output_format_parquet_row_group_size` | 行数で指定する行グループの目標サイズ。 | `1000000` | -| `output_format_parquet_row_group_size_bytes` | 
圧縮前の行グループの目標サイズ(バイト単位)。 | `536870912` | -| `output_format_parquet_string_as_string` | String 列には Binary ではなく Parquet の String 型を使用してください。 | `1` | -| `output_format_parquet_fixed_string_as_fixed_byte_array` | FixedString 列には Binary 型ではなく、Parquet の FIXED_LEN_BYTE_ARRAY 型を使用してください。 | `1` | -| `output_format_parquet_version` | 出力フォーマットに使用する Parquet フォーマットのバージョン。サポートされているバージョン: 1.0、2.4、2.6、および 2.latest(デフォルト)。 | `2.latest` | -| `output_format_parquet_compression_method` | Parquet 出力フォーマットの圧縮方式。サポートされるコーデック:snappy、lz4、brotli、zstd、gzip、none(非圧縮) | `zstd` | -| `output_format_parquet_compliant_nested_types` | Parquet ファイルのスキーマでは、リスト要素には 'item' ではなく 'element' という名前を使用します。これは Arrow ライブラリの実装に起因する歴史的な経緯です。一般的には互換性が向上しますが、一部の古いバージョンの Arrow とは互換性がない可能性があります。 | `1` | -| `output_format_parquet_use_custom_encoder` | より高速な Parquet エンコーダー実装を使用する。 | `1` | -| `output_format_parquet_parallel_encoding` | 複数スレッドで Parquet エンコードを行います。output_format_parquet_use_custom_encoder を有効にする必要があります。 | `1` | -| `output_format_parquet_data_page_size` | 圧縮前のターゲットページのサイズ(バイト単位)。 | `1048576` | -| `output_format_parquet_batch_size` | この行数ごとにページサイズをチェックします。値の平均サイズが数KBを超える列がある場合は、この値を小さくすることを検討してください。 | `1024` | -| `output_format_parquet_write_page_index` | Parquet ファイルにページインデックスを書き込む機能を追加します。 | `1` | -| `input_format_parquet_import_nested` | この設定は廃止されており、指定しても何の効果もありません。 | `0` | -| `input_format_parquet_local_time_as_utc` | true | isAdjustedToUTC=false の Parquet タイムスタンプに対して、スキーマ推論時に使用されるデータ型を決定します。true の場合は DateTime64(..., 'UTC')、false の場合は DateTime64(...) 
になります。ClickHouse にはローカルの壁時計時刻を表すデータ型がないため、どちらの動作も完全には正しくありません。直感に反して、'true' の方がまだ誤りが少ない選択肢と考えられます。これは、'UTC' タイムスタンプを String としてフォーマットすると、正しいローカル時刻を表す文字列表現が得られるためです。 | +| 設定 | 概要 | デフォルト | +| ------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_format_parquet_case_insensitive_column_matching` | Parquet の列と CH の列を照合する際に大文字と小文字を区別しません。 | `0` | +| `input_format_parquet_preserve_order` | Parquet ファイルを読み込む際に行の順序を変更しないようにします。通常、処理が大幅に遅くなります。 | `0` | +| `input_format_parquet_filter_push_down` | Parquet ファイルを読み込む際、WHERE/PREWHERE 句と Parquet メタデータ内の最小値/最大値の統計量に基づいて、行グループ全体をスキップします。 | `1` | +| `input_format_parquet_bloom_filter_push_down` | Parquet ファイルを読み込む際、WHERE 句と Parquet メタデータ内のブルームフィルターに基づいて行グループ全体をスキップします。 | `0` | +| `input_format_parquet_allow_missing_columns` | Parquet 入力フォーマットを読み込む際に、存在しないカラムを許可する | `1` | +| `input_format_parquet_local_file_min_bytes_for_seek` | Parquet 入力フォーマットで、データを無視しつつ順次読み込むのではなくシークを行うために必要なローカルファイル読み取りの最小バイト数 | `8192` | +| `input_format_parquet_enable_row_group_prefetch` | Parquet の解析時に行グループのプリフェッチを有効にします。現在は単一スレッドでの解析時にのみプリフェッチが行えます。 | `1` | +| `input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference` | Parquet 形式のスキーマ推論時に、サポートされていない型を持つ列をスキップします。 | `0` | +| `input_format_parquet_max_block_size` | Parquet リーダーの最大ブロックサイズ | `65409` | +| `input_format_parquet_prefer_block_bytes` | Parquet リーダーから出力されるブロックの平均サイズ(バイト単位) | `16744704` | +| `input_format_parquet_enable_json_parsing` | Parquet ファイルを読み込む際は、JSON 列を 
ClickHouse の JSON カラムとしてパースします。 | `1` | +| `output_format_parquet_row_group_size` | 行グループの目標サイズ(行数単位)。 | `1000000` | +| `output_format_parquet_row_group_size_bytes` | 圧縮前のターゲット行グループのサイズ(バイト単位)。 | `536870912` | +| `output_format_parquet_string_as_string` | String 列には Binary 型ではなく、Parquet の String 型を使用してください。 | `1` | +| `output_format_parquet_fixed_string_as_fixed_byte_array` | FixedString 列には Binary ではなく Parquet の FIXED_LEN_BYTE_ARRAY 型を使用してください。 | `1` | +| `output_format_parquet_version` | 出力フォーマットで使用する Parquet フォーマットのバージョンです。サポートされているバージョンは 1.0、2.4、2.6、および 2.latest(デフォルト)です。 | `2.latest` | +| `output_format_parquet_compression_method` | Parquet 出力フォーマットの圧縮方式。サポートされるコーデック:snappy、lz4、brotli、zstd、gzip、none(非圧縮) | `zstd` | +| `output_format_parquet_compliant_nested_types` | Parquet ファイルのスキーマでは、リスト要素には 'item' ではなく 'element' という名前を使用します。これは Arrow ライブラリの実装に起因する歴史的な経緯によるものです。一般的には互換性が向上しますが、一部の古いバージョンの Arrow とは互換性がない可能性があります。 | `1` | +| `output_format_parquet_use_custom_encoder` | より高速な Parquet エンコーダー実装を使用します。 | `1` | +| `output_format_parquet_parallel_encoding` | 複数スレッドで Parquet エンコードを行う。output_format_parquet_use_custom_encoder を有効にする必要がある。 | `1` | +| `output_format_parquet_data_page_size` | 圧縮前のページの目標サイズ(バイト単位)。 | `1048576` | +| `output_format_parquet_batch_size` | 指定した行数ごとにページサイズをチェックします。カラム内の値の平均サイズが数 KB を超える場合は、この値を小さくすることを検討してください。 | `1024` | +| `output_format_parquet_write_page_index` | Parquet ファイルにページインデックスを書き込めるようにします。 | `1` | +| `input_format_parquet_import_nested` | この設定は廃止されており、指定しても何の効果もありません。 | `0` | +| `input_format_parquet_local_time_as_utc` | true | isAdjustedToUTC=false の Parquet タイムスタンプに対して、スキーマ推論時に使用されるデータ型を決定します。true の場合は DateTime64(..., 'UTC')、false の場合は DateTime64(...) 
になります。ClickHouse にはローカルの壁時計時刻を表すデータ型がないため、どちらの動作も完全には正しくありません。直感に反して、'true' の方がまだ誤りが少ない選択肢と考えられます。これは、'UTC' タイムスタンプを String 型としてフォーマットすると、正しいローカル時刻を表す文字列表現が得られるためです。 | \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md index f4ff2c9f6ff..957d88d1973 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md @@ -6,8 +6,6 @@ title: 'ParquetMetadata' doc_type: 'reference' --- - - ## 説明 {#description} Parquet ファイルメタデータ (https://parquet.apache.org/docs/file-format/metadata/) を読み取るための特別なフォーマットです。常に次の構造/内容を持つ 1 行を出力します: @@ -47,8 +45,6 @@ Parquet ファイルメタデータ (https://parquet.apache.org/docs/file-format - `min` - カラムチャンクの最小値 - `max` - カラムチャンクの最大値 - - ## 使用例 {#example-usage} 例: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md index f851d9b90f9..c2a46e5ca64 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md @@ -15,7 +15,6 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; | -- | -- | ----- | | ✗ | ✔ | | - ## 説明 {#description} `Pretty` フォーマットは、Unicode アートによるテーブルとしてデータを出力し、 @@ -26,8 +25,6 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; [NULL](/sql-reference/syntax.md) は `ᴺᵁᴸᴸ` として出力されます。 - - ## 使用例 {#example-usage} 例([`PrettyCompact`](./PrettyCompact.md) 形式の場合): @@ -97,7 +94,6 @@ FORMAT PrettyCompact └────────────┴─────────┘ ``` - ## 書式設定 {#format-settings} diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md index a510183028a..16e95074ad5 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md @@ -15,14 +15,11 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; | -- | -- | -- | | ✗ | ✔ | | - ## 説明 {#description} [Pretty](/interfaces/formats/Pretty) とは異なり、[ANSI エスケープシーケンス](http://en.wikipedia.org/wiki/ANSI_escape_code) を使用しません。 これは、この形式をブラウザで表示したり、`watch` コマンドラインユーティリティで使用したりするために必要です。 - - ## 使用例 {#example-usage} 例: @@ -35,7 +32,6 @@ $ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events F [HTTP インターフェイス](../../../interfaces/http.md)を使用して、この形式をブラウザで表示できます。 ::: - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md index 0cd43581f32..afbc01b23f9 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md @@ -95,7 +95,6 @@ Enum(および Enum8 または Enum16)は、`oneof` で取り得るすべて ClickHouse は、`length-delimited` 形式で protobuf メッセージを入力および出力します。 これは、各メッセージの前に、その長さを [可変長整数 (varint)](https://developers.google.com/protocol-buffers/docs/encoding#varints) として書き込む必要があることを意味します。 - ## 使用例 {#example-usage} ### データの読み取りと書き込み {#basic-examples} @@ -119,7 +118,6 @@ message MessageType { }; ``` -
バイナリファイルの生成 @@ -248,7 +246,6 @@ ENGINE = MergeTree() ORDER BY tuple() ``` - コマンドラインからテーブルにデータを挿入します: ```bash @@ -263,8 +260,7 @@ SELECT * FROM test.protobuf_messages INTO OUTFILE 'protobuf_message_from_clickho Protobuf スキーマを使用して、ClickHouse からファイル `protobuf_message_from_clickhouse.bin` に書き出されたデータをデシリアライズできます。 - -### ClickHouse Cloud を使用したデータの読み取りと書き込み +### ClickHouse Cloud を使用したデータの読み取りと書き込み {#basic-examples-cloud} ClickHouse Cloud では、Protobuf スキーマファイルをアップロードすることはできません。ただし、クエリ内でスキーマを指定するために `format_protobuf_schema` 設定を使用できます。この例では、ローカルマシン上のシリアル化されたデータを読み取り、ClickHouse Cloud のテーブルに挿入する方法を示します。 @@ -291,8 +287,7 @@ ORDER BY tuple() * 'string': `format_schema` はスキーマ内容そのもの(リテラル)です。 * 'query': `format_schema` はスキーマを取得するためのクエリです。 - -### `format_schema_source='string'` +### `format_schema_source='string'` {#format-schema-source-string} スキーマを文字列として指定して ClickHouse Cloud にデータを挿入するには、次のコマンドを実行します: @@ -312,8 +307,7 @@ Javier Rodriguez 20001015 ['(555) 891-2046','(555) 738-5129'] Mei Ling 19980616 ['(555) 956-1834','(555) 403-7682'] ``` - -### `format_schema_source='query'` +### `format_schema_source='query'` {#format-schema-source-query} Protobuf スキーマをテーブルに保存することもできます。 @@ -349,8 +343,7 @@ Javier Rodriguez 20001015 ['(555) 891-2046','(555) 738-5129'] Mei Ling 19980616 ['(555) 956-1834','(555) 403-7682'] ``` - -### 自動生成スキーマの利用 +### 自動生成スキーマの利用 {#using-autogenerated-protobuf-schema} データ用の外部 Protobuf スキーマがない場合でも、自動生成されたスキーマを利用することで、Protobuf 形式でデータを出力/入力できます。 この場合は `format_protobuf_use_autogenerated_schema` 設定を使用します。 @@ -379,7 +372,6 @@ SELECT * FROM test.hits format Protobuf SETTINGS format_protobuf_use_autogenerat この場合、自動生成された Protobuf スキーマは `path/to/schema/schema.capnp` というファイルに保存されます。 - ### Protobuf キャッシュの削除 {#basic-examples-cloud} [`format_schema_path`](/operations/server-configuration-parameters/settings.md/#format_schema_path) から読み込まれた Protobuf スキーマを再読み込むには、[`SYSTEM DROP ... 
FORMAT CACHE`](/sql-reference/statements/system.md/#system-drop-schema-format) ステートメントを使用します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md index f6c3c6f03df..f0e4f1aca40 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md @@ -17,13 +17,10 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; | -- | -- | ----- | | ✔ | ✔ | | - ## 説明 {#description} `ProtobufList` フォーマットは [`Protobuf`](./Protobuf.md) フォーマットと似ていますが、行は固定名「Envelope」を持つメッセージ内に含まれるサブメッセージの列として表現されます。 - - ## 使用例 {#example-usage} 例えば、次のようにします。 @@ -51,5 +48,4 @@ message Envelope { }; ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md index d4766e30eac..8aaea9c5d5d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md @@ -6,8 +6,6 @@ title: 'RawBLOB' doc_type: 'reference' --- - - ## 説明 {#description} `RawBLOB` 形式は、すべての入力データを単一の値として読み取ります。これは、[`String`](/sql-reference/data-types/string.md) 型またはそれに類似した単一フィールドのみを持つテーブルだけをパースできます。 @@ -45,7 +43,6 @@ doc_type: 'reference' コード: 108. 
DB::Exception: 挿入するデータがありません ``` - ## 使用例 {#example-usage} ```bash title="Query" @@ -58,5 +55,4 @@ $ clickhouse-client --query "SELECT * FROM {some_table} FORMAT RawBLOB" | md5sum f9725a22f9191e064120d718e26862a9 - ``` - ## フォーマットの設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md index e1fd0b9e41e..9d1d6c0085d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## 説明 {#description} `Regex` フォーマットは、指定された正規表現に従って、インポートされたデータの各行をパースします。 @@ -29,8 +27,6 @@ doc_type: 'reference' 正規表現が行にマッチせず、かつ [format_regexp_skip_unmatched](/operations/settings/settings-formats.md/#format_regexp_escaping_rule) が 1 に設定されている場合、その行は何の通知もなくスキップされます。そうでない場合は、例外がスローされます。 - - ## 使用例 {#example-usage} `data.tsv` というファイルがあるとします。 @@ -67,7 +63,6 @@ SELECT * FROM imp_regex_table; └────┴─────────┴────────┴────────────┘ ``` - ## フォーマット設定 {#format-settings} `Regexp` フォーマットを使用する場合、次の設定を使用できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md index 2928d241dbb..d614dff387d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md @@ -15,13 +15,10 @@ import RowBinaryFormatSettings from './_snippets/common-row-binary-format-settin | -- | -- | ----- | | ✔ | ✗ | | - ## 説明 {#description} [`RowBinary`](./RowBinary.md) 形式と似ていますが、各列の前に 1 バイトが追加され、そのバイトでデフォルト値を使用するかどうかを示します。 - - ## 使用例 {#example-usage} 例: @@ -39,7 +36,6 @@ SELECT * FROM 
FORMAT('RowBinaryWithDefaults', 'x UInt32 default 42, y UInt32', x * 列 `x` には、デフォルト値を使用すべきことを示す 1 バイトの `01` だけがあり、このバイト以降には一切データがありません。 * 列 `y` では、データはバイト `00` から始まっており、これは列に実際の値が存在し、その値を後続のデータ `01000000` から読み取る必要があることを示します。 - ## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md index 931b6f31daf..349d3c42170 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 説明 {#description} データを `INSERT INTO table (columns...) VALUES (...), (...) ...;` 文の連続として出力します。 - - ## 使用例 {#example-usage} 例: @@ -39,7 +35,6 @@ INSERT INTO table (x, y, z) VALUES (8, 9, 'こんにちは'), (9, 10, 'こんに このフォーマットで出力されたデータを読み取るには、[MySQLDump](../formats/MySQLDump.md) 入力フォーマットを使用できます。 - ## フォーマット設定 {#format-settings} | 設定 | 説明 | デフォルト | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md index d51d4ef0291..09d9b64195d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 説明 {#description} [`TabSeparated`](./TabSeparated.md) フォーマットと似ていますが、値を `name=value` 形式で出力します。 @@ -57,7 +55,6 @@ x=1 y=\N [NULL](/sql-reference/syntax.md) は `\N` としてフォーマットされます。 - ## 利用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -102,7 +99,6 @@ FORMAT TSKV 出力は、列名と型を示す 2 行のヘッダー付きのタブ区切り形式になります。 - ```tsv date=2022-04-30 season=2021 home_team=Sutton United away_team=Bradford City home_team_goals=1 away_team_goals=4 
date=2022-04-30 season=2021 home_team=Swindon Town away_team=Barrow home_team_goals=2 away_team_goals=1 @@ -123,5 +119,4 @@ date=2022-05-07 season=2021 home_team=Stevenage Borough away_team=Salfor date=2022-05-07 season=2021 home_team=Walsall away_team=Swindon Town home_team_goals=0 away_team_goals=3 ``` - ## フォーマットの設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md index 5db4351a2af..5ea9569595a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|--------| | ✔ | ✔ | `TSV` | - - ## 説明 {#description} TabSeparated フォーマットでは、データは行単位で書き込まれます。各行には、タブで区切られた値が含まれます。各値の後にはタブが続きますが、行の最後の値の後にはタブではなく改行コードが続きます。改行コードはいずれも Unix スタイルであることを前提とします。最後の行の末尾にも改行コードが付いていなければなりません。値はテキスト形式で、引用符で囲まず、特殊文字はエスケープして書き込まれます。 @@ -42,7 +40,6 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD 2014-03-23 1406958 ``` - ## データの書式設定 {#tabseparated-data-formatting} 整数は 10 進数形式で記述されます。数値は先頭に追加の「+」記号を含むことができます(パース時には無視され、書式化時には出力されません)。非負の数値には負号を含めることはできません。読み取り時には、空文字列をゼロとしてパースすること、または(符号付き型の場合)マイナス記号だけから成る文字列をゼロとしてパースすることが許可されています。対応するデータ型に収まらない数値は、エラーを出さずに別の数値としてパースされる場合があります。 @@ -108,7 +105,6 @@ SELECT * FROM nestedt FORMAT TSV 1 [1] ['a'] ``` - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -173,7 +169,6 @@ FORMAT TabSeparated 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## フォーマット設定 {#format-settings} | Setting | Description | Default | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md 
index ee81f7168da..19d95279452 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md @@ -13,8 +13,6 @@ doc_type: 'reference' |------|------|------------------| | ✔ | ✔ | `TSVRaw`, `Raw` | - - ## 説明 {#description} このフォーマットは [`TabSeparated`](/interfaces/formats/TabSeparated) フォーマットと異なり、行をエスケープせずに書き込みます。 @@ -25,8 +23,6 @@ doc_type: 'reference' `TabSeparatedRaw` フォーマットと `RawBlob` フォーマットの比較については、[Raw フォーマットの比較](../RawBLOB.md/#raw-formats-comparison) を参照してください。 - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -91,5 +87,4 @@ FORMAT TabSeparatedRaw 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## フォーマット設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md index 7273321547e..70d1345dbdb 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md @@ -13,8 +13,6 @@ doc_type: 'reference' |------|------|-----------------------------------| | ✔ | ✔ | `TSVRawWithNames`, `RawWithNames` | - - ## 説明 {#description} このフォーマットは、行がエスケープ処理されずに書き込まれるという点で、[`TabSeparatedWithNames`](./TabSeparatedWithNames.md) フォーマットとは異なります。 @@ -23,8 +21,6 @@ doc_type: 'reference' このフォーマットで解析する際、各フィールド内でタブや改行文字は使用できません。 ::: - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -91,5 +87,4 @@ date season home_team away_team home_team_goals away_team_goals 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md index c7f7914dae3..3aaef098940 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md @@ -13,8 +13,6 @@ doc_type: 'reference' |------|------|---------------------------------------------------| | ✔ | ✔ | `TSVRawWithNamesAndNames`, `RawWithNamesAndNames` | - - ## 説明 {#description} この形式は、行がエスケープ処理なしで書き出されるという点で、[`TabSeparatedWithNamesAndTypes`](./TabSeparatedWithNamesAndTypes.md) 形式と異なります。 @@ -23,8 +21,6 @@ doc_type: 'reference' この形式でパースする場合、各フィールド内にタブや改行文字を含めることはできません。 ::: - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -71,7 +67,6 @@ FORMAT TabSeparatedRawWithNamesAndTypes 出力は、列名と型を示す 2 行のヘッダー行を持つタブ区切り形式になります。 - ```tsv date season home_team away_team home_team_goals away_team_goals Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 @@ -94,5 +89,4 @@ Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 書式設定 {#format-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md index c8ccdd9f720..ee2eaff5fe0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|--------------------------------| | ✔ | ✔ | `TSVWithNames`, `RawWithNames` | - - ## 説明 {#description} 
最初の行に列名が記載されている点が、[`TabSeparated`](./TabSeparated.md) 形式と異なります。 @@ -27,8 +25,6 @@ doc_type: 'reference' それ以外の場合、最初の行はスキップされます。 ::: - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -95,5 +91,4 @@ date season home_team away_team home_team_goals away_team_goals 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 書式設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md index 697bf3dea2d..3761eb78527 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md @@ -10,8 +10,6 @@ doc_type: 'reference' |-------|--------|------------------------------------------------| | ✔ | ✔ | `TSVWithNamesAndTypes`, `RawWithNamesAndTypes` | - - ## 説明 {#description} [`TabSeparated`](./TabSeparated.md) 形式とは異なり、最初の行にはカラム名が、2 行目にはカラム型が書き込まれます。 @@ -24,8 +22,6 @@ doc_type: 'reference' 入力データの型はテーブル内の対応するカラムの型と比較されます。それ以外の場合、2 行目はスキップされます。 ::: - - ## 使用例 {#example-usage} ### データの挿入 {#inserting-data} @@ -72,7 +68,6 @@ FORMAT TabSeparatedWithNamesAndTypes 出力はタブ区切り形式となり、列名と型を表す 2 行のヘッダーが付きます。 - ```tsv date season home_team away_team home_team_goals away_team_goals Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 @@ -95,5 +90,4 @@ Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md index e1bc070c87f..1079e65cf8b 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md @@ -13,8 +13,6 @@ doc_type: 'guide' |-------|--------|-------| | ✔ | ✔ | | - - ## 説明 {#description} 他の標準フォーマットでは対応できない、より高度なカスタマイズが必要な場合に、 @@ -32,8 +30,6 @@ doc_type: 'guide' | `format_template_resultset_format` | 結果セットのフォーマット文字列を[インライン](#inline_specification)で指定します。 | | 他のフォーマットの一部の設定(例: `JSON` エスケープを使用する場合の `output_format_json_quote_64bit_integers` | | - - ## 設定とエスケープ規則 {#settings-and-escaping-rules} ### format_template_row {#format_template_row} @@ -115,7 +111,6 @@ Where: `format_template_resultset` 設定が空文字列の場合、デフォルト値として `${data}` が使用されます。 ::: - 挿入クエリでは、先頭または末尾を省略する場合(例を参照)、一部の列やフィールドをスキップできるフォーマットを利用できます。 ### インライン指定 {#inline_specification} @@ -131,8 +126,6 @@ Where: - `format_template_resultset_format` を使用する場合は [`format_template_resultset`](#format_template_resultset)。 ::: - - ## 使用例 {#example-usage} まずは `Template` 形式の利用例として、データの選択と挿入の 2 つのケースを見ていきます。 @@ -216,7 +209,6 @@ FORMAT Template Markdown テーブルを手作業で整形するのにうんざりしていませんか?この例では、`Template` フォーマットとインライン指定の設定を使って、簡単なタスクをどのように実現できるかを見ていきます。ここでは、`system.formats` テーブルからいくつかの ClickHouse フォーマット名を `SELECT` し、それらを Markdown テーブルとして整形します。これは、`Template` フォーマットと `format_template_row_format` および `format_template_resultset_format` 設定を使うことで容易に実現できます。 - 前の例では、結果セットおよび行フォーマットの文字列を別ファイルに記述し、それらファイルへのパスをそれぞれ `format_template_resultset` および `format_template_row` 設定で指定しました。ここではテンプレートがごく単純で、Markdown テーブルを作るためのいくつかの `|` と `-` だけで構成されるため、インラインで指定します。結果セットのテンプレート文字列は、`format_template_resultset_format` 設定を使って指定します。テーブルヘッダを作るために、`${data}` の前に `|ClickHouse Formats|\n|---|\n` を追加しています。行に対しては、`format_template_row_format` 設定を使用し、テンプレート文字列 ``|`{0:XML}`|`` を指定します。`Template` フォーマットは、指定したフォーマットで整形した行をプレースホルダ `${data}` に挿入します。この例ではカラムは 1 つだけですが、もし追加したい場合は、行テンプレート文字列に `{1:XML}`、`{2:XML}` ... 
のように追記し、適切なエスケープルールを選択すればかまいません。この例ではエスケープルールとして `XML` を使用しています。 ```sql title="Query" diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md index fb54b474462..be7570bfc22 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## 説明 {#description} [`Template`] と似ていますが、入力ストリーム内のデリミタと値の間にある空白文字をスキップします。 @@ -27,8 +25,6 @@ doc_type: 'reference' このフォーマットは入力専用です。 ::: - - ## 使用例 {#example-usage} 以下のリクエストを使用すると、[JSON](/interfaces/formats/JSON) 形式の出力例からデータを挿入できます。 @@ -50,5 +46,4 @@ FORMAT TemplateIgnoreSpaces {${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}} ``` - ## フォーマット設定 {#format-settings} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md index 7ca77cbe1df..a146f517568 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md @@ -13,16 +13,12 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 説明 {#description} 各値を、指定された列名とともに個別の行に出力します。この形式は、各行が多数の列で構成されている場合に、1 行または少数の行だけを出力するのに便利です。 [`NULL`](/sql-reference/syntax.md) は、文字列値 `NULL` と値が存在しないことを区別しやすくするために `ᴺᵁᴸᴸ` として出力されることに注意してください。JSON 列は整形して表示され、`NULL` は `null` として出力されます。これは有効な JSON 値であり、`"null"` と容易に区別できるためです。 - - ## 使用例 {#example-usage} 例: @@ -53,5 +49,4 @@ test: 'quotes' を含む文字列と いくつかの特殊 この形式はクエリ結果の出力にのみ適しており、パース(テーブルへの挿入用にデータを取得する処理)には適していません。 - ## フォーマット設定 {#format-settings} diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md index f28824ee1a7..51f9b7aaf7a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 説明 {#description} `XML` 形式は出力専用であり、入力のパースには使用できません。 @@ -26,8 +24,6 @@ JSON と同様に、不正な UTF-8 シーケンスは置換文字 `�` に置 配列は `HelloWorld...` のように、タプルは `HelloWorld...` のように出力されます。 - - ## 使用例 {#example-usage} 例: @@ -94,9 +90,6 @@ JSON と同様に、不正な UTF-8 シーケンスは置換文字 `�` に置 ``` - ## フォーマット設定 {#format-settings} - - ## XML {#xml} \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md index 1fb2a1a7197..4fdb483bc08 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/grpc.md @@ -7,12 +7,8 @@ title: 'gRPC インターフェイス' doc_type: 'reference' --- - - # gRPC インターフェース {#grpc-interface} - - ## はじめに {#grpc-interface-introduction} ClickHouse は [gRPC](https://grpc.io/) インターフェースをサポートしています。gRPC は、HTTP/2 と [Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers) を使用するオープンソースのリモートプロシージャコールシステムです。ClickHouse における gRPC の実装は、次の機能をサポートします。 @@ -28,8 +24,6 @@ ClickHouse は [gRPC](https://grpc.io/) インターフェースをサポート このインターフェースの仕様は [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto) に記載されています。 - - ## gRPC 構成 {#grpc-interface-configuration} gRPC インターフェイスを使用するには、メインの[サーバー構成](../operations/configuration-files.md)で `grpc_port` を設定します。その他の構成オプションについては、以下の例を参照してください。 @@ -66,7 +60,6 @@ gRPC インターフェイスを使用するには、メインの[サーバー ``` - ## 組み込みクライアント {#grpc-client} 
提供されている[仕様](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto)に基づき、gRPC がサポートしている任意のプログラミング言語でクライアントを実装できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md index c3953b24848..ce642759f6f 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/http.md @@ -11,19 +11,14 @@ doc_type: 'reference' import PlayUI from '@site/static/images/play.png'; import Image from '@theme/IdealImage'; - # HTTP インターフェース {#http-interface} - - ## 前提条件 {#prerequisites} この記事の例を実行するには、次のものが必要です: - 稼働中の ClickHouse サーバーインスタンス - `curl` がインストールされていること。Ubuntu または Debian では `sudo apt install curl` を実行するか、インストール手順については[こちらのドキュメント](https://curl.se/download.html)を参照してください。 - - ## 概要 {#overview} HTTP インターフェイスを使用すると、REST API の形であらゆるプラットフォームやプログラミング言語から ClickHouse を利用できます。HTTP インターフェイスはネイティブインターフェイスよりも機能面では制限がありますが、言語サポートは優れています。 @@ -44,7 +39,6 @@ Ok. あわせて [HTTP 応答コードに関する注意事項](#http_response_codes_caveats) も参照してください。 - ## Web ユーザーインターフェイス {#web-ui} ClickHouse には Web ユーザーインターフェイスが用意されており、以下のアドレスからアクセスできます。 @@ -71,7 +65,6 @@ $ curl 'http://localhost:8123/replicas_status' Ok. 
``` - ## HTTP/HTTPS でのクエリ実行 {#querying} HTTP/HTTPS 経由でクエリを実行する方法は次の 3 つです。 @@ -165,7 +158,6 @@ ECT 1 wget -nv -O- 'http://localhost:8123/?query=SELECT 1, 2, 3 FORMAT JSON' ``` - ```response title="Response" { "meta": @@ -223,7 +215,6 @@ $ curl -X POST -F 'query=select {p1:UInt8} + {p2:UInt8}' -F "param_p1=3" -F "par 7 ``` - ## HTTP/HTTPS 経由での INSERT クエリ {#insert-queries} `INSERT` クエリでは、データ送信に `POST` メソッドが必要です。この場合、クエリの先頭部分を URL パラメータに記述し、挿入するデータ本体を POST メソッドで送信できます。挿入するデータとしては、例えば MySQL からのタブ区切りダンプなどが利用できます。この方法では、`INSERT` クエリによって MySQL の `LOAD DATA LOCAL INFILE` と同等の処理を行えます。 @@ -290,7 +281,6 @@ $ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- データテーブルを返さない成功したリクエストでは、空のレスポンスボディが返されます。 - ## 圧縮 {#compression} 大量のデータを送信する際のネットワークトラフィックを削減したり、その場で圧縮済みのダンプを作成したりするために、圧縮を使用できます。 @@ -322,8 +312,6 @@ ClickHouse にレスポンスを圧縮させるには、リクエストに `Acce 一部の HTTP クライアントは、デフォルトで(`gzip` および `deflate` を用いて)サーバーからのデータを自動的に解凍する場合があり、その場合は圧縮設定を正しく使用していても、解凍済みのデータを受け取ることがあります。 ::: - - ## 例 {#examples-compression} 圧縮データをサーバーに送信するには: @@ -355,7 +343,6 @@ curl -sS "http://localhost:8123/?enable_http_compression=1" \ 2 ``` - ## デフォルトデータベース {#default-database} デフォルトデータベースを指定するには、`database` URL パラメータまたは `X-ClickHouse-Database` ヘッダーを使用できます。 @@ -376,7 +363,6 @@ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?databa 既定では、サーバー設定で登録されているデータベースが既定のデータベースとして使用されます。インストール直後の状態では、これは `default` という名前のデータベースです。あるいは、テーブル名の前にドットを付けてデータベース名を明示的に指定することもできます。 - ## 認証 {#authentication} ユーザー名とパスワードは、次の3つの方法のいずれかで指定できます。 @@ -437,7 +423,6 @@ $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:812 * [設定](/operations/settings/settings) * [SET](/sql-reference/statements/set) - ## HTTP プロトコルでの ClickHouse セッションの使用 {#using-clickhouse-sessions-in-the-http-protocol} ClickHouse セッションは HTTP プロトコルでも使用できます。そのためには、リクエストに `session_id` の `GET` パラメータを追加する必要があります。セッション ID には任意の文字列を指定できます。 @@ -479,7 +464,6 @@ X-ClickHouse-Progress: 
{"read_rows":"1000000","read_bytes":"8000000","total_rows HTTP インターフェイスでは、クエリ用に外部データ(外部一時テーブル)を渡すことができます。詳細は [「External data for query processing」](/engines/table-engines/special/external-data) を参照してください。 - ## レスポンスのバッファリング {#response-buffering} レスポンスのバッファリングはサーバー側で有効化できます。このために、次の URL パラメータが用意されています。 @@ -506,7 +490,6 @@ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wa バッファリングを使用して、レスポンスコードおよび HTTP ヘッダーがクライアントに送信された後にクエリの処理エラーが発生する状況を回避してください。このような場合、エラーメッセージはレスポンスボディの末尾に書き込まれ、クライアント側ではパース処理の段階になって初めてエラーを検知できます。 ::: - ## クエリパラメーターを使用してロールを設定する {#setting-role-with-query-parameters} この機能は ClickHouse 24.4 で追加されました。 @@ -540,7 +523,6 @@ curl -sS "http://localhost:8123?role=my_role&role=my_other_role" --data-binary " この場合、`?role=my_role&role=my_other_role` は、ステートメントを実行する前に `SET ROLE my_role, my_other_role` を実行した場合と同様に動作します。 - ## HTTP レスポンスコードに関する注意点 {#http_response_codes_caveats} HTTP プロトコルの制約上、HTTP 200 のレスポンスコードであっても、クエリが成功したことは保証されません。 @@ -626,7 +608,6 @@ $ curl -v -Ss "http://localhost:8123/?max_block_size=1&query=select+sleepEachRow 0,0 ``` - **例外** rumfyutuqkncbgau Code: 395. DB::Exception: `throwIf` 関数に渡された値がゼロ以外です: `FUNCTION throwIf(equals(__table1.number, 2_UInt8) :: 1) -> throwIf(equals(__table1.number, 2_UInt8)) UInt8 : 0` を実行中に発生しました。 (FUNCTION_THROW_IF_VALUE_IS_NON_ZERO) (version 25.11.1.1) @@ -636,7 +617,6 @@ Code: 395. 
DB::Exception: `throwIf` 関数に渡された値がゼロ以外で ``` ``` - ## パラメーター付きクエリ {#cli-queries-with-parameters} パラメーター付きのクエリを作成し、対応する HTTP リクエストのパラメーターから値を渡すことができます。詳細については、[CLI 向けパラメーター付きクエリ](../interfaces/cli.md#cli-queries-with-parameters)を参照してください。 @@ -676,7 +656,6 @@ curl -sS "http://localhost:8123?param_arg1=abc%5C%09123" -d "SELECT splitByChar( ['abc','123'] ``` - ## あらかじめ定義された HTTP インターフェイス {#predefined_http_interface} ClickHouse は、HTTP インターフェイス経由で特定のクエリをサポートしています。たとえば、次のようにテーブルにデータを書き込むことができます。 @@ -708,7 +687,6 @@ ClickHouse は、[Prometheus exporter](https://github.com/ClickHouse/clickhouse_ これで、Prometheus 形式のデータを取得するための URL を直接リクエストできます。 - ```bash $ curl -v 'http://localhost:8123/predefined_query' * Trying ::1... @@ -735,25 +713,18 @@ $ curl -v 'http://localhost:8123/predefined_query' "Query" 1 ``` - # HELP "Merge" "実行中のバックグラウンドマージ数" {#help-merge-number-of-executing-background-merges} # TYPE "Merge" counter {#type-merge-counter} "Merge" 0 - - # HELP "PartMutation" "ミューテーション数 (ALTER DELETE/UPDATE)" {#help-partmutation-number-of-mutations-alter-deleteupdate} # TYPE "PartMutation" counter {#type-partmutation-counter} "PartMutation" 0 - - # HELP "ReplicatedFetch" "レプリカから取得中のデータパーツ数" {#help-replicatedfetch-number-of-data-parts-being-fetched-from-replica} # TYPE "ReplicatedFetch" counter {#type-replicatedfetch-counter} "ReplicatedFetch" 0 - - # HELP "ReplicatedSend" "レプリカへ送信中のデータパーツ数" {#help-replicatedsend-number-of-data-parts-being-sent-to-replicas} # TYPE "ReplicatedSend" counter {#type-replicatedsend-counter} @@ -826,7 +797,6 @@ $ curl -v 'http://localhost:8123/predefined_query' 例: ``` - ```yaml @@ -918,7 +888,6 @@ max_final_threads 2 `content_type` の代わりに `http_response_headers` を使用して Content-Type を設定できます。 - ```yaml @@ -1006,7 +975,6 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler' クライアントに送信したファイル内の内容を確認するには、次のようにします。 - ```yaml @@ -1103,7 +1071,6 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler' ``` - ## HTTP 
レスポンスヘッダー {#http-response-headers} ClickHouse では、設定可能なあらゆる種類のハンドラーに適用できるカスタム HTTP レスポンスヘッダーを設定できます。これらのヘッダーは、ヘッダー名とその値を表すキーと値のペアを指定する `http_response_headers` 設定を使用して設定します。この機能は、カスタムセキュリティヘッダーや CORS ポリシー、その他 ClickHouse の HTTP インターフェイス全体で必要となる HTTP ヘッダー要件を実装するのに特に有用です。 @@ -1140,7 +1107,6 @@ ClickHouse では、設定可能なあらゆる種類のハンドラーに適用 ``` - ## HTTP ストリーミング中の例外発生時における有効な JSON/XML レスポンス {#valid-output-on-exception-http-streaming} クエリが HTTP 経由で実行されている間に、データの一部がすでに送信された後で例外が発生することがあります。通常、例外はプレーンテキストとしてクライアントに送信されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/mysql.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/mysql.md index 0ad531b1944..36fe12803f5 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/mysql.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/mysql.md @@ -13,7 +13,6 @@ import mysql1 from '@site/static/images/interfaces/mysql1.png'; import mysql2 from '@site/static/images/interfaces/mysql2.png'; import mysql3 from '@site/static/images/interfaces/mysql3.png'; - # MySQL インターフェイス {#mysql-interface} ClickHouse は MySQL ワイヤープロトコルをサポートしています。これにより、ネイティブな ClickHouse コネクタを持たない一部のクライアントでも MySQL プロトコルを代わりに利用でき、次の BI ツールで動作検証が行われています: @@ -36,8 +35,6 @@ ClickHouse は MySQL ワイヤープロトコルをサポートしています この設定は無効化できず、まれなエッジケースでは、ClickHouse の通常のクエリインターフェイスと MySQL クエリインターフェイスに送信されたクエリの間で挙動が異なる原因となる場合があります。 :::: - - ## ClickHouse Cloud での MySQL インターフェイスの有効化 {#enabling-the-mysql-interface-on-clickhouse-cloud} 1. 
ClickHouse Cloud サービスを作成したら、`Connect` ボタンをクリックします。 @@ -62,8 +59,6 @@ ClickHouse は MySQL ワイヤープロトコルをサポートしています - - ## ClickHouse Cloud で複数の MySQL ユーザーを作成する {#creating-multiple-mysql-users-in-clickhouse-cloud} デフォルトでは、組み込みの `mysql4` ユーザーが存在し、このユーザーは `default` ユーザーと同じパスワードを使用します。`` 部分は、ClickHouse Cloud ホスト名の先頭のセグメントです。この形式は、安全な接続を実装しているものの [TLS ハンドシェイクで SNI 情報を提供しない](https://www.cloudflare.com/learning/ssl/what-is-sni) ツールと連携するために必要であり、ユーザー名に追加のヒントがないと内部ルーティングができないためです(MySQL コンソールクライアントはそのようなツールの一例です)。 @@ -116,7 +111,6 @@ ERROR 2013 (HY000): MySQLサーバーへの接続が切断されました at 're この場合は、ユーザー名が `mysql4<subdomain>_<username>` という形式([上記](#creating-multiple-mysql-users-in-clickhouse-cloud)で説明したとおり)になっていることを確認してください。 - ## セルフマネージド ClickHouse での MySQL インターフェイスの有効化 {#enabling-the-mysql-interface-on-self-managed-clickhouse} サーバーの構成ファイルに [mysql_port](../operations/server-configuration-parameters/settings.md#mysql_port) 設定を追加します。たとえば、`config.d/` [ディレクトリ](../operations/configuration-files) 内の新しい XML ファイルでこのポートを定義できます。 @@ -133,7 +127,6 @@ ClickHouse サーバーを起動し、`Listening for MySQL compatibility protoco {} Application: MySQL互換プロトコルでリッスン中: 127.0.0.1:9004 ``` - ## MySQL を ClickHouse に接続する {#connect-mysql-to-clickhouse} 次のコマンドは、MySQL クライアント `mysql` から ClickHouse へ接続する方法を示します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md index 39f53f45962..a7a003f8840 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md @@ -9,14 +9,10 @@ doc_type: 'reference' ClickHouse は、サポートされているほぼすべての[入力フォーマット](formats.md)において、入力データの構造を自動的に判定できます。 このドキュメントでは、スキーマ推論がいつ使用されるか、各種入力フォーマットでどのように動作するか、およびどの設定によって制御できるかについて説明します。 - - ## 使用方法 {#usage} スキーマ推論は、ClickHouse が特定のデータ形式でデータを読み取る必要があるものの、その構造が不明な場合に使用されます。 - - ## テーブル関数 
[file](../sql-reference/table-functions/file.md)、[s3](../sql-reference/table-functions/s3.md)、[url](../sql-reference/table-functions/url.md)、[hdfs](../sql-reference/table-functions/hdfs.md)、[azureBlobStorage](../sql-reference/table-functions/azureBlobStorage.md)。 {#table-functions-file-s3-url-hdfs-azureblobstorage} これらのテーブル関数には、入力データの構造を表すオプションの引数 `structure` があります。この引数が指定されていないか、`auto` に設定されている場合は、構造がデータから自動的に推論されます。 @@ -64,7 +60,6 @@ DESCRIBE file('hobbies.jsonl') └─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## テーブルエンジン [File](../engines/table-engines/special/file.md)、[S3](../engines/table-engines/integrations/s3.md)、[URL](../engines/table-engines/special/url.md)、[HDFS](../engines/table-engines/integrations/hdfs.md)、[azureBlobStorage](../engines/table-engines/integrations/azureBlobStorage.md) {#table-engines-file-s3-url-hdfs-azureblobstorage} `CREATE TABLE` クエリでカラムのリストを指定しない場合、テーブルの構造はデータから自動的に推論されます。 @@ -107,7 +102,6 @@ DESCRIBE TABLE hobbies └─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## clickhouse-local {#clickhouse-local} `clickhouse-local` には、入力データの構造を指定するためのオプションのパラメータ `-S/--structure` があります。このパラメータが指定されていないか、`auto` に設定されている場合、構造はデータから自動的に推論されます。 @@ -138,7 +132,6 @@ clickhouse-local --file='hobbies.jsonl' --table='hobbies' --query='SELECT * FROM 4 47 Brayan ['movies','skydiving'] ``` - ## 挿入先テーブルの構造を使用する {#using-structure-from-insertion-table} テーブル関数 `file/s3/url/hdfs` を使用してテーブルにデータを挿入する場合、 @@ -247,7 +240,6 @@ INSERT INTO hobbies4 SELECT id, empty(hobbies) ? NULL : hobbies[1] FROM file(hob この場合、テーブルに挿入するための `SELECT` クエリ内でカラム `hobbies` に対していくつかの操作が実行されているため、ClickHouse は挿入元テーブルの構造を利用できず、スキーマ推論が行われます。 - ## スキーマ推論キャッシュ {#schema-inference-cache} ほとんどの入力フォーマットでは、スキーマ推論のために一部のデータを読み取り、その構造を判定しますが、この処理には一定の時間がかかります。 @@ -269,8 +261,6 @@ INSERT INTO hobbies4 SELECT id, empty(hobbies) ? 
NULL : hobbies[1] FROM file(hob S3 上のサンプルデータセット `github-2022.ndjson.gz` の構造推論を試し、スキーマ推論キャッシュがどのように動作するか確認してみます: - - ```sql DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/github/github-2022.ndjson.gz') ``` @@ -414,7 +404,6 @@ SELECT count() FROM system.schema_inference_cache WHERE storage='S3' └─────────┘ ``` - ## テキスト形式 {#text-formats} テキスト形式では、ClickHouse はデータを 1 行ずつ読み取り、フォーマットに従ってカラム値を抽出し、その後、再帰的なパーサーとヒューリスティクスを用いて各値の型を判定します。スキーマ推論時にデータから読み取られる最大行数および最大バイト数は、設定 `input_format_max_rows_to_read_for_schema_inference`(デフォルト 25000)および `input_format_max_bytes_to_read_for_schema_inference`(デフォルト 32MB)によって制御されます。 @@ -484,7 +473,6 @@ DESC format(JSONEachRow, '{"arr" : [null, 42, null]}') └──────┴────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - 配列に異なる型の値が含まれていて、`input_format_json_infer_array_of_dynamic_from_array_of_different_types` 設定が有効になっている場合(デフォルトで有効)、その配列は `Array(Dynamic)` 型になります。 ```sql @@ -551,7 +539,6 @@ DESC format(JSONEachRow, $$ JSON から、値が Map 型の値と同じ型で揃っているオブジェクトを Map 型として読み取ることができます。 注意: これは、設定 `input_format_json_read_objects_as_strings` と `input_format_json_try_infer_named_tuples_from_objects` が無効になっている場合にのみ有効です。 - ```sql SET input_format_json_read_objects_as_strings = 0, input_format_json_try_infer_named_tuples_from_objects = 0; DESC format(JSONEachRow, '{"map" : {"key1" : 42, "key2" : 24, "key3" : 4}}') @@ -638,7 +625,6 @@ DESC format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello"}}, {"obj" : {"a" : 4 結果: - ```response ┌─name─┬─type───────────────────────────────────────────────────────────────────────────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ │ obj │ Tuple(a Nullable(Int64), b Nullable(String), c Array(Nullable(Int64)), d Tuple(e Nullable(Int64))) │ │ │ │ │ │ @@ -713,7 +699,6 @@ SELECT * FROM format(JSONEachRow, '{"obj" : {"a" : 42}}, {"obj" : {"a" : {"b" : 注意: この設定の有効化は、設定 
`input_format_json_try_infer_named_tuples_from_objects` が無効になっている場合にのみ効果があります。 - ```sql SET input_format_json_read_objects_as_strings = 1, input_format_json_try_infer_named_tuples_from_objects = 0; DESC format(JSONEachRow, $$ @@ -815,7 +800,6 @@ SELECT arr, toTypeName(arr), JSONExtractArrayRaw(arr)[3] from format(JSONEachRow ##### input_format_json_infer_incomplete_types_as_strings {#input_format_json_infer_incomplete_types_as_strings} - この設定を有効にすると、スキーマ推論時に、サンプルデータ内で `Null`・`{}`・`[]` のみを含む JSON キーに `String` 型を使用できるようになります。 JSON フォーマットでは、関連する設定がすべて有効になっている場合(既定でいずれも有効)には、任意の値を String として読み取ることができるため、スキーマ推論時に `Cannot determine type for column 'column_name' by first 25000 rows of data, most likely this column contains only Nulls or empty Arrays/Maps` のようなエラーが発生する状況でも、型が不明なキーに対して String 型を使用することでこれを回避できます。 @@ -879,7 +863,6 @@ DESC format(CSV, 'Hello world!,World hello!') 日付、日時: - ```sql DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00","2022-01-01 00:00:00.000"') ``` @@ -952,7 +935,6 @@ DESC format(CSV, $$"[{'key1' : [[42, 42], []], 'key2' : [[null], [42]]}]"$$) └──────┴───────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - データに null しか含まれず、引用符で囲まれた値の型を ClickHouse が判別できない場合、ClickHouse はそれを String 型として扱います。 ```sql @@ -1058,7 +1040,6 @@ DESC format(CSV, '42,42.42'); └──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ### TSV/TSKV {#tsv-tskv} TSV/TSKV 形式では、ClickHouse はタブ区切りに従って行から列の値を抽出し、その後、再帰パーサーを使って抽出した値を解析し、最も適切な型を決定します。型を決定できない場合、ClickHouse はこの値を String として扱います。 @@ -1112,7 +1093,6 @@ DESC format(TSV, '2020-01-01 2020-01-01 00:00:00 2022-01-01 00:00:00.000') └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - 配列: ```sql @@ -1185,7 +1165,6 @@ DESC format(TSV, $$[{'key1' : [(42, 'Hello'), (24, NULL)], 'key2' : [(NULL, ',') 
└──────┴─────────────────────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - データに null しか含まれておらず、ClickHouse が型を決定できない場合、ClickHouse はそれを String として扱います。 ```sql @@ -1275,7 +1254,6 @@ Values 形式では、ClickHouse は行から列の値を抽出し、その後 **例:** - 整数、浮動小数点数、ブール値、文字列: ```sql @@ -1354,7 +1332,6 @@ DESC format(Values, $$({'key1' : 42, 'key2' : 24})$$) └──────┴──────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ネストされた配列、タプル、マップ: ```sql @@ -1430,7 +1407,6 @@ $$) ヘッダーの自動検出例(`input_format_custom_detect_header` が有効化されている場合): - ```sql SET format_custom_row_before_delimiter = '', format_custom_row_after_delimiter = '\n', @@ -1510,7 +1486,6 @@ SET format_regexp = '^Line: value_1=(.+?), value_2=(.+?), value_3=(.+?)', format_regexp_escaping_rule = 'CSV' ``` - DESC format(Regexp, $$Line: value_1=42, value_2="Some string 1", value_3="[1, NULL, 3]" Line: value_1=2, value_2="Some string 2", value_3="[4, 5, NULL]"$$) @@ -1577,7 +1552,6 @@ DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : nul #### schema_inference_make_columns_nullable $ {#schema-inference-make-columns-nullable} - NULL 許容性に関する情報を持たないフォーマットに対して、スキーマ推論時に推論された型を `Nullable` にするかどうかを制御します。設定可能な値: * 0 - 推論された型は決して `Nullable` になりません。 @@ -1644,7 +1618,6 @@ DESC format(JSONEachRow, $$ └─────────┴───────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - #### input_format_try_infer_integers {#input-format-try-infer-integers} :::note @@ -1723,7 +1696,6 @@ DESC format(JSONEachRow, $$ **例** - ```sql SET input_format_try_infer_datetimes = 0; DESC format(JSONEachRow, $$ @@ -1794,7 +1766,6 @@ DESC format(JSONEachRow, $$ 注記: スキーマ推論時の日時のパースは、設定 [date_time_input_format](/operations/settings/settings-formats.md#date_time_input_format) に従います。 - #### input_format_try_infer_dates {#input-format-try-infer-dates} 有効にすると、ClickHouse 
はテキストフォーマットに対するスキーマ推論時に、文字列フィールドから型 `Date` を推定しようとします。 @@ -1869,7 +1840,6 @@ $$) └──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## 自己記述フォーマット {#self-describing-formats} 自己記述フォーマットは、データ自体の中にデータ構造に関する情報を含むフォーマットです。 @@ -1959,7 +1929,6 @@ $$) Avro 形式では、ClickHouse はデータからスキーマを読み取り、以下の型の対応付けに基づいて ClickHouse のスキーマに変換します。 - | Avro データ型 | ClickHouse データ型 | |------------------------------------|--------------------------------------------------------------------------------| | `boolean` | [Bool](../sql-reference/data-types/boolean.md) | @@ -2013,8 +1982,6 @@ Parquet フォーマットでは、ClickHouse はデータからスキーマを Arrow フォーマットでは、ClickHouse はデータからスキーマを読み取り、次の型対応に従って ClickHouse のスキーマに変換します。 - - | Arrow データ型 | ClickHouse データ型 | |---------------------------------|---------------------------------------------------------| | `BOOL` | [Bool](../sql-reference/data-types/boolean.md) | @@ -2067,8 +2034,6 @@ ORC 形式では、ClickHouse はスキーマをデータから読み取り、 Native 形式は ClickHouse 内部で使用され、スキーマをデータ内に含みます。 スキーマ推論では、ClickHouse は変換を一切行わずにデータからスキーマを読み取ります。 - - ## 外部スキーマを用いるフォーマット {#formats-with-external-schema} この種のフォーマットでは、特定のスキーマ言語で記述された、データを表すスキーマを別ファイルで用意する必要があります。 @@ -2115,8 +2080,6 @@ CapnProto フォーマットのスキーマ推論では、ClickHouse は次の | `struct` | [Tuple](../sql-reference/data-types/tuple.md) | | `union(T, Void)`, `union(Void, T)` | [Nullable(T)](../sql-reference/data-types/nullable.md) | - - ## 強い型付けのバイナリ形式 {#strong-typed-binary-formats} この種の形式では、シリアル化された各値にはその型(および場合によっては名前)に関する情報が含まれますが、テーブル全体に関する情報は含まれません。 @@ -2163,8 +2126,6 @@ BSONEachRow では、各データ行は BSON ドキュメントとして表現 デフォルトでは、推論されたすべての型は `Nullable` でラップされますが、これは設定 `schema_inference_make_columns_nullable` を使用して変更できます。 - - ## 固定スキーマを持つフォーマット {#formats-with-constant-schema} このようなフォーマットのデータは、常に同じスキーマを持ちます。 @@ -2217,7 +2178,6 @@ DESC format(JSONAsObject, '{"x" : 42, "y" : "Hello, World!"}'); └──────┴──────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## スキーマ推論モード 
{#schema-inference-modes} データファイルの集合からのスキーマ推論は、`default` と `union` の 2 つのモードで動作します。 @@ -2328,7 +2288,6 @@ Union モードでは、ClickHouse はファイルごとに異なるスキーマ * ClickHouse がファイルの 1 つからスキーマを推論できない場合は、例外がスローされます。 * ファイル数が多い場合、すべてのファイルからスキーマを読み取る処理に多くの時間がかかる可能性があります。 - ## 自動フォーマット検出 {#automatic-format-detection} データのフォーマットが指定されておらず、かつファイル拡張子からも判定できない場合、ClickHouse はファイルの内容に基づいてフォーマットの検出を試みます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md index d181f593088..baaa8ad3922 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md @@ -7,12 +7,8 @@ title: 'サードパーティ製ビジュアルインターフェイス' doc_type: 'reference' --- - - # サードパーティ開発のビジュアルインターフェース {#visual-interfaces-from-third-party-developers} - - ## オープンソース {#open-source} ### agx {#agx} @@ -117,8 +113,6 @@ Features: ### LightHouse {#lighthouse} - - [LightHouse](https://github.com/VKCOM/lighthouse) は、ClickHouse 向けの軽量な Web インターフェイスです。 特徴: @@ -201,8 +195,6 @@ ClickHouse data source プラグインにより、ClickHouse をバックエン ### MindsDB Studio {#mindsdb} - - [MindsDB](https://mindsdb.com/) は、ClickHouse を含むデータベース向けのオープンソースの AI レイヤーであり、最先端の機械学習モデルを容易に開発・学習・デプロイできるようにします。MindsDB Studio(GUI)を使用すると、データベースから新しいモデルを学習させ、モデルによる予測結果を解釈し、潜在的なデータバイアスを特定し、Explainable AI 機能を用いてモデル精度を評価および可視化することで、機械学習モデルをより迅速に適応・チューニングできます。 ### DBM {#dbm} @@ -303,8 +295,6 @@ ClickHouse data source プラグインにより、ClickHouse をバックエン ### CKibana {#ckibana} - - [CKibana](https://github.com/TongchengOpenSource/ckibana) は、ネイティブな Kibana UI を使用して ClickHouse のデータを手軽に検索・探索・可視化できる軽量なサービスです。 機能: @@ -329,8 +319,6 @@ ClickHouse data source プラグインにより、ClickHouse をバックエン [Telescope ソースコード](https://github.com/iamtelescope/telescope) · [ライブデモ](https://demo.iamtelescope.net) - - ## 商用 {#commercial} ### DataGrip {#datagrip} @@ -411,8 +399,6 @@ SeekTable は、個人/個人用途での利用については[無料](https:/ 
[TABLUM.IO](https://tablum.io/) は、ETL と可視化のためのオンラインクエリおよび分析ツールです。ClickHouse へ接続し、柔軟な SQL コンソール経由でデータをクエリできるほか、静的ファイルやサードパーティサービスからデータをロードすることもできます。TABLUM.IO は、クエリ結果データをチャートやテーブルとして可視化できます。 - - 機能: - ETL: 一般的なデータベース、ローカルおよびリモートファイル、API 呼び出しからのデータのロード。 - シンタックスハイライトとビジュアルクエリビルダーを備えた多機能 SQL コンソール。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md index 9196ea27615..f54e9db764f 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md @@ -7,12 +7,8 @@ title: 'サードパーティ製プロキシサーバー' doc_type: 'reference' --- - - # サードパーティ開発のプロキシサーバー {#proxy-servers-from-third-party-developers} - - ## chproxy {#chproxy} [chproxy](https://github.com/Vertamedia/chproxy) は、ClickHouse データベース向けの HTTP プロキシ兼ロードバランサーです。 @@ -25,8 +21,6 @@ doc_type: 'reference' Go で実装されています。 - - ## KittenHouse {#kittenhouse} [KittenHouse](https://github.com/VKCOM/kittenhouse) は、アプリケーション側で INSERT データをバッファリングすることができない、あるいは不便な場合に、ClickHouse とアプリケーションサーバーとの間に位置するローカルプロキシとして設計されています。 @@ -39,8 +33,6 @@ Go で実装されています。 Go で実装されています。 - - ## ClickHouse-Bulk {#clickhouse-bulk} [ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) は、ClickHouse への INSERT をまとめて処理するシンプルなコレクターです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/intro.md b/i18n/jp/docusaurus-plugin-content-docs/current/intro.md index 79f75377436..392399021ff 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/intro.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/intro.md @@ -14,7 +14,6 @@ import Image from '@theme/IdealImage'; ClickHouse® は、オンライン分析処理 (OLAP) 向けの高性能な列指向SQLデータベース管理システム (DBMS) です。[オープンソースソフトウェア](https://github.com/ClickHouse/ClickHouse) としても、[クラウドサービス](https://clickhouse.com/cloud) としても提供されています。 - ## アナリティクスとは何か {#what-are-analytics} アナリティクスは OLAP(Online Analytical 
Processing)とも呼ばれ、巨大なデータセットに対して、集約処理・文字列処理・算術演算などの複雑な計算を行う SQL クエリを指します。 @@ -23,8 +22,6 @@ ClickHouse® は、オンライン分析処理 (OLAP) 向けの高性能な列 多くのユースケースでは、[アナリティクスクエリは「リアルタイム」である必要があります](https://clickhouse.com/engineering-resources/what-is-real-time-analytics)。つまり、1 秒未満で結果を返す必要があります。 - - ## 行指向ストレージ vs. 列指向ストレージ {#row-oriented-vs-column-oriented-storage} このレベルのパフォーマンスは、データの「指向性」を適切に選択した場合にのみ達成できます。 @@ -65,51 +62,36 @@ LIMIT 8; **カラム指向 DBMS** - 各カラムの値がディスク上で連続して格納されているため、上記のクエリを実行しても不要なデータが読み込まれることはありません。 ブロック単位でのストレージとディスクからメモリへの転送は、分析クエリのデータアクセスパターンに合わせて最適化されているため、クエリに必要なカラムだけがディスクから読み出され、未使用データに対する不要な I/O を回避できます。これは、全行(不要なカラムを含む)を読み出す行指向ストレージと比較して [大幅に高速です](https://benchmark.clickhouse.com/)。 - - ## データレプリケーションと整合性 {#data-replication-and-integrity} ClickHouse は、非同期マルチマスターレプリケーション方式を使用して、データが複数ノードに冗長に保存されるようにしています。利用可能な任意のレプリカに書き込まれた後、残りのすべてのレプリカはバックグラウンドでそれぞれのコピーを取得します。システムは、異なるレプリカ間で同一のデータを維持します。ほとんどの障害からの復旧は自動的に行われ、複雑なケースでは半自動的に行われます。 - - ## ロールベースアクセス制御 {#role-based-access-control} ClickHouse は SQL クエリを使用してユーザーアカウントを管理し、ANSI SQL 標準や一般的なリレーショナルデータベース管理システムと同様のロールベースアクセス制御を構成できるようになっています。 - - ## SQL サポート {#sql-support} ClickHouse は、[SQL に基づく宣言型クエリ言語](/sql-reference) をサポートしており、多くの点で ANSI SQL 標準と同一です。サポートされているクエリ句には、[GROUP BY](/sql-reference/statements/select/group-by)、[ORDER BY](/sql-reference/statements/select/order-by)、[FROM](/sql-reference/statements/select/from) におけるサブクエリ、[JOIN](/sql-reference/statements/select/join) 句、[IN](/sql-reference/operators/in) 演算子、[ウィンドウ関数](/sql-reference/window-functions)、およびスカラーサブクエリが含まれます。 - - ## 近似計算 {#approximate-calculation} ClickHouse は、精度とパフォーマンスをトレードオフするための手段を提供します。たとえば、一部の集約関数は、異なる値の個数や中央値、分位数を近似的に計算します。また、データのサンプルに対してクエリを実行し、概算結果を素早く算出することもできます。さらに、すべてのキーに対してではなく、キー数を制限して集約を実行することも可能です。キーの分布の偏り具合によっては、厳密な計算に比べてはるかに少ないリソースで、十分に実用的な精度の結果を得られる場合があります。 - - ## アダプティブ結合アルゴリズム {#adaptive-join-algorithms} ClickHouse は状況に応じて結合アルゴリズムを選択します。まず高速なハッシュ結合を試し、大きなテーブルが複数存在する場合はマージ結合に切り替えます。 - - ## 優れたクエリパフォーマンス {#superior-query-performance} 
ClickHouse は、非常に高速なクエリパフォーマンスで広く知られています。 ClickHouse がこれほど高速な理由については、[Why is ClickHouse fast?](/concepts/why-clickhouse-is-so-fast.mdx) ガイドを参照してください。 - - - ## 関連リソース {#related-resources} - [ClickHouse の財務関数を解説した動画](https://www.youtube.com/watch?v=BePLPVa0w_o) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md index 6c6032dcf05..4c3782562c5 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md @@ -6,16 +6,12 @@ title: 'Geohash を扱うための関数' doc_type: 'reference' --- - - ## Geohash {#geohash} [Geohash](https://en.wikipedia.org/wiki/Geohash) はジオコードシステムで、地球の表面を格子状のグリッドセル(バケット)に分割し、それぞれのセルを英数字からなる短い文字列としてエンコードします。これは階層的なデータ構造であり、Geohash 文字列が長くなるほど、地理的位置をより高い精度で表現できます。 地理座標を Geohash 文字列に手動で変換する必要がある場合は、[geohash.org](http://geohash.co/) を利用できます。 - - ## geohashEncode {#geohashencode} 緯度と経度を [geohash](#geohash) 形式の文字列にエンコードします。 @@ -58,7 +54,6 @@ SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res; └──────────────┘ ``` - ## geohashDecode {#geohashdecode} [geohash](#geohash) でエンコードされた任意の文字列を緯度・経度にデコードします。 @@ -89,7 +84,6 @@ SELECT geohashDecode('ezs42') AS res; └─────────────────────────────────┘ ``` - ## geohashesInBox {#geohashesinbox} 指定されたボックスの内部および境界と交差する位置にある、指定した精度の [geohash](#geohash) でエンコードされた文字列の配列を返します。基本的には、2 次元グリッドを 1 次元の配列に平坦化したものです。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md index 2cde3254863..d9b2cb0101d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md @@ -6,8 +6,6 @@ title: 'H3 インデックスを操作する関数' doc_type: 'reference' --- - - ## 
H3 インデックス {#h3-index} [H3](https://h3geo.org/) は、地球の表面を同じ大きさの六角形セルのグリッドに分割する地理空間インデックスシステムです。このシステムは階層構造になっており、最上位レベルの各六角形(「親」)は、同じ形状でより小さい 7 個の六角形(「子」)に分割され、その先も同様に続きます。 @@ -20,8 +18,6 @@ H3 インデックスは主に、位置情報のバケット分けやその他 H3 システムの詳細な説明は [Uber Engineering サイト](https://www.uber.com/blog/h3/) にあります。 - - ## h3IsValid {#h3isvalid} 数値が有効な [H3](#h3-index) インデックスかどうかを検証します。 @@ -57,7 +53,6 @@ SELECT h3IsValid(630814730351855103) AS h3IsValid; └───────────┘ ``` - ## h3GetResolution {#h3getresolution} 指定された [H3](#h3-index) インデックスの解像度を返します。 @@ -93,7 +88,6 @@ SELECT h3GetResolution(639821929606596015) AS resolution; └────────────┘ ``` - ## h3EdgeAngle {#h3edgeangle} [H3](#h3-index) 六角形の辺の平均角度をグラード単位で計算します。 @@ -128,7 +122,6 @@ SELECT h3EdgeAngle(10) AS edgeAngle; └───────────────────────┘ ``` - ## h3EdgeLengthM {#h3edgelengthm} [H3](#h3-index) 六角形の辺の平均長をメートル単位で計算します。 @@ -163,7 +156,6 @@ SELECT h3EdgeLengthM(15) AS edgeLengthM; └─────────────┘ ``` - ## h3EdgeLengthKm {#h3edgelengthkm} [H3](#h3-index) 六角形セルの一辺の平均の長さをキロメートル単位で計算します。 @@ -198,7 +190,6 @@ SELECT h3EdgeLengthKm(15) AS edgeLengthKm; └──────────────┘ ``` - ## geoToH3 {#geotoh3} 指定した解像度で、`(lat, lon)` を表す [H3](#h3-index) インデックスを返します。 @@ -238,7 +229,6 @@ SELECT geoToH3(55.71290588, 37.79506683, 15) AS h3Index; └────────────────────┘ ``` - ## h3ToGeo {#h3togeo} 指定された [H3](#h3-index) インデックスに対応する中心点の緯度と経度を返します。 @@ -275,7 +265,6 @@ SELECT h3ToGeo(644325524701193974) AS coordinates; └───────────────────────────────────────┘ ``` - ## h3ToGeoBoundary {#h3togeoboundary} 指定された H3 インデックスの境界を表す `(lat, lon)` のペアからなる配列を返します。 @@ -310,7 +299,6 @@ SELECT h3ToGeoBoundary(644325524701193974) AS coordinates; └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3kRing {#h3kring} 指定された六角形から半径 `k` の範囲内にあるすべての [H3](#h3-index) 六角形を、ランダムな順序で列挙します。 @@ 
-352,7 +340,6 @@ SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index; └────────────────────┘ ``` - ## h3PolygonToCells {#h3polygontocells} 指定した解像度で、指定されたジオメトリ(リングまたは(マルチ)ポリゴン)の内部に含まれる六角形セルを返します。 @@ -397,7 +384,6 @@ SELECT h3PolygonToCells([(-122.4089866999972145,37.813318999983238),(-122.354473 └────────────────────┘ ``` - ## h3GetBaseCell {#h3getbasecell} [H3](#h3-index) インデックスのベースセル番号を返します。 @@ -432,7 +418,6 @@ SELECT h3GetBaseCell(612916788725809151) AS basecell; └──────────┘ ``` - ## h3HexAreaM2 {#h3hexaream2} 指定した解像度における六角形の平均面積(平方メートル単位)を返します。 @@ -467,7 +452,6 @@ SELECT h3HexAreaM2(13) AS area; └──────┘ ``` - ## h3HexAreaKm2 {#h3hexareakm2} 指定した解像度における六角形の平均面積を平方キロメートル単位で返します。 @@ -502,7 +486,6 @@ SELECT h3HexAreaKm2(13) AS area; └───────────┘ ``` - ## h3IndexesAreNeighbors {#h3indexesareneighbors} 指定された [H3](#h3-index) インデックス同士が隣接しているかどうかを返します。 @@ -539,7 +522,6 @@ SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n; └───┘ ``` - ## h3ToChildren {#h3tochildren} 指定された [H3](#h3-index) インデックスに対応する子インデックスの配列を返します。 @@ -575,7 +557,6 @@ SELECT h3ToChildren(599405990164561919, 6) AS children; └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3ToParent {#h3toparent} 指定された [H3](#h3-index) インデックスを含む親(より粗い解像度の)インデックスを返します。 @@ -611,7 +592,6 @@ SELECT h3ToParent(599405990164561919, 3) AS parent; └────────────────────┘ ``` - ## h3ToString {#h3tostring} インデックスを表す `H3Index` を文字列表現に変換します。 @@ -644,7 +624,6 @@ SELECT h3ToString(617420388352917503) AS h3_string; └─────────────────┘ ``` - ## stringToH3 {#stringtoh3} 文字列表現を `H3Index` (UInt64) 表現に変換します。 @@ -679,7 +658,6 @@ SELECT stringToH3('89184926cc3ffff') AS index; └────────────────────┘ ``` - ## h3GetResolution {#h3getresolution-1} [H3](#h3-index) インデックスの解像度を返します。 @@ -714,7 +692,6 @@ SELECT h3GetResolution(617420388352917503) AS res; └─────┘ ``` - ## h3IsResClassIII {#h3isresclassiii} [H3](#h3-index) 
インデックスの解像度が Class III の向きにあるかどうかを返します。 @@ -750,7 +727,6 @@ SELECT h3IsResClassIII(617420388352917503) AS res; └─────┘ ``` - ## h3IsPentagon {#h3ispentagon} この [H3](#h3-index) インデックスが五角形のセルを表しているかどうかを返します。 @@ -786,7 +762,6 @@ SELECT h3IsPentagon(644721767722457330) AS pentagon; └──────────┘ ``` - ## h3GetFaces {#h3getfaces} 指定された [H3](#h3-index) インデックスと交差する正二十面体の面を返します。 @@ -821,7 +796,6 @@ SELECT h3GetFaces(599686042433355775) AS faces; └───────┘ ``` - ## h3CellAreaM2 {#h3cellaream2} 指定された H3 インデックスに対応するセルの正確な面積を、平方メートル単位で返します。 @@ -856,7 +830,6 @@ SELECT h3CellAreaM2(579205133326352383) AS area; └────────────────────┘ ``` - ## h3CellAreaRads2 {#h3cellarearads2} 指定された入力 H3 インデックスに対応する特定のセルの正確な面積を、平方ラジアンで返します。 @@ -891,7 +864,6 @@ SELECT h3CellAreaRads2(579205133326352383) AS area; └─────────────────────┘ ``` - ## h3ToCenterChild {#h3tocenterchild} 指定した解像度で、指定した [H3](#h3-index) インデックスに含まれる中心の(より細かい) [H3](#h3-index) インデックスを返します。 @@ -927,7 +899,6 @@ SELECT h3ToCenterChild(577023702256844799,1) AS centerToChild; └────────────────────┘ ``` - ## h3ExactEdgeLengthM {#h3exactedgelengthm} 入力された h3 インデックスで表される単方向エッジの正確な長さをメートル単位で返します。 @@ -962,7 +933,6 @@ SELECT h3ExactEdgeLengthM(1310277011704381439) AS exactEdgeLengthM;; └────────────────────┘ ``` - ## h3ExactEdgeLengthKm {#h3exactedgelengthkm} 入力された h3 インデックスに対応する一方向エッジの厳密な辺長を、キロメートル単位で返します。 @@ -997,7 +967,6 @@ SELECT h3ExactEdgeLengthKm(1310277011704381439) AS exactEdgeLengthKm;; └────────────────────┘ ``` - ## h3ExactEdgeLengthRads {#h3exactedgelengthrads} 指定された h3 インデックスで表される単方向エッジの正確なエッジ長をラジアン単位で返します。 @@ -1032,7 +1001,6 @@ SELECT h3ExactEdgeLengthRads(1310277011704381439) AS exactEdgeLengthRads;; └──────────────────────┘ ``` - ## h3NumHexagons {#h3numhexagons} 指定した解像度における一意な H3 インデックスの数を返します。 @@ -1067,7 +1035,6 @@ SELECT h3NumHexagons(3) AS numHexagons; └─────────────┘ ``` - ## h3PointDistM {#h3pointdistm} GeoCoord の点(緯度/経度)ペア間の「大円」または「haversine」距離をメートル単位で返します。 @@ -1103,7 +1070,6 @@ SELECT h3PointDistM(-10.0 ,0.0, 10.0, 
0.0) AS h3PointDistM; └───────────────────┘ ``` - ## h3PointDistKm {#h3pointdistkm} GeoCoord ポイント(緯度/経度)ペア間の「大円」または「ハバーサイン(haversine)」距離をキロメートル単位で返します。 @@ -1139,7 +1105,6 @@ SELECT h3PointDistKm(-10.0 ,0.0, 10.0, 0.0) AS h3PointDistKm; └───────────────────┘ ``` - ## h3PointDistRads {#h3pointdistrads} GeoCoord 座標(緯度/経度)のペア間の「大円距離」または「haversine 距離」をラジアン単位で返します。 @@ -1175,7 +1140,6 @@ SELECT h3PointDistRads(-10.0 ,0.0, 10.0, 0.0) AS h3PointDistRads; └────────────────────┘ ``` - ## h3GetRes0Indexes {#h3getres0indexes} 解像度0のすべての H3 インデックスを要素とする配列を返します。 @@ -1206,7 +1170,6 @@ SELECT h3GetRes0Indexes AS indexes ; └─────────────────────────────────────────────┘ ``` - ## h3GetPentagonIndexes {#h3getpentagonindexes} 指定した解像度におけるすべての五角形の H3 インデックスを返します。 @@ -1241,7 +1204,6 @@ SELECT h3GetPentagonIndexes(3) AS indexes; └────────────────────────────────────────────────────────────────┘ ``` - ## h3Line {#h3line} 指定された 2 つのインデックス間にあるインデックスの列を返します。 @@ -1277,7 +1239,6 @@ h3Line(start,end) └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3Distance {#h3distance} 指定された 2 つのインデックス間の距離を、グリッドセル単位で返します。 @@ -1315,7 +1276,6 @@ h3Distance(start,end) └──────────┘ ``` - ## h3HexRing {#h3hexring} 指定された origin h3Index を中心とし、距離 k の六角形リングに含まれるインデックスを返します。 @@ -1353,7 +1313,6 @@ h3HexRing(index, k) └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdge {#h3getunidirectionaledge} 指定された始点と終点に基づいて単方向エッジの H3 インデックスを返し、エラーが発生した場合は 0 を返します。 @@ -1389,7 +1348,6 @@ h3GetUnidirectionalEdge(originIndex, destinationIndex) └─────────────────────┘ ``` - ## h3UnidirectionalEdgeIsValid {#h3unidirectionaledgeisvalid} 指定された H3Index が有効な単方向エッジインデックスかどうかを判定します。単方向エッジであれば 1、それ以外の場合は 0 を返します。 @@ -1425,7 +1383,6 @@ h3UnidirectionalEdgeisValid(index) └────────────┘ ``` - ## h3GetOriginIndexFromUnidirectionalEdge 
{#h3getoriginindexfromunidirectionaledge} 単方向エッジの H3Index から始点の六角形インデックスを返します。 @@ -1460,7 +1417,6 @@ h3GetOriginIndexFromUnidirectionalEdge(edge) └────────────────────┘ ``` - ## h3GetDestinationIndexFromUnidirectionalEdge {#h3getdestinationindexfromunidirectionaledge} 単方向エッジを表す H3Index から、終点の六角形インデックスを返します。 @@ -1495,7 +1451,6 @@ h3GetDestinationIndexFromUnidirectionalEdge(edge) └────────────────────┘ ``` - ## h3GetIndexesFromUnidirectionalEdge {#h3getindexesfromunidirectionaledge} 指定された単方向エッジ H3Index から、起点と終点の六角形インデックスを返します。 @@ -1535,7 +1490,6 @@ h3GetIndexesFromUnidirectionalEdge(edge) └─────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdgesFromHexagon {#h3getunidirectionaledgesfromhexagon} 指定された H3Index から、すべての単方向エッジを取得します。 @@ -1570,7 +1524,6 @@ h3GetUnidirectionalEdgesFromHexagon(index) └───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdgeBoundary {#h3getunidirectionaledgeboundary} 単方向エッジを定義する座標を返します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md index 7e1cf1c8259..c6a4386cdd3 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md @@ -6,8 +6,6 @@ title: 'ポリゴンを扱うための関数' doc_type: 'reference' --- - - ## WKT {#wkt} さまざまな [Geo データ型](../../data-types/geo.md) から、WKT(Well Known Text)形式のジオメトリオブジェクトを返します。サポートされている WKT オブジェクトは次のとおりです。 @@ -75,7 +73,6 @@ SELECT wkt([[[(0., 0.), (10., 0.), (10., 10.), (0., 10.)], [(4., 4.), (5., 4.), MULTIPOLYGON(((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10))) ``` - ## readWKTMultiPolygon {#readwktmultipolygon} WKT(Well Known Text)形式の MultiPolygon 表現を MultiPolygon 型に変換します。 @@ -101,7 +98,6 @@ SELECT MultiPolygon 
- ## readWKTPolygon {#readwktpolygon} WKT (Well Known Text) 形式の MultiPolygon を Polygon 型に変換します。 @@ -127,7 +123,6 @@ FORMAT Markdown Polygon - ## readWKTPoint {#readwktpoint} ClickHouse の `readWKTPoint` 関数は、Well-Known Text (WKT) で表現された Point ジオメトリを解析し、ClickHouse の内部形式による Point 値を返します。 @@ -156,7 +151,6 @@ SELECT readWKTPoint('POINT (1.2 3.4)'); (1.2,3.4) ``` - ## readWKTLineString {#readwktlinestring} LineString ジオメトリの Well-Known Text (WKT) 表現をパースし、ClickHouse の内部形式で返します。 @@ -185,7 +179,6 @@ SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'); [(1,1),(2,2),(3,3),(1,1)] ``` - ## readWKTMultiLineString {#readwktmultilinestring} MultiLineString ジオメトリの Well-Known Text (WKT) 表現をパースし、ClickHouse の内部形式で返します。 @@ -214,7 +207,6 @@ SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6) [[(1,1),(2,2),(3,3)],[(4,4),(5,5),(6,6)]] ``` - ## readWKTRing {#readwktring} Polygon ジオメトリの Well-Known Text (WKT) 表現をパースし、ClickHouse の内部形式のリング(閉じた LineString)を返します。 @@ -243,7 +235,6 @@ SELECT readWKTRing('POLYGON ((1 1, 2 2, 3 3, 1 1))'); [(1,1),(2,2),(3,3),(1,1)] ``` - ## polygonsWithinSpherical {#polygonswithinspherical} ある多角形が別の多角形の内側に完全に含まれているかどうかを判定し、その結果として true または false を返します。詳細は [https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/within/within_2.html](https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/within/within_2.html) を参照してください。 @@ -258,7 +249,6 @@ SELECT polygonsWithinSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535879 0 ``` - ## readWKBMultiPolygon {#readwkbmultipolygon} WKB(Well-Known Binary)形式の MultiPolygon を MultiPolygon 型に変換します。 @@ -284,7 +274,6 @@ SELECT MultiPolygon - ## readWKBPolygon {#readwkbpolygon} Well-Known Binary (WKB) 形式の MultiPolygon を Polygon 型に変換します。 @@ -310,7 +299,6 @@ FORMAT Markdown ポリゴン - ## readWKBPoint {#readwkbpoint} ClickHouse の `readWKBPoint` 関数は、Point ジオメトリの Well-Known Binary (WKB) 表現を解析し、ClickHouse の内部フォーマットで表現された Point を返します。 @@ 
-339,7 +327,6 @@ SELECT readWKBPoint(unhex('0101000000333333333333f33f3333333333330b40')); (1.2,3.4) ``` - ## readWKBLineString {#readwkblinestring} LineString ジオメトリの Well-Known Binary (WKB) 表現を解析し、ClickHouse 内部形式で返します。 @@ -368,7 +355,6 @@ SELECT readWKBLineString(unhex('010200000004000000000000000000f03f000000000000f0 [(1,1),(2,2),(3,3),(1,1)] ``` - ## readWKBMultiLineString {#readwkbmultilinestring} MultiLineString ジオメトリの Well-Known Binary (WKB) 表現を解析し、ClickHouse の内部形式で返します。 @@ -403,7 +389,6 @@ SELECT readWKBMultiLineString(unhex('0105000000020000000102000000030000000000000 UInt8、`false` の場合は 0、`true` の場合は 1 - ## polygonsDistanceSpherical {#polygonsdistancespherical} 一方の点が最初のポリゴンに属し、もう一方の点が別のポリゴンに属する場合に、これら2点間の最短距離を計算します。ここでの「球面」とは、座標を理想的な完全球の表面上の座標として解釈することを意味しますが、これは地球には当てはまりません。この種の座標系を使用すると実行速度は向上しますが、当然ながら精度は落ちます。 @@ -426,7 +411,6 @@ SELECT polygonsDistanceSpherical([[[(0, 0), (0, 0.1), (0.1, 0.1), (0.1, 0)]]], [ Float64 - ## polygonsDistanceCartesian {#polygonsdistancecartesian} 2つのポリゴン間の距離を計算します。 @@ -449,7 +433,6 @@ SELECT polygonsDistanceCartesian([[[(0, 0), (0, 0.1), (0.1, 0.1), (0.1, 0)]]], [ Float64 - ## polygonsEqualsCartesian {#polygonsequalscartesian} 2つのポリゴンが等しい場合に true を返します。 @@ -472,7 +455,6 @@ SELECT polygonsEqualsCartesian([[[(1., 1.), (1., 4.), (4., 4.), (4., 1.)]]], [[[ UInt8、false の場合は 0、true の場合は 1 - ## polygonsSymDifferenceSpherical {#polygonssymdifferencespherical} 2 つのポリゴン間の空間集合論における対称差(XOR)を計算します @@ -495,7 +477,6 @@ Polygons MultiPolygon - ## polygonsSymDifferenceCartesian {#polygonssymdifferencecartesian} `polygonsSymDifferenceSpherical` と同様ですが、座標はデカルト座標系で表されます。こちらのほうが実際の地球のモデルにより近くなります。 @@ -518,7 +499,6 @@ Polygons MultiPolygon - ## polygonsIntersectionSpherical {#polygonsintersectionspherical} 多角形同士の共通部分(AND)を計算します。座標は球面座標系で表されます。 @@ -541,7 +521,6 @@ Polygons MultiPolygon - ## polygonsWithinCartesian {#polygonswithincartesian} 2 つ目のポリゴンが 1 つ目のポリゴンの内部にある場合に true を返します。 @@ -564,7 +543,6 @@ SELECT polygonsWithinCartesian([[[(2., 2.), 
(2., 3.), (3., 3.), (3., 2.)]]], [[[ UInt8。false の場合は 0、true の場合は 1 - ## polygonsIntersectCartesian {#polygonsintersectcartesian} 2 つの多角形が交差している(領域または境界のいずれかを少しでも共有している)場合に true を返します。 @@ -587,7 +565,6 @@ SELECT polygonsIntersectCartesian([[[(2., 2.), (2., 3.), (3., 3.), (3., 2.)]]], UInt8型。偽の場合は 0、真の場合は 1 - ## polygonsIntersectSpherical {#polygonsintersectspherical} 2 つのポリゴンが交差(共通の領域または境界を共有)する場合に true を返します。参照 [https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/intersects.html](https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/intersects.html) @@ -610,7 +587,6 @@ SELECT polygonsIntersectSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535 UInt8 型。偽なら 0、真なら 1。 - ## polygonConvexHullCartesian {#polygonconvexhullcartesian} 凸包を計算します。[リファレンス](https://www.boost.org/doc/libs/1_61_0/libs/geometry/doc/html/geometry/reference/algorithms/convex_hull.html) @@ -635,7 +611,6 @@ MultiPolygon Polygon - ## polygonAreaSpherical {#polygonareaspherical} 球面上の多角形の面積を計算します。 @@ -658,7 +633,6 @@ Polygon Float - ## polygonsUnionSpherical {#polygonsunionspherical} 和集合(論理和 / OR)を計算します。 @@ -681,7 +655,6 @@ Polygons MultiPolygon - ## polygonPerimeterSpherical {#polygonperimeterspherical} ポリゴンの周長を計算します。 @@ -692,18 +665,12 @@ MultiPolygon これは、ジンバブエを表すポリゴンです: - - ```text POLYGON((30.0107 -15.6462,30.0502 -15.6401,30.09 -15.6294,30.1301 -15.6237,30.1699 -15.6322,30.1956 -15.6491,30.2072 -15.6532,30.2231 -15.6497,30.231 -15.6447,30.2461 -15.6321,30.2549 -15.6289,30.2801 -15.6323,30.2962 -15.639,30.3281 -15.6524,30.3567 -15.6515,30.3963 -15.636,30.3977 -15.7168,30.3993 -15.812,30.4013 -15.9317,30.4026 -16.0012,30.5148 -16.0004,30.5866 -16,30.7497 -15.9989,30.8574 -15.9981,30.9019 -16.0071,30.9422 -16.0345,30.9583 -16.0511,30.9731 -16.062,30.9898 -16.0643,31.012 -16.0549,31.0237 -16.0452,31.0422 -16.0249,31.0569 -16.0176,31.0654 -16.0196,31.0733 -16.0255,31.0809 -16.0259,31.089 -16.0119,31.1141 -15.9969,31.1585 
-16.0002,31.26 -16.0235,31.2789 -16.0303,31.2953 -16.0417,31.3096 -16.059,31.3284 -16.0928,31.3409 -16.1067,31.3603 -16.1169,31.3703 -16.1237,31.3746 -16.1329,31.3778 -16.1422,31.384 -16.1488,31.3877 -16.1496,31.3956 -16.1477,31.3996 -16.1473,31.4043 -16.1499,31.4041 -16.1545,31.4027 -16.1594,31.4046 -16.1623,31.4241 -16.1647,31.4457 -16.165,31.4657 -16.1677,31.4806 -16.178,31.5192 -16.1965,31.6861 -16.2072,31.7107 -16.2179,31.7382 -16.2398,31.7988 -16.3037,31.8181 -16.3196,31.8601 -16.3408,31.8719 -16.3504,31.8807 -16.368,31.8856 -16.4063,31.8944 -16.4215,31.9103 -16.4289,32.0141 -16.4449,32.2118 -16.4402,32.2905 -16.4518,32.3937 -16.4918,32.5521 -16.5534,32.6718 -16.5998,32.6831 -16.6099,32.6879 -16.6243,32.6886 -16.6473,32.6987 -16.6868,32.7252 -16.7064,32.7309 -16.7087,32.7313 -16.7088,32.7399 -16.7032,32.7538 -16.6979,32.7693 -16.6955,32.8007 -16.6973,32.862 -16.7105,32.8934 -16.7124,32.9096 -16.7081,32.9396 -16.6898,32.9562 -16.6831,32.9685 -16.6816,32.9616 -16.7103,32.9334 -16.8158,32.9162 -16.8479,32.9005 -16.8678,32.8288 -16.9351,32.8301 -16.9415,32.8868 -17.0382,32.9285 -17.1095,32.9541 -17.1672,32.9678 -17.2289,32.9691 -17.2661,32.9694 -17.2761,32.9732 -17.2979,32.9836 -17.3178,32.9924 -17.3247,33.0147 -17.3367,33.0216 -17.3456,33.0225 -17.3615,33.0163 -17.3772,33.0117 -17.384,32.9974 -17.405,32.9582 -17.4785,32.9517 -17.4862,32.943 -17.4916,32.9366 -17.4983,32.9367 -17.5094,32.9472 -17.5432,32.9517 -17.5514,32.9691 -17.5646,33.0066 -17.581,33.0204 -17.5986,33.0245 -17.6192,33.0206 -17.6385,33.0041 -17.6756,33.0002 -17.7139,33.0032 -17.7577,32.9991 -17.7943,32.9736 -17.8106,32.957 -17.818,32.9461 -17.8347,32.9397 -17.8555,32.9369 -17.875,32.9384 -17.8946,32.9503 -17.9226,32.9521 -17.9402,32.9481 -17.9533,32.9404 -17.96,32.9324 -17.9649,32.9274 -17.9729,32.929 -17.9823,32.9412 -17.9963,32.9403 -18.0048,32.9349 -18.0246,32.9371 -18.0471,32.9723 -18.1503,32.9755 -18.1833,32.9749 -18.1908,32.9659 -18.2122,32.9582 -18.2254,32.9523 -18.233,32.9505 
-18.2413,32.955 -18.2563,32.9702 -18.2775,33.0169 -18.3137,33.035 -18.3329,33.0428 -18.352,33.0381 -18.3631,33.0092 -18.3839,32.9882 -18.4132,32.9854 -18.4125,32.9868 -18.4223,32.9995 -18.4367,33.003 -18.4469,32.9964 -18.4671,32.9786 -18.4801,32.9566 -18.4899,32.9371 -18.501,32.9193 -18.51,32.9003 -18.5153,32.8831 -18.5221,32.8707 -18.5358,32.8683 -18.5526,32.8717 -18.5732,32.8845 -18.609,32.9146 -18.6659,32.9223 -18.6932,32.9202 -18.7262,32.9133 -18.753,32.9025 -18.7745,32.8852 -18.7878,32.8589 -18.79,32.8179 -18.787,32.7876 -18.7913,32.6914 -18.8343,32.6899 -18.8432,32.6968 -18.8972,32.7032 -18.9119,32.7158 -18.9198,32.7051 -18.9275,32.6922 -18.9343,32.6825 -18.9427,32.6811 -18.955,32.6886 -18.9773,32.6903 -18.9882,32.6886 -19.001,32.6911 -19.0143,32.699 -19.0222,32.7103 -19.026,32.7239 -19.0266,32.786 -19.0177,32.8034 -19.0196,32.8142 -19.0238,32.82 -19.0283,32.823 -19.0352,32.8253 -19.0468,32.8302 -19.0591,32.8381 -19.0669,32.8475 -19.0739,32.8559 -19.0837,32.8623 -19.1181,32.8332 -19.242,32.8322 -19.2667,32.8287 -19.2846,32.8207 -19.3013,32.8061 -19.3234,32.7688 -19.3636,32.7665 -19.3734,32.7685 -19.4028,32.7622 -19.4434,32.7634 -19.464,32.7739 -19.4759,32.7931 -19.4767,32.8113 -19.4745,32.8254 -19.4792,32.8322 -19.5009,32.8325 -19.5193,32.8254 -19.5916,32.8257 -19.6008,32.8282 -19.6106,32.8296 -19.6237,32.8254 -19.6333,32.8195 -19.642,32.8163 -19.6521,32.8196 -19.6743,32.831 -19.6852,32.8491 -19.6891,32.8722 -19.6902,32.8947 -19.6843,32.9246 -19.6553,32.9432 -19.6493,32.961 -19.6588,32.9624 -19.6791,32.9541 -19.7178,32.9624 -19.7354,32.9791 -19.7514,33.0006 -19.7643,33.0228 -19.7731,33.0328 -19.7842,33.0296 -19.8034,33.0229 -19.8269,33.0213 -19.8681,33.002 -19.927,32.9984 -20.0009,33.0044 -20.0243,33.0073 -20.032,32.9537 -20.0302,32.9401 -20.0415,32.9343 -20.0721,32.9265 -20.0865,32.9107 -20.0911,32.8944 -20.094,32.8853 -20.103,32.8779 -20.1517,32.8729 -20.1672,32.8593 -20.1909,32.8571 -20.2006,32.8583 -20.2075,32.8651 -20.2209,32.8656 -20.2289,32.8584 
-20.2595,32.853 -20.2739,32.8452 -20.2867,32.8008 -20.3386,32.7359 -20.4142,32.7044 -20.4718,32.6718 -20.5318,32.6465 -20.558,32.6037 -20.5648,32.5565 -20.5593,32.5131 -20.5646,32.4816 -20.603,32.4711 -20.6455,32.4691 -20.6868,32.4835 -20.7942,32.4972 -20.8981,32.491 -20.9363,32.4677 -20.9802,32.4171 -21.0409,32.3398 -21.1341,32.3453 -21.1428,32.3599 -21.1514,32.3689 -21.163,32.3734 -21.1636,32.3777 -21.1634,32.3806 -21.1655,32.3805 -21.1722,32.3769 -21.1785,32.373 -21.184,32.3717 -21.1879,32.4446 -21.3047,32.4458 -21.309,32.4472 -21.3137,32.4085 -21.2903,32.373 -21.3279,32.3245 -21.3782,32.2722 -21.4325,32.2197 -21.4869,32.1673 -21.5413,32.1148 -21.5956,32.0624 -21.65,32.01 -21.7045,31.9576 -21.7588,31.9052 -21.8132,31.8527 -21.8676,31.8003 -21.922,31.7478 -21.9764,31.6955 -22.0307,31.6431 -22.0852,31.5907 -22.1396,31.5382 -22.1939,31.4858 -22.2483,31.4338 -22.302,31.3687 -22.345,31.2889 -22.3973,31.2656 -22.3655,31.2556 -22.358,31.2457 -22.3575,31.2296 -22.364,31.2215 -22.3649,31.2135 -22.3619,31.1979 -22.3526,31.1907 -22.3506,31.1837 -22.3456,31.1633 -22.3226,31.1526 -22.3164,31.1377 -22.3185,31.1045 -22.3334,31.097 -22.3349,31.0876 -22.3369,31.0703 -22.3337,31.0361 -22.3196,30.9272 -22.2957,30.8671 -22.2896,30.8379 -22.2823,30.8053 -22.2945,30.6939 -22.3028,30.6743 -22.3086,30.6474 -22.3264,30.6324 -22.3307,30.6256 -22.3286,30.6103 -22.3187,30.6011 -22.3164,30.5722 -22.3166,30.5074 -22.3096,30.4885 -22.3102,30.4692 -22.3151,30.4317 -22.3312,30.4127 -22.3369,30.3721 -22.3435,30.335 -22.3447,30.3008 -22.337,30.2693 -22.3164,30.2553 -22.3047,30.2404 -22.2962,30.2217 -22.2909,30.197 -22.2891,30.1527 -22.2948,30.1351 -22.2936,30.1111 -22.2823,30.0826 -22.2629,30.0679 -22.2571,30.0381 -22.2538,30.0359 -22.2506,30.0345 -22.2461,30.0155 -22.227,30.0053 -22.2223,29.9838 -22.2177,29.974 -22.214,29.9467 -22.1983,29.9321 -22.1944,29.896 -22.1914,29.8715 -22.1793,29.8373 -22.1724,29.7792 -22.1364,29.7589 -22.1309,29.6914 -22.1341,29.6796 -22.1383,29.6614 -22.1265,29.6411 
-22.1292,29.604 -22.1451,29.5702 -22.142,29.551 -22.146,29.5425 -22.1625,29.5318 -22.1724,29.5069 -22.1701,29.4569 -22.1588,29.4361 -22.1631,29.3995 -22.1822,29.378 -22.1929,29.3633 -22.1923,29.3569 -22.1909,29.3501 -22.1867,29.2736 -22.1251,29.2673 -22.1158,29.2596 -22.0961,29.2541 -22.0871,29.2444 -22.0757,29.2393 -22.0726,29.1449 -22.0753,29.108 -22.0692,29.0708 -22.051,29.0405 -22.0209,29.0216 -21.9828,29.0138 -21.9404,29.0179 -21.8981,29.0289 -21.8766,29.0454 -21.8526,29.0576 -21.8292,29.0553 -21.81,29.0387 -21.7979,28.9987 -21.786,28.9808 -21.7748,28.9519 -21.7683,28.891 -21.7649,28.8609 -21.7574,28.7142 -21.6935,28.6684 -21.68,28.6297 -21.6513,28.6157 -21.6471,28.5859 -21.6444,28.554 -21.6366,28.5429 -21.6383,28.5325 -21.6431,28.4973 -21.6515,28.4814 -21.6574,28.4646 -21.6603,28.4431 -21.6558,28.3618 -21.6163,28.3219 -21.6035,28.2849 -21.5969,28.1657 -21.5952,28.0908 -21.5813,28.0329 -21.5779,28.0166 -21.5729,28.0026 -21.5642,27.9904 -21.5519,27.9847 -21.5429,27.9757 -21.5226,27.9706 -21.5144,27.9637 -21.5105,27.9581 -21.5115,27.9532 -21.5105,27.9493 -21.5008,27.9544 -21.4878,27.9504 -21.482,27.9433 -21.4799,27.9399 -21.478,27.9419 -21.4685,27.9496 -21.4565,27.953 -21.4487,27.9502 -21.4383,27.9205 -21.3812,27.9042 -21.3647,27.8978 -21.3554,27.8962 -21.3479,27.8967 -21.3324,27.8944 -21.3243,27.885 -21.3102,27.8491 -21.2697,27.8236 -21.2317,27.7938 -21.1974,27.7244 -21.1497,27.7092 -21.1345,27.6748 -21.0901,27.6666 -21.0712,27.6668 -21.0538,27.679 -21.0007,27.6804 -20.9796,27.6727 -20.9235,27.6726 -20.9137,27.6751 -20.8913,27.6748 -20.8799,27.676 -20.8667,27.6818 -20.8576,27.689 -20.849,27.6944 -20.8377,27.7096 -20.7567,27.7073 -20.7167,27.6825 -20.6373,27.6904 -20.6015,27.7026 -20.5661,27.7056 -20.5267,27.6981 -20.5091,27.6838 -20.4961,27.666 -20.4891,27.6258 -20.4886,27.5909 -20.4733,27.5341 -20.483,27.4539 -20.4733,27.3407 -20.473,27.306 -20.4774,27.2684 -20.4958,27.284 -20.3515,27.266 -20.2342,27.2149 -20.1105,27.2018 -20.093,27.1837 -20.0823,27.1629 
-20.0766,27.1419 -20.0733,27.1297 -20.0729,27.1198 -20.0739,27.1096 -20.0732,27.0973 -20.0689,27.0865 -20.0605,27.0692 -20.0374,27.0601 -20.0276,27.0267 -20.0101,26.9943 -20.0068,26.9611 -20.0072,26.9251 -20.0009,26.8119 -19.9464,26.7745 -19.9398,26.7508 -19.9396,26.731 -19.9359,26.7139 -19.9274,26.6986 -19.9125,26.6848 -19.8945,26.6772 -19.8868,26.6738 -19.8834,26.6594 -19.8757,26.6141 -19.8634,26.5956 -19.8556,26.5819 -19.8421,26.5748 -19.8195,26.5663 -19.8008,26.5493 -19.7841,26.5089 -19.7593,26.4897 -19.7519,26.4503 -19.7433,26.4319 -19.7365,26.4128 -19.7196,26.3852 -19.6791,26.3627 -19.6676,26.3323 -19.6624,26.3244 -19.6591,26.3122 -19.6514,26.3125 -19.6496,26.3191 -19.6463,26.3263 -19.6339,26.3335 -19.613,26.331 -19.605,26.3211 -19.592,26.3132 -19.5842,26.3035 -19.5773,26.2926 -19.5725,26.2391 -19.5715,26.1945 -19.5602,26.1555 -19.5372,26.1303 -19.5011,26.0344 -19.2437,26.0114 -19.1998,25.9811 -19.1618,25.9565 -19.1221,25.9486 -19.1033,25.9449 -19.0792,25.9481 -19.0587,25.9644 -19.0216,25.9678 -19.001,25.9674 -18.9999,25.9407 -18.9213,25.8153 -18.814,25.7795 -18.7388,25.7734 -18.6656,25.7619 -18.6303,25.7369 -18.6087,25.6983 -18.5902,25.6695 -18.566,25.6221 -18.5011,25.6084 -18.4877,25.5744 -18.4657,25.5085 -18.3991,25.4956 -18.3789,25.4905 -18.3655,25.4812 -18.3234,25.4732 -18.3034,25.4409 -18.2532,25.4088 -18.176,25.3875 -18.139,25.3574 -18.1158,25.3234 -18.0966,25.2964 -18.0686,25.255 -18.0011,25.2261 -17.9319,25.2194 -17.908,25.2194 -17.8798,25.2598 -17.7941,25.2667 -17.8009,25.2854 -17.8093,25.3159 -17.8321,25.3355 -17.8412,25.3453 -17.8426,25.3765 -17.8412,25.4095 -17.853,25.4203 -17.8549,25.4956 -17.8549,25.5007 -17.856,25.5102 -17.8612,25.5165 -17.8623,25.5221 -17.8601,25.5309 -17.851,25.5368 -17.8487,25.604 -17.8362,25.657 -17.8139,25.6814 -17.8115,25.6942 -17.8194,25.7064 -17.8299,25.7438 -17.8394,25.766 -17.8498,25.786 -17.8622,25.7947 -17.8727,25.8044 -17.8882,25.8497 -17.9067,25.8636 -17.9238,25.8475 -17.9294,25.8462 -17.9437,25.8535 
-17.96,25.8636 -17.9716,25.9245 -17.999,25.967 -18.0005,25.9785 -17.999,26.0337 -17.9716,26.0406 -17.9785,26.0466 -17.9663,26.0625 -17.9629,26.0812 -17.9624,26.0952 -17.9585,26.0962 -17.9546,26.0942 -17.9419,26.0952 -17.9381,26.1012 -17.9358,26.1186 -17.9316,26.1354 -17.9226,26.1586 -17.9183,26.1675 -17.9136,26.203 -17.8872,26.2119 -17.8828,26.2211 -17.8863,26.2282 -17.8947,26.2339 -17.904,26.2392 -17.9102,26.2483 -17.9134,26.2943 -17.9185,26.3038 -17.9228,26.312 -17.9284,26.3183 -17.9344,26.3255 -17.936,26.3627 -17.9306,26.4086 -17.939,26.4855 -17.9793,26.5271 -17.992,26.5536 -17.9965,26.5702 -18.0029,26.5834 -18.0132,26.5989 -18.03,26.6127 -18.0412,26.6288 -18.0492,26.6857 -18.0668,26.7 -18.0692,26.7119 -18.0658,26.7406 -18.0405,26.7536 -18.033,26.7697 -18.029,26.794 -18.0262,26.8883 -17.9846,26.912 -17.992,26.9487 -17.9689,26.9592 -17.9647,27.0063 -17.9627,27.0213 -17.9585,27.0485 -17.9443,27.0782 -17.917,27.1154 -17.8822,27.149 -17.8425,27.1465 -17.8189,27.1453 -17.7941,27.147 -17.7839,27.1571 -17.7693,27.4221 -17.5048,27.5243 -17.4151,27.5773 -17.3631,27.6045 -17.3128,27.6249 -17.2333,27.6412 -17.1985,27.7773 -17.0012,27.8169 -16.9596,27.8686 -16.9297,28.023 -16.8654,28.1139 -16.8276,28.2125 -16.7486,28.2801 -16.7065,28.6433 -16.5688,28.6907 -16.5603,28.7188 -16.5603,28.7328 -16.5581,28.7414 -16.5507,28.7611 -16.5323,28.7693 -16.5152,28.8089 -16.4863,28.8225 -16.4708,28.8291 -16.4346,28.8331 -16.4264,28.8572 -16.3882,28.857 -16.3655,28.8405 -16.3236,28.8368 -16.3063,28.8403 -16.2847,28.8642 -16.2312,28.8471 -16.2027,28.8525 -16.1628,28.8654 -16.1212,28.871 -16.0872,28.8685 -16.0822,28.8638 -16.0766,28.8593 -16.0696,28.8572 -16.0605,28.8603 -16.0494,28.8741 -16.0289,28.8772 -16.022,28.8989 -15.9955,28.9324 -15.9637,28.9469 -15.9572,28.9513 -15.9553,28.9728 -15.9514,29.0181 -15.9506,29.0423 -15.9463,29.0551 -15.9344,29.0763 -15.8954,29.0862 -15.8846,29.1022 -15.8709,29.1217 -15.8593,29.1419 -15.8545,29.151 -15.8488,29.1863 -15.8128,29.407 -15.7142,29.4221 
-15.711,29.5085 -15.7036,29.5262 -15.6928,29.5634 -15.6621,29.5872 -15.6557,29.6086 -15.6584,29.628 -15.6636,29.6485 -15.6666,29.6728 -15.6633,29.73 -15.6447,29.7733 -15.6381,29.8143 -15.6197,29.8373 -15.6148,29.8818 -15.6188,29.9675 -15.6415,30.0107 -15.6462)) ``` - - #### polygonPerimeterSpherical 関数の使い方 {#usage-of-polygon-perimeter-spherical} - - ```sql SELECT round(polygonPerimeterSpherical([(30.010654, -15.646227), (30.050238, -15.640129), (30.090029, -15.629381), (30.130129, -15.623696), (30.16992, -15.632171), (30.195552, -15.649121), (30.207231, -15.653152), (30.223147, -15.649741), (30.231002, -15.644677), (30.246091, -15.632068), (30.254876, -15.628864), (30.280094, -15.632275), (30.296196, -15.639042), (30.32805, -15.652428), (30.356679, -15.651498), (30.396263, -15.635995), (30.39771, -15.716817), (30.39926, -15.812005), (30.401327, -15.931688), (30.402568, -16.001244), (30.514809, -16.000418), (30.586587, -16.000004), (30.74973, -15.998867), (30.857424, -15.998144), (30.901865, -16.007136), (30.942173, -16.034524), (30.958296, -16.05106), (30.973075, -16.062016), (30.989767, -16.06429), (31.012039, -16.054885), (31.023718, -16.045169), (31.042218, -16.024912), (31.056895, -16.017574), (31.065421, -16.019641), (31.073328, -16.025532), (31.080872, -16.025946), (31.089037, -16.01189), (31.1141, -15.996904), (31.15849, -16.000211), (31.259983, -16.023465), (31.278897, -16.030287), (31.29533, -16.041655), (31.309592, -16.059019), (31.328351, -16.092815), (31.340908, -16.106664), (31.360339, -16.116896), (31.37026, -16.123718), (31.374601, -16.132916), (31.377754, -16.142218), (31.384006, -16.148832), (31.387727, -16.149556), (31.395582, -16.147695), (31.399613, -16.147282), (31.404315, -16.149866), (31.404057, -16.154517), (31.402713, -16.159374), (31.404574, -16.162268), (31.424107, -16.164749), (31.445708, -16.164955), (31.465655, -16.167746), (31.480641, -16.177978), (31.519192, -16.196478), (31.686107, -16.207227), (31.710705, -16.217872), (31.738197, 
-16.239783), (31.798761, -16.303655), (31.818088, -16.319571), (31.86005, -16.340759), (31.871935, -16.35037), (31.88072, -16.368044), (31.88563, -16.406284), (31.894363, -16.421477), (31.910279, -16.428919), (32.014149, -16.444938), (32.211759, -16.440184), (32.290463, -16.45176), (32.393661, -16.491757), (32.5521, -16.553355), (32.671783, -16.599761), (32.6831, -16.609889), (32.687906, -16.624255), (32.68863, -16.647303), (32.698655, -16.686784), (32.725217, -16.706421), (32.73095, -16.708656), (32.731314, -16.708798), (32.739893, -16.703217), (32.753845, -16.697946), (32.769348, -16.695466), (32.800664, -16.697326), (32.862004, -16.710452), (32.893372, -16.712415), (32.909598, -16.708075), (32.93957, -16.689781), (32.95621, -16.683063), (32.968509, -16.681615999999998), (32.961585, -16.710348), (32.933369, -16.815768), (32.916213, -16.847911), (32.900503, -16.867755), (32.828776, -16.935141), (32.83012, -16.941549), (32.886757, -17.038184), (32.928512, -17.109497), (32.954143, -17.167168), (32.967786, -17.22887), (32.96909, -17.266115), (32.969439, -17.276102), (32.973212, -17.297909), (32.983599, -17.317753), (32.992384, -17.324678), (33.014656, -17.336667), (33.021633, -17.345555), (33.022459, -17.361471), (33.016258, -17.377181), (33.011651, -17.383991), (32.997448, -17.404983), (32.958174, -17.478467), (32.951663, -17.486218), (32.942981, -17.491593), (32.936573, -17.498311), (32.936676, -17.509369), (32.947218, -17.543166), (32.951663, -17.551434), (32.969129, -17.56456), (33.006646, -17.580993), (33.020392, -17.598563), (33.024526, -17.619233), (33.020599, -17.638457), (33.004063, -17.675561), (33.000238, -17.713905), (33.003184, -17.757726), (32.999102, -17.794313), (32.973573, -17.810643), (32.957037, -17.817981), (32.946082, -17.834724), (32.939674, -17.855498), (32.936883, -17.875032), (32.938433, -17.894566), (32.950267, -17.922574), (32.952128, -17.940247), (32.948149, -17.95327), (32.940397, -17.959988), (32.932439, -17.964949), (32.927375, 
-17.972907), (32.928977, -17.982312), (32.941224, -17.996265), (32.940294, -18.004843), (32.934919, -18.024583), (32.93709, -18.047114), (32.972282, -18.150261), (32.975537, -18.183333), (32.974865, -18.190775), (32.965925, -18.212169), (32.958174, -18.225398), (32.952283, -18.233046), (32.950525999999996, -18.241314), (32.95497, -18.256301), (32.970163, -18.277488), (33.016878, -18.313661), (33.034965, -18.332885), (33.042768, -18.352005), (33.038066, -18.363064), (33.00923, -18.383941), (32.988198, -18.41319), (32.985356, -18.412467), (32.986803, -18.422285), (32.999515, -18.436651), (33.003029, -18.446883), (32.996414, -18.46714), (32.978586, -18.48006), (32.956624, -18.489878), (32.937142, -18.50104), (32.919313, -18.510032), (32.900296, -18.515303), (32.88314, -18.522124), (32.870737, -18.535767), (32.868257, -18.552613), (32.871668, -18.57318), (32.884483, -18.609044), (32.914559, -18.665888), (32.92231, -18.693173), (32.920243, -18.726246), (32.913267, -18.753014), (32.902518, -18.774512), (32.885207, -18.787844), (32.858852, -18.790015), (32.817924, -18.787018), (32.787642, -18.791255), (32.69142, -18.83425), (32.68987, -18.843241), (32.696794, -18.897192), (32.703202, -18.911868), (32.71576, -18.919826), (32.705063, -18.927474), (32.692247, -18.934295), (32.682532, -18.942667), (32.681085, -18.954966), (32.68863, -18.97729), (32.690283, -18.988246), (32.68863, -19.000958), (32.691058, -19.01429), (32.698965, -19.022249), (32.710282, -19.025969), (32.723873, -19.026589), (32.785988, -19.017701), (32.803351, -19.019561), (32.814203, -19.023799), (32.819991, -19.028346), (32.822988, -19.035168), (32.825262, -19.046847), (32.830223, -19.059146), (32.83813, -19.066897), (32.847483, -19.073925), (32.855906, -19.083744), (32.862262, -19.118057), (32.83322, -19.241977), (32.832187, -19.266678), (32.828673, -19.284558), (32.820715, -19.301301), (32.806142, -19.323419), (32.768831, -19.363623), (32.766454, -19.373442), (32.768521, -19.402794), (32.762217, 
-19.443412), (32.763354, -19.463979), (32.773947, -19.475864), (32.793119, -19.476691), (32.811309, -19.474521), (32.825365, -19.479172), (32.832187, -19.500876), (32.832497000000004, -19.519273), (32.825365, -19.59162), (32.825675, -19.600818), (32.828156, -19.610636), (32.829603, -19.623659), (32.825365, -19.633271), (32.819474, -19.641952), (32.81627, -19.652081), (32.819629, -19.674302), (32.83105, -19.685154), (32.849137, -19.689081), (32.872184, -19.690218), (32.894715, -19.684327), (32.924584, -19.655285), (32.943188, -19.64929), (32.960964, -19.658799), (32.962411, -19.679056), (32.954143, -19.717813), (32.962411, -19.735383), (32.979051, -19.751403), (33.0006, -19.764322), (33.022769, -19.773107), (33.032795, -19.784166), (33.029642, -19.80339), (33.022873, -19.826851), (33.021322, -19.868088), (33.001995, -19.927), (32.998378, -20.000897), (33.004373, -20.024255), (33.007266, -20.032006), (32.95373, -20.030249), (32.940087, -20.041515), (32.934299, -20.072107), (32.926548, -20.086473), (32.910683, -20.091124), (32.894405, -20.094018), (32.88531, -20.10301), (32.877869, -20.151689), (32.872908, -20.167192), (32.859265, -20.190859), (32.857095, -20.200575), (32.858335, -20.207499), (32.865053, -20.220935), (32.86557, -20.228893), (32.858438, -20.259486), (32.852961, -20.273852), (32.845209, -20.286668), (32.800767, -20.338551), (32.735862, -20.414205), (32.704443, -20.471773), (32.671783, -20.531821), (32.646462, -20.557969), (32.603674, -20.56479), (32.556545, -20.559312), (32.513136, -20.564583), (32.481614, -20.603031), (32.471072, -20.645509), (32.469108, -20.68685), (32.483474, -20.794233), (32.49722, -20.898103), (32.491019, -20.936344), (32.467661, -20.980165), (32.417122, -21.040937), (32.339814, -21.134058), (32.345343, -21.142843), (32.359864, -21.151421), (32.368856, -21.162997), (32.373352, -21.163617), (32.377744, -21.16341), (32.380638, -21.165477), (32.380535, -21.172195), (32.376866, -21.178499), (32.37299, -21.183977), (32.37175, 
-21.187905), (32.444613, -21.304693), (32.445849, -21.308994), (32.447197, -21.313685), (32.408543, -21.290327), (32.37299, -21.327948), (32.324517, -21.378177), (32.272221, -21.432541), (32.219718, -21.486904), (32.167318, -21.541268), (32.114814, -21.595632), (32.062415, -21.649995), (32.010015, -21.704462), (31.957615, -21.758826), (31.905215, -21.813189), (31.852712, -21.867553), (31.800312, -21.92202), (31.747808, -21.976384), (31.695512, -22.030747), (31.643112, -22.085214), (31.590712, -22.139578), (31.538209, -22.193941), (31.485809, -22.248305), (31.433822, -22.302048), (31.36871, -22.345043), (31.288922, -22.39734), (31.265616, -22.365507), (31.255642, -22.357962), (31.24572, -22.357549), (31.229597, -22.363957), (31.221536, -22.364887), (31.213474, -22.36189), (31.197868, -22.352588), (31.190685, -22.350624), (31.183657, -22.34556), (31.163348, -22.322616), (31.152599, -22.316414), (31.137717, -22.318482), (31.10454, -22.333364), (31.097048, -22.334922), (31.087642, -22.336878), (31.07033, -22.333674), (31.036121, -22.319618), (30.927187, -22.295744), (30.867087, -22.289646), (30.83789, -22.282308), (30.805282, -22.294504), (30.693919, -22.302772), (30.674282, -22.30856), (30.647410999999998, -22.32644), (30.632424, -22.330677), (30.625551, -22.32861), (30.610307, -22.318688), (30.601108, -22.316414), (30.57217, -22.316621), (30.507367, -22.309593), (30.488454, -22.310213), (30.46923, -22.315071), (30.431713, -22.331194), (30.412696, -22.336878), (30.372078, -22.343493), (30.334975, -22.344733), (30.300765, -22.336982), (30.269346, -22.316414), (30.25529, -22.304736), (30.240407, -22.296157), (30.2217, -22.290886), (30.196999, -22.289129), (30.15266, -22.294814), (30.13509, -22.293574), (30.111113, -22.282308), (30.082587, -22.262878), (30.067911, -22.25709), (30.038145, -22.253783), (30.035872, -22.250579), (30.034528, -22.246135), (30.015511, -22.227014), (30.005279, -22.22226), (29.983782, -22.217713), (29.973963, -22.213992), (29.946678, -22.198282), 
(29.932105, -22.194355), (29.896035, -22.191358), (29.871489, -22.179265), (29.837331, -22.172444), (29.779246, -22.136374), (29.758886, -22.130896), (29.691448, -22.1341), (29.679614, -22.138338), (29.661424, -22.126452), (29.641064, -22.129242), (29.60396, -22.145055), (29.570164, -22.141955), (29.551043, -22.145986), (29.542517, -22.162522), (29.53182, -22.172444), (29.506912, -22.170067), (29.456889, -22.158801), (29.436115, -22.163142), (29.399528, -22.182159), (29.378031, -22.192908), (29.363250999999998, -22.192288), (29.356947, -22.190944000000002), (29.350074, -22.186707), (29.273644, -22.125108), (29.26734, -22.115807), (29.259588, -22.096066), (29.254111, -22.087074), (29.244395, -22.075706), (29.239331, -22.072605), (29.144867, -22.075292), (29.10797, -22.069194), (29.070763, -22.051004), (29.040532, -22.020929), (29.021567, -21.982791), (29.013815, -21.940417), (29.017949, -21.898145), (29.028905, -21.876648), (29.045441, -21.852567), (29.057637, -21.829209), (29.05526, -21.809985), (29.038723, -21.797893), (28.998726, -21.786008), (28.980846, -21.774845), (28.951907, -21.768334), (28.891032, -21.764924), (28.860853, -21.757379), (28.714195, -21.693507), (28.66841, -21.679968), (28.629704, -21.651339), (28.6157, -21.647101), (28.585934, -21.644414), (28.553998, -21.636559), (28.542939, -21.638316), (28.532501, -21.643071), (28.497309, -21.651546), (28.481393, -21.657437), (28.464598, -21.660331), (28.443101, -21.655783), (28.361762, -21.616302), (28.321919, -21.603486), (28.284867, -21.596872), (28.165702, -21.595218), (28.090771, -21.581266), (28.032893, -21.577855), (28.016563, -21.572894), (28.002559, -21.564212), (27.990415, -21.551913), (27.984731, -21.542922), (27.975739, -21.522561), (27.970571, -21.514396), (27.963698, -21.510469), (27.958066, -21.511502), (27.953208, -21.510469), (27.949281, -21.500754), (27.954448, -21.487835), (27.950418, -21.482047), (27.943338, -21.479876), (27.939876, -21.478016), (27.941943, -21.468508), (27.949642, 
-21.456519), (27.953001, -21.448664), (27.950211, -21.438329), (27.920549, -21.381174), (27.904219, -21.364741), (27.897811, -21.35544), (27.896157, -21.347895), (27.896674, -21.332392), (27.8944, -21.32433), (27.884995, -21.310171), (27.849132, -21.269657), (27.823604, -21.231726), (27.793838, -21.197413), (27.724385, -21.149664), (27.709192, -21.134471), (27.674775, -21.090133), (27.666611, -21.071219), (27.666817, -21.053753), (27.678961, -21.000733), (27.680356, -20.979649), (27.672657, -20.923528), (27.672605, -20.913709), (27.675085, -20.891282), (27.674775, -20.879913), (27.676016, -20.866684), (27.681803, -20.857589), (27.689038, -20.849011), (27.694412, -20.837744999999998), (27.709605, -20.756716), (27.707332, -20.716719), (27.682475, -20.637344), (27.690382, -20.60148), (27.702629, -20.566134), (27.705575, -20.526653), (27.698133, -20.509083), (27.683767, -20.49606), (27.66599, -20.489136), (27.625786, -20.488619), (27.590853, -20.473323), (27.534112, -20.483038), (27.45391, -20.473323), (27.340739, -20.473013), (27.306012, -20.477354), (27.268392, -20.49575), (27.283998, -20.35147), (27.266015, -20.234164), (27.214907, -20.110451), (27.201781, -20.092984), (27.183746, -20.082339), (27.16292, -20.076551), (27.141888, -20.073347), (27.129692, -20.072934), (27.119771, -20.073864), (27.109642, -20.073244), (27.097343, -20.068903), (27.086491, -20.060532), (27.069231, -20.03738), (27.060136, -20.027562), (27.02665, -20.010095), (26.9943, -20.006788), (26.961072, -20.007201), (26.925054, -20.000897), (26.811882, -19.94643), (26.774469, -19.939815), (26.750801, -19.939609), (26.730957, -19.935888), (26.713904, -19.927413), (26.698608, -19.91253), (26.684758, -19.894547), (26.67717, -19.886815), (26.673803, -19.883385), (26.659437, -19.875737), (26.614065, -19.863438), (26.595565, -19.855583), (26.581922, -19.842147), (26.574791, -19.819513), (26.566316, -19.800806), (26.549263, -19.784063), (26.508852, -19.759258), (26.489731, -19.75192), (26.450251, 
-19.743342), (26.431854, -19.73652), (26.412837, -19.71957), (26.385242, -19.679056), (26.362711, -19.667584), (26.332325, -19.662416), (26.324367, -19.659109), (26.312171, -19.651358), (26.312481, -19.649601), (26.319096, -19.646293), (26.326331, -19.633891), (26.333462, -19.613014), (26.330981, -19.604952), (26.32106, -19.592033), (26.313205, -19.584178), (26.30349, -19.577254), (26.292638, -19.572499), (26.239101, -19.571466), (26.194452, -19.560200000000002), (26.155488, -19.537153), (26.13027, -19.501082), (26.034359, -19.243734), (26.011414, -19.199809), (25.981132, -19.161775), (25.956534, -19.122088), (25.948576, -19.103277), (25.944855, -19.079196), (25.948059, -19.058732), (25.964389, -19.021629), (25.9678, -19.000958), (25.967449, -18.999925), (25.940721, -18.921273), (25.815251, -18.813993), (25.779491, -18.738752), (25.773393, -18.665578), (25.761921, -18.630335), (25.736909, -18.608734), (25.698255, -18.590234), (25.669523, -18.566049), (25.622084, -18.501143), (25.608442, -18.487708), (25.574439, -18.465693), (25.508499, -18.399134), (25.49558, -18.378877), (25.490516, -18.365545), (25.481163, -18.323377), (25.473204, -18.303429), (25.440855, -18.2532), (25.408816, -18.175995), (25.387525, -18.138995), (25.357449, -18.115844), (25.323446, -18.09662), (25.296368, -18.068612), (25.255026, -18.001122), (25.226088, -17.931876), (25.21937, -17.908001), (25.21937, -17.879786), (25.259781, -17.794107), (25.266705, -17.800928), (25.285412, -17.809299), (25.315901, -17.83214), (25.335538, -17.841235), (25.345254, -17.842579), (25.376466, -17.841235), (25.409539, -17.853018), (25.420288, -17.854878), (25.49558, -17.854878), (25.500748, -17.856015), (25.510153, -17.861183), (25.516458, -17.862319), (25.522142, -17.860149), (25.530927, -17.850951), (25.536818, -17.848677), (25.603997, -17.836171), (25.657017, -17.81395), (25.681409, -17.81147), (25.694224, -17.819428), (25.70642, -17.829867), (25.743834, -17.839375), (25.765951, -17.849814), (25.786002, 
-17.862216), (25.794683, -17.872655), (25.804399, -17.888158), (25.849667, -17.906658), (25.86362, -17.923814), (25.847497, -17.929395), (25.846153, -17.943658), (25.853490999999998, -17.959988), (25.86362, -17.971563), (25.924495, -17.998952), (25.966973, -18.000502), (25.978548, -17.998952), (26.033739, -17.971563), (26.04056, -17.978488), (26.046554, -17.966292), (26.062471, -17.962882), (26.081178, -17.962365), (26.095234, -17.958541), (26.096164, -17.954614), (26.0942, -17.941901), (26.095234, -17.938077), (26.101228, -17.935803), (26.118591, -17.931566), (26.135438, -17.922574), (26.158589, -17.918337), (26.167477, -17.913582), (26.203031, -17.887227), (26.211919, -17.882783), (26.221117, -17.886297), (26.228249, -17.894669), (26.233933, -17.903971), (26.239204, -17.910172), (26.248299, -17.913376), (26.294291, -17.918543), (26.3038, -17.922781), (26.311965, -17.928362), (26.318269, -17.934356), (26.325504, -17.93601), (26.362711, -17.930636), (26.408599, -17.939007), (26.485494, -17.979315), (26.527145, -17.992027), (26.553604, -17.996471), (26.570243, -18.002879), (26.583369, -18.013215), (26.598872, -18.029958), (26.612721, -18.041223), (26.628844, -18.049181), (26.685689, -18.066751), (26.700003, -18.069232), (26.71194, -18.065821), (26.740569, -18.0405), (26.753591, -18.032955), (26.769714, -18.029028), (26.794002, -18.026237), (26.88826, -17.984586), (26.912031, -17.992027), (26.94867, -17.968876), (26.95916, -17.964742), (27.006289, -17.962675), (27.021275, -17.958541), (27.048457, -17.944278), (27.078171, -17.916993), (27.11543, -17.882163), (27.149019, -17.842476), (27.146539, -17.818911), (27.145299, -17.794107), (27.146952, -17.783875), (27.157081, -17.769302), (27.422078, -17.504822), (27.524294, -17.415112), (27.577314, -17.363125), (27.604495, -17.312792), (27.624856, -17.233314), (27.641186, -17.198484), (27.777301, -17.001183), (27.816886, -16.959636), (27.868562, -16.929663), (28.022993, -16.865393), (28.113922, -16.827551), (28.21252, 
-16.748589), (28.280113, -16.706524), (28.643295, -16.568755), (28.690734, -16.56028), (28.718794, -16.56028), (28.73285, -16.55811), (28.741377, -16.550668), (28.761117, -16.532271), (28.769282, -16.515218), (28.808866, -16.486279), (28.822509, -16.470776), (28.829124, -16.434603), (28.833051, -16.426438), (28.857236, -16.388198), (28.857029, -16.36546), (28.840492, -16.323602), (28.836772, -16.306342), (28.840286, -16.284741), (28.86416, -16.231205), (28.847107, -16.202679), (28.852481, -16.162785), (28.8654, -16.121237), (28.870981, -16.087234), (28.868501, -16.08217), (28.86385, -16.076589), (28.859303, -16.069561), (28.857236, -16.060466), (28.860336, -16.049407), (28.874082, -16.028943), (28.877183, -16.022018), (28.898887, -15.995457), (28.932373, -15.963727), (28.946862, -15.957235), (28.951287, -15.955252), (28.972784, -15.951428), (29.018053, -15.950602), (29.042341, -15.946261), (29.055053, -15.934375), (29.076344, -15.895411), (29.086162, -15.884559), (29.102182, -15.870916), (29.121716, -15.859341), (29.141869, -15.854483), (29.150964, -15.848799), (29.186311, -15.812832), (29.406969, -15.714233), (29.422059, -15.711030000000001), (29.508462, -15.703588), (29.526239, -15.692839), (29.563446, -15.662144), (29.587217, -15.655736), (29.608559, -15.658422999999999), (29.62799, -15.663591), (29.648505, -15.666588), (29.672793, -15.663281), (29.73005, -15.644677), (29.773252, -15.638062), (29.814283, -15.619666), (29.837331, -15.614808), (29.881773, -15.618839), (29.967504, -15.641473), (30.010654, -15.646227)]), 6) ``` @@ -712,14 +679,10 @@ SELECT round(polygonPerimeterSpherical([(30.010654, -15.646227), (30.050238, -15 0.45539 ``` - - ### 入力パラメーター {#input-parameters-15} ### 戻り値 {#returned-value-22} - - ## polygonsIntersectionCartesian {#polygonsintersectioncartesian} 多角形同士の交差領域を計算します。 @@ -742,7 +705,6 @@ Polygons MultiPolygon - ## polygonAreaCartesian {#polygonareacartesian} 多角形の面積を計算します。 @@ -765,7 +727,6 @@ Polygon Float64 - ## polygonPerimeterCartesian 
{#polygonperimetercartesian} 多角形の周長を計算します。 @@ -788,7 +749,6 @@ Polygon Float64 - ## polygonsUnionCartesian {#polygonsunioncartesian} 多角形の和集合を計算します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md index 9be85fd28cc..afde60299ad 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md @@ -6,20 +6,14 @@ description: 'S2 インデックスを扱う関数のドキュメント' doc_type: 'reference' --- - - # S2 インデックスを扱うための関数 {#functions-for-working-with-s2-index} - - ## S2Index {#s2index} [S2](https://s2geometry.io/) は、すべての地理データを球体(地球儀に似た形)上で表現する地理インデックスシステムです。 S2 ライブラリでは、点は S2 インデックスとして表現されます。これは、従来の (緯度, 経度) の組とは異なり、単位球の表面上の点を内部的にエンコードした数値表現です。(緯度, 経度) 形式で指定されたある点に対して S2 ポイントインデックスを取得するには、[geoToS2](#geotos2) 関数を使用します。また、指定された S2 ポイントインデックスに対応する地理座標を取得するには、[s2ToGeo](#s2togeo) 関数を使用できます。 - - ## geoToS2 {#geotos2} 指定した座標 `(longitude, latitude)` に対応する [S2](#s2index) のポイントインデックスを返します。 @@ -55,7 +49,6 @@ SELECT geoToS2(37.79506683, 55.71290588) AS s2Index; └─────────────────────┘ ``` - ## s2ToGeo {#s2togeo} 指定された [S2](#s2index) ポイントインデックスに対応する地理座標 `(longitude, latitude)` を返します。 @@ -92,7 +85,6 @@ SELECT s2ToGeo(4704772434919038107) AS s2座標; └──────────────────────────────────────┘ ``` - ## s2GetNeighbors {#s2getneighbors} 指定された [S2](#s2index) に対応する S2 近傍インデックスを返します。S2 システム内の各セルは、4 本の測地線で囲まれた四辺形となっています。そのため、各セルには 4 つの隣接セルがあります。 @@ -127,7 +119,6 @@ SELECT s2GetNeighbors(5074766849661468672) AS s2Neighbors; └───────────────────────────────────────────────────────────────────────────────────┘ ``` - ## s2CellsIntersect {#s2cellsintersect} 2 つの [S2](#s2index) セルが交差しているかどうかを判定します。 @@ -163,7 +154,6 @@ SELECT s2CellsIntersect(9926595209846587392, 9926594385212866560) AS intersect; └───────────┘ ``` - ## s2CapContains {#s2capcontains} キャップが S2 
ポイントを含むかどうかを判定します。キャップは、平面によって切り取られた球の一部を表します。球面上の 1 点と、度(degree)単位の半径によって定義されます。 @@ -201,7 +191,6 @@ SELECT s2CapContains(1157339245694594829, 1.0, 1157347770437378819) AS capContai └─────────────┘ ``` - ## s2CapUnion {#s2capunion} 指定された 2 つの入力キャップの両方を含む、最小のキャップを求めます。キャップは、平面で切り取られた球面の一部を表します。球面上の 1 点と、度単位の半径によって定義されます。 @@ -238,7 +227,6 @@ SELECT s2CapUnion(3814912406305146967, 1.0, 1157347770437378819, 1.0) AS capUnio └────────────────────────────────────────┘ ``` - ## s2RectAdd {#s2rectadd} 与えられた S2 ポイントを含めるように、バウンディング長方形のサイズを拡大します。S2 システムでは、長方形は緯度経度空間における長方形を表す `S2LatLngRect` という `S2Region` 型で表されます。 @@ -276,7 +264,6 @@ SELECT s2RectAdd(5178914411069187297, 5177056748191934217, 5179056748191934217) └───────────────────────────────────────────┘ ``` - ## s2RectContains {#s2rectcontains} 指定された長方形が S2 ポイントを含んでいるかどうかを判定します。S2 システムでは、長方形は緯度・経度空間上の長方形を表す `S2Region` の一種である `S2LatLngRect` 型として表現されます。 @@ -314,7 +301,6 @@ SELECT s2RectContains(5179062030687166815, 5177056748191934217, 5177914411069187 └──────────────┘ ``` - ## s2RectUnion {#s2rectunion} この長方形と指定された長方形の和集合を含む、最小の長方形を返します。S2 システムでは、長方形は緯度・経度空間の長方形を表す、S2Region 型の一種である `S2LatLngRect` によって表現されます。 @@ -351,7 +337,6 @@ SELECT s2RectUnion(5178914411069187297, 5177056748191934217, 5179062030687166815 └───────────────────────────────────────────┘ ``` - ## s2RectIntersection {#s2rectintersection} この矩形と指定された矩形との交差部分全体を含む、最小の矩形を返します。S2 システムでは、矩形は緯度・経度空間の矩形を表す `S2LatLngRect` という種類の S2Region で表現されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md index ac8c05855ac..25c84966206 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md @@ -6,12 +6,8 @@ title: 'IN 演算子を実装するための関数' doc_type: 'reference' --- - - # IN 演算子の実装用関数 
{#functions-for-implementing-the-in-operator} - - ## in, notIn, globalIn, globalNotIn {#in-notin-globalin-globalnotin} [IN 演算子](/sql-reference/operators/in) に関するセクションを参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md index 351331bfab0..4fa1e5f1224 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md @@ -6,30 +6,20 @@ title: '機械学習関数' doc_type: 'reference' --- - - # 機械学習関数 {#machine-learning-functions} - - ## evalMLMethod {#evalmlmethod} 学習済みの回帰モデルを用いた予測には `evalMLMethod` 関数を使用します。詳細は `linearRegression` を参照してください。 - - ## stochasticLinearRegression {#stochasticlinearregression} [stochasticLinearRegression](/sql-reference/aggregate-functions/reference/stochasticlinearregression) 集約関数は、線形モデルと MSE 損失関数を用いる確率的勾配降下法を実装します。新しいデータに対する予測には `evalMLMethod` を使用します。 - - ## stochasticLogisticRegression {#stochasticlogisticregression} [stochasticLogisticRegression](/sql-reference/aggregate-functions/reference/stochasticlogisticregression) 集約関数は、二値分類問題に対して確率的勾配降下法を実装したものです。新しいデータに対する予測には `evalMLMethod` を使用します。 - - ## naiveBayesClassifier {#naivebayesclassifier} n-gram およびラプラス平滑化を用いる Naive Bayes モデルで入力テキストを分類します。モデルは使用前に ClickHouse 上で事前に設定されている必要があります。 @@ -130,7 +120,6 @@ Naive Bayes 分類アルゴリズムを使用し、未出現の n-gram を扱う **モデル学習ガイド** - **ファイル形式** 人間が読みやすい形式の出力では、`n=1` で `token` モードの場合、モデルは次のような形になります: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md index 7bf66f9b3a0..571108919c1 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md @@ -6,14 +6,10 @@ title: 'NumericIndexedVector の関数' doc_type: 'reference' --- - - # NumericIndexedVector {#numericindexedvector} NumericIndexedVector は、ベクトルをカプセル化し、ベクトルの集約および要素単位の演算を実装する抽象データ構造です。Bit-Sliced Index をそのストレージ方式として利用します。理論的な背景と利用シナリオについては、論文 [Large-Scale Metric Computation in Online Controlled Experiment Platform](https://arxiv.org/pdf/2405.08411) を参照してください。 - - ## BSI {#bit-sliced-index} BSI(Bit-Sliced Index)ストレージ方式では、データは[Bit-Sliced Index](https://dl.acm.org/doi/abs/10.1145/253260.253268)として保存され、その後[Roaring Bitmap](https://github.com/RoaringBitmap/RoaringBitmap)を用いて圧縮されます。集約演算および要素ごとの演算は圧縮データに対して直接実行され、ストレージおよびクエリの効率を大幅に向上させることができます。 @@ -27,8 +23,6 @@ BSI(Bit-Sliced Index)ストレージ方式では、データは[Bit-Sliced I - Bit-Sliced Index の仕組みでは、値は 2 進数に変換されます。浮動小数点型については固定小数点表現による変換が行われるため、精度が損なわれる可能性があります。精度は、小数部に使用するビット数をカスタマイズすることで調整でき、デフォルトは 24 ビットであり、ほとんどのシナリオで十分です。集約関数 groupNumericIndexedVector の `-State` 版を用いて NumericIndexedVector を構築する際に、整数部と小数部に使用するビット数をカスタマイズできます。 - インデックスには、非ゼロ値を持つもの、ゼロ値を持つもの、存在しないものの 3 通りがあります。NumericIndexedVector では、非ゼロ値とゼロ値のみが保存されます。さらに、2 つの NumericIndexedVector 間の要素ごとの演算において、存在しないインデックスの値は 0 として扱われます。除算のシナリオでは、除数が 0 の場合、結果は 0 になります。 - - ## numericIndexedVector オブジェクトを作成する {#create-numeric-indexed-vector-object} この構造を作成する方法は 2 つあります。1 つは、集約関数 `groupNumericIndexedVector` に `-State` を付けて使用する方法です。 @@ -37,8 +31,6 @@ BSI(Bit-Sliced Index)ストレージ方式では、データは[Bit-Sliced I もう 1 つは、`numericIndexedVectorBuild` を使用して Map から構築する方法です。 `groupNumericIndexedVectorState` 関数では、パラメータによって整数ビット数と小数ビット数をカスタマイズできますが、`numericIndexedVectorBuild` にはその機能はありません。 - - ## groupNumericIndexedVector {#group-numeric-indexed-vector} 2 つのデータ列から NumericIndexedVector を構築し、すべての値の合計を `Float64` 型で返します。末尾に `State` を付けた場合は、NumericIndexedVector オブジェクトを返します。 @@ -107,7 
+99,6 @@ SELECT groupNumericIndexedVectorStateIf('BSI', 32, 0)(UserID, PlayTime, day = '2 詳細については https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md を参照してください。 */ } - {/*AUTOGENERATED_START*/ } ## numericIndexedVectorAllValueSum {#numericIndexedVectorAllValueSum} @@ -144,7 +135,6 @@ SELECT numericIndexedVectorAllValueSum(numericIndexedVectorBuild(mapFromArrays([ └─────┘ ``` - ## numericIndexedVectorBuild {#numericIndexedVectorBuild} 導入バージョン: v25.7 @@ -179,7 +169,6 @@ SELECT numericIndexedVectorBuild(mapFromArrays([1, 2, 3], [10, 20, 30])) AS res, └─────┴────────────────────────────────────────────────────────────┘ ``` - ## numericIndexedVectorCardinality {#numericIndexedVectorCardinality} 導入: v25.7 @@ -214,7 +203,6 @@ SELECT numericIndexedVectorCardinality(numericIndexedVectorBuild(mapFromArrays([ └─────┘ ``` - ## numericIndexedVectorGetValue {#numericIndexedVectorGetValue} 導入バージョン: v25.7 @@ -250,7 +238,6 @@ SELECT numericIndexedVectorGetValue(numericIndexedVectorBuild(mapFromArrays([1, └─────┘ ``` - ## numericIndexedVectorPointwiseAdd {#numericIndexedVectorPointwiseAdd} 導入バージョン: v25.7 @@ -291,7 +278,6 @@ SELECT └───────────────────────┴──────────────────┘ ``` - ## numericIndexedVectorPointwiseDivide {#numericIndexedVectorPointwiseDivide} 導入バージョン: v25.7 @@ -332,7 +318,6 @@ SELECT └─────────────┴─────────────────┘ ``` - ## numericIndexedVectorPointwiseEqual {#numericIndexedVectorPointwiseEqual} 導入バージョン: v25.7 @@ -374,7 +359,6 @@ SELECT └───────┴───────┘ ``` - ## numericIndexedVectorPointwiseGreater {#numericIndexedVectorPointwiseGreater} 導入バージョン: v25.7 @@ -416,7 +400,6 @@ SELECT └───────────┴───────┘ ``` - ## numericIndexedVectorPointwiseGreaterEqual {#numericIndexedVectorPointwiseGreaterEqual} 導入バージョン: v25.7 @@ -458,7 +441,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseLess {#numericIndexedVectorPointwiseLess} 導入バージョン: v25.7 @@ -500,7 +482,6 @@ SELECT └───────────┴───────┘ 
``` - ## numericIndexedVectorPointwiseLessEqual {#numericIndexedVectorPointwiseLessEqual} 導入バージョン: v25.7 @@ -542,7 +523,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseMultiply {#numericIndexedVectorPointwiseMultiply} 導入バージョン: v25.7 @@ -583,7 +563,6 @@ SELECT └───────────────┴──────────────────┘ ``` - ## numericIndexedVectorPointwiseNotEqual {#numericIndexedVectorPointwiseNotEqual} 導入バージョン: v25.7 @@ -625,7 +604,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseSubtract {#numericIndexedVectorPointwiseSubtract} 導入バージョン: v25.7 @@ -666,7 +644,6 @@ SELECT └────────────────────────┴─────────────────┘ ``` - ## numericIndexedVectorShortDebugString {#numericIndexedVectorShortDebugString} 導入バージョン:v25.7 @@ -702,7 +679,6 @@ SELECT numericIndexedVectorShortDebugString(numericIndexedVectorBuild(mapFromArr res: {"vector_type":"BSI","index_type":"char8_t","value_type":"char8_t","integer_bit_num":8,"fraction_bit_num":0,"zero_indexes_info":{"cardinality":"0"},"non_zero_indexes_info":{"total_cardinality":"3","all_value_sum":60,"number_of_bitmaps":"8","bitmap_info":{"cardinality":{"0":"0","1":"2","2":"2","3":"2","4":"2","5":"0","6":"0","7":"0"}}}} ``` - ## numericIndexedVectorToMap {#numericIndexedVectorToMap} 導入バージョン: v25.7 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md index 9f1587a61a5..ed7638ccc00 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md @@ -7,8 +7,6 @@ title: '通常関数' doc_type: 'reference' --- - - # 通常関数 {#regular-functions} 関数には少なくとも\* 2種類あります。通常関数(単に「関数」とも呼ばれます)と集約関数です。これは互いにまったく別の概念です。通常関数は、各行に対して個別に適用されるかのように動作します(各行に対する関数の結果は、他の行には依存しません)。集約関数は、複数の行から値の集合を蓄積します(つまり、すべての行の集合に依存します)。 @@ -19,26 +17,18 @@ doc_type: 'reference' 
3番目の種類の関数として、['arrayJoin' 関数](../functions/array-join.md) に分類されるものがあります。また、[テーブル関数](../table-functions/index.md) も別に挙げられます。 ::: - - ## 強い型付け {#strong-typing} 標準的な SQL と異なり、ClickHouse は強い型付けを採用しています。言い換えると、型間の暗黙的な変換は行われません。各関数は特定の型の組み合わせに対してのみ利用できます。このため、場合によっては型変換関数を使用する必要があります。 - - ## 共通部分式除去 {#common-subexpression-elimination} クエリ内で同一の AST(同一のノード、または構文解析結果が同じもの)を持つすべての式は、同じ値を持つと見なされます。これらの式はまとめられ、一度だけ実行されます。同一の副問い合わせも同様の方法で除去されます。 - - ## 結果の型 {#types-of-results} すべての関数は、結果として単一の値のみを返します(複数の値を返したり、値を返さないことはありません)。結果の型は通常、値ではなく引数の型のみによって決まります。例外は、`tupleElement` 関数(`a.N` 演算子)および `toFixedString` 関数です。 - - ## 定数 {#constants} 説明を簡単にするため、一部の関数は特定の引数について定数のみを受け付けます。例えば、LIKE 演算子の右側の引数は定数でなければなりません。 @@ -48,8 +38,6 @@ doc_type: 'reference' 関数は、定数引数と非定数引数に対して別々の実装になっている場合があります(実行されるコードが異なります)。しかし、定数に対する結果と、同じ値のみを含む通常の列に対する結果とは一致していなければなりません。 - - ## NULL の処理 {#null-processing} 関数は次のように動作します。 @@ -57,14 +45,10 @@ doc_type: 'reference' - 関数の引数のうち少なくとも 1 つが `NULL` の場合、その関数の結果も `NULL` になります。 - 各関数の説明で個別に指定されている特殊な動作を持つものもあります。ClickHouse のソースコードでは、これらの関数は `UseDefaultImplementationForNulls=false` になっています。 - - ## 不変性 {#constancy} 関数は引数の値を変更できず、行われた変更はすべて結果として返されます。したがって、個々の関数の計算結果は、クエリ内で関数を記述する順序には依存しません。 - - ## 高階関数 {#higher-order-functions} ### `->` 演算子と lambda(params, expr) 関数 {#arrow-operator-and-lambda} @@ -82,7 +66,6 @@ str -> str != Referer 一部の関数では、最初の引数(ラムダ関数)を省略できます。この場合、恒等写像が指定されたものとして扱われます。 - ## ユーザー定義関数 (UDF) {#user-defined-functions-udfs} ClickHouse はユーザー定義関数をサポートしています。詳しくは [UDFs](../functions/udf.md) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md index 2c1f7749cf5..80cfa6cc49a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md @@ -10,7 +10,6 @@ 
keywords: ['時間ウィンドウ'] import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # 時間ウィンドウ関数 {#time-window-functions} @@ -26,7 +25,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; 詳細は https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md を参照してください。 */ } - {/*AUTOGENERATED_START*/ } ## hop {#hop} @@ -66,7 +64,6 @@ SELECT hop(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) ('2024-07-03 00:00:00','2024-07-05 00:00:00') ``` - ## hopEnd {#hopEnd} 導入バージョン: v22.1 @@ -104,7 +101,6 @@ SELECT hopEnd(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) 2024-07-05 00:00:00 ``` - ## hopStart {#hopStart} 導入バージョン: v22.1 @@ -142,7 +138,6 @@ SELECT hopStart(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) 2024-07-03 00:00:00 ``` - ## tumble {#tumble} 導入バージョン: v21.12 @@ -177,7 +172,6 @@ SELECT tumble(now(), toIntervalDay('1')) ('2024-07-04 00:00:00','2024-07-05 00:00:00') ``` - ## tumbleEnd {#tumbleEnd} 導入バージョン: v22.1 @@ -212,7 +206,6 @@ SELECT tumbleEnd(now(), toIntervalDay('1')) 2024-07-05 00:00:00 ``` - ## tumbleStart {#tumbleStart} 導入: v22.1 @@ -249,7 +242,6 @@ SELECT tumbleStart(now(), toIntervalDay('1')) {/*AUTOGENERATED_END*/ } - ## 関連コンテンツ {#related-content} - [時系列データのユースケースガイド](/use-cases/time-series) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md index 0c3f9c17e96..6974e3387c2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md @@ -16,7 +16,6 @@ doc_type: 'reference' 詳細は https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md を参照してください。 */ } - {/*AUTOGENERATED_START*/ } ## 
flattenTuple {#flattenTuple} @@ -57,7 +56,6 @@ SELECT flattenTuple(t) FROM tab; └────────────────┘ ``` - ## tuple {#tuple} 導入バージョン: v @@ -92,7 +90,6 @@ SELECT tuple(1, 2) (1,2) ``` - ## tupleConcat {#tupleConcat} 導入: v23.8 @@ -125,7 +122,6 @@ SELECT tupleConcat((1, 2), ('a',), (true, false)) (1, 2, 'a', true, false) ``` - ## tupleDivide {#tupleDivide} 導入バージョン: v21.11 @@ -163,7 +159,6 @@ SELECT tupleDivide((1, 2), (2, 3)) (0.5, 0.6666666666666666) ``` - ## tupleDivideByNumber {#tupleDivideByNumber} 導入バージョン: v21.11 @@ -201,7 +196,6 @@ SELECT tupleDivideByNumber((1, 2), 0.5) (2, 4) ``` - ## tupleElement {#tupleElement} 導入バージョン: v1.1 @@ -277,7 +271,6 @@ SELECT (1, 'hello').2 hello ``` - ## tupleHammingDistance {#tupleHammingDistance} 導入: v21.1 @@ -341,7 +334,6 @@ SELECT tupleHammingDistance(wordShingleMinHash(string), wordShingleMinHashCaseIn 2 ``` - ## tupleIntDiv {#tupleIntDiv} 導入バージョン: v23.8 @@ -387,7 +379,6 @@ SELECT tupleIntDiv((15, 10, 5), (5.5, 5.5, 5.5)) (2, 1, 0) ``` - ## tupleIntDivByNumber {#tupleIntDivByNumber} 導入バージョン: v23.8 @@ -433,7 +424,6 @@ SELECT tupleIntDivByNumber((15.2, 10.7, 5.5), 5.8) (2, 1, 0) ``` - ## tupleIntDivOrZero {#tupleIntDivOrZero} 導入バージョン: v23.8 @@ -469,7 +459,6 @@ SELECT tupleIntDivOrZero((5, 10, 15), (0, 0, 0)) (0, 0, 0) ``` - ## tupleIntDivOrZeroByNumber {#tupleIntDivOrZeroByNumber} 導入バージョン: v23.8 @@ -515,7 +504,6 @@ SELECT tupleIntDivOrZeroByNumber((15, 10, 5), 0) (0, 0, 0) ``` - ## tupleMinus {#tupleMinus} 導入バージョン: v21.11 @@ -551,7 +539,6 @@ SELECT tupleMinus((1, 2), (2, 3)) (-1, -1) ``` - ## tupleModulo {#tupleModulo} 導入バージョン: v23.8 @@ -585,7 +572,6 @@ SELECT tupleModulo((15, 10, 5), (5, 3, 2)) (0, 1, 1) ``` - ## tupleModuloByNumber {#tupleModuloByNumber} 導入バージョン: v23.8 @@ -619,7 +605,6 @@ SELECT tupleModuloByNumber((15, 10, 5), 2) (1, 0, 1) ``` - ## tupleMultiply {#tupleMultiply} 導入バージョン: v21.11 @@ -653,7 +638,6 @@ SELECT tupleMultiply((1, 2), (2, 3)) (2, 6) ``` - ## tupleMultiplyByNumber {#tupleMultiplyByNumber} 導入バージョン: v21.11 
@@ -687,7 +671,6 @@ SELECT tupleMultiplyByNumber((1, 2), -2.1) (-2.1, -4.2) ``` - ## tupleNames {#tupleNames} 導入バージョン: v @@ -717,7 +700,6 @@ SELECT tupleNames(tuple(1 as a, 2 as b)) ['a','b'] ``` - ## tupleNegate {#tupleNegate} 導入バージョン: v21.11 @@ -750,7 +732,6 @@ SELECT tupleNegate((1, 2)) (-1, -2) ``` - ## tuplePlus {#tuplePlus} 導入バージョン: v21.11 @@ -786,7 +767,6 @@ SELECT tuplePlus((1, 2), (2, 3)) (3, 5) ``` - ## tupleToNameValuePairs {#tupleToNameValuePairs} 導入: v21.9 @@ -833,7 +813,6 @@ SELECT tupleToNameValuePairs(tuple(3, 2, 1)) {/*AUTOGENERATED_END*/ } - ## untuple {#untuple} 呼び出し箇所で [tuple](/sql-reference/data-types/tuple) 要素の構文的な置換を行います。 @@ -910,7 +889,6 @@ SELECT untuple((* EXCEPT (v2, v3),)) FROM kv; └─────┴────┴────┴────┴───────────┘ ``` - ## 距離関数 {#distance-functions} サポートされているすべての関数については、[距離関数のドキュメント](../../sql-reference/functions/distance-functions.md)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md index c4d065fe305..7779b0a4d81 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md @@ -8,7 +8,7 @@ doc_type: 'reference' ## map {#map} -キーと値のペアから [Map(key, value)](../data-types/map.md) 型の値を生成します。 +キーと値のペアから、[Map(key, value)](../data-types/map.md) 型の値を作成します。 **構文** @@ -21,9 +21,9 @@ map(key1, value1[, key2, value2, ...]) * `key_n` — マップエントリのキー。[Map](../data-types/map.md) のキー型としてサポートされる任意の型。 * `value_n` — マップエントリの値。[Map](../data-types/map.md) の値型としてサポートされる任意の型。 -**戻り値** +**返り値** -* `key:value` のペアを含むマップ。[Map(key, value)](../data-types/map.md)。 +* `key:value` ペアを含むマップ。[Map(key, value)](../data-types/map.md)。 **例** @@ -33,7 +33,7 @@ map(key1, value1[, key2, value2, ...]) SELECT map('key1', number, 'key2', number * 2) FROM numbers(3); ``` -結果: 
+結果: ```text ┌─map('key1', number, 'key2', multiply(number, 2))─┐ @@ -45,10 +45,10 @@ SELECT map('key1', number, 'key2', number * 2) FROM numbers(3); ## mapFromArrays {#mapfromarrays} -キーの配列またはマップと、値の配列またはマップから map を作成します。 +キーの配列またはマップと値の配列またはマップから map を作成します。 この関数は、構文 `CAST([...], 'Map(key_type, value_type)')` の便利な代替手段です。 -たとえば、次のように書く代わりに +たとえば、次のように記述する代わりに * `CAST((['aa', 'bb'], [4, 5]), 'Map(String, UInt32)')` や * `CAST([('aa',4), ('bb',5)], 'Map(String, UInt32)')` @@ -61,16 +61,16 @@ SELECT map('key1', number, 'key2', number * 2) FROM numbers(3); mapFromArrays(keys, values) ``` -エイリアス: `MAP_FROM_ARRAYS(keys, values)` +Alias: `MAP_FROM_ARRAYS(keys, values)` **引数** -* `keys` — マップを作成するためのキーの配列またはマップ。[Array](../data-types/array.md) または [Map](../data-types/map.md)。`keys` が配列の場合、その型として `Array(Nullable(T))` または `Array(LowCardinality(Nullable(T)))` を許容しますが、NULL 値を含んではいけません。 -* `values` - マップを作成するための値の配列またはマップ。[Array](../data-types/array.md) または [Map](../data-types/map.md)。 +* `keys` — マップを作成するためのキーの配列またはマップ。[Array](../data-types/array.md) または [Map](../data-types/map.md) 型です。`keys` が配列の場合、その型として `Array(Nullable(T))` または `Array(LowCardinality(Nullable(T)))` を、NULL 値を含まない限り使用できます。 +* `values` - マップを作成するための値の配列またはマップ。[Array](../data-types/array.md) または [Map](../data-types/map.md) 型です。 -**返される値** +**戻り値** -* キー配列および値の配列/マップから構成されるキーと値を持つマップ。 +* キー配列および値配列/マップから構築されたマップ。 **例** @@ -80,7 +80,7 @@ mapFromArrays(keys, values) SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]) ``` -結果: +結果: ```response ┌─mapFromArrays(['a', 'b', 'c'], [1, 2, 3])─┐ @@ -88,7 +88,7 @@ SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]) └───────────────────────────────────────────┘ ``` -`mapFromArrays` は、[Map](../data-types/map.md) 型の引数も受け付けます。これらの引数は、実行時にタプルの配列にキャストされます。 +`mapFromArrays` は、[Map](../data-types/map.md) 型の引数も受け付けます。これらは、実行時にタプルの配列にキャストされます。 ```sql SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3)) @@ -117,9 +117,9 @@ SELECT mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3]) 
## extractKeyValuePairs {#extractkeyvaluepairs} キーと値のペアからなる文字列を [Map(String, String)](../data-types/map.md) に変換します。 -解析はノイズ(例: ログファイル)を含んでいても許容されます。 -入力文字列内のキーと値のペアは、キー、キーと値の区切り文字、それに続く値で構成されます。 -キーと値のペア同士はペア区切り文字で区切られます。 +パース処理は(ログファイルなどの)ノイズに対して寛容です。 +入力文字列中のキーと値のペアは、キー、キーと値の区切り文字、それに続く値から構成されます。 +キーと値のペア同士は、ペア区切り文字で区切られます。 キーと値は引用符で囲むことができます。 **構文** @@ -128,7 +128,7 @@ SELECT mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3]) extractKeyValuePairs(data[, key_value_delimiter[, pair_delimiter[, quoting_character[, unexpected_quoting_character_strategy]]]) ``` -別名: +Alias: * `str_to_map` * `mapFromString` @@ -138,12 +138,12 @@ extractKeyValuePairs(data[, key_value_delimiter[, pair_delimiter[, quoting_chara * `data` - キーと値のペアを抽出する対象の文字列。[String](../data-types/string.md) または [FixedString](../data-types/fixedstring.md)。 * `key_value_delimiter` - キーと値を区切る 1 文字の区切り文字。デフォルトは `:`。[String](../data-types/string.md) または [FixedString](../data-types/fixedstring.md)。 * `pair_delimiters` - ペア同士を区切る文字の集合。デフォルトは ` `、`,`、`;`。[String](../data-types/string.md) または [FixedString](../data-types/fixedstring.md)。 -* `quoting_character` - クオート文字として使用される 1 文字。デフォルトは `"`. [String](../data-types/string.md) または [FixedString](../data-types/fixedstring.md)。 -* `unexpected_quoting_character_strategy` - `read_key` および `read_value` フェーズ中に想定外の位置で現れたクオート文字を処理する戦略。取りうる値: `invalid`、`accept`、`promote`。`invalid` はキー/値を破棄し、`WAITING_KEY` 状態に戻る。`accept` は通常の文字として扱う。`promote` は `READ_QUOTED_{KEY/VALUE}` 状態へ遷移し、次の文字から開始する。 +* `quoting_character` - クオート文字として使われる 1 文字。デフォルトは `"`. 
[String](../data-types/string.md) または [FixedString](../data-types/fixedstring.md)。 +* `unexpected_quoting_character_strategy` - `read_key` と `read_value` フェーズで、予期しない位置に現れたクオート文字をどのように扱うかの戦略。指定可能な値: `invalid`, `accept`, `promote`。`invalid` はキー/値を破棄し、`WAITING_KEY` 状態に戻る。`accept` は通常の文字として扱う。`promote` は `READ_QUOTED_{KEY/VALUE}` 状態に遷移し、次の文字から読み取りを再開する。 -**返される値** +**返り値** -* キーと値のペアからなるマップ。型: [Map(String, String)](../data-types/map.md) +* キーと値のペアの集合。型: [Map(String, String)](../data-types/map.md) **使用例** @@ -161,7 +161,7 @@ SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') A └─────────────────────────────────────────────────────────────────────────┘ ``` -クォート文字としてシングルクォート `'` を使用する場合: +引用文字としてシングルクォート(`'`)を使用する場合: ```sql SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') AS kv @@ -175,7 +175,7 @@ SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:bra └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -unexpected_quoting_character_strategy の例: +unexpected_quoting_character_strategy の設定例: unexpected_quoting_character_strategy=invalid @@ -243,7 +243,7 @@ SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'PROMOTE') AS kv; └──────────────┘ ``` -エスケープシーケンス非対応環境でのエスケープシーケンス: +エスケープシーケンス(エスケープシーケンス非対応環境向け): ```sql SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv @@ -257,7 +257,7 @@ SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv └────────────────────────┘ ``` -`toString` でシリアライズされた Map<string, string> のキーと値のペアを復元するには: +`toString` でシリアライズされた map の文字列キーと値のペアを復元するには、次のようにします。 ```sql SELECT @@ -267,10 +267,10 @@ SELECT FORMAT Vertical; ``` -結果: +結果: ```response -Row 1: +行 1: ────── m: {'John':'33','Paula':'31'} map_serialized: {'John':'33','Paula':'31'} @@ -279,23 +279,23 @@ map_restored: {'John':'33','Paula':'31'} ## extractKeyValuePairsWithEscaping 
{#extractkeyvaluepairswithescaping} -`extractKeyValuePairs` と同様ですが、エスケープシーケンスをサポートします。 +`extractKeyValuePairs` と同様ですが、エスケープシーケンスに対応しています。 -サポートされているエスケープシーケンス: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v`, `\0`。 -標準ではないエスケープシーケンスは、次のいずれかの場合を除き(バックスラッシュを含めて)そのまま返されます: +サポートされるエスケープシーケンス: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v`, `\0`。 +標準的でないエスケープシーケンスは、次のいずれかの場合を除き、そのまま(バックスラッシュを含めて)返されます: `\\`, `'`, `"`, `backtick`, `/`, `=` または ASCII 制御文字 (c <= 31)。 -この関数は、事前エスケープおよび事後エスケープが適さないユースケースに有用です。例えば、次の +この関数は、事前エスケープや事後エスケープでは対処できないユースケースに適しています。たとえば、次の 入力文字列を考えます: `a: "aaaa\"bbb"`。期待される出力は: `a: aaaa\"bbbb` です。 -* 事前エスケープ: 事前エスケープすると、出力は `a: "aaaa"bbb"` となり、その後 `extractKeyValuePairs` は `a: aaaa` を出力します -* 事後エスケープ: `extractKeyValuePairs` は `a: aaaa\` を出力し、事後エスケープではそれがそのまま維持されます。 +* 事前エスケープ: 事前エスケープすると出力は `a: "aaaa"bbb"` となり、その後 `extractKeyValuePairs` は `a: aaaa` を出力します +* 事後エスケープ: `extractKeyValuePairs` は `a: aaaa\` を出力し、事後エスケープではそれをそのまま保持します。 -キー内の先頭にあるエスケープシーケンスはスキップされ、値に対しては不正と見なされます。 +キー内の先頭のエスケープシーケンスはスキップされ、値に対しては無効とみなされます。 **例** -エスケープシーケンスサポートを有効にした場合のエスケープシーケンスの例: +エスケープシーケンスのサポートを有効にした場合の動作例: ```sql SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv @@ -311,7 +311,7 @@ SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv ## mapAdd {#mapadd} -すべてのキーを収集し、対応する値を合計します。 +すべてのキーを集めて、それぞれに対応する値を合計します。 **構文** @@ -321,15 +321,15 @@ mapAdd(arg1, arg2 [, ...]) **引数** -引数は 2 つの[配列](/sql-reference/data-types/array)からなる [map](../data-types/map.md) または [tuple](/sql-reference/data-types/tuple) で、最初の配列の要素がキーを表し、2 番目の配列がそれぞれのキーに対応する値を保持します。すべてのキー配列は同じ型でなければならず、すべての値配列は 1 つの型([Int64](/sql-reference/data-types/int-uint#integer-ranges)、[UInt64](/sql-reference/data-types/int-uint#integer-ranges)、または [Float64](/sql-reference/data-types/float))へ昇格可能な要素を含んでいる必要があります。共通の昇格後の型が、結果の配列の型として使用されます。 +引数は、2 
つの[配列](/sql-reference/data-types/array)から構成される[map](../data-types/map.md)または[tuple](/sql-reference/data-types/tuple)であり、最初の配列の要素がキーを表し、2 番目の配列に各キーに対応する値が含まれます。すべてのキー配列は同じ型でなければならず、すべての値配列は 1 つの型([Int64](/sql-reference/data-types/int-uint#integer-ranges)、[UInt64](/sql-reference/data-types/int-uint#integer-ranges)、または [Float64](/sql-reference/data-types/float))へと昇格可能な要素を含んでいる必要があります。共通の昇格後の型が、結果配列の型として使用されます。 -**返される値** +**戻り値** -* 引数に応じて 1 つの [map](../data-types/map.md) または [tuple](/sql-reference/data-types/tuple) を返し、1 番目の配列にはソート済みのキーが含まれ、2 番目の配列には値が含まれます。 +* 引数に応じて、最初の配列にソート済みのキーを含み、2 番目の配列に値を含む [map](../data-types/map.md) または [tuple](/sql-reference/data-types/tuple) を 1 つ返します。 **例** -`Map` 型を用いたクエリ: +`Map` 型を使ったクエリ: ```sql SELECT mapAdd(map(1,1), map(1,1)); @@ -359,7 +359,7 @@ SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) AS res, toTy ## mapSubtract {#mapsubtract} -すべてのキーを収集し、対応する値同士を減算します。 +すべてのキーを集約し、対応する値の差を取ります。 **構文** @@ -369,11 +369,11 @@ mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...]) **引数** -引数は 2 つの[配列](/sql-reference/data-types/array)からなる [map](../data-types/map.md) または [tuple](/sql-reference/data-types/tuple) であり、1 つ目の配列の要素がキーを表し、2 つ目の配列に各キーに対応する値が格納されます。すべてのキー配列は同じ型でなければならず、すべての値配列の要素は、型昇格によって 1 つの型([Int64](/sql-reference/data-types/int-uint#integer-ranges)、[UInt64](/sql-reference/data-types/int-uint#integer-ranges)、または [Float64](/sql-reference/data-types/float))に統一できる必要があります。昇格後の共通型が、結果の配列の型として使用されます。 +引数は 2 つの [配列](/sql-reference/data-types/array)から構成される [map](../data-types/map.md) または [tuple](/sql-reference/data-types/tuple) であり、1 つ目の配列の要素がキーを表し、2 つ目の配列が各キーに対応する値を含みます。すべてのキー配列は同じ型である必要があり、すべての値配列は 1 つの共通の型([Int64](/sql-reference/data-types/int-uint#integer-ranges)、[UInt64](/sql-reference/data-types/int-uint#integer-ranges)、または [Float64](/sql-reference/data-types/float))へ昇格される要素を含んでいる必要があります。この共通の昇格後の型が、結果配列の型として使用されます。 **戻り値** -* 引数に応じて、1 つの [map](../data-types/map.md) または [tuple](/sql-reference/data-types/tuple) 
を返します。このとき、1 つ目の配列にはソートされたキーが、2 つ目の配列には値が格納されます。 +* 引数に応じて 1 つの [map](../data-types/map.md) または [tuple](/sql-reference/data-types/tuple) を返し、1 つ目の配列にはソートされたキーが、2 つ目の配列には値が含まれます。 **例** @@ -383,7 +383,7 @@ mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...]) SELECT mapSubtract(map(1,1), map(1,1)); ``` -結果: +結果: ```text ┌─mapSubtract(map(1, 1), map(1, 1))─┐ @@ -397,7 +397,7 @@ SELECT mapSubtract(map(1,1), map(1,1)); SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) AS res, toTypeName(res) AS type; ``` -結果: +結果: ```text ┌─res────────────┬─type──────────────────────────────┐ @@ -407,11 +407,11 @@ SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt3 ## mapPopulateSeries {#mappopulateseries} -整数キーを持つマップに対して、欠けているキーと値の組を補完します。 -既存の最大キーより大きいキーまで拡張できるように、上限となる最大キーを指定できます。 -より正確には、この関数は、キーが最小のキーから最大のキー(または指定されている場合は `max` 引数)まで 1 刻みの数列を成し、それに対応する値を持つマップを返します。 +整数キーを持つマップで、欠損しているキーと値のペアを補完します。 +最大値を超えてキーを拡張できるように、最大キーを指定することができます。 +より正確には、この関数は、キーが最小キーから最大キー(指定されていれば引数 `max`)までステップ幅 1 の数列を成し、それぞれに対応する値を持つマップを返します。 あるキーに対して値が指定されていない場合、そのキーの値としてデフォルト値が使用されます。 -キーが重複している場合、そのキーには最初の値(出現順)が関連付けられます。 +キーが重複している場合、そのキーには(出現順に)最初の値のみが対応付けられます。 **構文** @@ -420,29 +420,29 @@ mapPopulateSeries(map[, max]) mapPopulateSeries(keys, values[, max]) ``` -配列引数の場合、各行において `keys` と `values` の要素数は同じでなければなりません。 +配列引数の場合、各行ごとに `keys` と `values` の要素数は同じでなければなりません。 -**引数** +**Arguments** -引数は、[Map](../data-types/map.md) か 2 つの [Array](/sql-reference/data-types/array) であり、1 つ目と 2 つ目の配列には、それぞれキーおよび各キーに対応する値が格納されます。 +引数は [Maps](../data-types/map.md) か、または 2 つの [Arrays](/sql-reference/data-types/array) で、1 つ目と 2 つ目の配列にはそれぞれキーと、その各キーに対応する値が含まれます。 マップされた配列: -* `map` — 整数キーを持つ Map。[Map](../data-types/map.md)。 +* `map` — 整数キーを持つ Map。 [Map](../data-types/map.md)。 または -* `keys` — キーの配列。[Array](/sql-reference/data-types/array)([Int](/sql-reference/data-types/int-uint#integer-ranges))。 -* `values` — 
値の配列。[Array](/sql-reference/data-types/array)([Int](/sql-reference/data-types/int-uint#integer-ranges))。 -* `max` — キーの最大値。省略可能。[Int8, Int16, Int32, Int64, Int128, Int256](/sql-reference/data-types/int-uint#integer-ranges)。 +* `keys` — キーの配列。 [Array](/sql-reference/data-types/array)([Int](/sql-reference/data-types/int-uint#integer-ranges))。 +* `values` — 値の配列。 [Array](/sql-reference/data-types/array)([Int](/sql-reference/data-types/int-uint#integer-ranges))。 +* `max` — キーの最大値。省略可能。 [Int8, Int16, Int32, Int64, Int128, Int256](/sql-reference/data-types/int-uint#integer-ranges)。 -**戻り値** +**Returned value** -* 引数に応じて、[Map](../data-types/map.md) か、ソートされた順序のキー配列とそれに対応する値配列からなる 2 つの [Array](/sql-reference/data-types/array) の [Tuple](/sql-reference/data-types/tuple)。 +* 引数に応じて、[Map](../data-types/map.md) または 2 つの [Arrays](/sql-reference/data-types/array) からなる [Tuple](/sql-reference/data-types/tuple) が返されます。前者はソート済みのキー、後者はそれぞれのキーに対応する値です。 -**例** +**Example** -`Map` 型を用いたクエリ: +`Map` 型を使ったクエリ: ```sql SELECT mapPopulateSeries(map(1, 10, 5, 20), 6); @@ -456,13 +456,13 @@ SELECT mapPopulateSeries(map(1, 10, 5, 20), 6); └─────────────────────────────────────────┘ ``` -マッピングされた配列を使ったクエリ: +マッピングされた配列に対するクエリ: ```sql SELECT mapPopulateSeries([1,2,4], [11,22,44], 5) AS res, toTypeName(res) AS type; ``` -結果: +結果: ```text ┌─res──────────────────────────┬─type──────────────────────────────┐ @@ -474,8 +474,8 @@ SELECT mapPopulateSeries([1,2,4], [11,22,44], 5) AS res, toTypeName(res) AS type 指定された map のキーを返します。 -この関数は、設定 [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns) を有効にすることで最適化できます。 -この設定を有効にすると、関数は map 全体ではなく、[keys](/sql-reference/data-types/map#reading-subcolumns-of-map) サブカラムのみを読み取ります。 +この関数は、setting [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns) を有効にすることで最適化できます。 +この setting を有効にすると、この関数は map 全体ではなく [keys](/sql-reference/data-types/map#reading-subcolumns-of-map) サブカラムだけを読み取ります。 
クエリ `SELECT mapKeys(m) FROM table` は `SELECT m.keys FROM table` に変換されます。 **構文** @@ -488,9 +488,9 @@ mapKeys(map) * `map` — マップ。[Map](../data-types/map.md)。 -**戻り値** +**返される値** -* `map` 内のすべてのキーを含む配列。[Array](../data-types/array.md)。 +* `map` に含まれるすべてのキーを含む配列。[Array](../data-types/array.md)。 **例** @@ -515,7 +515,7 @@ SELECT mapKeys(a) FROM tab; ## mapContains {#mapcontains} -指定された map に、指定されたキーが含まれているかどうかを返します。 +指定したマップに指定したキーが含まれているかどうかを返します。 **構文** @@ -530,9 +530,9 @@ mapContains(map, key) * `map` — マップ。[Map](../data-types/map.md)。 * `key` — キー。型は `map` のキー型と一致している必要があります。 -**戻り値** +**返り値** -* `map` が `key` を含む場合は `1`、含まない場合は `0`。[UInt8](../data-types/int-uint.md)。 +* `map` に `key` が含まれていれば `1`、含まれていなければ `0`。[UInt8](../data-types/int-uint.md)。 **例** @@ -566,16 +566,16 @@ mapContainsKeyLike(map, pattern) **引数** -* `map` — Map。 [Map](../data-types/map.md)。 -* `pattern` - 照合に使用する文字列パターン。 +* `map` — Map 型。[Map](../data-types/map.md)。 +* `pattern` - マッチさせる文字列パターン。 **戻り値** -* `map` に `pattern` で指定したパターンにマッチする `key` が含まれている場合は `1`、含まれていない場合は `0`。 +* `map` が指定されたパターンにマッチする `key` を含む場合は `1`、含まない場合は `0`。 **例** -クエリ: +クエリ: ```sql CREATE TABLE tab (a Map(String, String)) ENGINE = Memory; @@ -596,7 +596,7 @@ SELECT mapContainsKeyLike(a, 'a%') FROM tab; ## mapExtractKeyLike {#mapextractkeylike} -文字列キーを持つ `map` と LIKE パターンが与えられると、この関数はキーがそのパターンにマッチする要素のみを含む `map` を返します。 +文字列キーを持つ `Map` と LIKE パターンが与えられると、この関数はキーがそのパターンに一致する要素のみを含む `Map` を返します。 **構文** @@ -606,16 +606,16 @@ mapExtractKeyLike(map, pattern) **引数** -* `map` — マップ。[Map](../data-types/map.md)。 +* `map` — Map 型。[Map](../data-types/map.md)。 * `pattern` - マッチさせる文字列パターン。 -**返される値** +**戻り値** -* 指定したパターンに一致するキーを持つ要素を含むマップ。パターンに一致する要素がない場合は、空のマップが返されます。 +* 指定したパターンに一致するキーを持つ要素のみを含む Map。一致する要素がない場合は、空の Map が返されます。 **例** -クエリ: +クエリ: ```sql CREATE TABLE tab (a Map(String, String)) ENGINE = Memory; @@ -640,7 +640,7 @@ SELECT mapExtractKeyLike(a, 'a%') FROM tab; この関数は、設定 
[optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns) を有効にすることで最適化できます。 この設定を有効にすると、関数は map 全体ではなく、[values](/sql-reference/data-types/map#reading-subcolumns-of-map) サブカラムのみを読み取ります。 -クエリ `SELECT mapValues(m) FROM table` は `SELECT m.values FROM table` に書き換えられます。 +クエリ `SELECT mapValues(m) FROM table` は `SELECT m.values FROM table` に変換されます。 **構文** @@ -650,15 +650,15 @@ mapValues(map) **引数** -* `map` — マップ。 [Map](../data-types/map.md)。 +* `map` — Map 型。[Map](../data-types/map.md)。 **戻り値** -* `map` に含まれるすべての値を含む配列。 [Array](../data-types/array.md)。 +* `map` に含まれるすべての値を格納した配列。[Array](../data-types/array.md)。 **例** -クエリ: +クエリ: ```sql CREATE TABLE tab (a Map(String, String)) ENGINE = Memory; @@ -679,7 +679,7 @@ SELECT mapValues(a) FROM tab; ## mapContainsValue {#mapcontainsvalue} -指定された map に指定されたキーが含まれているかどうかを返します。 +指定した map に指定したキーが含まれているかどうかを返します。 **構文** @@ -687,7 +687,7 @@ SELECT mapValues(a) FROM tab; mapContainsValue(map, value) ``` -Alias: `mapContainsValue(map, value)` +別名: `mapContainsValue(map, value)` **引数** @@ -700,7 +700,7 @@ Alias: `mapContainsValue(map, value)` **例** -クエリ: +クエリ: ```sql CREATE TABLE tab (a Map(String, String)) ENGINE = Memory; @@ -730,12 +730,12 @@ mapContainsValueLike(map, pattern) **引数** -* `map` — マップ。[Map](../data-types/map.md)。 +* `map` — Map。 [Map](../data-types/map.md)。 * `pattern` - 照合する文字列パターン。 -**返される値** +**返り値** -* `map` に `pattern` で指定したパターンにマッチする `value` が含まれている場合は `1`、含まれていない場合は `0`。 +* `map` に、指定したパターンにマッチする `value` が含まれていれば `1`、含まれていなければ `0`。 **例** @@ -760,7 +760,7 @@ SELECT mapContainsValueLike(a, 'a%') FROM tab; ## mapExtractValueLike {#mapextractvaluelike} -文字列値を持つマップと `LIKE` パターンを指定すると、この関数は値がパターンに一致する要素のみを含むマップを返します。 +文字列値を持つ Map と LIKE パターンを指定すると、この関数は値がパターンにマッチする要素のみを含む Map を返します。 **構文** @@ -770,12 +770,12 @@ mapExtractValueLike(map, pattern) **引数** -* `map` — マップ。[Map](../data-types/map.md)。 -* `pattern` - マッチさせる文字列パターン。 +* `map` — Map。[Map](../data-types/map.md)。 +* `pattern` 
- 照合する文字列パターン。 -**戻り値** +**返り値** -* 値が指定したパターンにマッチする要素だけで構成されるマップ。パターンにマッチする要素がない場合は、空のマップが返されます。 +* 値が指定したパターンに一致する要素を含む map。パターンに一致する要素がない場合は、空の map が返されます。 **例** @@ -789,7 +789,7 @@ INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); SELECT mapExtractValueLike(a, 'a%') FROM tab; ``` -結果: +結果: ```text ┌─mapExtractValueLike(a, 'a%')─┐ @@ -800,7 +800,7 @@ SELECT mapExtractValueLike(a, 'a%') FROM tab; ## mapApply {#mapapply} -マップの各要素に関数を適用します。 +map の各要素に関数を適用します。 **構文** @@ -810,12 +810,12 @@ mapApply(func, map) **引数** -* `func` — [Lambda 関数](/sql-reference/functions/overview#higher-order-functions)。 -* `map` — [Map 型](../data-types/map.md)。 +* `func` — [ラムダ関数](/sql-reference/functions/overview#higher-order-functions)。 +* `map` — [Map](../data-types/map.md)。 -**返される値** +**返り値** -* 各要素ごとに `func(map1[i], ..., mapN[i])` を適用して、元のマップから生成されたマップを返します。 +* 各要素に対して `func(map1[i], ..., mapN[i])` を適用することで、元のマップから得られるマップを返します。 **例** @@ -830,7 +830,7 @@ FROM ) ``` -結果: +結果: ```text ┌─r─────────────────────┐ @@ -842,7 +842,7 @@ FROM ## mapFilter {#mapfilter} -map の各要素に関数を適用してフィルタ処理を行います。 +マップの各要素に関数を適用してフィルタリングします。 **構文** @@ -857,7 +857,7 @@ mapFilter(func, map) **戻り値** -* `func(map1[i], ..., mapN[i])` が 0 以外の値を返す、`map` 内の要素のみを含む map を返します。 +* `func(map1[i], ..., mapN[i])` が 0 以外の値を返す要素のみを含む `map` を返します。 **例** @@ -872,7 +872,7 @@ FROM ) ``` -結果: +結果: ```text ┌─r───────────────────┐ @@ -897,17 +897,17 @@ mapUpdate(map1, map2) **戻り値** -* `map2` 内の対応するキーの値で更新された `map1` を返します。 +* `map2` の対応するキーの値で値を更新した `map1` を返します。 **例** -クエリ: +クエリ: ```sql SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) AS map; ``` -結果: +結果: ```text ┌─map────────────────────────────┐ @@ -917,8 +917,8 @@ SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) AS map; ## mapConcat {#mapconcat} -キーが等しいことに基づいて複数の Map を連結します。 -同じキーを持つ要素が 2 つ以上の入力 Map に存在する場合、すべての要素が結果の Map に追加されますが、演算子 `[]` でアクセスできるのは最初の要素だけです。 +キーの一致に基づいて複数の map を連結します。 
+同じキーを持つ要素が複数の入力 map に存在する場合、すべての要素が結果の map に追加されますが、`[]` 演算子でアクセスできるのは最初の要素のみです。 **構文** @@ -928,11 +928,11 @@ mapConcat(maps) **引数** -* `maps` – 任意の数の [Maps](../data-types/map.md)。 +* `maps` – 任意数の[Map](../data-types/map.md)。 -**返り値** +**返される値** -* 引数として渡されたマップを連結したマップを返します。 +* 引数として渡された Map を連結した結果の Map を返します。 **例** @@ -942,7 +942,7 @@ mapConcat(maps) SELECT mapConcat(map('key1', 1, 'key3', 3), map('key2', 2)) AS map; ``` -結果: +結果: ```text ┌─map──────────────────────────┐ @@ -966,11 +966,11 @@ SELECT mapConcat(map('key1', 1, 'key2', 2), map('key1', 3)) AS map, map['key1']; ## mapExists([func,], map) {#mapexistsfunc-map} -`map` 内に、`func(key, value)` が 0 以外の値を返すキーと値のペアが 1 つ以上存在する場合は 1 を、それ以外の場合は 0 を返します。 +`map` 内の少なくとも1つのキーと値のペアについて、`func(key, value)` が0以外を返す場合は1を返します。そうでない場合は0を返します。 :::note `mapExists` は[高階関数](/sql-reference/functions/overview#higher-order-functions)です。 -最初の引数としてラムダ関数を渡すことができます。 +第1引数としてラムダ関数を渡すことができます。 ::: **例** @@ -991,16 +991,16 @@ SELECT mapExists((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res ## mapAll([func,] map) {#mapallfunc-map} -`map` 内のすべてのキー・値ペアについて、`func(key, value)` が 0 以外の値を返した場合は 1 を返します。そうでない場合は 0 を返します。 +`map` 内のすべてのキーと値のペアに対して `func(key, value)` が 0 以外の値を返す場合は 1 を返し、そうでない場合は 0 を返します。 :::note -`mapAll` は[高階関数](/sql-reference/functions/overview#higher-order-functions)です。 +`mapAll` は [高階関数](/sql-reference/functions/overview#higher-order-functions) です。 第 1 引数としてラムダ関数を渡すことができます。 ::: **例** -クエリ: +クエリ: ```sql SELECT mapAll((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res @@ -1017,7 +1017,7 @@ SELECT mapAll((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res ## mapSort([func,], map) {#mapsortfunc-map} map の要素を昇順に並べ替えます。 -`func` 関数が指定されている場合は、map のキーと値に `func` 関数を適用した結果に基づいて並び順が決まります。 +`func` 関数が指定されている場合、map のキーと値に `func` 関数を適用した結果によって並べ替え順が決定されます。 **例** @@ -1045,8 +1045,8 @@ SELECT mapSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map; ## mapPartialSort {#mappartialsort} -`limit` 
引数を追加で指定することで、マップの要素を昇順に並べ替える際に、ソートする要素数を制限できます。 -`func` 関数が指定されている場合は、マップのキーと値に対して `func` 関数を適用した結果に基づいてソート順が決まります。 +map の要素を昇順にソートします。`limit` 引数によって部分ソートを指定できます。 +`func` 関数が指定された場合、map のキーおよび値に `func` 関数を適用した結果に基づいてソート順が決定されます。 **構文** @@ -1056,13 +1056,13 @@ mapPartialSort([func,] limit, map) **引数** -* `func` – マップのキーと値に適用する省略可能な関数。[Lambda function](/sql-reference/functions/overview#higher-order-functions)。 +* `func` – map のキーと値に適用する任意の関数。[Lambda function](/sql-reference/functions/overview#higher-order-functions)。 * `limit` – 範囲 [1..limit] の要素がソートされます。[(U)Int](../data-types/int-uint.md)。 -* `map` – ソート対象のマップ。[Map](../data-types/map.md)。 +* `map` – ソートする map。[Map](../data-types/map.md)。 **戻り値** -* 部分的にソートされたマップ。[Map](../data-types/map.md)。 +* 部分的にソートされた map。[Map](../data-types/map.md)。 **例** @@ -1078,8 +1078,8 @@ SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)); ## mapReverseSort([func,], map) {#mapreversesortfunc-map} -map の要素を降順でソートします。 -`func` 関数が指定されている場合、map のキーと値に `func` 関数を適用した結果によって、ソート順が決定されます。 +マップの要素を降順にソートします。 +`func` 関数が指定されている場合、マップのキーおよび値に `func` 関数を適用した結果に基づいてソートされます。 **例** @@ -1103,12 +1103,12 @@ SELECT mapReverseSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map; └──────────────────────────────┘ ``` -詳細については、関数 [arrayReverseSort](/sql-reference/functions/array-functions#arrayReverseSort) を参照してください。 +詳細は、関数 [arrayReverseSort](/sql-reference/functions/array-functions#arrayReverseSort) を参照してください。 ## mapPartialReverseSort {#mappartialreversesort} -`limit` 引数を指定すると、マップの要素を降順で部分的にソートします。 -`func` 関数が指定されている場合、マップのキーおよび値に対して `func` を適用した結果に基づいてソート順が決定されます。 +追加の `limit` 引数により、マップの要素を降順に部分ソートします。 +`func` 関数が指定されている場合は、マップのキーおよび値に `func` 関数を適用した結果に基づいてソート順が決定されます。 **構文** @@ -1118,13 +1118,13 @@ mapPartialReverseSort([func,] limit, map) **引数** -* `func` – マップのキーと値に適用するオプションの関数。[ラムダ関数](/sql-reference/functions/overview#higher-order-functions)。 -* `limit` – 範囲 [1..limit] の要素がソートされます。[(U)Int](../data-types/int-uint.md)。 -* `map` 
– ソートするマップ。[Map](../data-types/map.md)。 +* `func` – map のキーと値に適用する任意の関数。[Lambda 関数](/sql-reference/functions/overview#higher-order-functions)。 +* `limit` – 範囲 [1..limit] 内の要素をソートします。[(U)Int](../data-types/int-uint.md)。 +* `map` – ソート対象の map。[Map](../data-types/map.md)。 **戻り値** -* 部分的にソートされたマップ。[Map](../data-types/map.md)。 +* 部分的にソートされた map。[Map](../data-types/map.md)。 **例** @@ -1139,11 +1139,1073 @@ SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)); ``` {/* - 以下のタグ内の内容は、ドキュメントフレームワークのビルド時に + 以下のタグの内側の内容は、ドキュメントフレームワークのビルド時に system.functions から自動生成されたドキュメントで置き換えられます。タグを変更または削除しないでください。 詳細は https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md を参照してください。 */ } {/*AUTOGENERATED_START*/ } +## extractKeyValuePairs {#extractKeyValuePairs} + +導入バージョン: v + +任意の文字列からキーと値のペアを抽出します。文字列は 100% キー・バリュー形式で構造化されている必要はありません。 + +ノイズ(例: ログファイル)を含んでいても問題ありません。解釈対象となるキー・バリュー形式は、関数の引数で指定する必要があります。 + +キーと値のペアは、キーに続いて `key_value_delimiter` と値が並ぶ形で構成されます。引用符付きのキーおよび値にも対応しています。キーと値のペア同士は、ペア区切り文字で区切られている必要があります。 + +**構文** + +```sql + extractKeyValuePairs(data, [key_value_delimiter], [pair_delimiter], [quoting_character]) +``` + +**引数** + +* `data` - キーと値のペアを抽出する対象の文字列。[String](../../sql-reference/data-types/string.md) または [FixedString](../../sql-reference/data-types/fixedstring.md)。 + * `key_value_delimiter` - キーと値の間の区切り文字として使用する文字。デフォルトは `:`。型は [String](../../sql-reference/data-types/string.md) または [FixedString](../../sql-reference/data-types/fixedstring.md)。 + * `pair_delimiters` - ペア間の区切り文字として使用する文字の集合。デフォルトは `\space`、`,`、`;`。型は [String](../../sql-reference/data-types/string.md) または [FixedString](../../sql-reference/data-types/fixedstring.md)。 + * `quoting_character` - クオート文字として使用する文字。デフォルトは `"`. 
型は [String](../../sql-reference/data-types/string.md) または [FixedString](../../sql-reference/data-types/fixedstring.md)。 + * `unexpected_quoting_character_strategy` - `read_key` および `read_value` フェーズ中に想定外の位置に現れたクオート文字を処理するための戦略。指定可能な値: `invalid`、`accept`、`promote`。`invalid` はキー/値を破棄して `WAITING_KEY` 状態に戻ります。`accept` は通常の文字として扱います。`promote` は `READ_QUOTED_{KEY/VALUE}` 状態へ遷移し、次の文字から処理を開始します。デフォルト値は `INVALID` です。 + +**戻り値** + +* 抽出されたキーと値のペアを Map(String, String) 型のマップとして返します。 + +**例** + +クエリ: + +**単純な例** + +```sql + arthur :) select extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv + + SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv + + Query id: f9e0ca6f-3178-4ee2-aa2c-a5517abb9cee + + ┌─kv──────────────────────────────────────────────────────────────────────┐ + │ {'name':'neymar','age':'31','team':'psg','nationality':'brazil'} │ + └─────────────────────────────────────────────────────────────────────────┘ +``` + +**引用文字としての単一引用符** + +```sql + arthur :) select extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv + + SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv + + クエリ ID: 0e22bf6b-9844-414a-99dc-32bf647abd5e + + ┌─kv───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ + │ {'name':'neymar','age':'31','team':'psg','nationality':'brazil','last_key':'last_value'} │ + └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +unexpected_quoting_character_strategy の例: + +unexpected_quoting_character_strategy=invalid + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'INVALID') as kv; +``` + +```text + ┌─kv────────────────┐ + │ {'abc':'5'} │ + └───────────────────┘ +``` + +```sql + SELECT 
extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'INVALID') as kv; +``` + +```text + ┌─kv──┐ + │ {} │ + └─────┘ +``` + +unexpected_quoting_character_strategy=accept + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'ACCEPT') as kv; +``` + +```text + ┌─kv────────────────┐ + │ {'name"abc':'5'} │ + └───────────────────┘ +``` + +```sql + SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'ACCEPT') as kv; +``` + +```text + ┌─kv─────────────────┐ + │ {'name"abc"':'5'} │ + └────────────────────┘ +``` + +unexpected_quoting_character_strategy=promote + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'PROMOTE') as kv; +``` + +```text + ┌─kv──┐ + │ {} │ + └─────┘ +``` + +```sql + SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'PROMOTE') as kv; +``` + +```text + ┌─kv───────────┐ + │ {'abc':'5'} │ + └──────────────┘ +``` + +**エスケープシーケンス非対応環境でのエスケープ** + +```sql + arthur :) select extractKeyValuePairs('age:a\\x0A\\n\\0') as kv + + SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv + + Query id: e9fd26ee-b41f-4a11-b17f-25af6fd5d356 + + ┌─kv────────────────────┐ + │ {'age':'a\\x0A\\n\\0'} │ + └───────────────────────┘ +``` + +**構文** + +```sql +``` + +**別名**: `str_to_map`, `mapFromString` + +**引数** + +* なし。 + +**戻り値** + +**例** + +## extractKeyValuePairsWithEscaping {#extractKeyValuePairsWithEscaping} + +導入バージョン: v + +`extractKeyValuePairs` と同じですが、エスケープシーケンスに対応しています。 + +サポートされるエスケープシーケンス: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v`, `\0`。 +標準外のエスケープシーケンスは、次のいずれかに該当しない限り、そのまま(バックスラッシュを含めて)返されます: +`\\`, `'`, `"`, `backtick`, `/`, `=` または ASCII 制御文字 (`c <= 31`)。 + +この関数は、事前エスケープおよび事後エスケープが適さないユースケースに適しています。例えば、次の入力文字列を考えます: +`a: "aaaa\"bbb"`。期待される出力は `a: aaaa\"bbbb` です。 + +* 事前エスケープ: 事前エスケープすると、出力は `a: "aaaa"bbb"` となり、その後 `extractKeyValuePairs` は `a: aaaa` を出力します。 + * 事後エスケープ: `extractKeyValuePairs` は `a: aaaa\` を出力し、事後エスケープを行ってもそのまま保持されます。 + +先頭のエスケープシーケンスはキーではスキップされ、値に対しては不正とみなされます。 + 
+**エスケープシーケンス対応が有効な場合のエスケープシーケンス** + +```sql + arthur :) select extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') as kv + + SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv + + Query id: 44c114f0-5658-4c75-ab87-4574de3a1645 + + ┌─kv───────────────┐ + │ {'age':'a\n\n\0'} │ + └──────────────────┘ +``` + +**構文** + +```sql +``` + +**引数** + +* なし。 + +**戻り値** + +**例** + +## map {#map} + +導入バージョン: v21.1 + +キーと値のペアから、`Map(key, value)` 型の値を作成します。 + +**構文** + +```sql +map(key1, value1[, key2, value2, ...]) +``` + +**引数** + +* `key_n` — マップエントリのキー。[`Any`](/sql-reference/data-types) +* `value_n` — マップエントリの値。[`Any`](/sql-reference/data-types) + +**返り値** + +キーと値のペアを含むマップを返します。[`Map(Any, Any)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +SELECT map('key1', number, 'key2', number * 2) FROM numbers(3) +``` + +```response title=Response +{'key1':0,'key2':0} +{'key1':1,'key2':2} +{'key1':2,'key2':4} +``` + +## mapAdd {#mapAdd} + +導入バージョン: v20.7 + +すべてのキーを集約し、それぞれのキーに対応する値を合計します。 + +**構文** + +```sql +mapAdd(arg1[, arg2, ...]) +``` + +**引数** + +* `arg1[, arg2, ...]` — 2 つの配列からなる Map またはタプルであり、1 つ目の配列の要素がキー、2 つ目の配列の要素が各キーに対応する値になります。[`Map(K, V)`](/sql-reference/data-types/map) または [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**戻り値** + +Map またはタプルを返します。1 つ目の配列にはソート済みのキーが含まれ、2 つ目の配列には対応する値が含まれます。[`Map(K, V)`](/sql-reference/data-types/map) または [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**例** + +**Map 型での使用例** + +```sql title=Query +SELECT mapAdd(map(1, 1), map(1, 1)) +``` + +```response title=Response +{1:2} +``` + +**タプルを使用する場合** + +```sql title=Query +SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) +``` + +```response title=Response +([1, 2], [2, 2]) +``` + +## mapAll {#mapAll} + +導入バージョン: v23.4 + +マップ内のすべてのキーと値のペアに対して、ある条件が成り立つかどうかを判定します。 +`mapAll` は高階関数です。 +第1引数としてラムダ関数を渡すことができます。 + +**構文** + +```sql +mapAll([func,] map) +``` + +**引数** + +* `func` — ラムダ関数。[`Lambda 
function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — 検査対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) + +**返り値** + +すべてのキーと値のペアが条件を満たす場合は `1`、それ以外の場合は `0` を返します。[`UInt8`](/sql-reference/data-types/int-uint) + +**例** + +**使用例** + +```sql title=Query +SELECT mapAll((k, v) -> v = 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +0 +``` + +## mapApply {#mapApply} + +導入バージョン: v22.3 + +関数を map の各要素に適用します。 + +**構文** + +```sql +mapApply(func, map) +``` + +**引数** + +* `func` — ラムダ関数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — 関数を適用する対象の Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**戻り値** + +元の Map の各要素に `func` を適用して得られる新しい Map を返します。[`Map(K, V)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +SELECT mapApply((k, v) -> (k, v * 2), map('k1', 1, 'k2', 2)) +``` + +```response title=Response +{'k1':2,'k2':4} +``` + +## mapConcat {#mapConcat} + +導入バージョン: v23.4 + +複数の `map` を、そのキーの等値性に基づいて連結します。 +同じキーを持つ要素が複数の入力 `map` に存在する場合、すべての要素が結果の `map` に追加されますが、演算子 `[]` で参照できるのは最初の要素のみです。 + +**構文** + +```sql +mapConcat(maps) +``` + +**引数** + +* `maps` — 任意個の Map。[`Map`](/sql-reference/data-types/map) + +**返り値** + +引数として渡された Map を連結した Map を返します。[`Map`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +SELECT mapConcat(map('k1', 'v1'), map('k2', 'v2')) +``` + +```response title=Response +{'k1':'v1','k2':'v2'} +``` + +## mapContainsKey {#mapContainsKey} + +導入バージョン: v21.2 + +マップにキーが含まれているかどうかを判定します。 + +**構文** + +```sql +mapContains(map, key) +``` + +**エイリアス**: `mapContains` + +**引数** + +* `map` — 検索対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) +* `key` — 検索するキー。型はマップのキー型と一致している必要があります。[`Any`](/sql-reference/data-types) + +**戻り値** + +マップにキーが含まれていれば 1、含まれていなければ 0 を返します。[`UInt8`](/sql-reference/data-types/int-uint) + +**例** + +**使用例** + +```sql title=Query +SELECT mapContainsKey(map('k1', 'v1', 'k2', 'v2'), 'k1') +``` + +```response 
title=Response +1 +``` + +## mapContainsKeyLike {#mapContainsKeyLike} + +導入バージョン: v23.4 + +マップに、`LIKE` で指定したパターンに一致するキーが含まれているかを判定します。 + +**構文** + +```sql +mapContainsKeyLike(map, pattern) +``` + +**引数** + +* `map` — 検索対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — キーと照合するパターン。[`const String`](/sql-reference/data-types/string) + +**戻り値** + +`map` に `pattern` に一致するキーが含まれていれば `1`、そうでなければ `0` を返します。[`UInt8`](/sql-reference/data-types/int-uint) + +**例** + +**使用例** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapContainsKeyLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapContainsKeyLike(a, 'a%')─┐ +│ 1 │ +│ 0 │ +└─────────────────────────────┘ +``` + +## mapContainsValue {#mapContainsValue} + +導入バージョン: v25.6 + +マップに指定した値が含まれているかどうかを判定します。 + +**構文** + +```sql +mapContainsValue(map, value) +``` + +**引数** + +* `map` — 検索対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) +* `value` — 検索する値。型は `map` の値の型と一致している必要があります。[`Any`](/sql-reference/data-types) + +**戻り値** + +`map` に値が含まれていれば `1`、含まれていなければ `0` を返します。[`UInt8`](/sql-reference/data-types/int-uint) + +**例** + +**使用例** + +```sql title=Query +SELECT mapContainsValue(map('k1', 'v1', 'k2', 'v2'), 'v1') +``` + +```response title=Response +1 +``` + +## mapContainsValueLike {#mapContainsValueLike} + +導入バージョン: v25.5 + +マップに、指定したパターンに対して `LIKE` マッチする値が含まれているかをチェックします。 + +**構文** + +```sql +mapContainsValueLike(map, pattern) +``` + +**引数** + +* `map` — 検索対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — 値と照合するパターン。[`const String`](/sql-reference/data-types/string) + +**戻り値** + +`map` に `pattern` と一致する値が含まれている場合は `1`、それ以外は `0` を返します。[`UInt8`](/sql-reference/data-types/int-uint) + +**例** + +**使用例** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES 
({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapContainsValueLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapContainsV⋯ke(a, 'a%')─┐ +│ 1 │ +│ 0 │ +└──────────────────────────┘ +``` + +## mapExists {#mapExists} + +導入バージョン: v23.4 + +マップ内の少なくとも 1 つのキーと値のペアについて、条件が成り立つかどうかをテストします。 +`mapExists` は高階関数です。 +第 1 引数としてラムダ関数を渡すことができます。 + +**構文** + +```sql +mapExists([func,] map) +``` + +**引数** + +* `func` — 省略可能。ラムダ関数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — チェック対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) + +**返される値** + +少なくとも 1 つのキーと値の組が条件を満たす場合は `1` を返し、それ以外の場合は `0` を返します。[`UInt8`](/sql-reference/data-types/int-uint) + +**例** + +**使用例** + +```sql title=Query +SELECT mapExists((k, v) -> v = 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +1 +``` + +## mapExtractKeyLike {#mapExtractKeyLike} + +導入: v23.4 + +文字列キーを持つ map と `LIKE` パターンを引数に取り、この関数はキーがそのパターンにマッチする要素のみを含む map を返します。 + +**構文** + +```sql +mapExtractKeyLike(map, pattern) +``` + +**引数** + +* `map` — 抽出元となるマップ。[`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — キーと照合するためのパターン。[`const String`](/sql-reference/data-types/string) + +**戻り値** + +キーが指定したパターンにマッチする要素のみを含むマップを返します。パターンに一致する要素がない場合は、空のマップを返します。[`Map(K, V)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapExtractKeyLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapExtractKeyLike(a, 'a%')─┐ +│ {'abc':'abc'} │ +│ {} │ +└────────────────────────────┘ +``` + +## mapExtractValueLike {#mapExtractValueLike} + +導入バージョン: v25.5 + +文字列値を持つマップと `LIKE` パターンを指定すると、この関数は値がそのパターンに一致する要素のみを含むマップを返します。 + +**構文** + +```sql +mapExtractValueLike(map, pattern) +``` + +**引数** + +* `map` — 抽出対象とするマップ。[`Map(K, V)`](/sql-reference/data-types/map) +* 
`pattern` — 値と照合するパターン。[`const String`](/sql-reference/data-types/string) + +**戻り値** + +指定したパターンにマッチする値を持つ要素だけを含むマップを返します。パターンにマッチする要素がない場合は、空のマップが返されます。[`Map(K, V)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapExtractValueLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapExtractValueLike(a, 'a%')─┐ +│ {'abc':'abc'} │ +│ {} │ +└──────────────────────────────┘ +``` + +## mapFilter {#mapFilter} + +導入バージョン: v22.3 + +マップの各要素に関数を適用し、その結果に基づいてマップをフィルタリングします。 + +**構文** + +```sql +mapFilter(func, map) +``` + +**引数** + +* `func` — ラムダ関数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — フィルタ対象の Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**戻り値** + +`func` が `0` 以外の値を返す要素だけを含む Map を返します。[`Map(K, V)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +SELECT mapFilter((k, v) -> v > 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +{'k2':2} +``` + +## mapFromArrays {#mapFromArrays} + +v23.3 で導入。 + +キーの配列(またはマップ)と値の配列(またはマップ)からマップを作成します。 +この関数は、構文 `CAST([...], 'Map(key_type, value_type)')` の便利な代替手段です。 + +**構文** + +```sql +mapFromArrays(keys, values) +``` + +**別名**: `MAP_FROM_ARRAYS` + +**引数** + +* `keys` — マップを作成するためのキーの配列またはマップ。[`Array`](/sql-reference/data-types/array) または [`Map`](/sql-reference/data-types/map) +* `values` — マップを作成するための値の配列またはマップ。[`Array`](/sql-reference/data-types/array) または [`Map`](/sql-reference/data-types/map) + +**戻り値** + +キー配列および値の配列/マップから構成されるキーと値を持つマップを返します。[`Map`](/sql-reference/data-types/map) + +**例** + +**基本的な使い方** + +```sql title=Query +SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]) +``` + +```response title=Response +{'a':1,'b':2,'c':3} +``` + +**map 型を入力とする場合** + +```sql title=Query +SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 
3)) +``` + +```response title=Response +{1:('a', 1), 2:('b', 2), 3:('c', 3)} +``` + +## mapKeys {#mapKeys} + +導入バージョン: v21.2 + +指定されたマップのキーを返します。 +この関数は、設定 [`optimize_functions_to_subcolumns`](/operations/settings/settings#optimize_functions_to_subcolumns) を有効にすることで最適化できます。 +この設定を有効にすると、関数はマップ全体ではなく `keys` サブカラムだけを読み取ります。 +クエリ `SELECT mapKeys(m) FROM table` は `SELECT m.keys FROM table` に変換されます。 + +**構文** + +```sql +mapKeys(map) +``` + +**引数** + +* `map` — キーを抽出する対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) + +**戻り値** + +マップ内のすべてのキーを含む配列を返します。[`Array(T)`](/sql-reference/data-types/array) + +**例** + +**使用例** + +```sql title=Query +SELECT mapKeys(map('k1', 'v1', 'k2', 'v2')) +``` + +```response title=Response +['k1','k2'] +``` + +## mapPartialReverseSort {#mapPartialReverseSort} + +導入バージョン: v23.4 + +map の要素を降順にソートし、追加の limit 引数によって先頭の一部だけをソートできます。 +func 関数が指定されている場合、map のキーと値に func 関数を適用した結果に基づいてソート順が決まります。 + +**構文** + +```sql +mapPartialReverseSort([func,] limit, map) +``` + +**引数** + +* `func` — 省略可能。ラムダ関数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `limit` — 範囲 `[1..limit]` 内の要素がソートされます。[`(U)Int*`](/sql-reference/data-types/int-uint) +* `map` — ソート対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) + +**返される値** + +降順で部分的にソートされたマップを返します。[`Map(K, V)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k1':3,'k3':2,'k2':1} +``` + +## mapPartialSort {#mapPartialSort} + +導入バージョン: v23.4 + +`map` の要素を昇順にソートします。追加の `limit` 引数を指定することで、一部のみを対象とした「部分ソート」が可能です。 +`func` 関数が指定されている場合は、`map` のキーおよび値に `func` 関数を適用した結果に基づいてソート順が決定されます。 + +**構文** + +```sql +mapPartialSort([func,] limit, map) +``` + +**引数** + +* `func` — 省略可能。Lambda 関数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `limit` — 範囲 `[1..limit]` 
内の要素がソートされます。[`(U)Int*`](/sql-reference/data-types/int-uint) +* `map` — ソート対象の Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**戻り値** + +部分的にソートされた Map を返します。[`Map(K, V)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k2':1,'k3':2,'k1':3} +``` + +## mapPopulateSeries {#mapPopulateSeries} + +導入バージョン: v20.10 + +整数キーを持つマップにおいて、欠けているキーと値のペアを補完します。 +既存の最大値より大きいキーも拡張できるように、最大キーを指定できます。 +より正確には、この関数は、キーが最小キーから最大キー(指定されている場合は `max` 引数)までステップ幅 1 の数列を形成し、それに対応する値を持つマップを返します。 +あるキーに値が指定されていない場合、そのキーの値としてデフォルト値が使用されます。 +キーが重複している場合、先に出現した値のみがそのキーに関連付けられます。 + +**構文** + +```sql +mapPopulateSeries(map[, max]) | mapPopulateSeries(keys, values[, max]) +``` + +**引数** + +* `map` — 整数キーを持つ Map 型。[`Map((U)Int*, V)`](/sql-reference/data-types/map) +* `keys` — キーの配列。[`Array(T)`](/sql-reference/data-types/array) +* `values` — 値の配列。[`Array(T)`](/sql-reference/data-types/array) +* `max` — オプション。キーの最大値を指定します。[`Int8`](/sql-reference/data-types/int-uint) または [`Int16`](/sql-reference/data-types/int-uint) または [`Int32`](/sql-reference/data-types/int-uint) または [`Int64`](/sql-reference/data-types/int-uint) または [`Int128`](/sql-reference/data-types/int-uint) または [`Int256`](/sql-reference/data-types/int-uint) + +**返される値** + +ソート済みのキーを持つ Map、または 1 つ目にソート済みのキー、2 つ目に対応する値を持つ 2 つの配列からなるタプルを返します。[`Map(K, V)`](/sql-reference/data-types/map) または [`Tuple(Array(UInt*), Array(Any))`](/sql-reference/data-types/tuple) + +**例** + +**Map 型を使用する場合** + +```sql title=Query +SELECT mapPopulateSeries(map(1, 10, 5, 20), 6) +``` + +```response title=Response +{1:10, 2:0, 3:0, 4:0, 5:20, 6:0} +``` + +**マップされた配列を使う場合** + +```sql title=Query +SELECT mapPopulateSeries([1, 2, 4], [11, 22, 44], 5) +``` + +```response title=Response +([1, 2, 3, 4, 5], [11, 22, 0, 44, 0]) +``` + +## mapReverseSort {#mapReverseSort} + +導入バージョン: v23.4 + +map の要素を降順に並べ替えます。 +`func` 関数が指定されている場合、map のキーおよび値に 
`func` 関数を適用した結果によってソート順が決まります。 + +**構文** + +```sql +mapReverseSort([func,] map) +``` + +**引数** + +* `func` — オプションのラムダ関数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — ソート対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) + +**戻り値** + +降順にソートされたマップを返します。[`Map(K, V)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +SELECT mapReverseSort((k, v) -> v, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k1':3,'k3':2,'k2':1} +``` + +## mapSort {#mapSort} + +Introduced in: v23.4 + +マップの要素を昇順で並べ替えます。 +`func` 関数が指定されている場合、マップのキーと値に `func` 関数を適用した結果によってソート順が決まります。 + +**構文** + +```sql +mapSort([func,] map) +``` + +**引数** + +* `func` — 任意。ラムダ関数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — ソート対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) + +**戻り値** + +昇順にソートされたマップを返します。[`Map(K, V)`](/sql-reference/data-types/map) + +**例** + +**使用例** + +```sql title=Query +SELECT mapSort((k, v) -> v, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k2':1,'k3':2,'k1':3} +``` + +## mapSubtract {#mapSubtract} + +導入バージョン: v20.7 + +すべてのキーを取得し、対応する値同士の差を計算します。 + +**構文** + +```sql +mapSubtract(arg1[, arg2, ...]) +``` + +**引数** + +* `arg1[, arg2, ...]` — 2 つの配列からなる Map またはタプル。1 つ目の配列の要素がキーを表し、2 つ目の配列には各キーに対応する値が含まれます。[`Map(K, V)`](/sql-reference/data-types/map) または [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**戻り値** + +戻り値は 1 つの Map またはタプルで、1 つ目の配列にはソート済みのキーが含まれ、2 つ目の配列には値が含まれます。[`Map(K, V)`](/sql-reference/data-types/map) または [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**例** + +**Map 型の場合** + +```sql title=Query +SELECT mapSubtract(map(1, 1), map(1, 1)) +``` + +```response title=Response +{1:0} +``` + +**タプルマップを使用する場合** + +```sql title=Query +SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) +``` + +```response title=Response +([1, 2], [-1, 0]) +``` + +## 
mapUpdate {#mapUpdate} + +導入バージョン: v22.3 + +2つのマップを受け取り、2つ目のマップの対応するキーの値で値を更新した1つ目のマップを返します。 + +**構文** + +```sql +mapUpdate(map1, map2) +``` + +**引数** + +* `map1` — 更新対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) +* `map2` — 更新に使用するマップ。[`Map(K, V)`](/sql-reference/data-types/map) + +**返される値** + +`map2` に含まれる同じキーの値で更新された `map1` を返します。[`Map(K, V)`](/sql-reference/data-types/map) + +**使用例** + +**基本的な使い方** + +```sql title=Query +SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) +``` + +```response title=Response +{'key3':0,'key1':10,'key2':10} +``` + +## mapValues {#mapValues} + +導入バージョン: v21.2 + +指定された map の値を返します。 +この関数は、[`optimize_functions_to_subcolumns`](/operations/settings/settings#optimize_functions_to_subcolumns) の設定を有効にすることで最適化できます。 +設定を有効にすると、この関数は map 全体ではなく `values` サブカラムのみを読み取ります。 +クエリ `SELECT mapValues(m) FROM table` は `SELECT m.values FROM table` に変換されます。 + +**構文** + +```sql +mapValues(map) +``` + +**引数** + +* `map` — 値を抽出する対象のマップ。[`Map(K, V)`](/sql-reference/data-types/map) + +**戻り値** + +マップ内のすべての値を含む配列を返します。[`Array(T)`](/sql-reference/data-types/array) + +**例** + +**使用例** + +```sql title=Query +SELECT mapValues(map('k1', 'v1', 'k2', 'v2')) +``` + +```response title=Response +['v1','v2'] +``` + {/*AUTOGENERATED_END*/ } diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md index d3b70928604..ce212967b57 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md @@ -6,12 +6,8 @@ title: '型変換関数' doc_type: 'reference' --- - - # 型変換関数 {#type-conversion-functions} - - ## データ変換時の一般的な問題 {#common-issues-with-data-conversion} ClickHouse は、一般的に [C++ 
プログラムと同じ動作](https://en.cppreference.com/w/cpp/language/implicit_conversion) を採用しています。 @@ -54,7 +50,6 @@ SETTINGS cast_keep_nullable = 1 └──────────────────┴─────────────────────┴──────────────────┘ ``` - ## `toString` 関数に関する注意事項 {#to-string-functions} `toString` ファミリーの関数は、数値、文字列(FixedString は除く)、日付、および日時の間での変換を行うための関数です。 @@ -65,8 +60,6 @@ SETTINGS cast_keep_nullable = 1 - 日時と数値を相互に変換する場合、日時は Unix エポックの開始からの経過秒数に対応します。 - `DateTime` 型の引数に対する `toString` 関数は、`Europe/Amsterdam` のようなタイムゾーン名を含む 2 つ目の String 型引数を取ることができます。この場合、時刻は指定されたタイムゾーンに従ってフォーマットされます。 - - ## `toDate`/`toDateTime` 関数に関する注意事項 {#to-date-and-date-time-functions} `toDate`/`toDateTime` 関数における日付および日時の形式は、次のように定義されています。 @@ -116,7 +109,6 @@ LIMIT 10 [`toUnixTimestamp`](#toUnixTimestamp) 関数も参照してください。 - ## toBool {#tobool} 入力値を [`Bool`](../data-types/boolean.md) 型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -167,7 +159,6 @@ toBool('false'): false toBool('FALSE'): false ``` - ## toInt8 {#toint8} 入力値を[`Int8`](../data-types/int-uint.md)型の値に変換します。エラーが発生した場合は例外を送出します。 @@ -234,7 +225,6 @@ toInt8('-8'): -8 * [`toInt8OrNull`](#toInt8OrNull)。 * [`toInt8OrDefault`](#toint8ordefault)。 - ## toInt8OrZero {#toint8orzero} [`toInt8`](#toint8) と同様に、この関数は入力値を [Int8](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -297,7 +287,6 @@ toInt8OrZero('abc'): 0 * [`toInt8OrNull`](#toInt8OrNull). * [`toInt8OrDefault`](#toint8ordefault). 
- ## toInt8OrNull {#toInt8OrNull} [`toInt8`](#toint8) と同様に、この関数は入力値を [Int8](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -360,7 +349,6 @@ toInt8OrNull('abc'): ᴺᵁᴸᴸ * [`toInt8OrZero`](#toint8orzero)。 * [`toInt8OrDefault`](#toint8ordefault)。 - ## toInt8OrDefault {#toint8ordefault} [`toInt8`](#toint8) と同様に、この関数は入力値を型 [Int8](../data-types/int-uint.md) の値に変換しますが、エラー発生時にはデフォルト値を返します。 @@ -428,7 +416,6 @@ toInt8OrDefault('abc', CAST('-1', 'Int8')): -1 * [`toInt8OrZero`](#toint8orzero)。 * [`toInt8OrNull`](#toInt8OrNull)。 - ## toInt16 {#toint16} 入力値を[`Int16`](../data-types/int-uint.md)型の値に変換します。エラーが発生した場合には例外をスローします。 @@ -495,7 +482,6 @@ toInt16('-16'): -16 * [`toInt16OrNull`](#toint16ornull)。 * [`toInt16OrDefault`](#toint16ordefault)。 - ## toInt16OrZero {#toint16orzero} [`toInt16`](#toint16) と同様に、この関数は入力値を [Int16](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合には `0` を返します。 @@ -558,7 +544,6 @@ toInt16OrZero('abc'): 0 * [`toInt16OrNull`](#toint16ornull)。 * [`toInt16OrDefault`](#toint16ordefault)。 - ## toInt16OrNull {#toint16ornull} [`toInt16`](#toint16) と同様に、この関数は入力値を [Int16](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合には `NULL` を返します。 @@ -621,7 +606,6 @@ toInt16OrNull('abc'): ᴺᵁᴸᴸ * [`toInt16OrZero`](#toint16orzero). * [`toInt16OrDefault`](#toint16ordefault). 
- ## toInt16OrDefault {#toint16ordefault} [`toInt16`](#toint16) と同様に、この関数は入力値を型 [Int16](../data-types/int-uint.md) の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -689,7 +673,6 @@ toInt16OrDefault('abc', CAST('-1', 'Int16')): -1 * [`toInt16OrZero`](#toint16orzero)。 * [`toInt16OrNull`](#toint16ornull)。 - ## toInt32 {#toint32} 入力値を[`Int32`](../data-types/int-uint.md)型の値に変換します。エラー時には例外をスローします。 @@ -756,7 +739,6 @@ toInt32('-32'): -32 * [`toInt32OrNull`](#toint32ornull)。 * [`toInt32OrDefault`](#toint32ordefault)。 - ## toInt32OrZero {#toint32orzero} [`toInt32`](#toint32) と同様に、この関数は入力値を [Int32](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -819,7 +801,6 @@ toInt32OrZero('abc'): 0 * [`toInt32OrNull`](#toint32ornull)。 * [`toInt32OrDefault`](#toint32ordefault)。 - ## toInt32OrNull {#toint32ornull} [`toInt32`](#toint32) と同様に、この関数は入力値を [Int32](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -882,7 +863,6 @@ toInt32OrNull('abc'): ᴺᵁᴸᴸ * [`toInt32OrZero`](#toint32orzero). * [`toInt32OrDefault`](#toint32ordefault). - ## toInt32OrDefault {#toint32ordefault} [`toInt32`](#toint32) と同様に、この関数は入力値を [Int32](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -950,7 +930,6 @@ toInt32OrDefault('abc', CAST('-1', 'Int32')): -1 * [`toInt32OrZero`](#toint32orzero). * [`toInt32OrNull`](#toint32ornull). - ## toInt64 {#toint64} 入力値を[`Int64`](../data-types/int-uint.md)型の値に変換します。エラーが発生した場合は例外を送出します。 @@ -1017,7 +996,6 @@ toInt64('-64'): -64 * [`toInt64OrNull`](#toint64ornull)。 * [`toInt64OrDefault`](#toint64ordefault)。 - ## toInt64OrZero {#toint64orzero} [`toInt64`](#toint64) と同様に、この関数は入力値を [Int64](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -1080,7 +1058,6 @@ toInt64OrZero('abc'): 0 * [`toInt64OrNull`](#toint64ornull). * [`toInt64OrDefault`](#toint64ordefault). 
- ## toInt64OrNull {#toint64ornull} [`toInt64`](#toint64) と同様に、この関数は入力値を [Int64](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -1143,7 +1120,6 @@ toInt64OrNull('abc'): ᴺᵁᴸᴸ * [`toInt64OrZero`](#toint64orzero). * [`toInt64OrDefault`](#toint64ordefault). - ## toInt64OrDefault {#toint64ordefault} [`toInt64`](#toint64) と同様に、この関数は入力値を [Int64](../data-types/int-uint.md) 型の値に変換しますが、エラー時にはデフォルト値を返します。 @@ -1211,7 +1187,6 @@ toInt64OrDefault('abc', CAST('-1', 'Int64')): -1 * [`toInt64OrZero`](#toint64orzero). * [`toInt64OrNull`](#toint64ornull). - ## toInt128 {#toint128} 入力値を [`Int128`](../data-types/int-uint.md) 型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -1277,7 +1252,6 @@ toInt128('-128'): -128 * [`toInt128OrNull`](#toint128ornull)。 * [`toInt128OrDefault`](#toint128ordefault)。 - ## toInt128OrZero {#toint128orzero} [`toInt128`](#toint128) と同様に、この関数は入力値を [Int128](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -1340,7 +1314,6 @@ toInt128OrZero('abc'): 0 * [`toInt128OrNull`](#toint128ornull)。 * [`toInt128OrDefault`](#toint128ordefault)。 - ## toInt128OrNull {#toint128ornull} [`toInt128`](#toint128) と同様に、この関数は入力値を [Int128](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -1403,7 +1376,6 @@ toInt128OrNull('abc'): ᴺᵁᴸᴸ * [`toInt128OrZero`](#toint128orzero). * [`toInt128OrDefault`](#toint128ordefault). 
- ## toInt128OrDefault {#toint128ordefault} [`toInt128`](#toint128) と同様に、この関数は入力値を [Int128](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -1472,7 +1444,6 @@ toInt128OrDefault('abc', CAST('-1', 'Int128')): -1 * [`toInt128OrZero`](#toint128orzero)。 * [`toInt128OrNull`](#toint128ornull)。 - ## toInt256 {#toint256} 入力値を[`Int256`](../data-types/int-uint.md)型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -1538,7 +1509,6 @@ toInt256('-256'): -256 * [`toInt256OrNull`](#toint256ornull)。 * [`toInt256OrDefault`](#toint256ordefault)。 - ## toInt256OrZero {#toint256orzero} [`toInt256`](#toint256) と同様に、この関数は入力値を [Int256](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -1601,7 +1571,6 @@ toInt256OrZero('abc'): 0 * [`toInt256OrNull`](#toint256ornull). * [`toInt256OrDefault`](#toint256ordefault). - ## toInt256OrNull {#toint256ornull} [`toInt256`](#toint256) と同様に、この関数は入力値を型 [Int256](../data-types/int-uint.md) の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -1664,7 +1633,6 @@ toInt256OrNull('abc'): ᴺᵁᴸᴸ * [`toInt256OrZero`](#toint256orzero)。 * [`toInt256OrDefault`](#toint256ordefault)。 - ## toInt256OrDefault {#toint256ordefault} [`toInt256`](#toint256) と同様に、この関数は入力値を [Int256](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -1732,7 +1700,6 @@ toInt256OrDefault('abc', CAST('-1', 'Int256')): -1 * [`toInt256OrZero`](#toint256orzero)。 * [`toInt256OrNull`](#toint256ornull)。 - ## toUInt8 {#touint8} 入力値を [`UInt8`](../data-types/int-uint.md) 型の値に変換します。エラーが発生した場合には例外をスローします。 @@ -1799,7 +1766,6 @@ toUInt8('8'): 8 * [`toUInt8OrNull`](#touint8ornull)。 * [`toUInt8OrDefault`](#touint8ordefault)。 - ## toUInt8OrZero {#touint8orzero} [`toUInt8`](#touint8) と同様に、この関数は入力値を [UInt8](../data-types/int-uint.md) 型の値に変換しますが、エラー時には `0` を返します。 @@ -1862,7 +1828,6 @@ toUInt8OrZero('abc'): 0 * [`toUInt8OrNull`](#touint8ornull)。 * [`toUInt8OrDefault`](#touint8ordefault)。 - ## toUInt8OrNull {#touint8ornull} [`toUInt8`](#touint8) と同様に、この関数は入力値を [UInt8](../data-types/int-uint.md) 
型の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -1925,7 +1890,6 @@ toUInt8OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt8OrZero`](#touint8orzero)。 * [`toUInt8OrDefault`](#touint8ordefault)。 - ## toUInt8OrDefault {#touint8ordefault} [`toUInt8`](#touint8) と同様に、この関数は入力値を型 [UInt8](../data-types/int-uint.md) の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -1993,7 +1957,6 @@ toUInt8OrDefault('abc', CAST('0', 'UInt8')): 0 * [`toUInt8OrZero`](#touint8orzero)。 * [`toUInt8OrNull`](#touint8ornull)。 - ## toUInt16 {#touint16} 入力値を[`UInt16`](../data-types/int-uint.md)型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -2060,7 +2023,6 @@ toUInt16('16'): 16 * [`toUInt16OrNull`](#touint16ornull)。 * [`toUInt16OrDefault`](#touint16ordefault)。 - ## toUInt16OrZero {#touint16orzero} [`toUInt16`](#touint16) と同様に、この関数は入力値を [UInt16](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -2123,7 +2085,6 @@ toUInt16OrZero('abc'): 0 * [`toUInt16OrNull`](#touint16ornull). * [`toUInt16OrDefault`](#touint16ordefault). - ## toUInt16OrNull {#touint16ornull} [`toUInt16`](#touint16) と同様に、この関数は入力値を [UInt16](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -2186,7 +2147,6 @@ toUInt16OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt16OrZero`](#touint16orzero)。 * [`toUInt16OrDefault`](#touint16ordefault)。 - ## toUInt16OrDefault {#touint16ordefault} [`toUInt16`](#touint16) と同様に、この関数は入力値を型 [UInt16](../data-types/int-uint.md) の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -2254,7 +2214,6 @@ toUInt16OrDefault('abc', CAST('0', 'UInt16')): 0 * [`toUInt16OrZero`](#touint16orzero)。 * [`toUInt16OrNull`](#touint16ornull)。 - ## toUInt32 {#touint32} 入力値を [`UInt32`](../data-types/int-uint.md) 型に変換します。エラーが発生した場合は例外をスローします。 @@ -2321,7 +2280,6 @@ toUInt32('32'): 32 * [`toUInt32OrNull`](#touint32ornull)。 * [`toUInt32OrDefault`](#touint32ordefault)。 - ## toUInt32OrZero {#touint32orzero} [`toUInt32`](#touint32) と同様に、この関数は入力値を [UInt32](../data-types/int-uint.md) 型に変換しますが、エラーが発生した場合は `0` を返します。 @@ -2385,7 +2343,6 @@ toUInt32OrZero('abc'): 0 * 
[`toUInt32OrNull`](#touint32ornull)。 * [`toUInt32OrDefault`](#touint32ordefault)。 - ## toUInt32OrNull {#touint32ornull} [`toUInt32`](#touint32) と同様に、この関数は入力値を型 [UInt32](../data-types/int-uint.md) の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -2449,7 +2406,6 @@ toUInt32OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt32OrZero`](#touint32orzero)。 * [`toUInt32OrDefault`](#touint32ordefault)。 - ## toUInt32OrDefault {#touint32ordefault} [`toUInt32`](#touint32) と同様に、この関数は入力値を型 [UInt32](../data-types/int-uint.md) の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -2517,7 +2473,6 @@ toUInt32OrDefault('abc', CAST('0', 'UInt32')): 0 * [`toUInt32OrZero`](#touint32orzero) * [`toUInt32OrNull`](#touint32ornull) - ## toUInt64 {#touint64} 入力値を [`UInt64`](../data-types/int-uint.md) 型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -2584,7 +2539,6 @@ toUInt64('64'): 64 * [`toUInt64OrNull`](#touint64ornull)。 * [`toUInt64OrDefault`](#touint64ordefault)。 - ## toUInt64OrZero {#touint64orzero} [`toUInt64`](#touint64) と同様に、この関数は入力値を [UInt64](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -2647,7 +2601,6 @@ toUInt64OrZero('abc'): 0 * [`toUInt64OrNull`](#touint64ornull)。 * [`toUInt64OrDefault`](#touint64ordefault)。 - ## toUInt64OrNull {#touint64ornull} [`toUInt64`](#touint64) と同様に、この関数は入力値を [UInt64](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -2710,7 +2663,6 @@ toUInt64OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt64OrZero`](#touint64orzero)。 * [`toUInt64OrDefault`](#touint64ordefault)。 - ## toUInt64OrDefault {#touint64ordefault} [`toUInt64`](#touint64) と同様に、この関数は入力値を型 [UInt64](../data-types/int-uint.md) の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -2778,7 +2730,6 @@ toUInt64OrDefault('abc', CAST('0', 'UInt64')): 0 * [`toUInt64OrZero`](#touint64orzero). * [`toUInt64OrNull`](#touint64ornull). 
- ## toUInt128 {#touint128} 入力値を [`UInt128`](../data-types/int-uint.md) 型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -2844,7 +2795,6 @@ toUInt128('128'): 128 * [`toUInt128OrNull`](#touint128ornull)。 * [`toUInt128OrDefault`](#touint128ordefault)。 - ## toUInt128OrZero {#touint128orzero} [`toUInt128`](#touint128) と同様に、この関数は入力値を [UInt128](../data-types/int-uint.md) 型の値に変換しますが、エラー時には `0` を返します。 @@ -2907,7 +2857,6 @@ toUInt128OrZero('abc'): 0 * [`toUInt128OrNull`](#touint128ornull)。 * [`toUInt128OrDefault`](#touint128ordefault)。 - ## toUInt128OrNull {#touint128ornull} [`toUInt128`](#touint128) と同様に、この関数は入力値を [UInt128](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合には `NULL` を返します。 @@ -2970,7 +2919,6 @@ toUInt128OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt128OrZero`](#touint128orzero). * [`toUInt128OrDefault`](#touint128ordefault). - ## toUInt128OrDefault {#touint128ordefault} [`toUInt128`](#toint128) と同様に、この関数は入力値を [UInt128](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -3039,7 +2987,6 @@ toUInt128OrDefault('abc', CAST('0', 'UInt128')): 0 * [`toUInt128OrZero`](#touint128orzero)。 * [`toUInt128OrNull`](#touint128ornull)。 - ## toUInt256 {#touint256} 入力値を[`UInt256`](../data-types/int-uint.md)型の値に変換します。エラーが発生すると例外をスローします。 @@ -3105,7 +3052,6 @@ toUInt256('256'): 256 * [`toUInt256OrNull`](#touint256ornull)。 * [`toUInt256OrDefault`](#touint256ordefault)。 - ## toUInt256OrZero {#touint256orzero} [`toUInt256`](#touint256) と同様に、この関数は入力値を [UInt256](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -3168,7 +3114,6 @@ toUInt256OrZero('abc'): 0 * [`toUInt256OrNull`](#touint256ornull)。 * [`toUInt256OrDefault`](#touint256ordefault)。 - ## toUInt256OrNull {#touint256ornull} [`toUInt256`](#touint256) と同様に、この関数は入力値を型 [UInt256](../data-types/int-uint.md) の値に変換しますが、エラーが発生した場合には `NULL` を返します。 @@ -3231,7 +3176,6 @@ toUInt256OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt256OrZero`](#touint256orzero). * [`toUInt256OrDefault`](#touint256ordefault). 
- ## toUInt256OrDefault {#touint256ordefault} [`toUInt256`](#touint256) と同様に、この関数は入力値を [UInt256](../data-types/int-uint.md) 型の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -3299,7 +3243,6 @@ toUInt256OrDefault('abc', CAST('0', 'UInt256')): 0 * [`toUInt256OrZero`](#touint256orzero). * [`toUInt256OrNull`](#touint256ornull). - ## toFloat32 {#tofloat32} 入力値を [`Float32`](../data-types/float.md) 型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -3357,7 +3300,6 @@ toFloat32('NaN'): nan * [`toFloat32OrNull`](#tofloat32ornull)。 * [`toFloat32OrDefault`](#tofloat32ordefault)。 - ## toFloat32OrZero {#tofloat32orzero} [`toFloat32`](#tofloat32) と同様に、この関数は入力値を [Float32](../data-types/float.md) 型の値に変換しますが、エラー発生時には `0` を返します。 @@ -3410,7 +3352,6 @@ toFloat32OrZero('abc'): 0 * [`toFloat32OrNull`](#tofloat32ornull). * [`toFloat32OrDefault`](#tofloat32ordefault). - ## toFloat32OrNull {#tofloat32ornull} [`toFloat32`](#tofloat32) と同様に、この関数は入力値を [Float32](../data-types/float.md) 型の値に変換しますが、エラーが発生した場合には `NULL` を返します。 @@ -3463,7 +3404,6 @@ toFloat32OrNull('abc'): ᴺᵁᴸᴸ * [`toFloat32OrZero`](#tofloat32orzero). * [`toFloat32OrDefault`](#tofloat32ordefault). - ## toFloat32OrDefault {#tofloat32ordefault} [`toFloat32`](#tofloat32) と同様に、この関数は入力値を [Float32](../data-types/float.md) 型の値に変換しますが、エラー発生時にはデフォルト値を返します。 @@ -3521,7 +3461,6 @@ toFloat32OrDefault('abc', CAST('0', 'Float32')): 0 * [`toFloat32OrZero`](#tofloat32orzero). * [`toFloat32OrNull`](#tofloat32ornull). - ## toFloat64 {#tofloat64} 入力値を [`Float64`](../data-types/float.md) 型の値に変換します。エラーが発生した場合には例外をスローします。 @@ -3579,7 +3518,6 @@ toFloat64('NaN'): nan * [`toFloat64OrNull`](#tofloat64ornull). * [`toFloat64OrDefault`](#tofloat64ordefault). - ## toFloat64OrZero {#tofloat64orzero} [`toFloat64`](#tofloat64) と同様に、この関数は入力値を [Float64](../data-types/float.md) 型の値に変換しますが、エラー時には `0` を返します。 @@ -3632,7 +3570,6 @@ toFloat64OrZero('abc'): 0 * [`toFloat64OrNull`](#tofloat64ornull). * [`toFloat64OrDefault`](#tofloat64ordefault). 
- ## toFloat64OrNull {#tofloat64ornull} [`toFloat64`](#tofloat64) と同様に、この関数は入力値を [Float64](../data-types/float.md) 型の値に変換しますが、エラーが発生した場合は `NULL` を返します。 @@ -3685,7 +3622,6 @@ toFloat64OrNull('abc'): ᴺᵁᴸᴸ * [`toFloat64OrZero`](#tofloat64orzero). * [`toFloat64OrDefault`](#tofloat64ordefault). - ## toFloat64OrDefault {#tofloat64ordefault} [`toFloat64`](#tofloat64) と同様に、この関数は入力値を [Float64](../data-types/float.md) 型の値に変換しますが、エラーが発生した場合は既定値を返します。 @@ -3743,7 +3679,6 @@ toFloat64OrDefault('abc', CAST('0', 'Float64')): 0 * [`toFloat64OrZero`](#tofloat64orzero). * [`toFloat64OrNull`](#tofloat64ornull). - ## toBFloat16 {#tobfloat16} 入力値を [`BFloat16`](/sql-reference/data-types/float#bfloat16) 型に変換します。 @@ -3791,7 +3726,6 @@ SELECT toBFloat16('42.7'); * [`toBFloat16OrZero`](#tobfloat16orzero)。 * [`toBFloat16OrNull`](#tobfloat16ornull)。 - ## toBFloat16OrZero {#tobfloat16orzero} 入力の文字列値を [`BFloat16`](/sql-reference/data-types/float#bfloat16) 型の値に変換します。 @@ -3845,7 +3779,6 @@ SELECT toBFloat16OrZero('12.3456789'); * [`toBFloat16`](#tobfloat16)。 * [`toBFloat16OrNull`](#tobfloat16ornull)。 - ## toBFloat16OrNull {#tobfloat16ornull} 文字列の入力値を [`BFloat16`](/sql-reference/data-types/float#bfloat16) 型の値に変換します。 @@ -3899,7 +3832,6 @@ SELECT toBFloat16OrNull('12.3456789'); * [`toBFloat16`](#tobfloat16)。 * [`toBFloat16OrZero`](#tobfloat16orzero)。 - ## toDate {#todate} 引数を [Date](../data-types/date.md) データ型に変換します。 @@ -4003,7 +3935,6 @@ SELECT toDate(10000000000.) 
関数 `toDate` は、別の表記でも記述できます。 - ```sql SELECT now() AS time, @@ -4018,7 +3949,6 @@ SELECT └─────────────────────┴───────────────┴─────────────┴─────────────────────┘ ``` - ## toDateOrZero {#todateorzero} 無効な引数が渡された場合に [Date](../data-types/date.md) の下限値を返す点を除き、[toDate](#todate) と同じです。[String](../data-types/string.md) 型の引数のみがサポートされています。 @@ -4039,7 +3969,6 @@ SELECT toDateOrZero('2022-12-30'), toDateOrZero(''); └────────────────────────────┴──────────────────┘ ``` - ## toDateOrNull {#todateornull} [toDate](#todate) と同様ですが、無効な引数を受け取った場合は `NULL` を返します。[String](../data-types/string.md) 型の引数のみがサポートされます。 @@ -4060,7 +3989,6 @@ SELECT toDateOrNull('2022-12-30'), toDateOrNull(''); └────────────────────────────┴──────────────────┘ ``` - ## toDateOrDefault {#todateordefault} [toDate](#todate) と同様ですが、変換に失敗した場合はデフォルト値を返します。デフォルト値は、第 2 引数が指定されている場合はその値、指定されていない場合は [Date](../data-types/date.md) の最小値です。 @@ -4087,7 +4015,6 @@ SELECT toDateOrDefault('2022-12-30'), toDateOrDefault('', '2023-01-01'::Date); └───────────────────────────────┴─────────────────────────────────────────────────┘ ``` - ## toDateTime {#todatetime} 入力値を [DateTime](../data-types/datetime.md) 型に変換します。 @@ -4129,7 +4056,6 @@ SELECT toDateTime('2022-12-30 13:44:17'), toDateTime(1685457500, 'UTC'); └───────────────────────────────────┴───────────────────────────────┘ ``` - ## toDateTimeOrZero {#todatetimeorzero} [toDateTime](#todatetime) と同様ですが、無効な引数を受け取った場合は [DateTime](../data-types/datetime.md) の最小値を返します。[String](../data-types/string.md) 型の引数のみがサポートされています。 @@ -4150,7 +4076,6 @@ SELECT toDateTimeOrZero('2022-12-30 13:44:17'), toDateTimeOrZero(''); └─────────────────────────────────────────┴──────────────────────┘ ``` - ## toDateTimeOrNull {#todatetimeornull} [toDateTime](#todatetime) と同様ですが、無効な引数が渡された場合は `NULL` を返します。[String](../data-types/string.md) 型の引数のみがサポートされます。 @@ -4171,7 +4096,6 @@ SELECT toDateTimeOrNull('2022-12-30 13:44:17'), toDateTimeOrNull(''); 
└─────────────────────────────────────────┴──────────────────────┘ ``` - ## toDateTimeOrDefault {#todatetimeordefault} [toDateTime](#todatetime) と同様ですが、変換に失敗した場合はデフォルト値を返します。デフォルト値は、3 番目の引数が指定されていればその値、指定されていない場合は [DateTime](../data-types/datetime.md) の下限値です。 @@ -4198,7 +4122,6 @@ SELECT toDateTimeOrDefault('2022-12-30 13:44:17'), toDateTimeOrDefault('', 'UTC' └────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────┘ ``` - ## toDate32 {#todate32} 引数を [Date32](../data-types/date32.md) データ型に変換します。値が範囲外の場合、`toDate32` は [Date32](../data-types/date32.md) でサポートされる範囲の境界値を返します。引数が [Date](../data-types/date.md) 型の場合は、その型で取り得る値の範囲の境界も考慮されます。 @@ -4255,7 +4178,6 @@ SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value); └────────────┴────────────────────────────────────────────┘ ``` - ## toDate32OrZero {#todate32orzero} [toDate32](#todate32) と同様ですが、無効な引数を受け取った場合は [Date32](../data-types/date32.md) の最小値を返します。 @@ -4276,7 +4198,6 @@ SELECT toDate32OrZero('1899-01-01'), toDate32OrZero(''); └──────────────────────────────┴────────────────────┘ ``` - ## toDate32OrNull {#todate32ornull} [toDate32](#todate32) と同様ですが、無効な引数が渡された場合は `NULL` を返します。 @@ -4297,7 +4218,6 @@ SELECT toDate32OrNull('1955-01-01'), toDate32OrNull(''); └──────────────────────────────┴────────────────────┘ ``` - ## toDate32OrDefault {#todate32ordefault} 引数を [Date32](../data-types/date32.md) データ型に変換します。値が範囲外の場合、`toDate32OrDefault` は [Date32](../data-types/date32.md) でサポートされる下限値を返します。引数が [Date](../data-types/date.md) 型の場合は、その型で取り得る範囲が考慮されます。無効な引数が渡された場合は、デフォルト値を返します。 @@ -4320,7 +4240,6 @@ SELECT └─────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ ``` - ## toDateTime64 {#todatetime64} 入力値を [DateTime64](../data-types/datetime64.md) 型の値に変換します。 @@ -4391,7 +4310,6 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN 
└─────────────────────────┴─────────────────────────────────────────────────────────────────────┘ ``` - ## toDateTime64OrZero {#todatetime64orzero} [toDateTime64](#todatetime64) と同様に、この関数は入力値を [DateTime64](../data-types/datetime64.md) 型の値に変換しますが、無効な引数を受け取った場合は [DateTime64](../data-types/datetime64.md) の最小値を返します。 @@ -4434,7 +4352,6 @@ SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg * [toDateTime64OrNull](#todatetime64ornull)。 * [toDateTime64OrDefault](#todatetime64ordefault)。 - ## toDateTime64OrNull {#todatetime64ornull} [toDateTime64](#todatetime64) と同様に、この関数は入力値を [DateTime64](../data-types/datetime64.md) 型の値に変換しますが、無効な引数を受け取った場合は `NULL` を返します。 @@ -4479,7 +4396,6 @@ SELECT * [toDateTime64OrZero](#todatetime64orzero) * [toDateTime64OrDefault](#todatetime64ordefault) - ## toDateTime64OrDefault {#todatetime64ordefault} [toDateTime64](#todatetime64) と同様に、この関数は入力値を [DateTime64](../data-types/datetime64.md) 型の値に変換しますが、 @@ -4527,7 +4443,6 @@ SELECT * [toDateTime64OrZero](#todatetime64orzero)。 * [toDateTime64OrNull](#todatetime64ornull)。 - ## toDecimal32 {#todecimal32} 入力値をスケールが `S` の型 [`Decimal(9, S)`](../data-types/decimal.md) の値に変換します。エラーが発生した場合は、例外をスローします。 @@ -4600,7 +4515,6 @@ type_c: Decimal(9, 3) * [`toDecimal32OrNull`](#todecimal32ornull)。 * [`toDecimal32OrDefault`](#todecimal32ordefault)。 - ## toDecimal32OrZero {#todecimal32orzero} [`toDecimal32`](#todecimal32) と同様に、この関数は入力値を [Decimal(9, S)](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -4666,7 +4580,6 @@ toTypeName(b): Decimal(9, 5) * [`toDecimal32OrNull`](#todecimal32ornull). * [`toDecimal32OrDefault`](#todecimal32ordefault). 
- ## toDecimal32OrNull {#todecimal32ornull} [`toDecimal32`](#todecimal32) と同様に、この関数は入力値を [Nullable(Decimal(9, S))](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合には `0` を返します。 @@ -4732,7 +4645,6 @@ toTypeName(b): Nullable(Decimal(9, 5)) * [`toDecimal32OrZero`](#todecimal32orzero)。 * [`toDecimal32OrDefault`](#todecimal32ordefault)。 - ## toDecimal32OrDefault {#todecimal32ordefault} [`toDecimal32`](#todecimal32) と同様に、この関数は入力値を [Decimal(9, S)](../data-types/decimal.md) 型の値に変換しますが、エラー時にはデフォルト値を返します。 @@ -4805,7 +4717,6 @@ toTypeName(b): Decimal(9, 0) * [`toDecimal32OrZero`](#todecimal32orzero). * [`toDecimal32OrNull`](#todecimal32ornull). - ## toDecimal64 {#todecimal64} 入力値をスケール `S` を持つ [`Decimal(18, S)`](../data-types/decimal.md) 型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -4878,7 +4789,6 @@ type_c: Decimal(18, 3) * [`toDecimal64OrNull`](#todecimal64ornull)。 * [`toDecimal64OrDefault`](#todecimal64ordefault)。 - ## toDecimal64OrZero {#todecimal64orzero} [`toDecimal64`](#todecimal64) と同様に、この関数は入力値を [Decimal(18, S)](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -4944,7 +4854,6 @@ toTypeName(b): Decimal(18, 18) * [`toDecimal64OrNull`](#todecimal64ornull). * [`toDecimal64OrDefault`](#todecimal64ordefault). 
- ## toDecimal64OrNull {#todecimal64ornull} [`toDecimal64`](#todecimal64) と同様に、この関数は入力値を [Nullable(Decimal(18, S))](../data-types/decimal.md) 型の値に変換します。ただし、エラーが発生した場合は `0` を返します。 @@ -5010,7 +4919,6 @@ toTypeName(b): Nullable(Decimal(18, 18)) * [`toDecimal64OrZero`](#todecimal64orzero)。 * [`toDecimal64OrDefault`](#todecimal64ordefault)。 - ## toDecimal64OrDefault {#todecimal64ordefault} [`toDecimal64`](#todecimal64) と同様に、この関数は入力値を [Decimal(18, S)](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合は既定値を返します。 @@ -5083,7 +4991,6 @@ toTypeName(b): Decimal(18, 0) * [`toDecimal64OrZero`](#todecimal64orzero)。 * [`toDecimal64OrNull`](#todecimal64ornull)。 - ## toDecimal128 {#todecimal128} 入力値をスケール `S` を持つ型 [`Decimal(38, S)`](../data-types/decimal.md) の値に変換します。エラーが発生した場合は例外を送出します。 @@ -5156,7 +5063,6 @@ type_c: Decimal(38, 3) * [`toDecimal128OrNull`](#todecimal128ornull). * [`toDecimal128OrDefault`](#todecimal128ordefault). - ## toDecimal128OrZero {#todecimal128orzero} [`toDecimal128`](#todecimal128) と同様に、この関数は入力値を [Decimal(38, S)](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -5222,7 +5128,6 @@ toTypeName(b): Decimal(38, 38) * [`toDecimal128OrNull`](#todecimal128ornull)。 * [`toDecimal128OrDefault`](#todecimal128ordefault)。 - ## toDecimal128OrNull {#todecimal128ornull} [`toDecimal128`](#todecimal128) と同様に、この関数は入力値を [Nullable(Decimal(38, S))](../data-types/decimal.md) 型の値に変換します。ただし、エラーが発生した場合は `0` を返します。 @@ -5288,7 +5193,6 @@ toTypeName(b): Nullable(Decimal(38, 38)) * [`toDecimal128OrZero`](#todecimal128orzero)。 * [`toDecimal128OrDefault`](#todecimal128ordefault)。 - ## toDecimal128OrDefault {#todecimal128ordefault} [`toDecimal128`](#todecimal128) と同様に、この関数は入力値を [Decimal(38, S)](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合にはデフォルト値を返します。 @@ -5361,7 +5265,6 @@ toTypeName(b): Decimal(38, 0) * [`toDecimal128OrZero`](#todecimal128orzero). * [`toDecimal128OrNull`](#todecimal128ornull). 
- ## toDecimal256 {#todecimal256} 入力値を、スケール `S` を持つ [`Decimal(76, S)`](../data-types/decimal.md) 型の値に変換します。エラーが発生した場合は例外をスローします。 @@ -5434,7 +5337,6 @@ type_c: Decimal(76, 3) * [`toDecimal256OrNull`](#todecimal256ornull)。 * [`toDecimal256OrDefault`](#todecimal256ordefault)。 - ## toDecimal256OrZero {#todecimal256orzero} [`toDecimal256`](#todecimal256) と同様に、この関数は入力値を [Decimal(76, S)](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合は `0` を返します。 @@ -5500,7 +5402,6 @@ toTypeName(b): Decimal(76, 76) * [`toDecimal256OrNull`](#todecimal256ornull)。 * [`toDecimal256OrDefault`](#todecimal256ordefault)。 - ## toDecimal256OrNull {#todecimal256ornull} [`toDecimal256`](#todecimal256) と同様に、この関数は入力値を [Nullable(Decimal(76, S))](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合には `0` を返します。 @@ -5566,7 +5467,6 @@ toTypeName(b): Nullable(Decimal(76, 76)) * [`toDecimal256OrZero`](#todecimal256orzero). * [`toDecimal256OrDefault`](#todecimal256ordefault). - ## toDecimal256OrDefault {#todecimal256ordefault} [`toDecimal256`](#todecimal256) と同様に、この関数は入力値を [Decimal(76, S)](../data-types/decimal.md) 型の値に変換しますが、エラーが発生した場合はデフォルト値を返します。 @@ -5639,7 +5539,6 @@ toTypeName(b): Decimal(76, 0) * [`toDecimal256OrZero`](#todecimal256orzero). * [`toDecimal256OrNull`](#todecimal256ornull). 
- ## toString {#tostring} 値を文字列表現に変換します。 @@ -5684,7 +5583,6 @@ LIMIT 10; └─────────────────────┴───────────────────┴─────────────────────┘ ``` - ## toFixedString {#tofixedstring} [String](../data-types/string.md) 型の引数を [FixedString(N)](../data-types/fixedstring.md) 型(長さ N の固定長文字列)に変換します。 @@ -5721,7 +5619,6 @@ SELECT toFixedString('foo', 8) AS s; └───────────────┘ ``` - ## toStringCutToZero {#tostringcuttozero} String または FixedString 型の引数を受け取り、最初に見つかったゼロバイト以降を切り捨てた String を返します。 @@ -5762,7 +5659,6 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut; └────────────┴───────┘ ``` - ## toDecimalString {#todecimalstring} 数値を、出力時の小数桁数をユーザーが指定できる `String` 型の値に変換します。 @@ -5801,7 +5697,6 @@ SELECT toDecimalString(CAST('64.32', 'Float64'), 5); └─────────────────────────────────────────────┘ ``` - ## reinterpretAsUInt8 {#reinterpretasuint8} 入力値を `UInt8` 型の値として解釈し、バイト列の再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。ターゲット型が入力型を表現できない場合、出力は意味を成さない値になります。 @@ -5840,7 +5735,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt16 {#reinterpretasuint16} 入力値を `UInt16` 型の値として扱い、バイト列の再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとは試みません。対象の型が入力の値を表現できない場合、出力される値は意味を持ちません。 @@ -5879,7 +5773,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt32 {#reinterpretasuint32} 入力値を `UInt32` 型の値として扱い、バイト単位で再解釈します。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとしません。対象の型が入力値を表現できない場合、出力は意味のない値になります。 @@ -5918,7 +5811,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt64 {#reinterpretasuint64} 入力値を `UInt64` 型の値として扱うことで、バイト列を再解釈します。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとは試みません。対象の型が入力値を表現できない場合、出力結果は意味を持ちません。 @@ -5957,7 +5849,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt128 {#reinterpretasuint128} 入力値を `UInt128` 型の値として扱い、バイト列として再解釈します。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。対象の型で入力の値を表現できない場合、出力は意味のある値にはなりません。 @@ -5996,7 +5887,6 @@ SELECT 
└─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt256 {#reinterpretasuint256} 入力値を `UInt256` 型の値として扱い、バイト単位で再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。対象の型が入力値を表現できない場合、出力は無意味な値になります。 @@ -6035,7 +5925,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt8 {#reinterpretasint8} 入力値を `Int8` 型の値として扱い、バイト列として再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。対象の型が入力値を表現できない場合、出力は無意味な値になります。 @@ -6074,7 +5963,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt16 {#reinterpretasint16} 入力値を `Int16` 型の値として扱うことで、バイトレベルで再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。対象の型が入力の型を表現できない場合、出力は意味のない値になります。 @@ -6113,7 +6001,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt32 {#reinterpretasint32} 入力値を `Int32` 型の値として扱い、そのバイト表現を再解釈します。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとしません。ターゲット型が入力値を表現できない場合、出力には意味がありません。 @@ -6152,7 +6039,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt64 {#reinterpretasint64} 入力値を `Int64` 型の値として扱うことで、バイト列として再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。対象の型で入力値を表現できない場合、出力は意味のない値になります。 @@ -6191,7 +6077,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt128 {#reinterpretasint128} 入力値を Int128 型の値として扱い、バイト列として再解釈します。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。対象の型で入力値を表現できない場合、その出力は意味のない値になります。 @@ -6230,7 +6115,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt256 {#reinterpretasint256} 入力値を `Int256` 型の値として解釈し直し、バイト列の再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値の保持を試みません。対象の型が入力の型を表現できない場合、出力結果は無意味な値になります。 @@ -6269,7 +6153,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsFloat32 {#reinterpretasfloat32} 入力値を Float32 型の値として解釈し、バイト列の再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。対象の型が入力の型を表現できない場合、出力には意味がありません。 @@ -6304,7 +6187,6 @@ SELECT reinterpretAsUInt32(toFloat32(0.2)) AS x, 
reinterpretAsFloat32(x); └────────────┴─────────────────────────┘ ``` - ## reinterpretAsFloat64 {#reinterpretasfloat64} 入力値を `Float64` 型の値として扱い、バイト列の再解釈を行います。[`CAST`](#cast) と異なり、この関数は元の値を保持しようとはしません。対象の型が入力値の型を表現できない場合、出力は無意味な値になります。 @@ -6339,7 +6221,6 @@ SELECT reinterpretAsUInt64(toFloat64(0.2)) AS x, reinterpretAsFloat64(x); └─────────────────────┴─────────────────────────┘ ``` - ## reinterpretAsDate {#reinterpretasdate} 文字列、FixedString、または数値を受け取り、そのバイト列をホストのバイト順序(リトルエンディアン)での数値として解釈します。解釈された数値を Unix Epoch の開始時点からの日数として解釈し、その日数に対応する日付を返します。 @@ -6380,7 +6261,6 @@ SELECT reinterpretAsDate(65), reinterpretAsDate('A'); └───────────────────────┴────────────────────────┘ ``` - ## reinterpretAsDateTime {#reinterpretasdatetime} これらの関数は文字列を受け取り、その文字列の先頭にあるバイト列をホスト順序(リトルエンディアン)の数値として解釈します。Unixエポックの開始時点からの経過秒数として解釈した日時を返します。 @@ -6421,7 +6301,6 @@ SELECT reinterpretAsDateTime(65), reinterpretAsDateTime('A'); └───────────────────────────┴────────────────────────────┘ ``` - ## reinterpretAsString {#reinterpretasstring} この関数は数値、日付、または日時を受け取り、対応する値をホストのバイトオーダー(リトルエンディアン)で表したバイト列を含む文字列を返します。末尾の null バイトは削除されます。例えば、UInt32 型の値 255 は 1 バイト長の文字列になります。 @@ -6458,7 +6337,6 @@ SELECT └────────────────────────────────────────────────────────┴───────────────────────────────────────────┘ ``` - ## reinterpretAsFixedString {#reinterpretasfixedstring} この関数は数値、日付、または日時を受け取り、対応する値をホストのバイトオーダー(リトルエンディアン)で表すバイト列を含む `FixedString` を返します。末尾にあるヌルバイトは削除されます。たとえば、`UInt32` 型の値 255 は、長さ 1 バイトの `FixedString` になります。 @@ -6495,7 +6373,6 @@ SELECT └─────────────────────────────────────────────────────────────┴────────────────────────────────────────────────┘ ``` - ## reinterpretAsUUID {#reinterpretasuuid} :::note @@ -6556,7 +6433,6 @@ SELECT uuid = uuid2; └─────────────────────┘ ``` - ## reinterpret {#reinterpret} `x` の値のメモリ上のバイト列をそのまま利用し、それを変換先の型として再解釈します。 @@ -6608,7 +6484,6 @@ SELECT reinterpret(x'3108b4403108d4403108b4403108d440', 'Array(Float32)') AS str └────────────────────────────┘ ``` - ## CAST 
{#cast} 入力値を指定されたデータ型に変換します。[reinterpret](#reinterpret) 関数と異なり、`CAST` は新しいデータ型を使って同じ値を表現しようとします。変換できない場合は例外が送出されます。 @@ -6714,7 +6589,6 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null; * [cast_keep_nullable](../../operations/settings/settings.md/#cast_keep_nullable) 設定 - ## accurateCast(x, T) {#accuratecastx-t} `x` をデータ型 `T` に変換します。 @@ -6749,7 +6623,6 @@ SELECT accurateCast(-1, 'UInt8') AS uint8; コード: 70. DB::Exception: localhost:9000 から受信。DB::Exception: Int8 列の値を UInt8 型に安全に変換できません: accurateCast(-1, 'UInt8') AS uint8 の処理中。 ``` - ## accurateCastOrNull(x, T) {#accuratecastornullx-t} 入力値 `x` を指定されたデータ型 `T` に変換します。常に [Nullable](../data-types/nullable.md) 型を返し、変換後の値が対象の型で表現できない場合は [NULL](/sql-reference/syntax#null) を返します。 @@ -6802,7 +6675,6 @@ SELECT └───────┴──────┴──────────────┘ ``` - ## accurateCastOrDefault(x, T[, default_value]) {#accuratecastordefaultx-t-default_value} 入力値 `x` を指定されたデータ型 `T` に変換します。キャスト結果が対象型で表現できない場合は、その型のデフォルト値、もしくは指定されていれば `default_value` を返します。 @@ -6859,7 +6731,6 @@ SELECT └───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘ ``` - ## toInterval {#toInterval} 数値とインターバル単位(例:'second' や 'day')から [Interval](../../sql-reference/data-types/special-data-types/interval.md) データ型の値を作成します。 @@ -6907,7 +6778,6 @@ SELECT toDateTime('2025-01-01 00:00:00') + toInterval(1, 'hour') └────────────────────────────────────────────────────────────┘ ``` - ## toIntervalYear {#tointervalyear} `n` 年を表すインターバル値を、データ型 [IntervalYear](../data-types/special-data-types/interval.md) として返します。 @@ -6945,7 +6815,6 @@ SELECT date + interval_to_year AS result └────────────┘ ``` - ## toIntervalQuarter {#tointervalquarter} `n` 四半期を表す [IntervalQuarter](../data-types/special-data-types/interval.md) 型の間隔を返します。 @@ -6983,7 +6852,6 @@ SELECT date + interval_to_quarter AS result └────────────┘ ``` - ## toIntervalMonth {#tointervalmonth} データ型 [IntervalMonth](../data-types/special-data-types/interval.md) の `n` か月の間隔を返します。 @@ -7021,7 +6889,6 @@ 
SELECT date + interval_to_month AS result └────────────┘ ``` - ## toIntervalWeek {#tointervalweek} データ型 [IntervalWeek](../data-types/special-data-types/interval.md) の `n` 週間を表す間隔を返します。 @@ -7059,7 +6926,6 @@ SELECT date + interval_to_week AS result └────────────┘ ``` - ## toIntervalDay {#tointervalday} `n` 日の時間間隔を表す [IntervalDay](../data-types/special-data-types/interval.md) 型の値を返します。 @@ -7097,7 +6963,6 @@ SELECT date + interval_to_days AS result └────────────┘ ``` - ## toIntervalHour {#tointervalhour} 長さ `n` 時間の間隔値を、データ型 [IntervalHour](../data-types/special-data-types/interval.md) として返します。 @@ -7135,7 +7000,6 @@ SELECT date + interval_to_hours AS result └─────────────────────┘ ``` - ## toIntervalMinute {#tointervalminute} データ型 [IntervalMinute](../data-types/special-data-types/interval.md) の `n` 分を表す間隔を返します。 @@ -7173,7 +7037,6 @@ Result: 結果: └─────────────────────┘ ``` - ## toIntervalSecond {#tointervalsecond} `n` 秒のインターバルを表す [IntervalSecond](../data-types/special-data-types/interval.md) 型の値を返します。 @@ -7211,7 +7074,6 @@ SELECT date + interval_to_seconds AS result └─────────────────────┘ ``` - ## toIntervalMillisecond {#tointervalmillisecond} `n` ミリ秒の間隔をデータ型 [IntervalMillisecond](../data-types/special-data-types/interval.md) で返します。 @@ -7249,7 +7111,6 @@ SELECT date + interval_to_milliseconds AS result └─────────────────────────┘ ``` - ## toIntervalMicrosecond {#tointervalmicrosecond} `n` マイクロ秒の値を [IntervalMicrosecond](../data-types/special-data-types/interval.md) 型のインターバルとして返します。 @@ -7287,7 +7148,6 @@ SELECT date + interval_to_microseconds AS result └────────────────────────────┘ ``` - ## toIntervalNanosecond {#tointervalnanosecond} `n` ナノ秒のインターバルをデータ型 [IntervalNanosecond](../data-types/special-data-types/interval.md) で返します。 @@ -7325,7 +7185,6 @@ SELECT date + interval_to_nanoseconds AS result └───────────────────────────────┘ ``` - ## parseDateTime {#parsedatetime} [String](../data-types/string.md) を [MySQL 
のフォーマット文字列](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format) に従って [DateTime](../data-types/datetime.md) に変換します。 @@ -7366,21 +7225,16 @@ SELECT parseDateTime('2021-01-04+23:00:00', '%Y-%m-%d+%H:%i:%s') 別名:`TO_TIMESTAMP` - ## parseDateTimeOrZero {#parsedatetimeorzero} [parseDateTime](#parsedatetime) と同様ですが、処理できない日付形式に遭遇した場合は 0 の日時値を返します。 - - ## parseDateTimeOrNull {#parsedatetimeornull} [parseDateTime](#parsedatetime) と同様ですが、処理できない日付形式に遭遇した場合は `NULL` を返します。 エイリアス: `str_to_date`。 - - ## parseDateTimeInJodaSyntax {#parsedatetimeinjodasyntax} [parseDateTime](#parsedatetime) と同様ですが、フォーマット文字列に MySQL 構文ではなく [Joda](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) 構文を使用します。 @@ -7421,19 +7275,14 @@ SELECT parseDateTimeInJodaSyntax('2023-02-24 14:53:31', 'yyyy-MM-dd HH:mm:ss', ' └─────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## parseDateTimeInJodaSyntaxOrZero {#parsedatetimeinjodasyntaxorzero} [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) と同様ですが、処理できない日付形式に当たった場合は、ゼロ値の日付を返します。 - - ## parseDateTimeInJodaSyntaxOrNull {#parsedatetimeinjodasyntaxornull} [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) と同様ですが、処理できない日付形式を検出した場合は `NULL` を返します。 - - ## parseDateTime64 {#parsedatetime64} [MySQL のフォーマット文字列](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format)に従って、[String](../data-types/string.md) を [DateTime64](../data-types/datetime64.md) に変換します。 @@ -7455,19 +7304,14 @@ parseDateTime64(str[, format[, timezone]]) MySQL スタイルのフォーマット文字列に従って、入力文字列から解析された [DateTime64](../data-types/datetime64.md) 値を返します。 返される値の精度は 6 桁です。 - ## parseDateTime64OrZero {#parsedatetime64orzero} [parseDateTime64](#parsedatetime64) と同様ですが、処理できない日付形式に遭遇した場合は、ゼロの日時を返します。 - - ## parseDateTime64OrNull {#parsedatetime64ornull} [parseDateTime64](#parsedatetime64) と同様ですが、処理できない日付形式に遭遇した場合は `NULL` を返します。 - - ## parseDateTime64InJodaSyntax 
{#parsedatetime64injodasyntax} [String](../data-types/string.md) を [Joda のフォーマット文字列](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) に従って [DateTime64](../data-types/datetime64.md) に変換します。 @@ -7489,19 +7333,14 @@ parseDateTime64InJodaSyntax(str[, format[, timezone]]) 入力文字列を Joda スタイルのフォーマット文字列に従って解析した [DateTime64](../data-types/datetime64.md) 値を返します。 返される値の精度は、フォーマット文字列内の `S` プレースホルダーの数に等しくなります(ただし最大 6 まで)。 - ## parseDateTime64InJodaSyntaxOrZero {#parsedatetime64injodasyntaxorzero} [parseDateTime64InJodaSyntax](#parsedatetime64injodasyntax) と同様ですが、処理できない日付フォーマットに遭遇した場合は、ゼロの日時を返します。 - - ## parseDateTime64InJodaSyntaxOrNull {#parsedatetime64injodasyntaxornull} [parseDateTime64InJodaSyntax](#parsedatetime64injodasyntax) と同様ですが、処理できない日付形式が指定された場合は `NULL` を返します。 - - ## parseDateTimeBestEffort {#parsedatetimebesteffort} ## parseDateTime32BestEffort {#parsedatetime32besteffort} @@ -7607,7 +7446,6 @@ SELECT toYear(now()) AS year, parseDateTimeBestEffort('10 20:19'); 結果: - ```response ┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐ │ 2023 │ 2023-01-10 20:19:00 │ @@ -7644,39 +7482,28 @@ FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around); * [@xkcd による ISO 8601 に関するアナウンス](https://xkcd.com/1179/) * [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2) - ## parseDateTimeBestEffortUS {#parsedatetimebesteffortus} この関数は、`YYYY-MM-DD hh:mm:ss` のような ISO の日付形式や、`YYYYMMDDhhmmss`、`YYYY-MM`、`DD hh`、`YYYY-MM-DD hh:mm:ss ±h:mm` など、月と日付の要素を曖昧さなしに抽出できるその他の日付形式に対しては、[parseDateTimeBestEffort](#parsedatetimebesteffort) と同様に動作します。`MM/DD/YYYY`、`MM-DD-YYYY`、`MM-DD-YY` のように月と日付の要素を一意に特定できない場合には、`DD/MM/YYYY`、`DD-MM-YYYY`、`DD-MM-YY` ではなく、米国式の日付形式を優先します。ただし例外として、月の値が 12 より大きく 31 以下の場合には、この関数は [parseDateTimeBestEffort](#parsedatetimebesteffort) の動作にフォールバックします。例えば、`15/08/2020` は `2020-08-15` として解析されます。 - - ## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} ## parseDateTime32BestEffortOrNull {#parsedatetime32besteffortornull} 
[parseDateTimeBestEffort](#parsedatetimebesteffort) と同様ですが、処理できない日付形式に出会った場合は `NULL` を返します。 - - ## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} ## parseDateTime32BestEffortOrZero {#parsedatetime32besteffortorzero} [parseDateTimeBestEffort](#parsedatetimebesteffort) と同様ですが、処理できない形式の日付に遭遇した場合は、ゼロ日付またはゼロ日時を返します。 - - ## parseDateTimeBestEffortUSOrNull {#parsedatetimebesteffortusornull} [parseDateTimeBestEffortUS](#parsedatetimebesteffortus) 関数と同様ですが、処理できない日付形式を検出した場合は `NULL` を返します。 - - ## parseDateTimeBestEffortUSOrZero {#parsedatetimebesteffortusorzero} [parseDateTimeBestEffortUS](#parsedatetimebesteffortus) 関数と同様ですが、処理できない形式の日付を検出した場合に、ゼロ日付(`1970-01-01`)または時刻付きゼロ日付(`1970-01-01 00:00:00`)を返します。 - - ## parseDateTime64BestEffort {#parsedatetime64besteffort} [parseDateTimeBestEffort](#parsedatetimebesteffort) 関数と同様ですが、ミリ秒およびマイクロ秒も解析し、[DateTime](/sql-reference/data-types/datetime) 型の値を返します。 @@ -7723,37 +7550,26 @@ FORMAT PrettyCompactMonoBlock; └────────────────────────────┴────────────────────────────────┘ ``` - ## parseDateTime64BestEffortUS {#parsedatetime64besteffortus} [parseDateTime64BestEffort](#parsedatetime64besteffort) と同様ですが、あいまいさがある場合には、米国形式の日付(`MM/DD/YYYY` など)を優先して解釈します。 - - ## parseDateTime64BestEffortOrNull {#parsedatetime64besteffortornull} [parseDateTime64BestEffort](#parsedatetime64besteffort) と同様ですが、処理できない日付形式に遭遇した場合は `NULL` を返します。 - - ## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero} [parseDateTime64BestEffort](#parsedatetime64besteffort) と同様ですが、処理できない日付形式に遭遇した場合は、ゼロの日付またはゼロの日時を返します。 - - ## parseDateTime64BestEffortUSOrNull {#parsedatetime64besteffortusornull} [parseDateTime64BestEffort](#parsedatetime64besteffort) と同じですが、あいまいな場合には米国の日時形式(`MM/DD/YYYY` など)を優先的に解釈し、処理できない形式だった場合は `NULL` を返します。 - - ## parseDateTime64BestEffortUSOrZero {#parsedatetime64besteffortusorzero} [parseDateTime64BestEffort](#parsedatetime64besteffort) と同様ですが、この関数はあいまいな場合に US の日付形式(`MM/DD/YYYY` など)を優先し、解釈できない日付形式に遭遇したときはゼロ日付またはゼロ日時を返します。 - - ## 
toLowCardinality {#tolowcardinality} 入力引数を、同じデータ型の [LowCardinality](../data-types/lowcardinality.md) バージョンに変換します。 @@ -7790,7 +7606,6 @@ SELECT toLowCardinality('1'); └───────────────────────┘ ``` - ## toUnixTimestamp {#toUnixTimestamp} `String`、`Date`、または `DateTime` を、Unix タイムスタンプ(`1970-01-01 00:00:00 UTC` からの経過秒数)を表す `UInt32` 値に変換します。 @@ -7838,7 +7653,6 @@ from_date: 1509840000 from_date32: 1509840000 ``` - ## toUnixTimestamp64Second {#tounixtimestamp64second} `DateTime64` を秒単位の固定精度を持つ `Int64` 値に変換します。入力値は、その精度に応じて適切にスケーリングされます。 @@ -7878,7 +7692,6 @@ SELECT toUnixTimestamp64Second(dt64); └───────────────────────────────┘ ``` - ## toUnixTimestamp64Milli {#tounixtimestamp64milli} `DateTime64` を固定のミリ秒精度を持つ `Int64` 値に変換します。入力値は、その精度に応じて適切にスケールアップまたはスケールダウンされます。 @@ -7918,7 +7731,6 @@ SELECT toUnixTimestamp64Milli(dt64); └──────────────────────────────┘ ``` - ## toUnixTimestamp64Micro {#tounixtimestamp64micro} `DateTime64` を、マイクロ秒単位で固定精度の `Int64` 値に変換します。入力値は、その精度に応じて適切にスケール変換(拡大または縮小)されます。 @@ -7958,7 +7770,6 @@ SELECT toUnixTimestamp64Micro(dt64); └──────────────────────────────┘ ``` - ## toUnixTimestamp64Nano {#tounixtimestamp64nano} `DateTime64` をナノ秒精度に固定した `Int64` 値に変換します。入力値は、その精度に応じて適切にスケール変換(拡大または縮小)されます。 @@ -7998,7 +7809,6 @@ SELECT toUnixTimestamp64Nano(dt64); └─────────────────────────────┘ ``` - ## fromUnixTimestamp64Second {#fromunixtimestamp64second} `Int64` を固定の秒精度と任意のタイムゾーンを持つ `DateTime64` 値に変換します。入力値は、その精度に応じて適切にスケールアップまたはスケールダウンされます。 @@ -8041,7 +7851,6 @@ SELECT └─────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Milli {#fromunixtimestamp64milli} `Int64` を、固定のミリ秒単位の精度と任意のタイムゾーンを持つ `DateTime64` 値に変換します。入力値は、その精度に応じて適切にスケーリング(拡大または縮小)されます。 @@ -8084,7 +7893,6 @@ SELECT └─────────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Micro {#fromunixtimestamp64micro} `Int64` を、マイクロ秒固定精度と任意のタイムゾーンを持つ `DateTime64` 値に変換します。入力値は、その精度に応じて適切にスケールアップまたはスケールダウンされます。 @@ -8127,7 +7935,6 @@ SELECT 
└────────────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Nano {#fromunixtimestamp64nano} `Int64` をナノ秒精度の `DateTime64` 値に変換し、必要に応じてタイムゾーンを指定します。入力値は、その精度に応じて適切にスケールアップまたはスケールダウンされます。 @@ -8170,7 +7977,6 @@ SELECT └───────────────────────────────┴──────────────────────┘ ``` - ## formatRow {#formatrow} 任意の式を、指定されたフォーマットに従って文字列に変換します。 @@ -8242,7 +8048,6 @@ SETTINGS format_custom_result_before_delimiter='\n', format_custom_resul 注意: この関数では行ベースのフォーマットのみがサポートされています。 - ## formatRowNoNewline {#formatrownonewline} 任意の式を、与えられたフォーマットを使って文字列に変換します。`formatRow` との違いは、この関数は末尾にある `\n` があればそれを取り除く点です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md index a184c28c9a1..7f2e8eabc97 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md @@ -10,7 +10,6 @@ import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # ユーザー定義関数 (UDF) {#executable-user-defined-functions} @@ -48,12 +47,8 @@ ClickHouse は、任意の外部実行可能プログラムやスクリプトを コマンドは `STDIN` から引数を読み込み、結果を `STDOUT` に出力しなければなりません。コマンドは引数を逐次的に処理する必要があります。つまり、あるチャンクの引数を処理した後、次のチャンクを待機しなければなりません。 - - ## 実行可能なユーザー定義関数 {#executable-user-defined-functions} - - ## 例 {#examples} ### インラインスクリプトを用いた UDF {#udf-inline} @@ -193,7 +188,6 @@ SELECT test_function_python(toUInt64(2)); XML または YAML の設定を使用して、名前付き引数を取り、フォーマットに [JSONEachRow](/interfaces/formats/JSONEachRow) を指定した `test_function_sum_json` を作成します。 - ファイル `test_function.xml`(パス設定がデフォルトの場合は `/etc/clickhouse-server/test_function.xml`)。 @@ -332,7 +326,6 @@ if __name__ == "__main__": SELECT test_function_parameter_python(1)(2); ``` - ```text title="Result" ┌─test_function_parameter_python(1)(2)─┐ │ パラメータ1の値2 │ @@ -412,15 +405,12 @@ SELECT test_shell(number) FROM 
numbers(10); └────────────────────┘ ``` - ## エラー処理 {#error-handling} 一部の関数は、データが無効な場合に例外をスローすることがあります。 この場合、クエリは中断され、エラーメッセージがクライアントに返されます。 分散処理では、いずれかのサーバーで例外が発生すると、他のサーバーもクエリの中断を試みます。 - - ## 引数式の評価 {#evaluation-of-argument-expressions} ほとんどのプログラミング言語では、特定の演算子において、引数の一方が評価されないことがあります。 @@ -428,8 +418,6 @@ SELECT test_shell(number) FROM numbers(10); ClickHouse では、関数(演算子)の引数は常に評価されます。 これは、行ごとに個別に計算するのではなく、列の一部をまとめて一度に評価するためです。 - - ## 分散クエリ処理における関数の実行 {#performing-functions-for-distributed-query-processing} 分散クエリ処理では、クエリ処理のできるだけ多くの段階をリモートサーバー上で実行し、残りの段階(中間結果のマージとそれ以降のすべて)はリクエスト元サーバーで実行します。 @@ -446,13 +434,9 @@ ClickHouse では、関数(演算子)の引数は常に評価されます。 クエリ内の関数がリクエスト元サーバーで実行されるようになっていても、それをリモートサーバー上で実行する必要がある場合は、その関数を `any` 集約関数でラップするか、`GROUP BY` 句のキーに追加することができます。 - - ## SQL ユーザー定義関数 {#sql-user-defined-functions} ラムダ式を用いてカスタム関数を作成するには、[CREATE FUNCTION](../statements/create/function.md) ステートメントを使用します。これらの関数を削除するには、[DROP FUNCTION](../statements/drop.md#drop-function) ステートメントを使用します。 - - ## 関連コンテンツ {#related-content} - [ClickHouse Cloudのユーザー定義関数](https://clickhouse.com/blog/user-defined-functions-clickhouse-udfs) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md index 99e81c91790..2a389a9cd52 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md @@ -6,8 +6,6 @@ title: 'ULID を扱う関数' doc_type: 'reference' --- - - # ULID を扱うための関数 {#functions-for-working-with-ulids} :::note @@ -20,7 +18,6 @@ doc_type: 'reference' 詳細は https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md を参照してください。 */ } - {/*AUTOGENERATED_START*/ } ## ULIDStringToDateTime {#ULIDStringToDateTime} @@ -58,7 +55,6 @@ SELECT ULIDStringToDateTime('01GNB2S2FGN2P93QPXDNB4EN2R') 
└────────────────────────────────────────────────────┘ ``` - ## generateULID {#generateULID} 導入バージョン: v23.2 @@ -107,7 +103,6 @@ SELECT generateULID(1), generateULID(2) {/*AUTOGENERATED_END*/ } - ## 関連項目 {#see-also} - [UUID](../../sql-reference/functions/uuid-functions.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md index dec82f9a55f..196b745e74a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md @@ -128,7 +128,6 @@ SELECT └──────────────────────────┴──────────────────────────┘ ``` - ## データセットを扱う演算子 {#operators-for-working-with-data-sets} [IN 演算子](../../sql-reference/operators/in.md)および[EXISTS 演算子](../../sql-reference/operators/exists.md)を参照してください。 @@ -203,7 +202,6 @@ SELECT number AS a FROM numbers(10) WHERE a > ANY (SELECT number FROM numbers(3, └───┘ ``` - ## 日付と時刻を扱う演算子 {#operators-for-working-with-dates-and-times} ### EXTRACT {#extract} @@ -270,7 +268,6 @@ FROM test.Orders; さらに多くの例については、[tests](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql) を参照してください。 - ### INTERVAL {#interval} [Date](../../sql-reference/data-types/date.md) 型および [DateTime](../../sql-reference/data-types/datetime.md) 型の値との算術演算で使用するための [Interval](../../sql-reference/data-types/special-data-types/interval.md) 型の値を作成します。 @@ -345,7 +342,6 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul') AS time, time + 60 * 6 * [Interval](../../sql-reference/data-types/special-data-types/interval.md) データ型 * [toInterval](/sql-reference/functions/type-conversion-functions#tointervalyear) 型変換関数 - ## 論理AND演算子 {#logical-and-operator} 構文 `SELECT a AND b` — 関数 [and](/sql-reference/functions/logical-functions#and) を用いて、`a` と `b` の論理積を計算します。 @@ -382,7 +378,6 @@ END `transform` 関数は `NULL` を処理できません。 - ## 連結演算子 
{#concatenation-operator} `s1 || s2` – `concat(s1, s2)` 関数。 @@ -433,7 +428,6 @@ SELECT x+100 FROM t_null WHERE y IS NULL └──────────────┘ ``` - ### IS NOT NULL {#is_not_null} * [Nullable](../../sql-reference/data-types/nullable.md) 型の値に対しては、`IS NOT NULL` 演算子は次を返します: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md index d3a14022076..b37f3d92f0d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md @@ -33,7 +33,6 @@ ALTER [TEMPORARY] TABLE [db].name [ON CLUSTER cluster] ADD|DROP|RENAME|CLEAR|COM これらのアクションについては、以下で詳しく説明します。 - ## ADD COLUMN(列を追加) {#add-column} ```sql @@ -70,7 +69,6 @@ ToDrop UInt32 Added3 UInt32 ``` - ## DROP COLUMN {#drop-column} ```sql @@ -91,7 +89,6 @@ DROP COLUMN [IF EXISTS] name ALTER TABLE visits DROP COLUMN browser ``` - ## 列名を変更する {#rename-column} ```sql @@ -108,7 +105,6 @@ RENAME COLUMN [IF EXISTS] name to new_name ALTER TABLE visits RENAME COLUMN webBrowser TO browser ``` - ## CLEAR COLUMN {#clear-column} ```sql @@ -125,7 +121,6 @@ CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() ``` - ## COMMENT 列 {#comment-column} ```sql @@ -144,7 +139,6 @@ COMMENT COLUMN [IF EXISTS] name 'テキストコメント' ALTER TABLE visits COMMENT COLUMN browser 'この列はサイトへのアクセスに使用されたブラウザを表示します。' ``` - ## MODIFY COLUMN {#modify-column} ```sql @@ -224,7 +218,6 @@ DESCRIBE users; `Nullable` カラムを `Non-Nullable` に変更する際は注意してください。カラム内に `NULL` 値が含まれていないことを必ず確認してください。そうでない場合、そのカラムから読み込む際に問題が発生します。その場合の回避策としては、`KILL MUTATION` を実行して mutation を停止し、カラムを `Nullable` 型に戻してください。 ::: - ## MODIFY COLUMN REMOVE {#modify-column-remove} 次の列プロパティのいずれかを削除します: `DEFAULT`, `ALIAS`, `MATERIALIZED`, `CODEC`, `COMMENT`, `TTL`, `SETTINGS`。 @@ -247,7 +240,6 @@ 
ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; * [REMOVE TTL](ttl.md) - ## MODIFY COLUMN MODIFY SETTING — 列設定の変更 {#modify-column-modify-setting} 列の設定を変更します。 @@ -266,7 +258,6 @@ ALTER TABLE table_name MODIFY COLUMN column_name MODIFY SETTING name=value,...; ALTER TABLE table_name MODIFY COLUMN column_name MODIFY SETTING max_compress_block_size = 1048576; ``` - ## MODIFY COLUMN RESET SETTING {#modify-column-reset-setting} 列の設定をリセットします。また、テーブルの CREATE クエリ内の列式から、その設定の宣言も削除します。 @@ -285,7 +276,6 @@ ALTER TABLE テーブル名 MODIFY COLUMN カラム名 RESET SETTING 設定名,. ALTER TABLE table_name MODIFY COLUMN column_name RESET SETTING max_compress_block_size; ``` - ## MATERIALIZE COLUMN {#materialize-column} `DEFAULT` または `MATERIALIZED` の値式を持つカラムをマテリアライズします。`ALTER TABLE table_name ADD COLUMN column_name MATERIALIZED` を使用してマテリアライズされたカラムを追加する場合、マテリアライズされた値を持たない既存の行は自動的には埋められません。`MATERIALIZE COLUMN` 文は、`DEFAULT` または `MATERIALIZED` の式が追加または更新された後(この操作はメタデータのみを更新し、既存データは変更しない)、既存のカラムデータを書き換えるために使用できます。ソートキー内のカラムをマテリアライズすることは、ソート順を破壊しうるため無効な操作である点に注意してください。 @@ -346,7 +336,6 @@ SELECT groupArray(x), groupArray(s) FROM tmp; * [MATERIALIZED](/sql-reference/statements/create/view#materialized-view) - ## 制限事項 {#limitations} `ALTER` クエリでは、ネストされたデータ構造内の個々の要素(カラム)の作成および削除はできますが、ネストされたデータ構造全体の作成や削除はできません。ネストされたデータ構造を追加するには、`name.nested_name` のような名前と型 `Array(T)` を持つカラムを追加します。ネストされたデータ構造は、「ドットの前のプレフィックスが同じ名前」を持つ複数の配列カラムと同等です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md index f05f68cd797..c9ba623bb56 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md @@ -8,21 +8,16 @@ keywords: ['ALTER TABLE', 'MODIFY COMMENT'] doc_type: 'reference' --- - - # ALTER TABLE ... 
MODIFY COMMENT {#alter-table-modify-comment} テーブルコメントを、コメントが事前に設定されていたかどうかに関係なく追加、変更、または削除します。コメントの変更は、[`system.tables`](../../../operations/system-tables/tables.md) と `SHOW CREATE TABLE` クエリの両方に反映されます。 - - ## 構文 {#syntax} ```sql ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment' ``` - ## 例 {#examples} コメント付きテーブルを作成するには、次のようにします。 @@ -78,7 +73,6 @@ WHERE database = currentDatabase() AND name = 'table_with_comment'; └─────────┘ ``` - ## 注意事項 {#caveats} Replicated テーブルの場合、コメントはレプリカごとに異なる場合があります。 @@ -86,8 +80,6 @@ Replicated テーブルの場合、コメントはレプリカごとに異なる この機能はバージョン 23.9 以降で利用可能です。以前の ClickHouse のバージョンでは使用できません。 - - ## 関連コンテンツ {#related-content} - [`COMMENT`](/sql-reference/statements/create/table#comment-clause) 句 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md index d08dff0d57c..198896c936a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md @@ -8,21 +8,16 @@ keywords: ['ALTER DATABASE', 'MODIFY COMMENT'] doc_type: 'reference' --- - - # ALTER DATABASE ... 
MODIFY COMMENT {#alter-database-modify-comment} データベースのコメントを、あらかじめ設定されていたかどうかに関係なく追加、変更、または削除します。コメントの変更は、[`system.databases`](/operations/system-tables/databases.md) と `SHOW CREATE DATABASE` クエリの両方に反映されます。 - - ## 構文 {#syntax} ```sql ALTER DATABASE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment' ``` - ## 例 {#examples} コメント付きの `DATABASE` を作成するには: @@ -73,7 +68,6 @@ WHERE name = 'database_with_comment'; └─────────┘ ``` - ## 関連コンテンツ {#related-content} - [`COMMENT`](/sql-reference/statements/create/table#comment-clause) 句 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md index 0a7e93c8ec8..6f92bcfa059 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md @@ -7,8 +7,6 @@ title: 'ALTER TABLE ... DELETE 文' doc_type: 'reference' --- - - # ALTER TABLE ... 
DELETE 文 {#alter-table-delete-statement} ```sql @@ -33,7 +31,6 @@ ALTER TABLE [db.]table [ON CLUSTER cluster] WHERE 句 filter_expr を満たす * [ALTER クエリの同期性](/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) * [mutations_sync](/operations/settings/settings.md/#mutations_sync) 設定 - ## 関連コンテンツ {#related-content} - ブログ記事: [ClickHouse における更新と削除の扱い](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md index 18596ca680b..ec59be55b9e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md @@ -7,8 +7,6 @@ title: 'ALTER' doc_type: 'reference' --- - - # ALTER {#alter} ほとんどの `ALTER TABLE` クエリは、テーブルの設定またはデータを変更します。 @@ -51,8 +49,6 @@ doc_type: 'reference' | [ALTER TABLE ... MODIFY COMMENT](/sql-reference/statements/alter/comment.md) | コメントがあらかじめ設定されていたかどうかに関係なく、テーブルのコメントを追加、変更、または削除します。 | | [ALTER NAMED COLLECTION](/sql-reference/statements/alter/named-collection.md) | [Named Collections](/operations/named-collections.md) を変更します。 | - - ## ミューテーション {#mutations} テーブルデータを変更するための `ALTER` クエリは、「ミューテーション」と呼ばれるメカニズムで実装されています。代表的なものは [ALTER TABLE ... DELETE](/sql-reference/statements/alter/delete.md) や [ALTER TABLE ... 
UPDATE](/sql-reference/statements/alter/update.md) です。これらは [MergeTree](/engines/table-engines/mergetree-family/index.md) テーブルにおけるマージと類似した非同期のバックグラウンドプロセスで、新しい「ミューテートされた」パーツのバージョンを生成します。 @@ -66,8 +62,6 @@ doc_type: 'reference' 完了したミューテーションのエントリはすぐには削除されません(保持されるエントリ数は `finished_mutations_to_keep` ストレージエンジンパラメータで決まります)。古いミューテーションエントリから順に削除されます。 - - ## ALTER クエリの同期性 {#synchronicity-of-alter-queries} 非レプリケートテーブルに対しては、すべての `ALTER` クエリは同期的に実行されます。レプリケートテーブルに対しては、クエリは該当するアクションの指示を `ZooKeeper` に追加するだけで、アクション自体は可能な限り早く実行されます。ただし、クエリ側で、これらのアクションがすべてのレプリカで完了するまで待機させることも可能です。 @@ -82,8 +76,6 @@ doc_type: 'reference' すべての `ALTER` クエリについて、`alter_sync = 2` であり、かつ一部のレプリカが `replication_wait_for_inactive_replica_timeout` 設定で指定された時間を超えて非アクティブな状態の場合、`UNFINISHED` という例外がスローされます。 ::: - - ## 関連コンテンツ {#related-content} - ブログ: [ClickHouse における更新と削除の扱い](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md index cd5a1f60b2c..5b05e6ffab6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md @@ -21,8 +21,6 @@ doc_type: 'reference' プロジェクションの内部動作に関する、より技術的な詳細はこの[ページ](/guides/best-practices/sparse-primary-indexes.md/#option-3-projections)を参照してください。 - - ## プライマリキーを使わずにフィルタリングする例 {#example-filtering-without-using-primary-keys} テーブルの作成: @@ -79,7 +77,6 @@ LIMIT 2 SELECT query, projections FROM system.query_log WHERE query_id='' ``` - ## 事前集計クエリの例 {#example-pre-aggregation-query} Projection を使用したテーブルの作成: @@ -157,7 +154,6 @@ GROUP BY user_agent SELECT query, projections FROM system.query_log WHERE query_id='' ``` - ## `_part_offset` フィールドを用いた通常のプロジェクション {#normal-projection-with-part-offset-field} `_part_offset` フィールドを利用する通常のプロジェクションを持つテーブルの作成: 
@@ -202,31 +198,22 @@ WHERE _part_starting_offset + _part_offset IN ( SETTINGS enable_shared_storage_snapshot_in_query = 1 ``` - # プロジェクションの操作 {#manipulating-projections} [プロジェクション](/engines/table-engines/mergetree-family/mergetree.md/#projections)に対して、次の操作を実行できます。 - - ## PROJECTION を追加する {#add-projection} `ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT [GROUP BY] [ORDER BY] )` - テーブルのメタデータに PROJECTION の定義を追加します。 - - ## DROP PROJECTION {#drop-projection} `ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` - テーブルのメタデータからプロジェクションの定義を削除し、ディスクからプロジェクションファイルを削除します。[mutation](/sql-reference/statements/alter/index.md#mutations) として実装されています。 - - ## MATERIALIZE PROJECTION {#materialize-projection} `ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - このクエリは、パーティション `partition_name` 内でプロジェクション `name` を再構築します。[mutation](/sql-reference/statements/alter/index.md#mutations) として実装されています。 - - ## CLEAR PROJECTION {#clear-projection} `ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - 定義は削除せずに、ディスクからプロジェクションファイルを削除します。[mutation](/sql-reference/statements/alter/index.md#mutations)として実装されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md index d3416e2b165..c96567497e1 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md @@ -8,32 +8,22 @@ toc_hidden_folder: true doc_type: 'reference' --- - - # データスキッピングインデックスの操作 {#manipulating-data-skipping-indices} 次の操作を行うことができます。 - - ## ADD INDEX {#add-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression 
TYPE type [GRANULARITY value] [FIRST|AFTER name]` - テーブルのメタデータにインデックス定義を追加します。 - - ## DROP INDEX {#drop-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` - テーブルのメタデータからインデックスの定義を削除し、ディスク上のインデックスファイルも削除します。これは [mutation](/sql-reference/statements/alter/index.md#mutations) として実装されています。 - - ## MATERIALIZE INDEX {#materialize-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` - 指定された `partition_name` に対してセカンダリインデックス `name` を再構築します。[mutation](/sql-reference/statements/alter/index.md#mutations) として実装されています。`IN PARTITION` 句を省略した場合、テーブル全体のデータに対してインデックスを再構築します。 - - ## CLEAR INDEX {#clear-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` - セカンダリインデックスの定義は残したまま、そのファイルをディスクから削除します。[mutation](/sql-reference/statements/alter/index.md#mutations) として実装されています。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md index 9fc0afd6c9e..455ef9e0d47 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md @@ -7,8 +7,6 @@ title: 'ALTER TABLE ... UPDATE 文' doc_type: 'reference' --- - - # ALTER TABLE ... UPDATE 文 {#alter-table-update-statements} ```sql @@ -33,7 +31,6 @@ ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] 
[IN P * [ALTER クエリの同期性](/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) * [mutations_sync](/operations/settings/settings.md/#mutations_sync) 設定 - ## 関連コンテンツ {#related-content} - ブログ記事: [Handling Updates and Deletes in ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md index 0f65528c9ad..fb34c3119c4 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md @@ -30,7 +30,6 @@ ALTER USER [IF EXISTS] name1 [RENAME TO new_name |, name2 [,...]] `ALTER USER` を使用するには、[ALTER USER](../../../sql-reference/statements/grant.md#access-management) 権限が必要です。 - ## GRANTEES 句 {#grantees-clause} このユーザー自身が `GRANT OPTION` 付きで必要なすべてのアクセス権を付与されていることを条件として、このユーザーから [権限](../../../sql-reference/statements/grant.md#privileges) を受け取ることが許可されているユーザーまたはロールを指定します。`GRANTEES` 句のオプションは次のとおりです: @@ -44,8 +43,6 @@ ALTER USER [IF EXISTS] name1 [RENAME TO new_name |, name2 [,...]] 詳しくは [GRANT の構文](../../../sql-reference/statements/grant.md#granting-privilege-syntax) を参照してください。 - - ## 例 {#examples} 割り当てられたロールをデフォルトに設定する: @@ -106,7 +103,6 @@ ALTER USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by ' ALTER USER user1 RESET AUTHENTICATION METHODS TO NEW ``` - ## VALID UNTIL 句 {#valid-until-clause} 認証方式に対して、有効期限の日付と(必要に応じて)時刻を指定できます。パラメータとして文字列値を受け取ります。日時には `YYYY-MM-DD [hh:mm:ss] [timezone]` 形式の使用を推奨します。デフォルトでは、このパラメータの値は `'infinity'` です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md index 1ab2a69553b..13ce2576076 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md @@ -7,8 +7,6 @@ title: 'ALTER TABLE ... MODIFY QUERY ステートメント' doc_type: 'reference' --- - - # ALTER TABLE ... MODIFY QUERY ステートメント {#alter-table-modify-query-statement} `ALTER TABLE ... MODIFY QUERY` ステートメントを使用すると、インジェスト処理を中断することなく、[マテリアライズドビュー](/sql-reference/statements/create/view#materialized-view) 作成時に指定した `SELECT` クエリを変更できます。 @@ -92,7 +90,6 @@ ALTER TABLE mv MODIFY QUERY GROUP BY ts, event_type, browser; ``` - INSERT INTO events SELECT Date '2020-01-03' + interval number * 900 second, ['imp', 'click'][number%2+1], @@ -172,7 +169,6 @@ browser この方法は非常に制限的であり、新しいカラムを追加することなく`SELECT`セクションのみを変更することができます。 ``` - ```sql CREATE TABLE src_table (`a` UInt32) ENGINE = MergeTree ORDER BY a; CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table; @@ -204,7 +200,6 @@ SELECT * FROM mv; └───┘ ``` - ## ALTER TABLE ... MODIFY REFRESH ステートメント {#alter-table--modify-refresh-statement} `ALTER TABLE ... MODIFY REFRESH` ステートメントは、[リフレッシュ可能なマテリアライズドビュー](../create/view.md#refreshable-materialized-view)のリフレッシュパラメーターを変更します。詳しくは[リフレッシュパラメーターの変更](../create/view.md#changing-refresh-parameters)を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md index 55980937cb7..1153aaead8e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md @@ -9,8 +9,6 @@ doc_type: 'reference' `CHECK GRANT` クエリは、現在のユーザーまたはロールに特定の権限が付与されているかどうかを確認するために使用します。 - - ## 構文 {#syntax} クエリの基本構文は以下のとおりです。 @@ -21,7 +19,6 @@ CHECK GRANT 権限[(column_name [,...])] [,...] 
ON {db.table[*]|db[*].*|*.*|tabl * `privilege` — 権限のタイプ。 - ## 例 {#examples} ユーザーにその権限が付与されている場合、レスポンスの `check_grant` は `1` になります。付与されていない場合、レスポンスの `check_grant` は `0` になります。 @@ -50,6 +47,5 @@ CHECK GRANT SELECT(col2) ON table_2; └────────┘ ``` - ## ワイルドカード {#wildcard} 権限を指定する際には、テーブル名やデータベース名の代わりにアスタリスク(`*`)を使用できます。ワイルドカードのルールについては [WILDCARD GRANTS](../../sql-reference/statements/grant.md#wildcard-grants) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md index 72b962b6aff..2517ef6f5f1 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md @@ -17,8 +17,6 @@ ClickHouse における `CHECK TABLE` クエリは、特定のテーブルまた このクエリはシステムのパフォーマンスを向上させるものではなく、何をしているか確信が持てない場合には実行すべきではありません。 ::: - - ## 構文 {#syntax} クエリの基本的な構文は次のとおりです。 @@ -55,7 +53,6 @@ CHECK TABLE table_name [PARTITION partition_expression | PART part_name] [FORMAT `*Log` ファミリーのエンジンは、障害発生時の自動データ復旧を提供しません。`CHECK TABLE` クエリを使用して、データ損失をタイムリーに検知してください。 - ## 例 {#examples} デフォルトでは、`CHECK TABLE` クエリはテーブル全体の総合的なチェック結果を表示します。 @@ -152,7 +149,6 @@ FORMAT PrettyCompactMonoBlock SETTINGS check_query_single_value_result = 0 ``` - ```text ┌─database─┬─table────┬─part_path───┬─is_passed─┬─message─┐ │ default │ t2 │ all_1_95_3 │ 1 │ │ @@ -168,7 +164,6 @@ SETTINGS check_query_single_value_result = 0 └──────────┴──────────┴─────────────┴───────────┴─────────┘ ``` - ## データが破損している場合 {#if-the-data-is-corrupted} テーブルが破損している場合は、破損していないデータを別のテーブルにコピーできます。そのためには、次の手順を実行します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md index 64e4da80084..b30f8861053 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md @@ -17,7 +17,6 @@ ROLE を作成する [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLU [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | PROFILE 'profile_name'] [,...] ``` - ## ロールの管理 {#managing-roles} 1人のユーザーには複数のロールを割り当てることができます。ユーザーは、[SET ROLE](../../../sql-reference/statements/set-role.md) ステートメントを使用して、自分に割り当てられたロールを任意の組み合わせで適用できます。最終的な権限の範囲は、適用されたすべてのロールが持つ権限を統合した集合になります。ユーザーアカウントに直接付与された権限がある場合、それらもロールによって付与された権限と統合されます。 @@ -28,8 +27,6 @@ ROLE を作成する [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLU ロールを削除するには、[DROP ROLE](/sql-reference/statements/drop#drop-role) ステートメントを使用します。削除されたロールは、それが割り当てられていたすべてのユーザーおよびロールから自動的に取り消されます。 - - ## 例 {#examples} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md index c3fcc7c075e..cc6426b6eaa 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md @@ -24,13 +24,10 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}] ``` - ## USING 句 {#using-clause} 行をフィルタリングする条件を指定できます。条件がその行に対して 0 以外の値になると、その行はユーザーに表示されます。 - - ## TO 句 {#to-clause} `TO` 句では、このポリシーを適用するユーザーおよびロールのリストを指定できます。例えば、`CREATE ROW POLICY ... 
TO accountant, john@localhost` のようになります。 @@ -49,8 +46,6 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter` ::: - - ## AS 句 {#as-clause} 同じテーブルおよび同じユーザーに対して、同時に複数のポリシーを有効にすることができます。そのため、複数のポリシーに含まれる条件を組み合わせる方法が必要になります。 @@ -96,13 +91,10 @@ CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio ユーザー `peter` に対しては、`b=1` かつ `c=2` の両方を満たす場合にのみ table1 の行を参照できるように設定しつつ、 mydb 内の他のテーブルには、そのユーザーに対して `b=1` のポリシーのみが適用されるようにします。 - ## ON CLUSTER 句 {#on-cluster-clause} クラスター上で行ポリシーを作成できるようにします。[Distributed DDL](../../../sql-reference/distributed-ddl.md) を参照してください。 - - ## 例 {#examples} `CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md index 81fa96529dc..e80d15c0973 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md @@ -16,7 +16,6 @@ import TabItem from '@theme/TabItem'; デフォルトでは、テーブルは現在のサーバー上にのみ作成されます。分散 DDL クエリは `ON CLUSTER` 句として実装されており、[別途説明されています](../../../sql-reference/distributed-ddl.md)。 - ## 構文形式 {#syntax-forms} ### 明示的なスキーマ指定 {#with-explicit-schema} @@ -100,7 +99,6 @@ SELECT x, toTypeName(x) FROM t1; └───┴───────────────┘ ``` - ## NULL または NOT NULL 修飾子 {#null-or-not-null-modifiers} 列定義におけるデータ型の後ろに付ける `NULL` および `NOT NULL` 修飾子は、その列を [Nullable](/sql-reference/data-types/nullable) 型にできるかどうかを指定します。 @@ -109,8 +107,6 @@ SELECT x, toTypeName(x) FROM t1; [data_type_default_nullable](../../../operations/settings/settings.md#data_type_default_nullable) 設定も参照してください。 - - ## デフォルト値 {#default_values} カラム定義では、`DEFAULT expr`、`MATERIALIZED expr`、`ALIAS expr` の形式でデフォルト値の式を指定できます。例: `URLDomain 
String DEFAULT domain(URL)`。 @@ -217,7 +213,6 @@ FROM test FORMAT Vertical; ``` - Row 1: ────── id: 1 @@ -263,7 +258,6 @@ SELECT * FROM test SETTINGS asterisk_include_alias_columns=1; └────┴────────────┴──────────┘ ```` - ## プライマリキー {#primary-key} テーブル作成時に[プライマリキー](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries)を定義できます。プライマリキーは次の 2 通りの方法で指定できます。 @@ -294,7 +288,6 @@ PRIMARY KEY(expr1[, expr2,...]); 1 つのクエリで両方の方法を併用することはできません。 ::: - ## 制約 {#constraints} カラムの説明に加えて、制約を定義することもできます。 @@ -339,13 +332,10 @@ ORDER BY (name_len, name); `ASSUME CONSTRAINT` は **制約を強制しません**。単にオプティマイザに対して、その制約が成り立つことを知らせるだけです。もし制約が実際には成り立たない場合、クエリ結果が不正確になる可能性があります。したがって、制約が正しいと確信できる場合にのみ `ASSUME CONSTRAINT` を使用すべきです。 - ## TTL Expression {#ttl-expression} 値の保持期間を定義します。MergeTree ファミリーのテーブルに対してのみ指定できます。詳細については、[列およびテーブルの TTL](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) を参照してください。 - - ## 列圧縮コーデック {#column_compression_codec} デフォルトでは、セルフマネージド版の ClickHouse では `lz4` 圧縮が、ClickHouse Cloud では `zstd` 圧縮が適用されます。 @@ -427,7 +417,6 @@ ClickHouse は、汎用 codec と用途特化 codec の両方をサポートし `DEFLATE_QPL` — Intel® Query Processing Library によって実装された [Deflate 圧縮アルゴリズム](https://github.com/intel/qpl) です。いくつかの制限があります。 - - DEFLATE_QPL はデフォルトでは無効になっており、設定 [enable_deflate_qpl_codec](../../../operations/settings/settings.md#enable_deflate_qpl_codec) を有効化した後にのみ使用できます。 - DEFLATE_QPL には、SSE 4.2 命令でコンパイルされた ClickHouse ビルドが必要です(デフォルトでそのようにビルドされています)。詳細は [Build Clickhouse with DEFLATE_QPL](/development/building_and_benchmarking_deflate_qpl) を参照してください。 - DEFLATE_QPL は、システムに Intel® IAA (In-Memory Analytics Accelerator) オフロードデバイスがある場合に最も効果的に動作します。詳細は [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) および [Benchmark with DEFLATE_QPL](/development/building_and_benchmarking_deflate_qpl) を参照してください。 @@ -455,8 +444,6 @@ ClickHouse は、汎用 codec と用途特化 codec の両方をサポートし #### FPC 
{#fpc} - - `FPC(level, float_size)` - 2種類の予測器のうち優れている方を用いて系列中の次の浮動小数点値を繰り返し予測し、その予測値と実際の値を XOR し、その結果を先頭ゼロ圧縮するコーデックです。Gorilla と同様に、ゆっくり変化する浮動小数点値の系列を保存する場合に効率的です。64ビット値(double)の場合、FPC は Gorilla より高速であり、32ビット値の場合は状況によって異なります。`level` に指定可能な値は 1-28 で、デフォルト値は 12 です。`float_size` に指定可能な値は 4, 8 で、型が Float の場合のデフォルト値は `sizeof(type)` です。それ以外のすべてのケースでは 4 になります。アルゴリズムの詳細な説明については [High Throughput Compression of Double-Precision Floating-Point Data](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf) を参照してください。 #### T64 {#t64} @@ -522,7 +509,6 @@ CREATE TABLE mytable ENGINE = MergeTree ORDER BY x; ``` - ## 一時テーブル {#temporary-tables} :::note @@ -553,7 +539,6 @@ CREATE [OR REPLACE] TEMPORARY TABLE [IF NOT EXISTS] table_name 一時テーブルの代わりに、[ENGINE = Memory](../../../engines/table-engines/special/memory.md) を使用したテーブルを利用することもできます。 - ## REPLACE TABLE {#replace-table} `REPLACE` ステートメントを使用すると、テーブルを[アトミックに](/concepts/glossary#atomicity)更新できます。 @@ -720,7 +705,6 @@ WHERE CounterID <12345; - ## COMMENT 句 {#comment-clause} テーブル作成時にコメントを追加できます。 @@ -753,7 +737,6 @@ SELECT name, comment FROM system.tables WHERE name = 't1'; └──────┴─────────────────────┘ ``` - ## 関連コンテンツ {#related-content} - ブログ記事: [スキーマとコーデックによる ClickHouse の最適化](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md index e8539e5f4f2..d266840ae12 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md @@ -26,7 +26,6 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus `ON CLUSTER` 句を使用すると、クラスター全体にユーザーを作成できます。詳しくは [Distributed DDL](../../../sql-reference/distributed-ddl.md) を参照してください。 - ## 識別 {#identification} ユーザーを識別する方法には、以下のようなものがあります: @@ -73,7 
+72,6 @@ ClickHouse Cloud では、パスワードは既定で次の複雑性要件を満 * 少なくとも1文字の特殊文字を含むこと ::: - ## 例 {#examples} 1. 次のユーザー名は `name1` であり、パスワードは不要です。つまり、当然ながらセキュリティはほとんど確保されません。 @@ -162,14 +160,10 @@ ClickHouse Cloud では、パスワードは既定で次の複雑性要件を満 CREATE USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'' ``` - - Notes: 1. 古いバージョンの ClickHouse では、複数の認証方式を用いる構文をサポートしていない場合があります。そのため、ClickHouse サーバーにそのようなユーザーが存在した状態で、これをサポートしないバージョンにダウングレードすると、そのユーザーは利用不能になり、一部のユーザー関連の操作が失敗します。正常にダウングレードするには、ダウングレード前にすべてのユーザーが単一の認証方式のみを持つように設定しておく必要があります。あるいは、適切な手順を踏まずにサーバーをダウングレードしてしまった場合は、問題のあるユーザーを削除する必要があります。 2. セキュリティ上の理由から、`no_password` は他の認証方式と同時に使用することはできません。したがって、クエリ内で `no_password` を指定できるのは、それが唯一の認証方式である場合に限られます。 - - ## ユーザーホスト {#user-host} ユーザーホストとは、ClickHouse サーバーへの接続を確立できるホストを指します。ホストはクエリ内の `HOST` セクションで次のように指定できます。 @@ -191,8 +185,6 @@ Notes: ClickHouse は `user_name@'address'` 全体を 1 つのユーザー名として扱います。そのため、技術的には同じ `user_name` に対して、`@` の後ろの指定が異なる複数のユーザーを作成できます。ただし、そのような運用は推奨しません。 ::: - - ## VALID UNTIL 句 {#valid-until-clause} 認証方式に対して、有効期限日と、必要に応じて有効期限の時刻を指定できます。文字列をパラメーターとして受け取ります。日時の指定には `YYYY-MM-DD [hh:mm:ss] [timezone]` 形式を使用することを推奨します。デフォルトでは、このパラメーターは `'infinity'` です。 @@ -206,8 +198,6 @@ ClickHouse は `user_name@'address'` 全体を 1 つのユーザー名として - ```CREATE USER name1 VALID UNTIL '2025-01-01 12:00:00 `Asia/Tokyo`'``` - `CREATE USER name1 IDENTIFIED WITH plaintext_password BY 'no_expiration', bcrypt_password BY 'expiration_set' VALID UNTIL '2025-01-01''` - - ## GRANTEES 句 {#grantees-clause} このユーザーが、`GRANT OPTION` 付きで必要なすべてのアクセス権を付与されていることを条件に、このユーザーから [権限](../../../sql-reference/statements/grant.md#privileges) を付与されることが許可されているユーザーまたはロールを指定します。`GRANTEES` 句のオプションは次のとおりです。 @@ -221,8 +211,6 @@ ClickHouse は `user_name@'address'` 全体を 1 つのユーザー名として さらに詳しくは [GRANT ステートメントの権限に関する項目](../../../sql-reference/statements/grant.md#privileges) と [GRANT OPTION の説明](../../../sql-reference/statements/grant.md#granting-privilege-syntax) を参照してください。 - - ## 例 {#examples-1} パスワード 
`qwerty` で保護されたユーザーアカウント `mira` を作成します: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md index a454157c8c9..814649bb5d7 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md @@ -11,13 +11,10 @@ import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import DeprecatedBadge from '@theme/badges/DeprecatedBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # CREATE VIEW {#create-view} 新しいビューを作成します。ビューには[通常ビュー](#normal-view)、[マテリアライズドビュー](#materialized-view)、[リフレッシュ可能なマテリアライズドビュー](#refreshable-materialized-view)、および[ウィンドウビュー](/sql-reference/statements/create/view#window-view)の種類があります。 - - ## 標準表示 {#normal-view} 構文: @@ -49,7 +46,6 @@ SELECT a, b, c FROM view SELECT a, b, c FROM (SELECT ...) ``` - ## パラメータ化ビュー {#parameterized-view} パラメータ化ビューは通常のビューと似ていますが、ただちには解決されないパラメータを指定して作成できます。これらのビューはテーブル関数で使用でき、その際はビュー名を関数名として指定し、パラメータ値をその引数として渡します。 @@ -64,7 +60,6 @@ CREATE VIEW view AS SELECT * FROM TABLE WHERE Column1={column1:datatype1} and Co SELECT * FROM view(column1=value1, column2=value2 ...) ``` - ## マテリアライズドビュー {#materialized-view} ```sql @@ -119,7 +114,6 @@ ClickHouse のマテリアライズドビューは、エラー発生時の動作 ビューを削除するには、[DROP VIEW](../../../sql-reference/statements/drop.md#drop-view) を使用します。`DROP TABLE` も VIEW に対して動作します。 - ## SQL セキュリティ {#sql_security} `DEFINER` と `SQL SECURITY` を使用すると、ビューの背後で実行されるクエリを実行する際に、どの ClickHouse ユーザーを使用するかを指定できます。 @@ -166,7 +160,6 @@ SQL SECURITY INVOKER AS SELECT ... ``` - ## ライブビュー {#live-view} @@ -175,8 +168,6 @@ AS SELECT ... 
参考までに、旧ドキュメントは[こちら](https://pastila.nl/?00f32652/fdf07272a7b54bda7e13b919264e449f.md)にあります。 - - ## リフレッシュ可能なマテリアライズドビュー {#refreshable-materialized-view} ```sql @@ -247,7 +238,6 @@ REFRESH EVERY 1 DAY OFFSET 2 HOUR RANDOMIZE FOR 1 HOUR -- 毎日、01:30 から `APPEND` モードでは、`SETTINGS all_replicas = 1` を使用して調整を無効化できます。これにより、レプリカは互いに独立してリフレッシュを実行します。この場合、ReplicatedMergeTree は必須ではありません。 - 非 `APPEND` モードでは、協調リフレッシュのみがサポートされます。非協調なリフレッシュを行いたい場合は、`Atomic` データベースと `CREATE ... ON CLUSTER` クエリを使用して、すべてのレプリカ上にリフレッシュ可能なマテリアライズドビューを作成します。 協調処理は Keeper を通じて行われます。znode のパスは、[default_replica_path](../../../operations/server-configuration-parameters/settings.md#default_replica_path) サーバー設定によって決定されます。 @@ -320,7 +310,6 @@ ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPEND ### その他の操作 {#other-operations} - すべてのリフレッシュ可能なマテリアライズドビューのステータスは、テーブル [`system.view_refreshes`](../../../operations/system-tables/view_refreshes.md) で確認できます。特に、(実行中であれば)リフレッシュの進捗状況、直近および次回のリフレッシュ時刻、リフレッシュが失敗した場合の例外メッセージが含まれます。 リフレッシュを手動で停止、開始、トリガー、キャンセルするには、[`SYSTEM STOP|START|REFRESH|WAIT|CANCEL VIEW`](../system.md#refreshable-materialized-views) を使用します。 @@ -331,8 +320,6 @@ ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPEND 豆知識: リフレッシュクエリは、リフレッシュ対象のビューから読み取ることができ、その場合はリフレッシュ前のバージョンのデータが見えます。これは、Conway's Game of Life(ライフゲーム)を実装できることを意味します: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA== ::: - - ## ウィンドウビュー {#window-view} @@ -394,7 +381,6 @@ CREATE WINDOW VIEW test.wv TO test.dst WATERMARK=ASCENDING ALLOWED_LATENESS=INTE 遅延して発火した際に出力される要素は、以前の計算結果が更新されたものとして扱う必要があります。ウィンドウの終了時に発火するのではなく、ウィンドウビューは遅延イベントが到着したタイミングで即座に発火します。そのため、同じウィンドウに対して複数の出力が生成されます。ユーザーはこれらの重複した結果を考慮に入れるか、重複排除する必要があります。 - `ALTER TABLE ... 
MODIFY QUERY` ステートメントを使用して、ウィンドウビューで指定されている `SELECT` クエリを変更できます。新しい `SELECT` クエリで得られるデータ構造は、`TO [db.]name` 句の有無にかかわらず、元の `SELECT` クエリと同一である必要があります。中間状態は再利用できないため、現在のウィンドウ内のデータは失われることに注意してください。 ### 新しいウィンドウの監視 {#monitoring-new-windows} @@ -465,14 +451,11 @@ Window View は次のようなシナリオで有用です。 * **Monitoring**: メトリクスログを時間単位で集計・計算し、その結果をターゲットテーブルに出力します。ダッシュボードはターゲットテーブルをソーステーブルとして利用できます。 * **Analyzing**: 時間ウィンドウ内のデータを自動的に集計および前処理します。これは大量のログを分析する際に有用です。前処理によって複数のクエリにおける繰り返し計算が不要になり、クエリのレイテンシを低減できます。 - ## 関連コンテンツ {#related-content} - ブログ: [ClickHouse における時系列データの扱い方](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) - ブログ: [ClickHouse を用いたオブザーバビリティソリューションの構築 第2部: トレース](https://clickhouse.com/blog/storing-traces-and-spans-open-telemetry-in-clickhouse) - - ## 一時ビュー {#temporary-views} ClickHouse は、以下の特徴を持つ **一時ビュー (temporary view)** をサポートします(該当する場合は一時テーブルと同様の挙動になります)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md index f589d86b2ca..bd76310dc7d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md @@ -16,7 +16,6 @@ DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE [ALTER TABLE ... 
DELETE](/sql-reference/statements/alter/delete) コマンドが重い処理であるのと対比して、これは「軽量な `DELETE`」と呼ばれます。 - ## 例 {#examples} ```sql @@ -24,7 +23,6 @@ DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE DELETE FROM hits WHERE Title LIKE '%hello%'; ``` - ## 軽量 `DELETE` は即座にデータを削除しない {#lightweight-delete-does-not-delete-data-immediately} 軽量 `DELETE` は、行を削除済みとしてマークするだけで、即座に物理削除は行わない [mutation](/sql-reference/statements/alter#mutations) として実装されています。 @@ -35,24 +33,18 @@ mutation は削除済みとマークされた行を物理的には削除せず 予測可能な時間内にストレージからデータが削除されることを保証する必要がある場合は、テーブル設定 [`min_age_to_force_merge_seconds`](/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds) の利用を検討してください。あるいは、[ALTER TABLE ... DELETE](/sql-reference/statements/alter/delete) コマンドを使用することもできます。`ALTER TABLE ... DELETE` を使用してデータを削除する場合、影響を受けるすべてのパーツを再作成するため、多くのリソースを消費し得る点に注意してください。 - - ## 大量のデータの削除 {#deleting-large-amounts-of-data} 大規模な削除操作は ClickHouse のパフォーマンスに悪影響を与える可能性があります。テーブルからすべての行を削除する場合は、[`TRUNCATE TABLE`](/sql-reference/statements/truncate) コマンドの使用を検討してください。 頻繁に削除を行うことが想定される場合は、[カスタムパーティションキー](/engines/table-engines/mergetree-family/custom-partitioning-key) の利用を検討してください。その場合は、[`ALTER TABLE ... 
DROP PARTITION`](/sql-reference/statements/alter/partition#drop-partitionpart) コマンドを使用して、そのパーティションに属するすべての行を高速に削除できます。 - - ## 軽量な `DELETE` の制限事項 {#limitations-of-lightweight-delete} ### プロジェクションを持つ軽量な `DELETE` {#lightweight-deletes-with-projections} デフォルトでは、プロジェクションを持つテーブルでは `DELETE` は動作しません。これは、プロジェクション内の行が `DELETE` 操作の影響を受ける可能性があるためです。ただし、この挙動を変更するための [MergeTree 設定](/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` が用意されています。 - - ## 軽量な `DELETE` を使用する際のパフォーマンス上の考慮事項 {#performance-considerations-when-using-lightweight-delete} **軽量な `DELETE` 文で大量のデータを削除すると、SELECT クエリのパフォーマンスに悪影響を及ぼす可能性があります。** @@ -64,8 +56,6 @@ mutation は削除済みとマークされた行を物理的には削除せず - 対象のテーブルが非常に多くのデータパートを持っている場合。 - Compact パート内に大量のデータがある場合。Compact パートでは、すべてのカラムが 1 つのファイルに格納されます。 - - ## 削除権限 {#delete-permissions} `DELETE` には `ALTER DELETE` 権限が必要です。特定のユーザーに対して特定のテーブルで `DELETE` 文を有効化するには、次のコマンドを実行します。 @@ -74,7 +64,6 @@ mutation は削除済みとマークされた行を物理的には削除せず GRANT ALTER DELETE ON db.table TO username; ``` - ## ClickHouse における軽量な DELETE の内部動作 {#how-lightweight-deletes-work-internally-in-clickhouse} 1. **影響を受ける行に「マスク」が適用される** @@ -103,8 +92,6 @@ GRANT ALTER DELETE ON db.table TO username; 上記のステップからわかるように、マスキング手法を用いた軽量な `DELETE` は、影響を受けるパーツについてすべてのカラムファイルを書き直さないため、従来の `ALTER TABLE ... 
DELETE` と比べてパフォーマンスが向上します。 - - ## 関連情報 {#related-content} - ブログ: [ClickHouse における更新と削除の処理](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md index 284667b34ea..c3490ff3969 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md @@ -58,7 +58,6 @@ Union ReadFromStorage (SystemNumbers) ``` - ## EXPLAIN の種類 {#explain-types} - `AST` — 抽象構文木 (Abstract Syntax Tree)。 @@ -101,7 +100,6 @@ EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today(); ExpressionList ``` - ### EXPLAIN SYNTAX {#explain-syntax} 構文解析後のクエリの抽象構文木 (AST) を表示します。 @@ -146,7 +144,6 @@ ALL INNER JOIN system.numbers AS __table2 ON __table1.number = __table2.number ALL INNER JOIN system.numbers AS __table3 ON __table2.number = __table3.number ``` - ### EXPLAIN QUERY TREE {#explain-query-tree} Settings: @@ -176,21 +173,21 @@ QUERY id: 0 TABLE id: 3, table_name: default.test_table ``` - ### EXPLAIN PLAN {#explain-plan} クエリプランのステップを出力します。 -設定: +Settings: * `header` — ステップの出力ヘッダーを表示します。デフォルト: 0。 * `description` — ステップの説明を表示します。デフォルト: 1。 -* `indexes` — 使用された索引と、適用された各索引ごとにフィルタリングされたパーツ数およびグラニュール数を表示します。デフォルト: 0。[MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) テーブルでサポートされます。ClickHouse >= v25.9 では、このステートメントは `SETTINGS use_query_condition_cache = 0, use_skip_indexes_on_data_read = 0` と併用した場合にのみ有用な出力を返します。 -* `projections` — 解析されたすべてのプロジェクションと、プロジェクションの主キー条件に基づくパーツレベルでのフィルタリングへの影響を表示します。各プロジェクションについて、このセクションには、そのプロジェクションの主キーを使って評価されたパーツ数、行数、マーク数、範囲数といった統計情報が含まれます。また、プロジェクション自体からデータを読み込むことなく、このフィルタリングによりスキップされたデータパーツの数も示します。プロジェクションが実際に読み取りに使用されたか、あるいはフィルタリングのために解析されたのみかは、`description` フィールドから判別できます。デフォルト: 
0。[MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) テーブルでサポートされます。 -* `actions` — ステップ内のアクションに関する詳細情報を表示します。デフォルト: 0。 -* `json` — クエリプランのステップを [JSON](/interfaces/formats/JSON) 形式の行として表示します。デフォルト: 0。不要なエスケープを避けるため、[TabSeparatedRaw (TSVRaw)](/interfaces/formats/TabSeparatedRaw) 形式の使用を推奨します。 -* `input_headers` - ステップの入力ヘッダーを表示します。デフォルト: 0。主に、入力・出力ヘッダーの不整合に関連する問題をデバッグする開発者にとって有用です。 -* `column_structure` - ヘッダー内のカラム名と型に加えて、そのカラムの構造も表示します。デフォルト: 0。主に、入力・出力ヘッダーの不整合に関連する問題をデバッグする開発者にとって有用です。 +* `indexes` — 使用された索引、それぞれの索引に対してフィルタリングされたパーツ数およびフィルタリングされたグラニュール数を表示します。デフォルト: 0。[MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) テーブルでサポートされています。ClickHouse >= v25.9 以降、このステートメントは `SETTINGS use_query_condition_cache = 0, use_skip_indexes_on_data_read = 0` と併用した場合にのみ有用な出力を表示します。 +* `projections` — 解析されたすべての PROJECTION と、その PROJECTION のプライマリキー条件に基づくパーツレベルのフィルタリングへの影響を表示します。各 PROJECTION について、このセクションには、PROJECTION のプライマリキーを使用して評価されたパーツ数、行数、マーク数、レンジ数などの統計情報が含まれます。また、PROJECTION 自体を読み取ることなく、このフィルタリングによってスキップされたデータパーツの数も表示します。PROJECTION が実際に読み取りに使用されたのか、それともフィルタリングのために解析されたのみなのかは、`description` フィールドによって判別できます。デフォルト: 0。[MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) テーブルでサポートされています。 +* `actions` — ステップのアクションに関する詳細情報を表示します。デフォルト: 0。 +* `json` — クエリプランのステップを [JSON](/interfaces/formats/JSON) 形式の 1 行として表示します。デフォルト: 0。[TabSeparatedRaw (TSVRaw)](/interfaces/formats/TabSeparatedRaw) 形式を使用して不要なエスケープを避けることを推奨します。 +* `input_headers` — ステップの入力ヘッダーを表示します。デフォルト: 0。主に入力と出力のヘッダー不一致に関連する問題をデバッグする開発者にとって有用です。 +* `column_structure` — 名前と型に加えて、ヘッダー内のカラム構造も表示します。デフォルト: 0。主に入力と出力のヘッダー不一致に関連する問題をデバッグする開発者にとって有用です。 +* `distributed` — 分散テーブルまたは並列レプリカに対してリモートノード上で実行されたクエリプランを表示します。デフォルト: 0。 `json=1` の場合、ステップ名には一意なステップ識別子を含む追加のサフィックスが付きます。 @@ -211,10 +208,10 @@ Union ``` :::note -ステップおよびクエリのコスト見積もりには対応していません。 +ステップおよびクエリコストの見積もりには対応していません。 ::: -`json = 1` の場合、クエリプランは JSON 形式で表現されます。各ノードは、必ず `Node Type` と `Plans` というキーを持つ辞書型オブジェクトです。`Node Type` 
はステップ名を表す文字列であり、`Plans` は子ステップの記述を含む配列です。ノードの種類と設定に応じて、その他の任意のキーが追加される場合があります。 +`json = 1` のとき、クエリプランは JSON 形式で表現されます。各ノードは、常に `Node Type` と `Plans` というキーを持つ Dictionary です。`Node Type` はステップ名を表す文字列です。`Plans` は子ステップの説明を含む配列です。その他の任意のキーが、ノードの種類や設定に応じて追加される場合があります。 例: @@ -255,7 +252,7 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw; ] ``` -`description` = 1 の場合、ステップに `Description` キーが追加されます。 +`description` = 1 を指定すると、`Description` キーがステップに追加されます。 ```json { @@ -264,9 +261,9 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw; } ``` -`header` = 1 の場合、ステップに `Header` キーがカラム配列として追加されます。 +`header` = 1 の場合、`Header` キーがカラムの配列としてステップに追加されます。 -例: +例: ```sql EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy; @@ -401,10 +398,9 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy; ] ``` +`actions` = 1 の場合、どのキーが追加されるかはステップの種類によって異なります。 -`actions` = 1 の場合、追加されるキーはステップタイプによって異なります。 - -例: +例: ```sql EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw; @@ -461,6 +457,50 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw; ] ``` +`distributed` = 1 の場合、出力にはローカルのクエリプランだけでなく、リモートノード上で実行されるクエリプランも含まれます。これは分散クエリの分析やデバッグに役立ちます。 + +分散テーブルを用いた例: + +```sql +EXPLAIN distributed=1 SELECT * FROM remote('127.0.0.{1,2}', numbers(2)) WHERE number = 1; +``` + +```sql +Union + Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + Projection))))) + Filter ((WHERE + Change column names to column identifiers)) + ReadFromSystemNumbers + Expression ((Project names + (Projection + Change column names to column identifiers))) + ReadFromRemote (Read from remote replica) + Expression ((Project names + Projection)) + Filter ((WHERE + Change column names to column identifiers)) + ReadFromSystemNumbers +``` + +並列レプリカの例: + +```sql +SET enable_parallel_replicas = 2, max_parallel_replicas = 2, cluster_for_parallel_replicas = 'default'; + 
+EXPLAIN distributed=1 SELECT sum(number) FROM test_table GROUP BY number % 4; +``` + +```sql +Expression ((Project names + Projection)) + MergingAggregated + Union + Aggregating + Expression ((Before GROUP BY + Change column names to column identifiers)) + ReadFromMergeTree (default.test_table) + ReadFromRemoteParallelReplicas + BlocksMarshalling + Aggregating + Expression ((Before GROUP BY + Change column names to column identifiers)) + ReadFromMergeTree (default.test_table) +``` + +どちらの例でも、クエリプランはローカルおよびリモートのステップを含む実行フロー全体を示します。 + ### EXPLAIN PIPELINE {#explain-pipeline} @@ -494,7 +534,6 @@ ExpressionTransform NumbersRange × 2 0 → 1 ``` - ### EXPLAIN ESTIMATE {#explain-estimate} クエリを実行する際に、テーブルから読み取られる推定行数、マーク数、およびパーツ数を表示します。[MergeTree](/engines/table-engines/mergetree-family/mergetree) ファミリーのテーブルで利用できます。 @@ -523,7 +562,6 @@ EXPLAIN ESTIMATE SELECT * FROM ttt; └──────────┴───────┴───────┴──────┴───────┘ ``` - ### EXPLAIN TABLE OVERRIDE {#explain-table-override} テーブル関数経由でアクセスされるテーブルスキーマに対して、テーブルオーバーライドを適用した結果を表示します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md index 886620cd257..49d9ec934ff 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md @@ -9,7 +9,6 @@ doc_type: 'reference' import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # GRANT 文 {#grant-statement} - ClickHouse のユーザーアカウントまたはロールに[権限](#privileges)を付与します。 @@ -30,7 +29,6 @@ GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] 
ON {db.ta `WITH GRANT OPTION` 句は、`user` または `role` に `GRANT` クエリを実行する権限を付与します。ユーザーは、自分が持つスコープと同じ、またはそれよりも狭いスコープの権限を付与できます。 `WITH REPLACE OPTION` 句は、`user` または `role` に対する既存の権限を新しい権限に置き換えます。指定しない場合は、権限が追加されます。 - ## ロール割り当ての構文 {#assigning-role-syntax} ```sql @@ -43,7 +41,6 @@ GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_US `WITH ADMIN OPTION` 句は、`user` または `role` に [ADMIN OPTION](#admin-option) 権限を付与します。 `WITH REPLACE OPTION` 句は、`user` または `role` に対して既存のロールを新しいロールに置き換えます。指定されていない場合は、既存のロールにロールを追加します。 - ## GRANT CURRENT GRANTS 構文 {#grant-current-grants-syntax} ```sql @@ -57,7 +54,6 @@ GRANT CURRENT GRANTS{(privilege[(column_name [,...])] [,...] ON {db.table|db.*|* `CURRENT GRANTS` ステートメントを使用すると、指定したユーザーまたはロールに、指定したすべての権限を付与できます。 権限が 1 つも指定されていない場合、そのユーザーまたはロールには、`CURRENT_USER` に対して利用可能なすべての権限が付与されます。 - ## 使用方法 {#usage} `GRANT` を使用するには、アカウントに `GRANT OPTION` 権限が付与されている必要があります。アカウントに付与されている権限の範囲内でのみ権限を付与できます。 @@ -87,7 +83,6 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION 1 つのクエリで複数のアカウントに複数の権限を付与できます。`GRANT SELECT, INSERT ON *.* TO john, robin` クエリは、アカウント `john` と `robin` に、サーバー上のすべてのデータベース内のすべてのテーブルに対して `INSERT` および `SELECT` クエリを実行することを許可します。 - ## ワイルドカードによる権限付与 {#wildcard-grants} 権限を指定する際、テーブル名やデータベース名の代わりにアスタリスク(`*`)を使用できます。たとえば、`GRANT SELECT ON db.* TO john` クエリは、データベース `db` 内のすべてのテーブルに対して、`john` が `SELECT` クエリを実行できるようにします。 @@ -139,7 +134,6 @@ GRANT SELECT ON *suffix TO john -- 誤り GRANT SELECT(foo) ON db.table* TO john -- 誤り ``` - ## 権限 {#privileges} 権限とは、ユーザーに対して特定の種類のクエリを実行することを許可するものです。 @@ -399,7 +393,6 @@ GRANT SELECT(x,y) ON db.table TO john この権限により、`john` は `db.table` の `x` カラムおよび/または `y` カラムのデータを含む任意の `SELECT` クエリを実行できます。たとえば、`SELECT x FROM db.table` です。`john` は `SELECT z FROM db.table` を実行することはできません。`SELECT * FROM db.table` も実行できません。このクエリを処理する際、ClickHouse は `x` や `y` であっても一切データを返しません。唯一の例外は、テーブルが `x` と `y` カラムのみを含む場合であり、この場合は ClickHouse はすべてのデータを返します。 - ### INSERT {#insert} 
[INSERT](../../sql-reference/statements/insert-into.md) クエリの実行を許可します。 @@ -418,7 +411,6 @@ GRANT INSERT(x,y) ON db.table TO john 付与された権限により、`john` は `db.table` の `x` カラムおよび `y` カラムの一方または両方にデータを挿入できます。 - ### ALTER {#alter} 以下の権限階層に基づいて [ALTER](../../sql-reference/statements/alter/index.md) クエリを実行できます。 @@ -510,7 +502,6 @@ GRANT CLUSTER ON *.* TO ``` - ### DROP {#drop} 次の権限階層に従って、[DROP](../../sql-reference/statements/drop.md) および [DETACH](../../sql-reference/statements/detach.md) クエリの実行を許可します。 @@ -740,7 +731,6 @@ GRANT CURRENT GRANTS(READ ON S3) TO alice * **部分的な取り消しはできません:** 付与したフィルターパターンの一部だけを取り消すことはできません。必要な場合は、付与全体をいったん取り消し、新しいパターンで改めて付与する必要があります。 * **ワイルドカードを使用した GRANT はできません:** `GRANT READ ON *('regexp')` のような、ワイルドカードのみを用いたパターンは使用できません。必ず特定のソースを指定する必要があります。 - ### dictGet {#dictget} - `dictGet` エイリアス: `dictHas`, `dictGetHierarchy`, `dictIsIn` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md index b924b4b8949..2a94ca82382 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md @@ -7,8 +7,6 @@ title: 'INSERT INTO ステートメント' doc_type: 'reference' --- - - # INSERT INTO ステートメント {#insert-into-statement} テーブルにデータを挿入します。 @@ -105,13 +103,10 @@ INSERT INTO table SETTINGS ... 
FORMAT format_name data_set ::: - ## 制約 {#constraints} テーブルに[制約](../../sql-reference/statements/create/table.md#constraints)がある場合、それらの式は挿入されたデータの各行に対して評価されます。これらの制約のいずれかが満たされない場合、サーバーは制約名と式を含む例外をスローし、クエリの実行は中断されます。 - - ## SELECT の結果の挿入 {#inserting-the-results-of-select} **構文** @@ -138,7 +133,6 @@ INSERT INTO x WITH y AS (SELECT * FROM numbers(10)) SELECT * FROM y; WITH y AS (SELECT * FROM numbers(10)) INSERT INTO x SELECT * FROM y; ``` - ## ファイルからのデータ挿入 {#inserting-data-from-a-file} **構文** @@ -197,7 +191,6 @@ INSERT INTO infile_globs FROM INFILE 'input_?.csv' FORMAT CSV; ::: - ## テーブル関数を使った挿入 {#inserting-using-a-table-function} [テーブル関数](../../sql-reference/table-functions/index.md)で参照されるテーブルにデータを挿入できます。 @@ -227,7 +220,6 @@ SELECT * FROM simple_table; └─────┴───────────────────────┘ ``` - ## ClickHouse Cloud への挿入 {#inserting-into-clickhouse-cloud} デフォルトでは、ClickHouse Cloud のサービスは高可用性を実現するために複数のレプリカを持ちます。サービスに接続すると、これらのレプリカのいずれかに接続が確立されます。 @@ -242,15 +234,12 @@ SELECT .... SETTINGS select_sequential_consistency = 1; `select_sequential_consistency` を使用すると、ClickHouse Keeper(ClickHouse Cloud で内部的に使用されます)への負荷が増加し、サービスの負荷状況によってはパフォーマンスが低下する可能性がある点に注意してください。必要な場合を除き、この設定を有効にすることは推奨しません。推奨されるアプローチは、同一セッション内で読み取り/書き込みを実行するか、ネイティブプロトコルを利用する(そのためスティッキー接続をサポートする)クライアントドライバを使用することです。 - ## レプリケーション構成での挿入 {#inserting-into-a-replicated-setup} レプリケーション構成では、データは複製が完了した後に他のレプリカ上で参照できるようになります。`INSERT` の直後から、データのレプリケーション(他のレプリカへのダウンロード)が開始されます。これは、データが即座に共有ストレージに書き込まれ、レプリカがメタデータの変更をサブスクライブする ClickHouse Cloud とは挙動が異なります。 レプリケーション構成では、分散コンセンサスのために ClickHouse Keeper へのコミットが必要となるため、`INSERT` が完了するまでに比較的長い時間(1 秒程度)がかかる場合がある点に注意してください。ストレージに S3 を使用すると、さらに追加のレイテンシーが発生します。 - - ## パフォーマンス上の考慮事項 {#performance-considerations} `INSERT` は、入力データを主キーでソートし、パーティションキーによってパーティションに分割します。複数のパーティションに対して一度にデータを挿入すると、`INSERT` クエリのパフォーマンスが大きく低下する可能性があります。これを避けるには、次のようにします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md index 834695e3e62..895b0f73e76 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md @@ -7,14 +7,10 @@ title: 'PARALLEL WITH 句' doc_type: 'reference' --- - - # PARALLEL WITH 句 {#parallel-with-clause} 複数のステートメントを並列実行できます。 - - ## 構文 {#syntax} ```sql @@ -25,7 +21,6 @@ statement1 PARALLEL WITH statement2 [PARALLEL WITH statement3 ...] 多くの場合、同じステートメントを単純に順番に実行するよりも、並列に実行した方が高速になる場合があります。例えば、`statement1 PARALLEL WITH statement2 PARALLEL WITH statement3` は、`statement1; statement2; statement3` より高速になる可能性が高いです。 - ## 例 {#examples} 2 つのテーブルを並列に作成します: @@ -44,13 +39,10 @@ PARALLEL WITH DROP TABLE table2; ``` - ## 設定 {#settings} [max_threads](../../operations/settings/settings.md#max_threads) 設定は、起動されるスレッド数を制御します。 - - ## UNION との比較 {#comparison-with-union} `PARALLEL WITH` 句は、そのオペランドを並列実行するという点で [UNION](select/union.md) と少し似ています。ただし、いくつかの違いがあります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md index ebc4ebfb8ef..6351d18914b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md @@ -7,14 +7,10 @@ title: 'REVOKE ステートメント' doc_type: 'reference' --- - - # REVOKE ステートメント {#revoke-statement} ユーザーまたはロールから権限を取り消します。 - - ## 構文 {#syntax} **ユーザーから権限を取り消す** @@ -29,7 +25,6 @@ REVOKE [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.t REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR] role [,...] FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] 
``` - ## 説明 {#description} 権限を取り消す際には、取り消したい権限よりも広い範囲の権限を使って取り消すことができます。たとえば、ユーザーが `SELECT (x,y)` 権限を持っている場合、管理者はこの権限を取り消すために `REVOKE SELECT(x,y) ...`、`REVOKE SELECT * ...`、あるいは `REVOKE ALL PRIVILEGES ...` クエリを実行できます。 @@ -38,8 +33,6 @@ REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR] role [,...] FROM {user | rol 権限の一部だけを取り消すことができます。たとえば、ユーザーが `SELECT *.*` 権限を持っている場合、そのユーザーから、特定のテーブルまたはデータベースに対するデータ読み取り権限だけを取り消すことができます。 - - ## 例 {#examples} `john` ユーザーアカウントに、`accounts` 以外のすべてのデータベースから `SELECT` できる権限を付与します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md index e072c3a62c8..eaaa1caeabf 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md @@ -7,21 +7,16 @@ keywords: ['APPLY', 'modifier'] doc_type: 'reference' --- - - # APPLY 修飾子 {#apply} > クエリの外側のテーブル式によって返される各行に対して、任意の関数を呼び出せるようにします。 - - ## 構文 {#syntax} ```sql SELECT APPLY( ) FROM [db.]table_name ``` - ## 例 {#example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md index ddca998b7ab..829b6914e1c 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md @@ -6,8 +6,6 @@ title: 'ARRAY JOIN 句' doc_type: 'reference' --- - - # ARRAY JOIN 句 {#array-join-clause} 配列カラムを含むテーブルに対して、元の配列カラムの各要素ごとに 1 行を持つ新しいテーブルを生成し、その他のカラムの値は複製するという操作は一般的です。これは `ARRAY JOIN` 句が行う処理の基本的なケースです。 @@ -29,7 +27,6 @@ FROM * `ARRAY JOIN` - 通常、空配列は `JOIN` の結果に含まれません。 * `LEFT ARRAY JOIN` - `JOIN` 
の結果には、空配列を持つ行も含まれます。空配列に対する値は、その配列要素の型のデフォルト値(通常は 0、空文字列、または NULL)に設定されます。 - ## 基本的な ARRAY JOIN の例 {#basic-array-join-examples} ### ARRAY JOIN と LEFT ARRAY JOIN {#array-join-left-array-join-examples} @@ -151,7 +148,6 @@ ORDER BY Reaches DESC LIMIT 10 ``` - ```text ┌──ゴールID─┬─到達数─┬─訪問数─┐ │ 53225 │ 3214 │ 1097 │ @@ -167,7 +163,6 @@ LIMIT 10 └─────────┴─────────┴────────┘ ``` - ## エイリアスの使用 {#using-aliases} `ARRAY JOIN` 句では、配列にエイリアスを指定できます。この場合、配列要素にはそのエイリアスを用いてアクセスできますが、配列自体には元の名前でアクセスします。例: @@ -254,7 +249,6 @@ FROM arrays_test ARRAY JOIN arr AS a, [['a','b'],['c']] AS b SETTINGS enable_unaligned_array_join = 1; ``` - ```response ┌─s───────┬─arr─────┬─a─┬─b─────────┐ │ Hello │ [1,2] │ 1 │ ['a','b'] │ @@ -267,7 +261,6 @@ SETTINGS enable_unaligned_array_join = 1; └─────────┴─────────┴───┴───────────┘ ``` - ## ネストされたデータ構造での ARRAY JOIN {#array-join-with-nested-data-structure} `ARRAY JOIN` は [ネストされたデータ構造](../../../sql-reference/data-types/nested-data-structures/index.md) に対しても使用できます。 @@ -371,7 +364,6 @@ FROM nested_test ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; ``` - ```response ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ │ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ @@ -382,7 +374,6 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; └───────┴─────┴─────┴─────────┴────────────┴─────┘ ``` - ## 実装の詳細 {#implementation-details} `ARRAY JOIN` を実行する際、クエリの実行順序は最適化されます。クエリ内では `ARRAY JOIN` は常に [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) 句より前に指定する必要がありますが、技術的には、`ARRAY JOIN` の結果がフィルタリングに使用されない限り、どの順序で実行されても問題ありません。処理順序はクエリオプティマイザによって制御されます。 @@ -393,8 +384,6 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; `arrayJoin` は常に実行され、ショートサーキット関数評価をサポートしません。これは、クエリ解析および実行時に他のすべての関数とは別に処理される特殊な関数であり、ショートサーキット関数実行とは両立しない追加のロジックを必要とするためです。その理由は、結果の行数が `arrayJoin` の結果に依存しており、`arrayJoin` の遅延実行を実装するのはあまりに複雑かつ高コストであるためです。 - - ## 関連コンテンツ {#related-content} - ブログ記事: [ClickHouse 
における時系列データの扱い方](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md index 8aa13e70f0f..bd1d3a9e013 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md @@ -6,8 +6,6 @@ title: 'DISTINCT 句' doc_type: 'reference' --- - - # DISTINCT 句 {#distinct-clause} `SELECT DISTINCT` が指定されている場合、クエリ結果には一意の行だけが残ります。つまり、結果内で完全に一致する行の集合ごとに、1 行だけが残ります。 @@ -56,7 +54,6 @@ SELECT DISTINCT ON (a,b) * FROM t1; └───┴───┴───┘ ``` - ## DISTINCT と ORDER BY {#distinct-and-order-by} ClickHouse では、1 つのクエリ内で `DISTINCT` 句と `ORDER BY` 句に異なる列を指定できます。`DISTINCT` 句は `ORDER BY` 句より先に実行されます。 @@ -104,13 +101,10 @@ SELECT DISTINCT a FROM t1 ORDER BY b DESC; クエリを記述する際には、このような実装上の特性を考慮してください。 - ## NULL の処理 {#null-processing} `DISTINCT` は、[`NULL`](/sql-reference/syntax#null) を特定の値であり、かつ `NULL==NULL` が成り立つかのように扱います。言い換えると、`DISTINCT` の結果においては、`NULL` を含む異なる組み合わせは 1 回しか出現しません。これは、他のほとんどのコンテキストにおける `NULL` の処理とは異なります。 - - ## 代替方法 {#alternatives} 集約関数を一切使用せずに、`SELECT` 句で指定されたものと同じ値の集合に対して [GROUP BY](/sql-reference/statements/select/group-by) を適用することで、同じ結果を得ることも可能です。ただし、この場合は `GROUP BY` を用いる方法とはいくつかの違いがあります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md index 33af346c0a0..fa3dbd2a647 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md @@ -7,21 +7,16 @@ keywords: ['EXCEPT', 'modifier'] doc_type: 'reference' --- - - # EXCEPT 修飾子 
{#except} > 結果から除外する 1 つ以上の列名を指定します。指定した名前に一致するすべての列は出力から除外されます。 - - ## 構文 {#syntax} ```sql SELECT EXCEPT ( col_name1 [, col_name2, col_name3, ...] ) FROM [db.]table_name ``` - ## 例 {#examples} ```sql title="Query" diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md index 2afa19e4db9..42f8e128c17 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md @@ -6,22 +6,16 @@ title: 'FORMAT 句' doc_type: 'reference' --- - - # FORMAT 句 {#format-clause} ClickHouse は、クエリ結果などに対して使用できる幅広い[シリアル化フォーマット](../../../interfaces/formats.md)をサポートしています。`SELECT` の出力フォーマットを選択する方法はいくつかあり、そのひとつはクエリの末尾で `FORMAT format` を指定して、結果データを任意の形式で取得する方法です。 特定のフォーマットは、利便性、他システムとの連携、あるいはパフォーマンス向上を目的として使用される場合があります。 - - ## デフォルトのフォーマット {#default-format} `FORMAT` 句を省略した場合はデフォルトのフォーマットが使用されます。これは、設定と、ClickHouse サーバーへのアクセスに使用するインターフェイスの両方に依存します。[HTTP インターフェイス](../../../interfaces/http.md)およびバッチモードでの[コマンドラインクライアント](../../../interfaces/cli.md)では、デフォルトのフォーマットは `TabSeparated` です。対話モードのコマンドラインクライアントでは、デフォルトのフォーマットは `PrettyCompact` です(人間が読みやすいコンパクトなテーブルを出力します)。 - - ## 実装の詳細 {#implementation-details} コマンドラインクライアントを使用する場合、データは常に内部の効率的なフォーマット(`Native`)でネットワーク経由で送受信されます。クライアントはクエリの `FORMAT` 句を自前で解釈し、自身でデータをフォーマットします(これにより、ネットワークとサーバーへの余分な負荷が軽減されます)。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md index 78507749c75..cf11fcfd551 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md @@ -6,8 +6,6 @@ title: 'FROM 句' doc_type: 'reference' --- - - # FROM 句 {#from-clause} 
`FROM` 句は、データを読み取る元となるソースを指定します。 @@ -29,7 +27,6 @@ FROM table SELECT * ``` - ## FINAL 修飾子 {#final-modifier} `FINAL` が指定されている場合、ClickHouse は結果を返す前にデータを完全にマージします。これにより、指定されたテーブルエンジンでマージ時に行われるすべてのデータ変換も実行されます。 @@ -78,7 +75,6 @@ SET final = 1; SELECT x, y FROM mytable WHERE x > 1; ``` - ## 実装の詳細 {#implementation-details} `FROM` 句が省略された場合、データは `system.one` テーブルから読み取られます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md index e4ac7f09ae3..8992856eb8a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md @@ -6,8 +6,6 @@ title: 'GROUP BY 句' doc_type: 'reference' --- - - # GROUP BY 句 {#group-by-clause} `GROUP BY` 句は `SELECT` クエリを集約モードに切り替え、その動作は次のようになります。 @@ -22,8 +20,6 @@ doc_type: 'reference' テーブルに対して集約を実行する別の方法もあります。クエリ内でテーブルのカラムが集約関数の内部にしか現れない場合、`GROUP BY` 句は省略でき、その場合は空のキー集合(キーをまったく指定しない)での集約が行われるとみなされます。このようなクエリは常にちょうど 1 行だけを返します。 ::: - - ## NULL の処理 {#null-processing} グループ化では、ClickHouse は [NULL](/sql-reference/syntax#null) を値として解釈し、`NULL==NULL` とみなします。これは、ほとんどの他のコンテキストにおける `NULL` の処理とは異なります。 @@ -56,7 +52,6 @@ doc_type: 'reference' `GROUP BY` に複数のキーを渡すと、結果は選択された値のあらゆる組み合わせを返し、あたかも `NULL` が特定の値であるかのように扱われます。 - ## ROLLUP 修飾子 {#rollup-modifier} `ROLLUP` 修飾子は、`GROUP BY` 句のリスト内での順序に基づいて、キー式ごとの小計を計算するために使用されます。小計の行は結果テーブルの末尾に追加されます。 @@ -130,7 +125,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH ROLLUP; * SQL 標準との互換性を確保するための [group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) 設定。 - ## CUBE 修飾子 {#cube-modifier} `CUBE` 修飾子は、`GROUP BY` 句内のキー式のあらゆる組み合わせに対する小計を計算するために使用されます。小計行は結果テーブルの末尾に追加されます。 @@ -175,7 +169,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY CUBE(year, month, day); `GROUP BY` から除外された列はゼロで埋められます。 - ```text 
┌─year─┬─month─┬─day─┬─count()─┐ │ 2020 │ 10 │ 15 │ 1 │ @@ -229,7 +222,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH CUBE; * SQL 標準との互換性を確保するための設定については、[group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) を参照してください。 - ## WITH TOTALS 句修飾子 {#with-totals-modifier} `WITH TOTALS` 句修飾子が指定されている場合、追加の行が計算されます。この行では、キー列にはデフォルト値(ゼロまたは空文字列)が入り、集約関数の列にはすべての行に対して計算された値(`totals` 値)が入ります。 @@ -266,8 +258,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH CUBE; `WITH TOTALS` はサブクエリ内で使用でき、[JOIN](/sql-reference/statements/select/join.md) 句内のサブクエリでも使用できます(この場合、対応する合計値は結合されます)。 - - ## GROUP BY ALL {#group-by-all} `GROUP BY ALL` は、集約関数ではないすべての SELECT 句の式を列挙することと同等です。 @@ -316,7 +306,6 @@ FROM t GROUP BY substring(a, 4, 2), substring(a, 1, 2) ``` - ## 使用例 {#examples} 例: @@ -344,7 +333,6 @@ GROUP BY domain 出現した異なるキー値ごとに、`GROUP BY` は集約関数の結果セットを計算します。 - ## GROUPING SETS 修飾子 {#grouping-sets-modifier} これは最も汎用的な修飾子です。 @@ -382,7 +370,6 @@ GROUPING SETS * SQL 標準との互換性に関する [group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) 設定。 - ## 実装の詳細 {#implementation-details} 集約はカラム指向 DBMS において最も重要な機能の一つであり、このためその実装部分は ClickHouse の中でも特に高度に最適化されています。デフォルトでは、集約はハッシュテーブルを用いてメモリ内で実行されます。ハッシュテーブルには 40 以上の特殊化があり、「グルーピングキー」のデータ型に応じて自動的に選択されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md index 762062a26b5..8ea41bc4dfd 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md @@ -7,8 +7,6 @@ keywords: ['INNER JOIN', 'LEFT JOIN', 'LEFT OUTER JOIN', 'RIGHT JOIN', 'RIGHT OU doc_type: 'reference' --- - - # JOIN 句 {#join-clause} `JOIN` 句は、各テーブルに共通する値を用いて 1 つ以上のテーブルの列を結合し、新しいテーブルを生成します。これは SQL 
をサポートするデータベースで一般的な操作であり、[関係代数](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators)における join に相当します。単一のテーブル内での結合という特殊なケースは、しばしば「自己結合 (self-join)」と呼ばれます。 @@ -24,7 +22,6 @@ FROM `ON` 句の式および `USING` 句の列は「結合キー」と呼ばれます。特に断りがない限り、`JOIN` は一致する「結合キー」を持つ行から [デカルト積](https://en.wikipedia.org/wiki/Cartesian_product) を生成し、その結果、元のテーブルよりもはるかに多くの行を含むことがあります。 - ## サポートされている JOIN の種類 {#supported-types-of-join} すべての標準的な [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) タイプがサポートされています: @@ -55,8 +52,6 @@ ClickHouse では、追加で次の JOIN タイプも利用できます: [join_algorithm](../../../operations/settings/settings.md#join_algorithm) が `partial_merge` に設定されている場合、`RIGHT JOIN` および `FULL JOIN` は `ALL` ストリクト性の場合にのみサポートされます(`SEMI`、`ANTI`、`ANY`、`ASOF` はサポートされません)。 ::: - - ## 設定 {#settings} デフォルトの結合種別は、[`join_default_strictness`](../../../operations/settings/settings.md#join_default_strictness) 設定で上書きできます。 @@ -74,8 +69,6 @@ ClickHouse では、追加で次の JOIN タイプも利用できます: ClickHouse が `CROSS JOIN` を `INNER JOIN` に書き換えられなかった場合の動作を指定するには、`cross_to_inner_join_rewrite` 設定を使用します。デフォルト値は `1` であり、この場合は結合を継続しますが、処理は遅くなります。エラーをスローしたい場合は `cross_to_inner_join_rewrite` を `0` に設定し、カンマ結合/クロス結合を実行せず、すべてのカンマ/クロス結合の書き換えを強制したい場合は `2` に設定します。値が `2` のときに書き換えが失敗すると、"Please, try to simplify `WHERE` section" というエラーメッセージが返されます。 - - ## ON 句の条件 {#on-section-conditions} `ON` 句には、`AND` や `OR` 演算子を使って組み合わせた複数の条件を含めることができます。結合キーを指定する条件は、次を満たす必要があります。 @@ -167,7 +160,6 @@ SELECT a, b, val FROM t1 INNER JOIN t2 ON t1.a = t2.key OR t1.b = t2.key; :::note - デフォルトでは、同じテーブルの列を使用している限り、非等価条件もサポートされます。 たとえば、`t1.a = t2.key AND t1.b > 0 AND t2.b > t2.c` のような条件は有効です。これは、`t1.b > 0` が `t1` の列のみを使用し、`t2.b > t2.c` が `t2` の列のみを使用しているためです。 ただし、`t1.a = t2.key AND t1.b > t2.key` のような条件に対する実験的サポートを有効化して試すこともできます。詳細については、以下のセクションを参照してください。 @@ -188,7 +180,6 @@ SELECT a, b, val FROM t1 INNER JOIN t2 ON t1.a = t2.key OR t1.b = t2.key AND t2. 
└───┴────┴─────┘ ``` - ## 異なるテーブルの列に対する不等号条件を用いた JOIN {#join-with-inequality-conditions-for-columns-from-different-tables} ClickHouse は現在、等価条件に加えて、不等号条件を指定した `ALL/ANY/SEMI/ANTI INNER/LEFT/RIGHT/FULL JOIN` をサポートしています。不等号条件は、`hash` および `grace_hash` の JOIN アルゴリズムでのみ利用できます。不等号条件は `join_use_nulls` ではサポートされません。 @@ -239,7 +230,6 @@ key2 a2 1 1 1 0 0 \N key4 f 2 3 4 0 0 \N ``` - ## JOINキーにおけるNULL値 {#null-values-in-join-keys} `NULL` は、自分自身を含めてどの値とも等しくありません。これは、あるテーブルの `JOIN` キーに `NULL` 値がある場合、他のテーブルの `NULL` 値とは一致しないことを意味します。 @@ -294,7 +284,6 @@ SELECT A.name, B.score FROM A LEFT JOIN B ON isNotDistinctFrom(A.id, B.id) └─────────┴───────┘ ``` - ## ASOF JOIN の使用方法 {#asof-join-usage} `ASOF JOIN` は、完全一致するレコードが存在しないデータ同士を結合する必要がある場合に有用です。 @@ -349,7 +338,6 @@ USING (equi_column1, ... equi_columnN, asof_column) [Join](../../../engines/table-engines/special/join.md) テーブルエンジンでは**サポートされていません**。 ::: - ## PASTE JOIN の使用方法 {#paste-join-usage} `PASTE JOIN` の結果は、左側のサブクエリのすべてのカラムに続いて、右側のサブクエリのすべてのカラムを含むテーブルになります。 @@ -408,7 +396,6 @@ SETTINGS max_block_size = 2; └───┴──────┘ ``` - ## 分散 JOIN {#distributed-join} 分散テーブルが関わる JOIN を実行する方法は 2 つあります。 @@ -418,8 +405,6 @@ SETTINGS max_block_size = 2; `GLOBAL` を使用する際は注意してください。詳細については、[分散サブクエリ](/sql-reference/operators/in#distributed-subqueries) セクションを参照してください。 - - ## 暗黙の型変換 {#implicit-type-conversion} `INNER JOIN`、`LEFT JOIN`、`RIGHT JOIN`、`FULL JOIN` の各クエリでは、「結合キー」に対する暗黙の型変換がサポートされています。ただし、左側と右側のテーブルの結合キーを単一の型に変換できない場合は、クエリを実行できません(たとえば、`UInt64` と `Int64`、あるいは `String` と `Int32` の両方の値をすべて保持できるデータ型が存在しない場合など)。 @@ -462,7 +447,6 @@ SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b); └────┴──────┴───────────────┴─────────────────┘ ``` - ## 使用上の推奨事項 {#usage-recommendations} ### 空セルまたは NULL セルの処理 {#processing-of-empty-or-null-cells} @@ -510,8 +494,6 @@ SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b); これらのいずれかの制限に達した場合、ClickHouse は 
[join_overflow_mode](/operations/settings/settings#join_overflow_mode) 設定の指示どおりに動作します。 - - ## 例 {#examples} 例: @@ -555,7 +537,6 @@ LIMIT 10 └───────────┴────────┴────────┘ ``` - ## 関連コンテンツ {#related-content} - Blog: [ClickHouse: 非常に高速な DBMS による完全な SQL JOIN サポート - パート 1](https://clickhouse.com/blog/clickhouse-fully-supports-joins) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md index 381d92de282..3d2218a3b53 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md @@ -42,7 +42,6 @@ LIMIT n, m どちらの形式でも、`n` と `m` は 0 以上の整数でなければなりません。 - ## 負の LIMIT {#negative-limits} 負の値を使用して、結果セットの*末尾*から行を選択します。 @@ -81,7 +80,6 @@ LIMIT 10 OFFSET 0.5 -- 中間地点から10行 LIMIT 10 OFFSET -20 -- 最後の20行をスキップした後の10行 ``` - ## LIMIT ... WITH TIES {#limit--with-ties-modifier} `WITH TIES` 修飾子は、LIMIT 句で取得される最後の行と同じ `ORDER BY` の値を持つ行を、追加で結果に含めます。 @@ -129,7 +127,6 @@ SELECT * FROM ( この修飾子は、[`ORDER BY ... 
WITH FILL`](/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier) 修飾子と組み合わせて使用できます。 - ## 考慮事項 {#considerations} **非決定的な結果:** [`ORDER BY`](../../../sql-reference/statements/select/order-by.md) 句がない場合、返される行は任意のものとなり、クエリの実行ごとに結果が変わる可能性があります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md index 3f21b2fee7a..6e4a71917ff 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md @@ -6,8 +6,6 @@ title: 'ORDER BY 句' doc_type: 'reference' --- - - # ORDER BY 句 {#order-by-clause} `ORDER BY` 句には次のいずれかを指定できます。 @@ -27,8 +25,6 @@ doc_type: 'reference' ソート対象の式の値が同一の行は、任意(非決定的)な順序で返されます。 `SELECT` 文で `ORDER BY` 句を省略した場合も、行の並び順は任意(非決定的)です。 - - ## 特殊値のソート順 {#sorting-of-special-values} `NaN` および `NULL` のソート順には、2 つの方法があります。 @@ -74,7 +70,6 @@ doc_type: 'reference' 浮動小数点数をソートする場合、NaN は他の値とは別扱いになります。ソート順に関係なく、NaN は常に末尾に並びます。言い換えると、昇順ソートでは NaN は他のすべての数値よりも大きいかのように扱われ、降順ソートでは残りの値よりも小さいかのように扱われます。 - ## 照合順序サポート {#collation-support} [String](../../../sql-reference/data-types/string.md) 値でソートする場合、照合順序(比較方法)を指定できます。例: `ORDER BY SearchPhrase COLLATE 'tr'` — 文字列が UTF-8 でエンコードされていることを前提として、トルコ語アルファベットを用い、大文字小文字を区別せずにキーワードを昇順でソートします。`COLLATE` は、ORDER BY 句内の各式ごとに個別に指定してもしなくてもかまいません。`ASC` または `DESC` を指定する場合は、その後ろに `COLLATE` を指定します。`COLLATE` を使用する場合、ソートは常に大文字小文字を区別しません。 @@ -83,8 +78,6 @@ doc_type: 'reference' `COLLATE` によるソートは通常のバイト列によるソートより効率が低いため、少数行の最終的なソートにのみ `COLLATE` を使用することを推奨します。 - - ## 照合順序の例 {#collation-examples} [String](../../../sql-reference/data-types/string.md) 値のみの例: @@ -229,7 +222,6 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; [Tuple](../../../sql-reference/data-types/tuple.md) を使った例: - ```response ┌─x─┬─s───────┐ │ 1 │ (1,'Z') │ @@ -262,7 +254,6 @@ SELECT * FROM 
collate_test ORDER BY s ASC COLLATE 'en'; └───┴─────────┘ ``` - ## 実装の詳細 {#implementation-details} `ORDER BY` に加えて十分に小さい [LIMIT](../../../sql-reference/statements/select/limit.md) を指定すると、使用される RAM を抑えられます。そうでない場合、消費されるメモリ量はソート対象データ量に比例します。分散クエリ処理では、[GROUP BY](/sql-reference/statements/select/group-by) を省略すると、ソートはリモートサーバー側で部分的に実行され、その結果がリクエスト元サーバーでマージされます。これは、分散ソートの場合、ソート対象データ量が単一サーバーのメモリ量を上回る可能性があることを意味します。 @@ -273,8 +264,6 @@ RAM が不足している場合は、外部メモリ(ディスク)を使用 外部ソートは、RAM 内でのソートと比較して効率が大きく低下します。 - - ## データ読み取りの最適化 {#optimization-of-data-reading} `ORDER BY` 式の先頭部分がテーブルのソートキーと一致している場合、[optimize_read_in_order](../../../operations/settings/settings.md#optimize_read_in_order) 設定を使用することでクエリを最適化できます。 @@ -295,8 +284,6 @@ RAM が不足している場合は、外部メモリ(ディスク)を使用 `MaterializedView` エンジンのテーブルでは、`SELECT ... FROM merge_tree_table ORDER BY pk` のようなビューに対して最適化が機能します。ただし、ビュー定義のクエリに `ORDER BY` 句がない場合の `SELECT ... FROM view ORDER BY pk` のようなクエリではサポートされません。 - - ## ORDER BY Expr WITH FILL 修飾子 {#order-by-expr-with-fill-modifier} この修飾子は、[LIMIT ... 
WITH TIES 修飾子](/sql-reference/statements/select/limit#limit--with-ties-modifier)と組み合わせて使用することもできます。 @@ -385,7 +372,6 @@ ORDER BY 結果: - ```text ┌───d1───────┬───d2───────┬─source───┐ │ 1970-01-11 │ 1970-01-02 │ original │ @@ -448,7 +434,6 @@ ORDER BY d2 WITH FILL; ``` - 結果: ```response @@ -615,7 +600,6 @@ SELECT n, source, inter FROM ( 結果: - ```text ┌───n─┬─source───┬─inter─┐ │ 0 │ │ 0 │ @@ -634,7 +618,6 @@ SELECT n, source, inter FROM ( └─────┴──────────┴───────┘ ``` - ## ソートプレフィックス単位での補間 {#filling-grouped-by-sorting-prefix} 特定のカラムで同じ値を持つ行ごとに、独立して補間を行うと便利な場合があります。代表的な例は、時系列データの欠損値を補間するケースです。 @@ -687,7 +670,6 @@ INTERPOLATE ( value AS 9999 ) ここでは、`value` 列に `9999` を補間して、埋められた行がより目立つようにしています。 この挙動は、`use_with_fill_by_sorting_prefix` の設定によって制御されます(デフォルトで有効です)。 - ## 関連コンテンツ {#related-content} - ブログ記事: [ClickHouse でタイムシリーズデータを扱う方法](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md index 41a2a208718..5c556bfc5a6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md @@ -6,16 +6,12 @@ title: 'PREWHERE 句' doc_type: 'reference' --- - - # PREWHERE 句 {#prewhere-clause} `PREWHERE` は、フィルタリングをより効率的に適用するための最適化です。`PREWHERE` 句が明示的に指定されていない場合でも、デフォルトで有効になっています。これは、[WHERE](../../../sql-reference/statements/select/where.md) 条件の一部を自動的に PREWHERE ステージへ移動することで機能します。`PREWHERE` 句の役割は、このデフォルトの最適化よりも適切に制御できると考える場合に、その挙動を明示的にコントロールすることだけです。 PREWHERE 最適化では、まず PREWHERE 式を評価するために必要な列だけが読み込まれます。その後、クエリの残りの部分を実行するために必要な他の列が読み込まれますが、これは PREWHERE 式が少なくとも一部の行で `true` となるブロックに対してのみ行われます。すべての行に対して PREWHERE 式が `false` となるブロックが多数存在し、かつ PREWHERE がクエリの他の部分より少ない列しか必要としない場合には、クエリ実行時にディスクから読み取るデータ量を大幅に削減できることがよくあります。 - - ## PREWHERE を手動で制御する 
{#controlling-prewhere-manually} この句は `WHERE` 句と同じ意味を持ちます。違いは、テーブルからどのデータが読み込まれるかという点です。クエリ内の列のうち一部の列でしか使われないものの、強力なデータフィルタリングを提供する条件について `PREWHERE` を手動で制御すると、読み取るデータ量を削減できます。 @@ -30,14 +26,10 @@ PREWHERE 最適化では、まず PREWHERE 式を評価するために必要な `PREWHERE` セクションは `FINAL` より前に実行されるため、テーブルの `ORDER BY` セクションに含まれないフィールドと併用して `PREWHERE` を使うと、`FROM ... FINAL` クエリの結果が偏る可能性があります。 ::: - - ## 制限事項 {#limitations} `PREWHERE` は、[*MergeTree](../../../engines/table-engines/mergetree-family/index.md) ファミリーに属するテーブルでのみ使用できます。 - - ## 例 {#example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md index 006f58edf0c..0b69e0bbf70 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md @@ -6,22 +6,16 @@ title: 'QUALIFY 句' doc_type: 'reference' --- - - # QUALIFY 句 {#qualify-clause} ウィンドウ関数の結果をフィルタリングするために使用します。[WHERE](../../../sql-reference/statements/select/where.md) 句と似ていますが、`WHERE` はウィンドウ関数の評価より前に実行されるのに対し、`QUALIFY` はその後に実行される点が異なります。 `QUALIFY` 句では、`SELECT` 句内で定義したエイリアスを使用して、そのウィンドウ関数の結果を参照できます。あるいは、クエリ結果としては返さない追加のウィンドウ関数の結果に対してフィルタリングを行うこともできます。 - - ## 制限事項 {#limitations} 評価するウィンドウ関数が存在しない場合は、`QUALIFY` は使用できません。代わりに `WHERE` を使用してください。 - - ## 例 {#examples} 例: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md index 57f7be5fe85..ce7022046dd 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md @@ -7,8 +7,6 @@ doc_type: 'reference' keywords: ['WHERE'] --- - - # WHERE 句 {#where-clause} `WHERE` 句は、`SELECT` の 
[`FROM`](../../../sql-reference/statements/select/from.md) 句から得られるデータをフィルタリングするために使用します。 @@ -26,8 +24,6 @@ PREWHERE は、フィルタリングをより効率的に適用するための `PREWHERE` 句が明示的に指定されていなくても、デフォルトで有効になっています。 ::: - - ## `NULL` の判定 {#testing-for-null} 値が[`NULL`](/sql-reference/syntax#null)かどうかを判定する必要がある場合は、次を使用します。 @@ -36,8 +32,6 @@ PREWHERE は、フィルタリングをより効率的に適用するための `NULL` を含む式は、上記のように明示的に判定しない限り、真になることはありません。 - - ## 論理演算子を使用したデータのフィルタリング {#filtering-data-with-logical-operators} 複数の条件を組み合わせて指定するために、`WHERE` 句と組み合わせて次の[論理関数](/sql-reference/functions/logical-functions#and)を使用できます: @@ -47,15 +41,11 @@ PREWHERE は、フィルタリングをより効率的に適用するための - [`or()`](/sql-reference/functions/logical-functions#or) または `OR` - [`xor()`](/sql-reference/functions/logical-functions#xor) - - ## 条件としての UInt8 列の使用 {#using-uint8-columns-as-a-condition} ClickHouse では、`UInt8` 列をブール条件として直接使用でき、`0` は `false`、それ以外の非ゼロ値(一般的には `1`)は `true` を表します。 その例については、[下記](#example-uint8-column-as-condition)のセクションで説明します。 - - ## 比較演算子の使用 {#using-comparison-operators} 次の[比較演算子](/sql-reference/operators#comparison-operators)を使用できます。 @@ -76,8 +66,6 @@ ClickHouse では、`UInt8` 列をブール条件として直接使用でき、` | `a BETWEEN b AND c` | `a >= b AND a <= c` | 範囲チェック(両端を含む) | `price BETWEEN 100 AND 500` | | `a NOT BETWEEN b AND c` | `a < b OR a > c` | 範囲外のチェック | `price NOT BETWEEN 100 AND 500` | - - ## パターンマッチングと条件式 {#pattern-matching-and-conditional-expressions} 比較演算子に加えて、`WHERE` 句ではパターンマッチングと条件式も使用できます。 @@ -92,8 +80,6 @@ ClickHouse では、`UInt8` 列をブール条件として直接使用でき、` 使用例については「[パターンマッチングと条件式](#examples-pattern-matching-and-conditional-expressions)」を参照してください。 - - ## リテラル、カラム、サブクエリを用いた式 {#expressions-with-literals-columns-subqueries} `WHERE` 句の後に続く式には、[リテラル](/sql-reference/syntax#literals)、カラム、またはサブクエリ(条件で使用される値を返す入れ子の `SELECT` 文)を含めることができます。 @@ -119,7 +105,6 @@ WHERE category = 'Electronics' AND id IN (SELECT product_id FROM bestsellers) ``` - -- 3 つすべてに論理演算子を使用 WHERE (price > 100 OR category IN (SELECT category FROM featured)) AND in_stock = true @@ -240,7 +225,6 @@ WHERE 
(category = 'Electronics' OR category = 'Furniture') AND price < 400; ``` - ```response ┌─id─┬─name────┬─price─┬─category────┬─in_stock─┐ 1. │ 2 │ マウス │ 25.5 │ 電子機器 │ true │ @@ -366,7 +350,6 @@ WHERE category = 'Electronics' AND in_stock = true; #### LIKE の例 {#like-examples} - ```sql -- 名前に 'o' を含む製品を検索 SELECT * FROM products WHERE name LIKE '%o%'; diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md index 7f576a45cb7..2c4a4ae638d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md @@ -17,8 +17,6 @@ doc_type: 'reference' さらに、ユーザーは [`displaySecretsInShowAndSelect`](grant.md/#displaysecretsinshowandselect) 権限を持っている必要があります。 ::: - - ## SHOW CREATE TABLE | DICTIONARY | VIEW | DATABASE {#show-create-table--dictionary--view--database} これらのステートメントは、指定したオブジェクトの作成に使用された `CREATE` クエリを含む、`String` 型の単一列を返します。 @@ -35,7 +33,6 @@ SHOW [CREATE] TABLE | TEMPORARY TABLE | DICTIONARY | VIEW | DATABASE [db.]table| *擬似的な* クエリが返されます。 ::: - ## SHOW DATABASES {#show-databases} このステートメントは、すべてのデータベースを一覧表示します。 @@ -110,7 +107,6 @@ SHOW DATABASES LIMIT 2 * [`CREATE DATABASE`](/sql-reference/statements/create/database) - ## SHOW TABLES {#show-tables} `SHOW TABLES` ステートメントは、テーブルの一覧を表示します。 @@ -189,7 +185,6 @@ SHOW TABLES FROM system LIMIT 2 * [`Create Tables`](/sql-reference/statements/create/table) * [`SHOW CREATE TABLE`](#show-create-table--dictionary--view--database) - ## SHOW COLUMNS {#show_columns} `SHOW COLUMNS` ステートメントは、列の一覧を表示します。 @@ -242,7 +237,6 @@ SHOW COLUMNS FROM 'orders' LIKE 'delivery_%' * [`system.columns`](../../operations/system-tables/columns.md) - ## SHOW DICTIONARIES {#show-dictionaries} `SHOW DICTIONARIES` ステートメントは、[Dictionaries](../../sql-reference/dictionaries/index.md) の一覧を表示します。 @@ -276,7 +270,6 @@ SHOW DICTIONARIES FROM db 
LIKE '%reg%' LIMIT 2 └──────────────┘ ``` - ## SHOW INDEX {#show-index} テーブルのプライマリインデックスおよびデータスキッピングインデックスの一覧を表示します。 @@ -321,7 +314,6 @@ SHOW [EXTENDED] {INDEX | INDEXES | INDICES | KEYS } {FROM | IN}
[{FROM | SHOW INDEX FROM 'tbl' ``` - ```text title="Response" ┌─table─┬─non_unique─┬─key_name─┬─seq_in_index─┬─column_name─┬─collation─┬─cardinality─┬─sub_part─┬─packed─┬─null─┬─index_type───┬─comment─┬─index_comment─┬─visible─┬─expression─┐ │ tbl │ 1 │ blf_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ BLOOM_FILTER │ │ │ YES │ d, b │ @@ -338,7 +330,6 @@ SHOW INDEX FROM 'tbl' * [`system.tables`](../../operations/system-tables/tables.md) * [`system.data_skipping_indices`](../../operations/system-tables/data_skipping_indices.md) - ## SHOW PROCESSLIST {#show-processlist} 現在処理中のクエリの一覧を含む [`system.processes`](/operations/system-tables/processes) テーブルの内容を出力します。ただし、`SHOW PROCESSLIST` クエリは除外されます。 @@ -360,7 +351,6 @@ $ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" ::: - ## SHOW GRANTS {#show-grants} `SHOW GRANTS` ステートメントは、ユーザーの権限を表示します。 @@ -377,7 +367,6 @@ SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL] `FINAL` 修飾子は、ユーザー自身の権限と、そのユーザーに付与されたロール(継承分を含む)からのすべての権限を統合します。 - ## SHOW CREATE USER {#show-create-user} `SHOW CREATE USER` 文は、[ユーザー作成](../../sql-reference/statements/create/user.md) 時に指定されたパラメータを表示します。 @@ -388,7 +377,6 @@ SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL] SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER] ``` - ## SHOW CREATE ROLE {#show-create-role} `SHOW CREATE ROLE` ステートメントは、[ロールの作成](../../sql-reference/statements/create/role.md)時に使用されたパラメータを表示します。 @@ -399,7 +387,6 @@ SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER] SHOW CREATE ROLE name1 [, name2 ...] ``` - ## SHOW CREATE ROW POLICY {#show-create-row-policy} `SHOW CREATE ROW POLICY` ステートメントは、[行ポリシーの作成](../../sql-reference/statements/create/row-policy.md) の際に使用されたパラメーターを表示します。 @@ -410,7 +397,6 @@ SHOW CREATE ROLE name1 [, name2 ...] SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...] 
``` - ## SHOW CREATE QUOTA {#show-create-quota} `SHOW CREATE QUOTA` ステートメントは、[クォータ作成](../../sql-reference/statements/create/quota.md)時に指定されたパラメーターを表示します。 @@ -421,7 +407,6 @@ SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...] SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT] ``` - ## SHOW CREATE SETTINGS PROFILE {#show-create-settings-profile} `SHOW CREATE SETTINGS PROFILE` ステートメントは、[設定プロファイルの作成](../../sql-reference/statements/create/settings-profile.md) で使用されたパラメーターを表示します。 @@ -432,7 +417,6 @@ SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT] SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...] ``` - ## SHOW USERS {#show-users} `SHOW USERS` ステートメントは、[ユーザーアカウント](../../guides/sre/user-management/index.md#user-account-management)名の一覧を返します。 @@ -444,7 +428,6 @@ SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...] SHOW USERS ``` - ## SHOW ROLES {#show-roles} `SHOW ROLES` ステートメントは、[ロール](../../guides/sre/user-management/index.md#role-management)の一覧を返します。 @@ -453,8 +436,6 @@ SHOW USERS ### 構文 {#syntax-14} - - ```sql title="Syntax" SHOW [CURRENT|ENABLED] ROLES ``` @@ -470,7 +451,6 @@ SHOW [CURRENT|ENABLED] ROLES SHOW [SETTINGS] PROFILES ``` - ## SHOW POLICIES {#show-policies} `SHOW POLICIES` ステートメントは、指定したテーブルに対する [行ポリシー](../../guides/sre/user-management/index.md#row-policy-management) の一覧を返します。 @@ -482,7 +462,6 @@ SHOW [SETTINGS] PROFILES SHOW [ROW] POLICIES [ON [db.]table] ``` - ## SHOW QUOTAS {#show-quotas} `SHOW QUOTAS` ステートメントは、[クオータ](../../guides/sre/user-management/index.md#quotas-management)の一覧を返します。 @@ -494,7 +473,6 @@ SHOW [ROW] POLICIES [ON [db.]table] SHOW QUOTAS ``` - ## SHOW QUOTA {#show-quota} `SHOW QUOTA` ステートメントは、すべてのユーザーまたは現在のユーザーの[クオータ](../../operations/quotas.md)の消費状況を返します。 @@ -502,8 +480,6 @@ SHOW QUOTAS ### 構文 {#syntax-18} - - ```sql title="Syntax" SHOW [CURRENT] QUOTA ``` @@ -518,7 +494,6 @@ SHOW [CURRENT] QUOTA SHOW ACCESS ``` - ## SHOW CLUSTER(S) {#show-clusters} `SHOW CLUSTER(S)` ステートメントは、クラスタの一覧を返します。 @@ -577,7 +552,6 @@ 
host_address: 127.0.0.1 port: 9000 ``` - ## SHOW SETTINGS {#show-settings} `SHOW SETTINGS` ステートメントは、システム設定とその値の一覧を返します。 @@ -635,7 +609,6 @@ SHOW CHANGED SETTINGS ILIKE '%MEMORY%' └──────────────────┴────────┴─────────────┘ ``` - ## SHOW SETTING {#show-setting} `SHOW SETTING` ステートメントは、指定した設定名の設定値を表示します。 @@ -650,7 +623,6 @@ SHOW SETTING * [`system.settings`](../../operations/system-tables/settings.md) テーブル - ## SHOW FILESYSTEM CACHES {#show-filesystem-caches} ### 使用例 {#examples-7} @@ -669,7 +641,6 @@ SHOW SETTING * [`system.settings`](../../operations/system-tables/settings.md) テーブル - ## SHOW ENGINES {#show-engines} `SHOW ENGINES` ステートメントは、サーバーがサポートするテーブルエンジンの説明と、その機能のサポート状況が格納されている [`system.table_engines`](../../operations/system-tables/table_engines.md) テーブルの内容を出力します。 @@ -684,7 +655,6 @@ SHOW ENGINES [INTO OUTFILE filename] [FORMAT format] * [system.table_engines](../../operations/system-tables/table_engines.md) テーブル - ## SHOW FUNCTIONS {#show-functions} `SHOW FUNCTIONS` ステートメントは、[`system.functions`](../../operations/system-tables/functions.md) テーブルの内容を表示します。 @@ -701,7 +671,6 @@ SHOW FUNCTIONS [LIKE | ILIKE ''] * [`system.functions`](../../operations/system-tables/functions.md) テーブル - ## SHOW MERGES {#show-merges} `SHOW MERGES` ステートメントは、マージの一覧を返します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md index 8d8b487e3ba..923dfc9439b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md @@ -291,8 +291,9 @@ SYSTEM INSTRUMENT ADD `QueryMetricLog::startQuery` SLEEP ENTRY 0 1 #### PROFILE {#instrument-add-profile} -関数の `ENTRY` から `EXIT` までに要した時間を計測します。 -プロファイリングの結果は [`system.trace_log`](../../operations/system-tables/trace_log.md) に保存されます。 +関数の`ENTRY`から`EXIT`までの処理に要した時間を計測します。 +プロファイリング結果は 
[`system.trace_log`](../../operations/system-tables/trace_log.md) に保存され、 +[Chrome Event Trace Format](../../operations/system-tables/trace_log.md#chrome-event-trace-format) に変換できます。 ```sql SYSTEM INSTRUMENT ADD `QueryMetricLog::startQuery` PROFILE diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md index 69c4b2c9d7b..72ba4d2df5c 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md @@ -28,7 +28,6 @@ UPDATE [db.]table [ON CLUSTER cluster] SET column1 = expr1 [, ...] [IN PARTITION `filter_expr` は `UInt8` 型でなければなりません。このクエリは、`filter_expr` が非ゼロの値を取る行について、指定された列の値を対応する式の評価結果に更新します。 値は `CAST` 演算子を使用して列の型にキャストされます。プライマリキーまたはパーティションキーの計算に使用されている列の更新はサポートされていません。 - ## 例 {#examples} ```sql @@ -37,15 +36,12 @@ UPDATE hits SET Title = 'Updated Title' WHERE EventDate = today(); UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; ``` - ## 軽量な更新はデータを即時には更新しない {#lightweight-update-does-not-update-data-immediately} 軽量な `UPDATE` は **パッチパーツ (patch parts)** を用いて実装されています。パッチパーツは、更新対象の列と行のみを含む特殊な種類のデータパーツです。 軽量な `UPDATE` はパッチパーツを作成しますが、ストレージ上の元のデータはすぐに物理的に書き換えられるわけではありません。 更新処理は `INSERT ... 
SELECT ...` クエリに似ていますが、`UPDATE` クエリはパッチパーツの作成が完了するまで待機してから結果を返します。 - - 更新された値は次のとおりです: - パッチの適用により `SELECT` クエリで**即座に参照可能**になります - 後続のマージおよびミューテーション時にのみ**物理的にマテリアライズ**されます @@ -57,14 +53,10 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; 軽量アップデートを使用するには、テーブル設定 [`enable_block_number_column`](/operations/settings/merge-tree-settings#enable_block_number_column) および [`enable_block_offset_column`](/operations/settings/merge-tree-settings#enable_block_offset_column) により `_block_number` および `_block_offset` カラムのマテリアライズを有効にする必要があります。 - - ## 軽量な削除 {#lightweight-delete} [軽量な `DELETE`](/sql-reference/statements/delete) クエリは、`ALTER UPDATE` ミューテーションではなく、軽量な `UPDATE` として実行できます。軽量な `DELETE` の実装は、[`lightweight_delete_mode`](/operations/settings/settings#lightweight_delete_mode) の設定によって制御されます。 - - ## パフォーマンスに関する考慮事項 {#performance-considerations} **軽量アップデートの利点:** @@ -79,15 +71,11 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; - ごく小さいアップデートを高頻度で行うと「too many parts」エラーにつながる可能性がある。`WHERE` 句内の単一の `IN` 句にアップデート対象の ID をまとめるなどして、複数のアップデートを 1 つのクエリにバッチ処理することが推奨される - 軽量アップデートは、テーブル全体の約 10% 程度までの少量の行を更新することを想定して設計されている。より多くの行を更新する必要がある場合は、[`ALTER TABLE ... 
UPDATE`](/sql-reference/statements/alter/update) ミューテーションを使用することが推奨される - - ## 同時実行操作 {#concurrent-operations} 軽量な更新は、重いミューテーションとは異なり、現在実行中のマージやミューテーションの完了を待ちません。 同時に行われる軽量更新の一貫性は、[`update_sequential_consistency`](/operations/settings/settings#update_sequential_consistency) および [`update_parallel_mode`](/operations/settings/settings#update_parallel_mode) の設定によって制御されます。 - - ## 更新権限 {#update-permissions} `UPDATE` には `ALTER UPDATE` 権限が必要です。特定のユーザーに対して特定のテーブルで `UPDATE` ステートメントを有効にするには、以下を実行します。 @@ -96,7 +84,6 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; GRANT ALTER UPDATE ON db.table TO username; ``` - ## 実装の詳細 {#details-of-the-implementation} パッチパーツは通常のパーツと同じ構造ですが、更新されたカラムと、いくつかのシステムカラムのみを含みます: @@ -132,8 +119,6 @@ GRANT ALTER UPDATE ON db.table TO username; ジョインモードはマージモードよりも低速で、より多くのメモリを必要としますが、利用頻度は低くなります。 - - ## 関連コンテンツ {#related-content} - [`ALTER UPDATE`](/sql-reference/statements/alter/update) - 負荷の大きい `UPDATE` 操作 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md index cf101ca4311..a8caaa14aef 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md @@ -11,20 +11,16 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # azureBlobStorage テーブル関数 {#azureblobstorage-table-function} [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) 内のファイルに対して SELECT および INSERT を行うための、テーブルのようなインターフェイスを提供します。このテーブル関数は、[s3 関数](../../sql-reference/table-functions/s3.md) と類似しています。 - - ## 構文 {#syntax} ```sql azureBlobStorage(- connection_string|storage_account_url, container_name, blobpath, [account_name, 
account_key, format, compression, structure, partition_strategy, partition_columns_in_data_file, extra_credentials(client_id=, tenant_id=)]) ``` - ## 引数 {#arguments} | Argument | Description | @@ -41,14 +37,10 @@ azureBlobStorage(- connection_string|storage_account_url, container_name, blobpa | `partition_columns_in_data_file` | 省略可能なパラメータです。`HIVE` パーティション戦略でのみ使用されます。パーティション列がデータファイル内に書き込まれているかどうかを ClickHouse に指示します。デフォルトは `false` です。 | | `extra_credentials` | 認証には `client_id` と `tenant_id` を使用します。`extra_credentials` が指定されている場合、`account_name` および `account_key` よりも優先して使用されます。 - - ## 返される値 {#returned_value} 指定されたファイル内のデータを読み取り/書き込みするための、指定された構造を持つテーブル。 - - ## 例 {#examples} [AzureBlobStorage](/engines/table-engines/integrations/azureBlobStorage) テーブルエンジンと同様に、ローカル環境での Azure Storage の開発には Azurite エミュレーターを使用できます。詳細は[こちら](https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub%2Cblob-storage)を参照してください。以下では、Azurite がホスト名 `azurite1` で利用可能であると仮定します。 @@ -88,7 +80,6 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam └─────────┘ ``` - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 @@ -96,8 +87,6 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam - `_size` — ファイルサイズ(バイト単位)。型: `Nullable(UInt64)`。ファイルサイズが不明な場合、値は `NULL` です。 - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL` です。 - - ## パーティション分割での書き込み {#partitioned-write} ### パーティション戦略 {#partition-strategy} @@ -123,7 +112,6 @@ select _path, * from azureBlobStorage(azure_conf2, storage_account_url = 'http:/ └─────────────────────────────────────────────────────────────────────────────────┴────┴──────┴─────────┘ ``` - ## use_hive_partitioning 設定 {#hive-style-partitioning} これは、読み取り時に ClickHouse が Hive スタイルのパーティション分割ファイルを解析するためのヒントとなる設定です。書き込み時には影響しません。読み取りと書き込みの動作を対称にしたい場合は、`partition_strategy` 引数を使用してください。 @@ -138,7 +126,6 @@ Hive スタイルのパーティション分割で作成された仮想カラム SELECT * FROM 
azureBlobStorage(config, storage_account_url='...', container='...', blob_path='http://data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## 共有アクセス署名 (SAS) の使用 {#using-shared-access-signatures-sas-sas-tokens} 共有アクセス署名 (SAS) は、Azure Storage のコンテナまたはファイルへの制限されたアクセス権を付与する URI です。これを使用すると、ストレージ アカウント キーを共有せずに、ストレージ アカウント リソースへの有効期限付きアクセスを提供できます。詳細は[こちら](https://learn.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature)を参照してください。 @@ -171,6 +158,5 @@ FROM azureBlobStorage('https://clickhousedocstest.blob.core.windows.net/?sp=r&st 1 row in set. 経過時間: 0.153秒 ``` - ## 関連項目 {#related} - [AzureBlobStorage テーブルエンジン](engines/table-engines/integrations/azureBlobStorage.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md index 273fd43ae72..7cb12ea71ac 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md @@ -7,22 +7,17 @@ title: 'azureBlobStorageCluster' doc_type: 'reference' --- - - # azureBlobStorageCluster テーブル関数 {#azureblobstoragecluster-table-function} 指定したクラスタ内の多数のノードで、[Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) 上のファイルを並列処理することを可能にします。イニシエーターノードでは、クラスタ内のすべてのノードへの接続を確立し、S3 ファイルパス中のアスタリスクを展開して、各ファイルを動的に振り分けます。ワーカーノードでは、処理すべき次のタスクをイニシエーターに問い合わせ、そのタスクを処理します。これは、すべてのタスクが完了するまで繰り返されます。 このテーブル関数は [s3Cluster 関数](../../sql-reference/table-functions/s3Cluster.md) と類似しています。 - - ## 構文 {#syntax} ```sql azureBlobStorageCluster(cluster_name, connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure]) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ 
-37,14 +32,10 @@ azureBlobStorageCluster(cluster_name, connection_string|storage_account_url, con | `compression` | サポートされる値: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`。デフォルトでは、ファイル拡張子から圧縮形式を自動検出します(`auto` を指定した場合と同じ)。 | | `structure` | テーブルの構造。形式は `'column1_name column1_type, column2_name column2_type, ...'`。 | - - ## 返される値 {#returned_value} 指定された構造を持ち、指定されたファイル内のデータを読み書きするためのテーブル。 - - ## 例 {#examples} [AzureBlobStorage](/engines/table-engines/integrations/azureBlobStorage) テーブルエンジンと同様に、ローカルでの Azure Storage 開発には Azurite エミュレーターを使用できます。詳細は[こちら](https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub%2Cblob-storage)を参照してください。以下では、Azurite がホスト名 `azurite1` で利用可能であると仮定します。 @@ -58,13 +49,10 @@ SELECT count(*) FROM azureBlobStorageCluster( 'auto', 'key UInt64') ``` - ## 共有アクセス署名 (SAS) の使用 {#using-shared-access-signatures-sas-sas-tokens} 使用例については [azureBlobStorage](/sql-reference/table-functions/azureBlobStorage#using-shared-access-signatures-sas-sas-tokens) を参照してください。 - - ## 関連項目 {#related} - [AzureBlobStorage エンジン](../../engines/table-engines/integrations/azureBlobStorage.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md index a0f2d64012b..f5426a85523 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md @@ -7,8 +7,6 @@ title: 'clusterAllReplicas' doc_type: 'reference' --- - - # clusterAllReplicas テーブル関数 {#clusterallreplicas-table-function} `remote_servers` セクションで設定されたクラスター内のすべてのシャードに、[Distributed](../../engines/table-engines/special/distributed.md) テーブルを作成せずにアクセスできます。各シャードにつき 1 つのレプリカのみがクエリされます。 @@ -19,12 +17,8 @@ doc_type: 'reference' 利用可能なすべてのクラスターは、[system.clusters](../../operations/system-tables/clusters.md) テーブルに一覧表示されています。 ::: - - ## 構文 
{#syntax} - - ```sql cluster(['cluster_name', db.table, sharding_key]) cluster(['cluster_name', db, table, sharding_key]) @@ -40,13 +34,10 @@ clusterAllReplicas(['cluster_name', db, table, sharding_key]) | `db.table` or `db`, `table` | データベース名とテーブル名。 | | `sharding_key` | シャーディングキー。省略可能。クラスタに複数のシャードがある場合に指定する必要があります。 | - ## 戻り値 {#returned_value} クラスタからのデータセット。 - - ## マクロの使用 {#using_macros} `cluster_name` にはマクロ(波かっこで囲まれた置換式)を含めることができます。置換される値は、サーバー構成ファイルの [macros](../../operations/server-configuration-parameters/settings.md#macros) セクションから取得されます。 @@ -57,7 +48,6 @@ clusterAllReplicas(['cluster_name', db, table, sharding_key]) SELECT * FROM cluster('{cluster}', default.example_table); ``` - ## 使用方法と推奨事項 {#usage_recommendations} `cluster` および `clusterAllReplicas` テーブル関数の使用は、各リクエストごとにサーバー接続が再確立されるため、`Distributed` テーブルを作成して利用する場合と比べて効率が低くなります。多数のクエリを処理する際は、必ず事前に `Distributed` テーブルを作成し、`cluster` および `clusterAllReplicas` テーブル関数の使用は避けてください。 @@ -70,8 +60,6 @@ SELECT * FROM cluster('{cluster}', default.example_table); `host`、`port`、`user`、`password`、`compression`、`secure` といった接続設定は、`` 設定セクションから取得されます。詳細は [Distributed engine](../../engines/table-engines/special/distributed.md) を参照してください。 - - ## 関連項目 {#related} - [skip_unavailable_shards](../../operations/settings/settings.md#skip_unavailable_shards) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md index fb363201a05..7eb6130ffed 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md @@ -7,14 +7,10 @@ title: 'deltaLake' doc_type: 'reference' --- - - # deltaLake テーブル関数 {#deltalake-table-function} Amazon S3、Azure Blob Storage、またはローカルにマウントされたファイルシステムにある [Delta Lake](https://github.com/delta-io/delta) テーブルに対して、読み取り専用のテーブル形式インターフェイスを提供します。 - - ## 構文 
{#syntax} `deltaLake` は `deltaLakeS3` のエイリアスであり、互換性維持のために提供されています。 @@ -29,20 +25,15 @@ deltaLakeAzure(connection_string|storage_account_url, container_name, blobpath, deltaLakeLocal(path, [,format]) ``` - ## 引数 {#arguments} 引数の説明は、それぞれ `s3`、`azureBlobStorage`、`HDFS`、`file` のテーブル関数における引数の説明と同じです。 `format` は、Delta Lake テーブル内のデータファイルのフォーマットを指定します。 - - ## 返される値 {#returned_value} 指定した Delta Lake テーブルからデータを読み取るための、指定した構造を持つテーブル。 - - ## 例 {#examples} S3 上のテーブル `https://clickhouse-public-datasets.s3.amazonaws.com/delta_lake/hits/` から行を選択する: @@ -63,7 +54,6 @@ LIMIT 2 └───────────────────────────────────────────────────────────────────────┴───────────┘ ``` - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 @@ -72,8 +62,6 @@ LIMIT 2 - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合は `NULL`。 - `_etag` — ファイルの ETag。型: `LowCardinality(String)`。ETag が不明な場合は `NULL`。 - - ## 関連項目 {#related} - [DeltaLake エンジン](engines/table-engines/integrations/deltalake.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md index 92e0e28a58f..ce0232c4f4a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md @@ -7,16 +7,12 @@ title: 'deltaLakeCluster' doc_type: 'reference' --- - - # deltaLakeCluster テーブル関数 {#deltalakecluster-table-function} これは [deltaLake](sql-reference/table-functions/deltalake.md) テーブル関数の拡張です。 指定したクラスタ内の多数のノードから、Amazon S3 上の [Delta Lake](https://github.com/delta-io/delta) テーブルのファイルを並列処理できるようにします。イニシエーターはクラスタ内のすべてのノードへの接続を確立し、各ファイルを動的に割り振ります。ワーカーノードは、処理すべき次のタスクについてイニシエーターに問い合わせ、そのタスクを処理します。すべてのタスクが完了するまで、これが繰り返されます。 - - ## 構文 {#syntax} ```sql @@ -32,21 +28,16 @@ deltaLakeAzureCluster(cluster_name, named_collection[, option=value [,..]]) 
`deltaLakeS3Cluster` は `deltaLakeCluster` のエイリアスであり、どちらも S3 向けです。` - ## 引数 {#arguments} - `cluster_name` — リモートおよびローカルサーバーへのアドレスや接続パラメータのセットを構成するために使用されるクラスタの名前。 - その他すべての引数の説明は、同等の [deltaLake](sql-reference/table-functions/deltalake.md) テーブル関数における引数の説明と同一です。 - - ## 返される値 {#returned_value} S3 内の指定された Delta Lake テーブルのうち、クラスタからデータを読み取るために指定された構造を持つテーブル。 - - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 @@ -55,8 +46,6 @@ S3 内の指定された Delta Lake テーブルのうち、クラスタから - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL` になります。 - `_etag` — ファイルの ETag。型: `LowCardinality(String)`。ETag が不明な場合、値は `NULL` になります。 - - ## 関連項目 {#related} - [deltaLake エンジン](engines/table-engines/integrations/deltalake.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md index 67f998084dc..1e66a60e1aa 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md @@ -7,33 +7,24 @@ title: 'dictionary' doc_type: 'reference' --- - - # dictionary テーブル関数 {#dictionary-table-function} [dictionary](../../sql-reference/dictionaries/index.md) のデータを ClickHouse のテーブルとして扱います。[Dictionary](../../engines/table-engines/special/dictionary.md) エンジンと同様に動作します。 - - ## 構文 {#syntax} ```sql dictionary('dict') ``` - ## 引数 {#arguments} - `dict` — 辞書名。[String](../../sql-reference/data-types/string.md) 型。 - - ## 戻り値 {#returned_value} ClickHouse テーブルです。 - - ## 例 {#examples} 入力テーブル `dictionary_source_table`: @@ -67,7 +58,6 @@ SELECT * FROM dictionary('new_dictionary'); └────┴───────┘ ``` - ## 関連 {#related} - [Dictionary エンジン](/engines/table-engines/special/dictionary) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md index 31846a91dad..3d6699bfc03 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md @@ -8,8 +8,6 @@ title: 'executable' doc_type: 'reference' --- - - # UDF 向け executable テーブル関数 {#executable-table-function-for-udfs} `executable` テーブル関数は、行を **stdout** に出力するスクリプト内で定義したユーザー定義関数 (UDF) の出力に基づいてテーブルを作成します。実行可能スクリプトは `users_scripts` ディレクトリに保存され、任意のソースからデータを読み取ることができます。ClickHouse サーバー上に、そのスクリプトを実行するために必要なパッケージがすべてインストールされていることを確認してください。たとえば、それが Python スクリプトの場合は、サーバーに必要な Python パッケージがインストールされていることを確認してください。 @@ -20,8 +18,6 @@ doc_type: 'reference' 通常の UDF と `executable` テーブル関数および `Executable` テーブルエンジンとの大きな利点の違いは、通常の UDF では行数を変更できないという点です。たとえば、入力が 100 行であれば、結果も 100 行を返さなければなりません。`executable` テーブル関数または `Executable` テーブルエンジンを使用する場合、スクリプトは複雑な集約を含め、任意のデータ変換を行うことができます。 ::: - - ## 構文 {#syntax} `executable` テーブル関数は 3 つのパラメーターを必須とし、オプションで入力クエリのリストを引数として受け取ります。 @@ -90,7 +86,6 @@ SELECT * FROM executable('generate_random.py', TabSeparated, 'id UInt32, random └────┴────────────┘ ``` - ## 設定 {#settings} - `send_chunk_header` - データのチャンクを処理に送信する前に、行数を送信するかどうかを制御します。デフォルト値は `false` です。 @@ -100,8 +95,6 @@ SELECT * FROM executable('generate_random.py', TabSeparated, 'id UInt32, random - `command_read_timeout` - コマンドの stdout からデータを読み取るタイムアウト(ミリ秒)。デフォルト値は 10000 です。 - `command_write_timeout` - コマンドの stdin にデータを書き込むタイムアウト(ミリ秒)。デフォルト値は 10000 です。 - - ## クエリ結果をスクリプトに渡す {#passing-query-results-to-a-script} `Executable` テーブルエンジンの[クエリ結果をスクリプトに渡す方法](../../engines/table-engines/special/executable.md#passing-query-results-to-a-script)の例を必ず参照してください。ここでは、その例と同じスクリプトを `executable` テーブル関数を使って実行する方法を示します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md index f62956cdc3e..c93d7749580 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md @@ -10,22 +10,18 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # file テーブル関数 {#file-table-function} `s3` テーブル関数と同様に、ファイルに対する `SELECT` や `INSERT` をテーブルと同じように扱えるインターフェイスを提供するテーブルエンジンです。ローカルファイルを扱う場合は `file()` を使用し、S3、GCS、MinIO などのオブジェクトストレージ内のバケットを扱う場合は [s3](/sql-reference/table-functions/url.md) のテーブル関数 `s3()` を使用します。 `file` 関数は、`SELECT` および `INSERT` クエリで使用して、ファイルからの読み取りやファイルへの書き込みを行うことができます。 - - ## 構文 {#syntax} ```sql file([path_to_archive ::] path [,format] [,structure] [,compression]) ``` - ## 引数 {#arguments} | パラメータ | 説明 | @@ -36,14 +32,10 @@ file([path_to_archive ::] path [,format] [,structure] [,compression]) | `structure` | テーブルの構造。形式:`'column1_name column1_type, column2_name column2_type, ...'`。 | | `compression` | `SELECT` クエリで使用する場合は既存の圧縮形式、`INSERT` クエリで使用する場合は指定する圧縮形式。サポートされる圧縮形式は `gz`、`br`、`xz`、`zst`、`lz4`、`bz2` です。 | - - ## 戻り値 {#returned_value} ファイル内のデータを読み書きするためのテーブル。 - - ## ファイルへの書き込み例 {#examples-for-writing-to-a-file} ### TSV ファイルへの書き込み {#write-to-a-tsv-file} @@ -56,7 +48,6 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) その結果、データはファイル `test.tsv` に書き込まれます。 - ```bash # cat /var/lib/clickhouse/user_files/test.tsv {#cat-varlibclickhouseuser_filestesttsv} 1 2 3 @@ -77,18 +68,14 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) その結果、データは次の3つのファイルに書き込まれます:`test_1.tsv`、`test_2.tsv`、`test_3.tsv`。 - ```bash # cat /var/lib/clickhouse/user_files/test_1.tsv {#cat-varlibclickhouseuser_filestest_1tsv} 3 2 1 ``` - # cat /var/lib/clickhouse/user_files/test_2.tsv {#cat-varlibclickhouseuser_filestest_2tsv} 1 3 2 - - # cat 
/var/lib/clickhouse/user_files/test_3.tsv {#cat-varlibclickhouseuser_filestest_3tsv} 1 2 3 @@ -96,7 +83,6 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) ``` ``` - ## ファイルから読み込む例 {#examples-for-reading-from-a-file} ### CSV ファイルからの SELECT {#select-from-a-csv-file} @@ -154,7 +140,6 @@ file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32'); SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); ``` - ## パス内のグロブ {#globs-in-path} パスにはグロブを使用できます。ファイルは、接頭辞や接尾辞だけでなく、パターン全体に一致する必要があります。ただし 1 つだけ例外があり、パスが既存のディレクトリを指していて、かつグロブを使用していない場合は、そのディレクトリ内のすべてのファイルが選択されるように、パスの末尾に暗黙的に `*` が追加されます。 @@ -167,8 +152,6 @@ SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); `{}` を用いる構文は、[remote](remote.md) および [hdfs](hdfs.md) テーブル関数と同様です。 - - ## 例 {#examples} **例** @@ -228,7 +211,6 @@ SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32'); SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32'); ``` - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 @@ -236,8 +218,6 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 - `_size` — ファイルサイズ(バイト単位)。型: `Nullable(UInt64)`。ファイルサイズが不明な場合、値は `NULL` です。 - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL` です。 - - ## use_hive_partitioning 設定 {#hive-style-partitioning} `use_hive_partitioning` 設定を 1 にすると、ClickHouse はパス(`/name=value/`)内の Hive スタイルのパーティショニングを検出し、クエリ内でパーティション列を仮想列として使用できるようにします。これらの仮想列は、パーティションを表すパス内の名前と同じ名前を持ちますが、先頭に `_` が付きます。 @@ -250,7 +230,6 @@ Hive スタイルのパーティショニングで作成された仮想列を使 SELECT * FROM file('data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## 設定 {#settings} | Setting | Description | @@ -261,8 +240,6 @@ SELECT * FROM file('data/path/date=*/country=*/code=*/*.parquet') WHERE _date > | [engine_file_skip_empty_files](operations/settings/settings.md#engine_file_skip_empty_files) | 
読み取り時に空のファイルをスキップできるようにします。デフォルトでは無効です。 | | [storage_file_read_method](/operations/settings/settings#engine_file_empty_if_not_exists) | ストレージファイルからデータを読み取る方法です。`read`、`pread`、`mmap` のいずれかです(`mmap` は clickhouse-local のみ)。デフォルト値: clickhouse-server では `pread`、clickhouse-local では `mmap`。 | - - ## 関連項目 {#related} - [仮想列](engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md index 63da4545aea..9ef10d6e409 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md @@ -7,8 +7,6 @@ title: 'fileCluster' doc_type: 'reference' --- - - # fileCluster テーブル関数 {#filecluster-table-function} 指定されたパスに一致するファイルを、クラスター内の複数ノードにまたがって同時に処理できるようにします。イニシエータはワーカーノードへの接続を確立し、ファイルパス内のグロブを展開し、ファイル読み取りタスクをワーカーノードに委譲します。各ワーカーノードは、処理すべき次のファイルを取得するためにイニシエータへ問い合わせを行い、すべてのタスクが完了する(すべてのファイルが読み込まれる)までこれを繰り返します。 @@ -18,15 +16,12 @@ doc_type: 'reference' これらのファイルがノード間で異なる場合、戻り値は事前には決定できず、どの順序でワーカーノードがイニシエータへタスクを要求するかに依存します。 ::: - - ## 構文 {#syntax} ```sql fileCluster(cluster_name, path[, format, structure, compression_method]) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -37,8 +32,6 @@ fileCluster(cluster_name, path[, format, structure, compression_method]) | `structure` | `'UserID UInt64, Name String'` 形式のテーブル構造。列名と型を決定します。型: [String](../../sql-reference/data-types/string.md)。 | | `compression_method` | 圧縮方式。サポートされる圧縮形式は `gz`、`br`、`xz`、`zst`、`lz4`、`bz2` です。 | - - ## 返される値 {#returned_value} 指定されたフォーマットと構造を持ち、指定されたパスに一致するファイルからのデータを含むテーブルが返されます。 @@ -88,13 +81,10 @@ SELECT * FROM fileCluster('my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s Str └────┴────────┘ ``` - ## パスのグロブ {#globs-in-path} [File](../../sql-reference/table-functions/file.md#globs-in-path) 
テーブル関数でサポートされているすべてのパターンは、FileCluster でもサポートされています。 - - ## 関連項目 {#related} - [file テーブル関数](../../sql-reference/table-functions/file.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md index 6eb6ca92cae..d9a1d8212d2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md @@ -7,35 +7,26 @@ title: 'format' doc_type: 'reference' --- - - # format テーブル関数 {#format-table-function} 指定された入力フォーマットに従って、引数からデータをパースします。`structure` 引数が指定されていない場合は、データから自動的に抽出されます。 - - ## 構文 {#syntax} ```sql format(format_name, [structure], data) ``` - ## 引数 {#arguments} - `format_name` — データの[フォーマット](/sql-reference/formats)。 - `structure` - テーブル構造。省略可能。形式は `column1_name column1_type, column2_name column2_type, ...`。 - `data` — 指定したフォーマットのデータを含む文字列を返す文字列リテラルまたは定数式。 - - ## 返される値 {#returned_value} 指定された形式および、指定または抽出された構造に従って `data` 引数を解析した結果を含むテーブル。 - - ## 例 {#examples} `structure` 引数なしの場合: @@ -109,7 +100,6 @@ $$) └───────┴─────┘ ``` - ## 関連項目 {#related} - [フォーマット](../../interfaces/formats.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md index d2ba0f183d8..8eebe35c701 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md @@ -7,21 +7,16 @@ title: 'fuzzJSON' doc_type: 'reference' --- - - # fuzzJSON テーブル関数 {#fuzzjson-table-function} JSON 文字列にランダムな変化を加えて撹乱します。 - - ## 構文 {#syntax} ```sql fuzzJSON({ named_collection [, option=value [,..]] | json_str[, random_seed] }) ``` - ## 引数 {#arguments} | Argument | Description | @@ -41,14 +36,10 @@ fuzzJSON({ 
named_collection [, option=value [,..]] | json_str[, random_seed] }) | `min_key_length` (UInt64) | キーの最小長。少なくとも 1 である必要があります。 | | `max_key_length` (UInt64) | キーの最大長。指定されている場合は `min_key_length` 以上である必要があります。 | - - ## 戻り値 {#returned_value} 摂動された JSON 文字列を含む単一列のテーブルオブジェクト。 - - ## 使用例 {#usage-example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md index 6314b56c9a5..ab2120d597a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md @@ -7,21 +7,16 @@ title: 'fuzzQuery' doc_type: 'reference' --- - - # fuzzQuery テーブル関数 {#fuzzquery-table-function} 指定されたクエリ文字列にランダムなゆらぎを与えます。 - - ## 構文 {#syntax} ```sql fuzzQuery(query[, max_query_length[, random_seed]]) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -30,14 +25,10 @@ fuzzQuery(query[, max_query_length[, random_seed]]) | `max_query_length` | (UInt64) - ファジング処理中にクエリが取り得る最大長。 | | `random_seed` | (UInt64) - 安定した結果を得るために使用する乱数シード。 | - - ## 返される値 {#returned_value} 摂動が加えられたクエリ文字列を 1 列だけ含むテーブルオブジェクト。 - - ## 使用例 {#usage-example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md index dcdd2ff1b6d..232aa97aeae 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md @@ -8,8 +8,6 @@ title: 'gcs' doc_type: 'reference' --- - - # gcs テーブル関数 {#gcs-table-function} [Google Cloud Storage](https://cloud.google.com/storage/) からデータを `SELECT` および `INSERT` するためのテーブル形式のインターフェイスを提供します。[`Storage Object User` IAM ロール](https://cloud.google.com/storage/docs/access-control/iam-roles)の付与が必要です。 @@ -18,8 
+16,6 @@ doc_type: 'reference' クラスター内に複数のレプリカがある場合は、代わりに [s3Cluster 関数](../../sql-reference/table-functions/s3Cluster.md)(GCS でも動作します)を使用して、INSERT の実行を並列化できます。 - - ## 構文 {#syntax} ```sql @@ -32,7 +28,6 @@ GCS Table Function は、GCS XML API と HMAC キーを使用して Google Cloud エンドポイントと HMAC の詳細については、[Google interoperability docs](https://cloud.google.com/storage/docs/interoperability) を参照してください。 ::: - ## 引数 {#arguments} | Argument | Description | @@ -65,13 +60,10 @@ and not ~~[https://storage.cloud.google.com](https://storage.cloud.google.com)~~ | `no_sign_request` | デフォルトでは無効。 | | `expiration_window_seconds` | デフォルト値は 120。 | - ## 返される値 {#returned_value} 指定されたファイル内のデータを読み書きするための、指定された構造を持つテーブルです。 - - ## 例 {#examples} GCS ファイル `https://storage.googleapis.com/my-test-bucket-768/data.csv` にあるテーブルから先頭 2 行を選択します: @@ -104,7 +96,6 @@ LIMIT 2; └─────────┴─────────┴─────────┘ ``` - ## 使用方法 {#usage} GCS 上に、次の URI のファイルが複数存在するとします: @@ -198,7 +189,6 @@ SELECT count(*) FROM gcs(creds, url='https://s3-object-url.csv') ``` - ## パーティション分割書き込み {#partitioned-write} `GCS` テーブルにデータを挿入する際に `PARTITION BY` 式を指定すると、各パーティション値ごとに別々のファイルが作成されます。データを個別のファイルに分割することで、読み取り処理の効率が向上します。 @@ -225,7 +215,6 @@ INSERT INTO TABLE FUNCTION その結果、データはそれぞれ異なるバケット内の 3 つのファイル `my_bucket_1/file.csv`、`my_bucket_10/file.csv`、`my_bucket_20/file.csv` に書き込まれます。 - ## 関連項目 {#related} - [S3 テーブル関数](s3.md) - [S3 エンジン](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md index 0be16285fa1..a5b1da0ea6b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md @@ -7,23 +7,18 @@ title: 'generateRandom' doc_type: 'reference' --- - - # generateRandom テーブル関数 {#generaterandom-table-function} 指定したスキーマでランダムなデータを生成します。 
そのデータを使用してテスト用テーブルを埋めることができます。 すべての型がサポートされているわけではありません。 - - ## 構文 {#syntax} ```sql generateRandom(['name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]]) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -34,14 +29,10 @@ generateRandom(['name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_str | `max_string_length` | 生成されるすべての文字列の最大文字数。デフォルトは `10`。 | | `max_array_length` | 生成されるすべての配列またはマップの要素数の最大値。デフォルトは `10`。 | - - ## 戻り値 {#returned_value} 指定されたスキーマを持つテーブルオブジェクト。 - - ## 使用例 {#usage-example} ```sql @@ -89,7 +80,6 @@ SELECT * FROM generateRandom(generateRandomStructure(4, 101), 101) LIMIT 3; SELECT * FROM generateRandom() LIMIT 3; ``` - ```text ┌───c1─┬─────────c2─┬─────────────────────c3─┬──────────────────────c4─┬─c5───────┐ │ -128 │ 317300854 │ 2030-08-16 08:22:20.65 │ 1994-08-16 12:08:56.745 │ R0qgiC46 │ @@ -104,7 +94,6 @@ SELECT * FROM generateRandom() LIMIT 3; SELECT * FROM generateRandom(11) LIMIT 3; ``` - ```text ┌───────────────────────────────────────c1─┬─────────────────────────────────────────────────────────────────────────────c2─┬─────────────────────────────────────────────────────────────────────────────c3─┬─────────c4─┬─────────────────────────────────────────────────────────────────────────────c5─┬──────────────────────c6─┬─c7──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─c8──────────────────────────────────────┬─────────c9─┐ │ -77422512305044606600216318673365695785 │ 636812099959807642229.503817849012019401335326013846687285151335352272727523 │ -34944452809785978175157829109276115789694605299387223845886143311647505037529 │ 544473976 │ 111220388331710079615337037674887514156741572807049614590010583571763691328563 │ 22016.22623506465 │ {'2052-01-31 
20:25:33':4306400876908509081044405485378623663,'1993-04-16 15:58:49':164367354809499452887861212674772770279,'2101-08-19 03:07:18':-60676948945963385477105077735447194811,'2039-12-22 22:31:39':-59227773536703059515222628111999932330} │ a7b2:8f58:4d07:6707:4189:80cf:92f5:902d │ 1950-07-14 │ @@ -117,6 +106,5 @@ SELECT * FROM generateRandom(11) LIMIT 3; `max_array_length` を十分に大きくして `generateRandom(generateRandomStructure(), [random seed], max_string_length, max_array_length)` を実行すると、複合型(`Array`、`Tuple`、`Map`、`Nested`)のネストの深さが最大 16 階層まで深くなることがあるため、非常に大きな出力が生成される可能性があります。 ::: - ## 関連コンテンツ {#related-content} - ブログ記事: [ClickHouse でランダムなデータを生成する](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md index ca73f3f638e..2617b8cfb7d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md @@ -10,20 +10,16 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # hdfs テーブル関数 {#hdfs-table-function} HDFS 上のファイルからテーブルを作成します。このテーブル関数は、[url](../../sql-reference/table-functions/url.md) および [file](../../sql-reference/table-functions/file.md) テーブル関数と同様です。 - - ## 構文 {#syntax} ```sql hdfs(URI, format, structure) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -32,8 +28,6 @@ hdfs(URI, format, structure) | `format` | ファイルの[フォーマット](/sql-reference/formats)。 | | `structure`| テーブルの構造。形式 `'column1_name column1_type, column2_name column2_type, ...'`。 | - - ## 返り値 {#returned_value} 指定されたファイル内のデータを読み書きするための、指定された構造を持つテーブル。 @@ -55,7 +49,6 @@ LIMIT 2 └─────────┴─────────┴─────────┘ ``` - ## パスでのグロブ {#globs_in_path} 
パスではグロブパターンを使用できます。ファイルは接頭辞や接尾辞だけでなく、パス全体のパターンに一致している必要があります。 @@ -110,7 +103,6 @@ SELECT count(*) FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') ``` - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 @@ -118,8 +110,6 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin - `_size` — ファイルのサイズ(バイト単位)。型: `Nullable(UInt64)`。サイズが不明な場合、値は `NULL`。 - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL`。 - - ## use_hive_partitioning 設定 {#hive-style-partitioning} `use_hive_partitioning` 設定値を 1 にすると、ClickHouse はパス(`/name=value/`)内の Hive スタイルのパーティショニングを検出し、クエリ内でパーティション列を仮想列として利用できるようにします。これらの仮想列の名前は、パーティションパス内の名前と同じですが、先頭に `_` が付きます。 @@ -132,15 +122,12 @@ Hive スタイルのパーティショニングで作成された仮想列を使 SELECT * FROM HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## ストレージ設定 {#storage-settings} - [hdfs_truncate_on_insert](operations/settings/settings.md#hdfs_truncate_on_insert) - 挿入前にファイルを切り詰められるようにします。デフォルトでは無効です。 - [hdfs_create_new_file_on_insert](operations/settings/settings.md#hdfs_create_new_file_on_insert) - フォーマットにサフィックスがある場合、挿入ごとに新しいファイルを作成できるようにします。デフォルトでは無効です。 - [hdfs_skip_empty_files](operations/settings/settings.md#hdfs_skip_empty_files) - 読み取り時に空のファイルをスキップできるようにします。デフォルトでは無効です。 - - ## 関連項目 {#related} - [仮想カラム](../../engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md index 0dfbd3cf3a4..a9563d14f1b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md @@ -7,21 +7,16 @@ title: 'hdfsCluster' doc_type: 'reference' --- 
- - # hdfsCluster テーブル関数 {#hdfscluster-table-function} 指定したクラスター内の複数ノードから、HDFS 上のファイルを並列に処理できます。イニシエーターでは、クラスター内のすべてのノードへの接続を確立し、HDFS のファイルパスに含まれるアスタリスクを展開して、各ファイルを動的に振り分けます。ワーカーノードでは、処理すべき次のタスクをイニシエーターに問い合わせ、そのタスクを処理します。これは、すべてのタスクが完了するまで繰り返されます。 - - ## 構文 {#syntax} ```sql hdfsCluster(cluster_name, URI, format, structure) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -31,14 +26,10 @@ hdfsCluster(cluster_name, URI, format, structure) | `format` | ファイルの[フォーマット](/sql-reference/formats)。 | | `structure` | テーブルの構造。形式: `'column1_name column1_type, column2_name column2_type, ...'`。 | - - ## 返される値 {#returned_value} 指定した構造を持ち、指定したファイル内のデータを読み取るためのテーブル。 - - ## 例 {#examples} 1. `cluster_simple` という名前の ClickHouse クラスターと、HDFS 上に次の URI を持つ複数のファイルがあるとします: @@ -68,7 +59,6 @@ FROM hdfsCluster('cluster_simple', 'hdfs://hdfs1:9000/{some,another}_dir/*', 'TS ファイル一覧に先頭ゼロ付きの数値範囲が含まれている場合は、各桁を個別に波かっこで囲む構文を用いるか、`?` を使用してください。 ::: - ## 関連項目 {#related} - [HDFS エンジン](../../engines/table-engines/integrations/hdfs.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md index 31ddce01b7b..b04114673fe 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md @@ -7,21 +7,16 @@ title: 'hudi' doc_type: 'reference' --- - - # hudi テーブル関数 {#hudi-table-function} Amazon S3 上の Apache [Hudi](https://hudi.apache.org/) テーブルに対して、読み取り専用のテーブルライクなインターフェースを提供します。 - - ## 構文 {#syntax} ```sql hudi(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) ``` - ## 引数 {#arguments} | Argument | Description | @@ -32,14 +27,10 @@ hudi(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,co | `structure` | テーブルの構造。形式は `'column1_name column1_type, column2_name column2_type, ...'`。 | | `compression` | 省略可能なパラメータ。指定可能な値は 
`none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst` です。既定では、圧縮形式はファイル拡張子によって自動検出されます。 | - - ## 返される値 {#returned_value} S3 上の指定した Hudi テーブルのデータを読み取るための、指定した構造を持つテーブル。 - - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 @@ -48,8 +39,6 @@ S3 上の指定した Hudi テーブルのデータを読み取るための、 - `_time` — ファイルの最終更新日時。型: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL` です。 - `_etag` — ファイルの ETag。型: `LowCardinality(String)`。ETag が不明な場合、値は `NULL` です。 - - ## 関連項目 {#related} - [Hudi エンジン](/engines/table-engines/integrations/hudi.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md index ec3ffb8aee5..1dc85a7f4cf 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md @@ -7,23 +7,18 @@ title: 'hudiCluster テーブル関数' doc_type: 'reference' --- - - # hudiCluster テーブル関数 {#hudicluster-table-function} これは [hudi](sql-reference/table-functions/hudi.md) テーブル関数の拡張機能です。 指定したクラスタ内の多数のノードを使って、Amazon S3 上の Apache [Hudi](https://hudi.apache.org/) テーブル内のファイルを並列処理できます。イニシエータでは、クラスタ内のすべてのノードへの接続を確立し、各ファイルを動的に割り当てます。ワーカーノードでは、次に処理すべきタスクをイニシエータに問い合わせて、そのタスクを処理します。すべてのタスクが完了するまで、これを繰り返します。 - - ## 構文 {#syntax} ```sql hudiCluster(cluster_name, URL[, aws_access_key_id, aws_secret_access_key][, format][, structure][, compression]) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -35,14 +30,10 @@ hudiCluster(cluster_name, URL[, aws_access_key_id, aws_secret_access_key][, form | `structure` | テーブルの構造。`'column1_name column1_type, column2_name column2_type, ...'` という形式で指定します。 | | `compression` | 省略可能なパラメータ。サポートされる値は `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst` です。既定では、圧縮形式はファイル拡張子から自動検出されます。 | - - ## 返される値 {#returned_value} S3 上の指定した Hudi テーブルに対し、クラスタからデータを読み取るための、指定した構造を持つテーブル。 - - ## 仮想カラム {#virtual-columns} - 
`_path` — ファイルへのパス。型: `LowCardinality(String)`。 @@ -51,8 +42,6 @@ S3 上の指定した Hudi テーブルに対し、クラスタからデータ - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL` です。 - `_etag` — ファイルの etag。型: `LowCardinality(String)`。etag が不明な場合、値は `NULL` です。 - - ## 関連項目 {#related} - [Hudi エンジン](engines/table-engines/integrations/hudi.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md index a3baa30cce4..7368f96c67c 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md @@ -1,5 +1,5 @@ --- -description: 'Amazon S3、Azure、HDFS、またはローカルに保存された Apache Iceberg テーブルに対して、読み取り専用のテーブルライクなインターフェイスを提供します。' +description: 'Amazon S3、Azure、HDFS、またはローカルに保存された Apache Iceberg テーブルに対して、読み取り専用のテーブル形式インターフェイスを提供します。' sidebar_label: 'iceberg' sidebar_position: 90 slug: /sql-reference/table-functions/iceberg @@ -7,13 +7,9 @@ title: 'iceberg' doc_type: 'reference' --- - - # iceberg テーブル関数 {#iceberg-table-function} -Amazon S3、Azure、HDFS、またはローカルに保存された Apache [Iceberg](https://iceberg.apache.org/) テーブルに対する読み取り専用のテーブル形式インターフェイスを提供します。 - - +Amazon S3、Azure、HDFS 上またはローカルに保存された Apache [Iceberg](https://iceberg.apache.org/) テーブルを、読み取り専用のテーブルとして扱うためのインターフェイスを提供します。 ## 構文 {#syntax} @@ -34,12 +30,12 @@ icebergLocal(named_collection[, option=value [,..]]) ## 引数 {#arguments} -引数の説明は、それぞれテーブル関数 `s3`、`azureBlobStorage`、`HDFS`、`file` の引数の説明と同様です。\ -`format` は、Iceberg テーブル内のデータファイルのフォーマットを表します。 +引数の説明は、対応するテーブル関数 `s3`、`azureBlobStorage`、`HDFS`、`file` における引数の説明と同様です。 +`format` は、Iceberg テーブル内のデータファイルの形式を表します。 -### 戻り値 {#returned-value} +### 返される値 {#returned-value} -指定された Iceberg テーブル内のデータを読み取るための、指定された構造を持つテーブルです。 +指定した Iceberg テーブルのデータを読み取るための、指定した構造を持つテーブルです。 ### 例 {#example} @@ -48,13 +44,13 @@ SELECT * FROM 
icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_tab ``` :::important -ClickHouse は現在、`icebergS3`、`icebergAzure`、`icebergHDFS`、`icebergLocal` テーブル関数および `IcebergS3`、`icebergAzure`、`IcebergHDFS`、`IcebergLocal` テーブルエンジンを通じて、Iceberg フォーマット v1 および v2 の読み取りをサポートしています。 +ClickHouse は現在、`icebergS3`、`icebergAzure`、`icebergHDFS`、`icebergLocal` テーブル関数および `IcebergS3`、`icebergAzure`、`IcebergHDFS`、`IcebergLocal` テーブルエンジンを介して、Iceberg フォーマット v1 および v2 の読み取りをサポートしています。 ::: ## 名前付きコレクションの定義 {#defining-a-named-collection} -URL および認証情報を保存するための名前付きコレクションを設定する例を次に示します。 +URL と認証情報を保存するための名前付きコレクションを構成する例を次に示します。 ```xml @@ -76,105 +72,143 @@ DESCRIBE icebergS3(iceberg_conf, filename = 'test_table') ``` -## スキーマ進化 {#schema-evolution} +## データカタログの使用 {#iceberg-writes-catalogs} -現時点では、CH の機能により、時間の経過とともにスキーマが変更された Iceberg テーブルを読み取ることができます。現在、列の追加・削除や列順の変更が行われたテーブルの読み取りをサポートしています。また、値が必須だった列を、NULL を許容する列に変更することもできます。加えて、単純型に対する許可された型変換もサポートしており、具体的には次のとおりです。   +Iceberg テーブルは、[REST Catalog](https://iceberg.apache.org/rest-catalog-spec/)、[AWS Glue Data Catalog](https://docs.aws.amazon.com/prescriptive-guidance/latest/serverless-etl-aws-glue/aws-glue-data-catalog.html)、[Unity Catalog](https://www.unitycatalog.io/) など、さまざまなデータカタログと併用できます。 -* int -> long -* float -> double -* decimal(P, S) -> decimal(P', S) ここで P' > P。 +:::important +カタログを使用する場合、ほとんどのユーザーは `DataLakeCatalog` データベースエンジンを使用することになるでしょう。これは ClickHouse をカタログに接続し、テーブルを検出できるようにします。このデータベースエンジンを使用すれば、`IcebergS3` テーブルエンジンで個々のテーブルを手動で作成する必要がなくなります。 +::: -現時点では、ネストされた構造や、配列および Map 内の要素型を変更することはできません。 +これらのカタログを使用するには、`IcebergS3` エンジンでテーブルを作成し、必要な設定を指定します。 +たとえば、MinIO ストレージと REST Catalog を使用する場合は次のとおりです。 +```sql +CREATE TABLE `database_name.table_name` +ENGINE = IcebergS3( + 'http://minio:9000/warehouse-rest/table_name/', + 'minio_access_key', + 'minio_secret_key' +) +SETTINGS + storage_catalog_type="rest", + storage_warehouse="demo", + object_storage_endpoint="http://minio:9000/warehouse-rest", + storage_region="us-east-1", + 
storage_catalog_url="http://rest:8181/v1" +``` -## パーティションプルーニング {#partition-pruning} +または、S3 と併用して AWS Glue Data Catalog を使う場合: -ClickHouse は Iceberg テーブルに対する SELECT クエリ実行時のパーティションプルーニングをサポートしており、不要なデータファイルをスキップすることでクエリ パフォーマンスを最適化できます。パーティションプルーニングを有効にするには、`use_iceberg_partition_pruning = 1` を設定します。Iceberg のパーティションプルーニングの詳細については、https://iceberg.apache.org/spec/#partitioning を参照してください。 +```sql +CREATE TABLE `my_database.my_table` +ENGINE = IcebergS3( + 's3://my-data-bucket/warehouse/my_database/my_table/', + 'aws_access_key', + 'aws_secret_key' +) +SETTINGS + storage_catalog_type = 'glue', + storage_warehouse = 'my_database', + object_storage_endpoint = 's3://my-data-bucket/', + storage_region = 'us-east-1', + storage_catalog_url = 'https://glue.us-east-1.amazonaws.com/iceberg/v1' +``` +## スキーマの進化 {#schema-evolution} -## タイムトラベル {#time-travel} +現時点では、CH を利用することで、時間の経過とともにスキーマが変更された iceberg テーブルを読み込むことができます。現在、カラムの追加・削除やカラム順の変更が行われたテーブルの読み取りをサポートしています。また、値が必須だったカラムを、NULL を許可するカラムに変更することもできます。さらに、単純な型に対する許可された型キャストもサポートしており、具体的には次のとおりです。   -ClickHouse は Iceberg テーブルに対するタイムトラベル機能をサポートしており、特定のタイムスタンプまたはスナップショット ID を指定して履歴データをクエリできます。 +* int -> long +* float -> double +* decimal(P, S) -> decimal(P', S) (P' > P の場合) + +現在のところ、ネストされた構造や、配列およびマップ内の要素の型を変更することはできません。 + +## パーティションプルーニング {#partition-pruning} + +ClickHouse は Iceberg テーブルに対する SELECT クエリでパーティションプルーニングをサポートしており、不要なデータファイルをスキップすることでクエリパフォーマンスを最適化できます。パーティションプルーニングを有効にするには、`use_iceberg_partition_pruning = 1` に設定します。Iceberg のパーティションプルーニングの詳細については、https://iceberg.apache.org/spec/#partitioning を参照してください。 +## タイムトラベル {#time-travel} +ClickHouse は Iceberg テーブルに対するタイムトラベルをサポートしており、特定のタイムスタンプまたはスナップショット ID を指定して過去のデータをクエリできます。 -## 削除行を含むテーブルの処理 {#deleted-rows} +## 削除済み行を含むテーブルの処理 {#deleted-rows} -現在、[position deletes](https://iceberg.apache.org/spec/#position-delete-files) を使用する Iceberg テーブルのみがサポートされています。 +現在サポートされているのは、[position deletes](https://iceberg.apache.org/spec/#position-delete-files) を使用する Iceberg テーブルのみです。 
-次の削除方式は **サポートされていません**。 +次の削除方法は**サポートされていません**: -* [Equality deletes](https://iceberg.apache.org/spec/#equality-delete-files) -* [Deletion vectors](https://iceberg.apache.org/spec/#deletion-vectors)(v3 で導入) +- [Equality deletes](https://iceberg.apache.org/spec/#equality-delete-files) +- [Deletion vectors](https://iceberg.apache.org/spec/#deletion-vectors)(v3 で導入) ### 基本的な使い方 {#basic-usage} ```sql -SELECT * FROM example_table ORDER BY 1 -SETTINGS iceberg_timestamp_ms = 1714636800000 + SELECT * FROM example_table ORDER BY 1 + SETTINGS iceberg_timestamp_ms = 1714636800000 ``` ```sql -SELECT * FROM example_table ORDER BY 1 -SETTINGS iceberg_snapshot_id = 3547395809148285433 + SELECT * FROM example_table ORDER BY 1 + SETTINGS iceberg_snapshot_id = 3547395809148285433 ``` -注意: 同じクエリ内で `iceberg_timestamp_ms` パラメータと `iceberg_snapshot_id` パラメータの両方を指定することはできません。 +注記: 同じクエリ内で `iceberg_timestamp_ms` パラメータと `iceberg_snapshot_id` パラメータを同時に指定することはできません。 -### 重要な考慮事項 {#important-considerations} -* **スナップショット** は通常、次のタイミングで作成されます。 +### 重要な考慮事項 {#important-considerations} +* **スナップショット** は通常、次のような場合に作成されます: * 新しいデータがテーブルに書き込まれたとき +* 何らかのデータコンパクションが実行されたとき -* 何らかのデータのコンパクション処理が実行されたとき - -* **スキーマ変更では通常スナップショットは作成されません** — このため、スキーマ進化が行われたテーブルでタイムトラベルを使用する場合に重要な挙動となります。 +* **スキーマ変更によってスナップショットが作成されることは通常ありません** - これは、スキーマ進化が行われたテーブルでタイムトラベルを使用する際の重要な挙動につながります。 -### 例シナリオ {#example-scenarios} +### サンプルシナリオ {#example-scenarios} -すべてのシナリオは Spark で記述されています。これは、CH がまだ Iceberg テーブルへの書き込みをサポートしていないためです。 +CH はまだ Iceberg テーブルへの書き込みをサポートしていないため、すべてのシナリオは Spark で記述されています。 #### シナリオ 1: 新しいスナップショットを伴わないスキーマ変更 {#scenario-1} -次の一連の操作を考えてみましょう: +次の一連の操作を考えてみます。 ```sql --- 2つのカラムを持つテーブルを作成 - CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example ( - order_number bigint, - product_code string - ) - USING iceberg - OPTIONS ('format-version'='2') - --- テーブルにデータを挿入 - INSERT INTO spark_catalog.db.time_travel_example VALUES - (1, 'Mars') + -- 2つのカラムを持つテーブルを作成 + CREATE TABLE IF NOT EXISTS 
spark_catalog.db.time_travel_example ( + order_number bigint, + product_code string + ) + USING iceberg + OPTIONS ('format-version'='2') - ts1 = now() // 疑似コードの一部 +- - テーブルにデータを挿入 + INSERT INTO spark_catalog.db.time_travel_example VALUES + (1, 'Mars') --- 新しいカラムを追加するためにテーブルを変更 - ALTER TABLE spark_catalog.db.time_travel_example ADD COLUMN (price double) + ts1 = now() // 疑似コードの一部 - ts2 = now() +- - 新しいカラムを追加するためにテーブルを変更 + ALTER TABLE spark_catalog.db.time_travel_example ADD COLUMN (price double) + + ts2 = now() --- テーブルにデータを挿入 - INSERT INTO spark_catalog.db.time_travel_example VALUES (2, 'Venus', 100) +- - テーブルにデータを挿入 + INSERT INTO spark_catalog.db.time_travel_example VALUES (2, 'Venus', 100) - ts3 = now() + ts3 = now() --- 各タイムスタンプでテーブルをクエリ - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts1; +- - 各タイムスタンプでテーブルをクエリ + SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts1; +------------+------------+ |order_number|product_code| +------------+------------+ | 1| Mars| +------------+------------+ - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts2; + SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts2; +------------+------------+ |order_number|product_code| @@ -182,7 +216,7 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 | 1| Mars| +------------+------------+ - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts3; + SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts3; +------------+------------+-----+ |order_number|product_code|price| @@ -194,15 +228,16 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 異なるタイムスタンプにおけるクエリ結果: -* ts1 と ts2 の場合: 元の 2 列のみが表示される -* ts3 の場合: 3 列すべてが表示され、1 行目の price は NULL になる +* ts1 および ts2 では: 元の 2 つのカラムのみが表示される +* ts3 では: 3 つすべてのカラムが表示され、1 行目の価格は NULL になる + -#### シナリオ 2: 過去と現在のスキーマの違い {#scenario-2} +#### シナリオ 2: 過去と現在のスキーマの差異 {#scenario-2} -現在時点で実行したタイムトラベルクエリでは、現在のテーブルとは異なるスキーマが表示される場合があります: 
+現在時点でタイムトラベルクエリを実行すると、テーブルの現在のスキーマとは異なるスキーマが表示される場合があります。 ```sql --- テーブルを作成 +-- テーブルを作成する CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example_2 ( order_number bigint, product_code string @@ -210,15 +245,15 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 USING iceberg OPTIONS ('format-version'='2') --- テーブルに初期データを挿入 +-- テーブルに初期データを挿入する INSERT INTO spark_catalog.db.time_travel_example_2 VALUES (2, 'Venus'); --- テーブルを変更して新しい列を追加 +-- 新しいカラムを追加するためにテーブルを変更する ALTER TABLE spark_catalog.db.time_travel_example_2 ADD COLUMN (price double); ts = now(); --- タイムスタンプ構文を使用して現在の時点でテーブルをクエリ +-- タイムスタンプ構文を使用して現在の時点でテーブルをクエリする SELECT * FROM spark_catalog.db.time_travel_example_2 TIMESTAMP AS OF ts; @@ -228,7 +263,7 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 | 2| Venus| +------------+------------+ --- 現在の時点でテーブルをクエリ +-- 現在の時点でテーブルをクエリする SELECT * FROM spark_catalog.db.time_travel_example_2; +------------+------------+-----+ |order_number|product_code|price| @@ -237,12 +272,12 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 +------------+------------+-----+ ``` -これは、`ALTER TABLE` が新しいスナップショットを作成しない一方で、Spark は現在のテーブルに対して、スナップショットではなく最新のメタデータファイルから `schema_id` の値を取得するために発生します。 +これは、`ALTER TABLE` は新しいスナップショットを作成しない一方で、Spark が現在のテーブルについてスナップショットではなく最新のメタデータファイルから `schema_id` の値を取得するために発生します。 -#### シナリオ 3: 過去と現在のスキーマの差異 {#scenario-3} +#### シナリオ 3: 過去のスキーマと現在のスキーマの違い {#scenario-3} -2 つ目の制約は、タイムトラベルを行っても、テーブルにまだ一切データが書き込まれていない時点の状態は取得できないという点です。 +2つ目の制約は、タイムトラベルを行う際、テーブルに一切データが書き込まれる前の状態は取得できないという点です。 ```sql -- テーブルを作成 @@ -259,63 +294,59 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 SELECT * FROM spark_catalog.db.time_travel_example_3 TIMESTAMP AS OF ts; -- エラーで終了:ts より古いスナップショットが見つかりません。 ``` -ClickHouse における動作は Spark と同様です。Spark の SELECT クエリを ClickHouse の SELECT クエリに置き換えるイメージで考えれば、同じように動作します。 +ClickHouse における挙動は Spark と同様です。頭の中で Spark の SELECT クエリを ClickHouse の SELECT クエリに置き換えて考えれば、同じように動作します。 ## メタデータファイルの解決 {#metadata-file-resolution} -ClickHouse 
で `iceberg` テーブル関数を使用する場合、システムは Iceberg テーブル構造を記述する正しい metadata.json ファイルを特定する必要があります。ここでは、この解決処理がどのように行われるかを説明します。 +ClickHouse で `iceberg` テーブル関数を使用する場合、Iceberg テーブルの構造を記述している正しい metadata.json ファイルを特定する必要があります。ここでは、この解決プロセスがどのように行われるかを説明します。 -### 候補検索(優先順位順) {#candidate-search} +### 候補検索(優先順) {#candidate-search} -1. **パスの直接指定**: - *`iceberg_metadata_file_path` を設定した場合、システムは Iceberg テーブルディレクトリパスと結合して、このパスをそのまま使用します。 +1. **直接パス指定**: +* `iceberg_metadata_file_path` を設定した場合、システムは Iceberg テーブルディレクトリパスと組み合わせて、このパスをそのまま使用します。 -* この設定が指定されている場合、他のすべての解決用設定は無視されます。 +* この設定が指定されている場合、他のすべての解決用の設定は無視されます。 -2. **テーブル UUID の一致**: - *`iceberg_metadata_table_uuid` が指定されている場合、システムは次のように動作します: - *`metadata` ディレクトリ内の `.metadata.json` ファイルのみを対象とする - *指定した UUID(大文字小文字を区別しない)と一致する `table-uuid` フィールドを含むファイルだけを残すようにフィルタリングする +2. **テーブル UUID の照合**: +* `iceberg_metadata_table_uuid` が指定されている場合、システムは次のように動作します: + * `metadata` ディレクトリ内の `.metadata.json` ファイルのみを対象とします。 + * 指定された UUID と一致する `table-uuid` フィールドを含むファイル(大文字/小文字は区別しない)でフィルタリングします。 3. 
**デフォルト検索**: - *上記いずれの設定も指定されていない場合、`metadata` ディレクトリ内のすべての `.metadata.json` ファイルが候補となります +* 上記いずれの設定も指定されていない場合、`metadata` ディレクトリ内のすべての `.metadata.json` ファイルが候補となります。 -### 最新ファイルの選択 {#most-recent-file} +### 最新のファイルの選択 {#most-recent-file} -上記のルールで候補ファイルを特定した後、システムはどのファイルが最も新しいかを判断します。 +上記のルールで候補ファイルを特定した後、システムは次のようにして最も新しいファイルを判定します。 -* `iceberg_recent_metadata_file_by_last_updated_ms_field` が有効な場合: +* `iceberg_recent_metadata_file_by_last_updated_ms_field` が有効な場合: * `last-updated-ms` の値が最大のファイルが選択されます -* それ以外の場合: +* それ以外の場合: * バージョン番号が最も大きいファイルが選択されます -* (バージョンは、`V.metadata.json` または `V-uuid.metadata.json` の形式のファイル名における `V` として現れます) +* (バージョンは、`V.metadata.json` または `V-uuid.metadata.json` という形式のファイル名内の `V` として表されます) -**注記**: ここで言及している設定はすべてテーブル関数の設定(グローバル設定やクエリレベル設定ではありません)であり、以下に示すように指定する必要があります。 +**注記**: ここで言及している設定はすべてテーブル関数の設定(グローバル設定やクエリレベルの設定ではない)であり、以下に示すように指定する必要があります。 ```sql SELECT * FROM iceberg('s3://bucket/path/to/iceberg_table', SETTINGS iceberg_metadata_table_uuid = 'a90eed4c-f74b-4e5b-b630-096fb9d09021'); ``` -**注意**: 通常は Iceberg カタログがメタデータの解決を行いますが、ClickHouse の `iceberg` テーブル関数は S3 に保存されたファイルを Iceberg テーブルとして直接解釈します。そのため、これらの解決ルールを理解しておくことが重要です。 +**補足**: 通常は Iceberg カタログ側でメタデータの解決が行われますが、ClickHouse の `iceberg` テーブル関数は S3 に保存されたファイルを直接 Iceberg テーブルとして解釈します。そのため、これらの解決ルールを理解しておくことが重要です。 ## メタデータキャッシュ {#metadata-cache} -`Iceberg` テーブルエンジンおよびテーブル関数は、マニフェストファイル、マニフェストリスト、メタデータ JSON の情報を格納するメタデータキャッシュをサポートします。キャッシュはメモリ上に保存されます。この機能は `use_iceberg_metadata_files_cache` 設定で制御されており、デフォルトで有効になっています。 - - - -## 別名 {#aliases} - -テーブル関数 `iceberg` は、現在 `icebergS3` のエイリアスです。 +`Iceberg` テーブルエンジンおよびテーブル関数は、マニフェストファイル、マニフェストリスト、メタデータ JSON の情報を保持するメタデータキャッシュに対応しています。キャッシュはメモリ上に保存されます。この機能は `use_iceberg_metadata_files_cache` によって制御されており、デフォルトで有効です。 +## エイリアス {#aliases} +現在、テーブル関数 `iceberg` は `icebergS3` のエイリアスになっています。 ## 仮想カラム {#virtual-columns} @@ -325,22 +356,21 @@ SELECT * FROM iceberg('s3://bucket/path/to/iceberg_table', - `_time` — ファイルの最終更新時刻。型: 
`Nullable(DateTime)`。時刻が不明な場合、値は `NULL` になります。 - `_etag` — ファイルの ETag。型: `LowCardinality(String)`。ETag が不明な場合、値は `NULL` になります。 +## iceberg テーブルへの書き込み {#writes-into-iceberg-table} +バージョン 25.7 以降、ClickHouse はユーザーの Iceberg テーブルに対する変更をサポートします。 -## Iceberg テーブルへの書き込み {#writes-into-iceberg-table} - -バージョン 25.7 から、ClickHouse はユーザーの Iceberg テーブルの変更をサポートしています。 - -現在、これは実験的な機能のため、まず有効化する必要があります。 +これは現在実験的な機能のため、まず明示的に有効化する必要があります。 ```sql SET allow_experimental_insert_into_iceberg = 1; ``` + ### テーブルの作成 {#create-iceberg-table} -空の Iceberg テーブルを新規作成するには、読み取り時と同じコマンドを使用しつつ、スキーマを明示的に指定します。 -書き込み処理は、Parquet、Avro、ORC など、Iceberg 仕様で定義されているすべてのデータ形式をサポートします。 +空の独自の Iceberg テーブルを作成するには、読み取り時に使用したものと同じコマンドを利用しつつ、スキーマを明示的に指定します。 +書き込み処理では、Parquet、Avro、ORC など、Iceberg 仕様で定義されているすべてのデータ形式をサポートします。 ### 例 {#example-iceberg-writes-create} @@ -353,12 +383,13 @@ CREATE TABLE iceberg_writes_example ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') ``` -注記: バージョンヒントファイルを作成するには、`iceberg_use_version_hint` 設定を有効にします。 -metadata.json ファイルを圧縮する場合は、`iceberg_metadata_compression_method` 設定でコーデック名を指定します。 +注意: version hint ファイルを作成するには、`iceberg_use_version_hint` 設定を有効にします。 +metadata.json ファイルを圧縮する場合は、`iceberg_metadata_compression_method` 設定で使用するコーデック名を指定します。 + ### INSERT {#writes-inserts} -新しいテーブルを作成した後は、通常の ClickHouse 構文を使用してデータを挿入できます。 +新しいテーブルを作成した後、通常の ClickHouse 構文を使用してデータを挿入できます。 ### 例 {#example-iceberg-writes-insert} @@ -380,16 +411,17 @@ x: Ivanov y: 993 ``` + ### DELETE {#iceberg-writes-delete} -merge-on-read 形式で不要な行を削除することも、ClickHouse でサポートされています。 +merge-on-read フォーマットで不要な行を削除することも、ClickHouse でサポートされています。 このクエリは、position delete ファイルを含む新しいスナップショットを作成します。 -注意: 将来、他の Iceberg エンジン(Spark など)でテーブルを読み込みたい場合は、設定項目 `output_format_parquet_use_custom_encoder` と `output_format_parquet_parallel_encoding` を無効にする必要があります。 -これは、Spark が Parquet のフィールド ID によってこれらのファイルを読み込む一方で、これらのフラグが有効な場合、ClickHouse は現在フィールド ID の書き込みをサポートしていないためです。 +注記: 将来、他の Iceberg エンジン(Spark 
など)でテーブルを読み取りたい場合は、`output_format_parquet_use_custom_encoder` と `output_format_parquet_parallel_encoding` の設定を無効にする必要があります。 +これは、Spark は Parquet の field-id によってこれらのファイルを読み取る一方で、ClickHouse はこれらのフラグが有効な場合、field-id の書き込みを現在サポートしていないためです。 この挙動は将来的に修正する予定です。 -### Example {#example-iceberg-writes-delete} +### 例 {#example-iceberg-writes-delete} ```sql ALTER TABLE iceberg_writes_example DELETE WHERE x != 'Ivanov'; @@ -404,9 +436,10 @@ x: Ivanov y: 993 ``` + ### スキーマの進化 {#iceberg-writes-schema-evolution} -ClickHouse では、単純な型(タプル型・配列型・マップ型以外)のカラムを追加・削除・変更できます。 +ClickHouse では、単純な型(タプル型、配列型、マップ型以外)のカラムを追加、削除、または変更できます。 ### 例 {#example-iceberg-writes-evolution} @@ -440,42 +473,39 @@ SELECT * FROM iceberg_writes_example FORMAT VERTICAL; -Row 1: +行 1: ────── x: Ivanov y: 993 z: ᴺᵁᴸᴸ -``` - -ALTER TABLE iceberg_writes_example DROP COLUMN z; -SHOW CREATE TABLE iceberg_writes_example; -┌─statement─────────────────────────────────────────────────┐ - -1. │ CREATE TABLE default.iceberg_writes_example ↴│ +ALTER TABLE iceberg_writes_example DROP COLUMN z; +SHOW CREATE TABLE iceberg_writes_example; + ┌─statement─────────────────────────────────────────────────┐ +1. 
│ CREATE TABLE default.iceberg_writes_example ↴│ │↳( ↴│ │↳ `x` Nullable(String), ↴│ │↳ `y` Nullable(Int64) ↴│ │↳) ↴│ - │↳ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') │ + │↳ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') │ └───────────────────────────────────────────────────────────┘ SELECT * -FROM iceberg_writes_example +FROM iceberg_writes_example FORMAT VERTICAL; -Row 1: +行 1: ────── x: Ivanov y: 993 +``` -```` -### コンパクション {#iceberg-writes-compaction} +### 圧縮処理(コンパクション) {#iceberg-writes-compaction} -ClickHouseはIcebergテーブルのコンパクションをサポートしています。現在、メタデータの更新と同時に、位置削除ファイルをデータファイルにマージできます。以前のスナップショットIDとタイムスタンプは変更されないため、タイムトラベル機能は同じ値で引き続き使用可能です。 +ClickHouse は Iceberg テーブルのコンパクションをサポートしています。現在は、メタデータを更新しつつ、position delete ファイルをデータファイルにマージできます。以前のスナップショット ID とタイムスタンプは変更されないため、タイムトラベル機能は同じ値で引き続き使用できます。 -使用方法: +使用方法: ```sql SET allow_experimental_iceberg_compaction = 1 @@ -486,25 +516,14 @@ SELECT * FROM iceberg_writes_example FORMAT VERTICAL; -Row 1: +行 1: ────── x: Ivanov y: 993 -```` - - -## カタログを利用するテーブル {#iceberg-writes-catalogs} - -前述のすべての書き込み機能は、REST カタログおよび Glue カタログでも利用できます。 -これらを使用するには、`IcebergS3` エンジンでテーブルを作成し、必要な設定を指定します。 - -```sql -CREATE TABLE `database_name.table_name` ENGINE = IcebergS3('http://minio:9000/warehouse-rest/table_name/', 'minio_access_key', 'minio_secret_key') -SETTINGS storage_catalog_type="rest", storage_warehouse="demo", object_storage_endpoint="http://minio:9000/warehouse-rest", storage_region="us-east-1", storage_catalog_url="http://rest:8181/v1", ``` ## 関連項目 {#see-also} * [Iceberg エンジン](/engines/table-engines/integrations/iceberg.md) -* [Iceberg クラスター テーブル関数](/sql-reference/table-functions/icebergCluster.md) +* [Iceberg クラスターテーブル関数](/sql-reference/table-functions/icebergCluster.md) \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md index 
c7b7210d15c..6715a64fac4 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md @@ -7,16 +7,12 @@ title: 'icebergCluster' doc_type: 'reference' --- - - # icebergCluster テーブル関数 {#icebergcluster-table-function} これは、[iceberg](/sql-reference/table-functions/iceberg.md) テーブル関数の拡張です。 指定されたクラスター内の複数のノードから Apache [Iceberg](https://iceberg.apache.org/) のファイルを並列処理できるようにします。イニシエーターはクラスター内のすべてのノードに接続し、各ファイルを動的に割り当てます。ワーカー ノードは、処理すべき次のタスクをイニシエーターに問い合わせてから、それを処理します。これは、すべてのタスクが完了するまで繰り返されます。 - - ## 構文 {#syntax} ```sql @@ -30,7 +26,6 @@ icebergHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) icebergHDFSCluster(cluster_name, named_collection[, option=value [,..]]) ``` - ## 引数 {#arguments} * `cluster_name` — リモートおよびローカルサーバーへのアドレスと接続パラメータの集合を構成するために使用されるクラスター名。 @@ -46,7 +41,6 @@ icebergHDFSCluster(cluster_name, named_collection[, option=value [,..]]) SELECT * FROM icebergS3Cluster('cluster_simple', 'http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test') ``` - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md index 03035b74a0a..4adcc153c00 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md @@ -5,12 +5,8 @@ title: 'loop' doc_type: 'reference' --- - - # loop テーブル関数 {#loop-table-function} - - ## 構文 {#syntax} ```sql @@ -20,7 +16,6 @@ SELECT ... FROM loop(table); SELECT ... FROM loop(other_table_function(...)); ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -29,14 +24,10 @@ SELECT ... 
FROM loop(other_table_function(...)); | `table` | テーブル名。 | | `other_table_function(...)` | 他のテーブル関数。この場合の例: `SELECT * FROM loop(numbers(10));` において、`other_table_function(...)` は `numbers(10)`。 | - - ## 戻り値 {#returned_values} クエリ結果を返し続ける無限ループ。 - - ## 例 {#examples} ClickHouse からデータを取得する: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md index 8b2553564cb..d42f0011b09 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md @@ -7,20 +7,14 @@ title: 'merge' doc_type: 'reference' --- - - # merge テーブル関数 {#merge-table-function} 一時的な [Merge](../../engines/table-engines/special/merge.md) テーブルを作成します。 テーブルのスキーマは、元となるテーブルの列の和集合と、共通の型を導出することで定義されます。 [Merge](../../engines/table-engines/special/merge.md) テーブルエンジンと同じ仮想列を使用できます。 - - ## 構文 {#syntax} - - ```sql merge(['db_name',] 'tables_regexp') ``` @@ -32,7 +26,6 @@ merge(['db_name',] 'tables_regexp') | `db_name` | 指定可能な値(省略可能。既定値は `currentDatabase()`):
- データベース名、
- データベース名の文字列を返す定数式(例: `currentDatabase()`)、
- `REGEXP(expression)`。ここで `expression` は DB 名にマッチする正規表現。 | | `tables_regexp` | 指定された 1 つまたは複数の DB 内のテーブル名にマッチする正規表現。 | - ## 関連項目 {#related} - [Merge](../../engines/table-engines/special/merge.md) テーブルエンジン diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md index 44757a5748d..c3c1a743153 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md @@ -8,21 +8,16 @@ title: 'mergeTreeIndex' doc_type: 'reference' --- - - # mergeTreeIndex テーブル関数 {#mergetreeindex-table-function} MergeTree テーブルのインデックスおよびマークファイルの内容を表します。内部状態を調査する目的で使用できます。 - - ## 構文 {#syntax} ```sql mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -32,8 +27,6 @@ mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) | `with_marks` | 結果にマークを含むカラムを含めるかどうか。 | | `with_minmax` | 結果に min-max インデックスを含めるかどうか。 | - - ## 返される値 {#returned_value} 次の列を持つテーブルオブジェクトです。ソーステーブルのプライマリインデックスおよび min-max インデックス(有効な場合)の値を持つ列、ソーステーブルのデータパーツ内の存在しうるすべてのファイルに対するマーク(有効な場合)の値を持つ列、さらに仮想列から構成されます。 @@ -44,8 +37,6 @@ mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) Marks 列には、データパーツに対象の列が存在しない場合、またはそのサブストリームのいずれかについてマークが書き込まれていない場合(例:コンパクトパーツ)に、`(NULL, NULL)` の値が含まれることがあります。 - - ## 使用例 {#usage-example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md index 919d2e5ebbd..2c8455eace0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md @@ -8,21 +8,16 @@ title: 'mergeTreeProjection' doc_type: 'reference' --- - - # mergeTreeProjection テーブル関数 {#mergetreeprojection-table-function} MergeTree テーブル内の特定のプロジェクションの内容を表します。内部の状態を調査・確認するために使用できます。 - - ## 構文 {#syntax} ```sql mergeTreeProjection(database, table, projection) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -31,14 +26,10 @@ mergeTreeProjection(database, table, projection) | `table` | プロジェクションを読み取るテーブル名。 | | `projection` | 読み取るプロジェクション。 | - - ## 返される値 {#returned_value} 指定されたプロジェクションで定義された列を持つテーブルオブジェクト。 - - ## 使用例 {#usage-example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md index 733284de514..9f6e1119f32 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md @@ -7,21 +7,16 @@ title: 'mongodb' doc_type: 'reference' --- - - # mongodb テーブル関数 {#mongodb-table-function} リモートの MongoDB サーバーに保存されているデータに対して `SELECT` クエリを実行できるようにします。 - - ## 構文 {#syntax} ```sql mongodb(host:port, database, collection, user, password, structure[, options[, oid_columns]]) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -57,13 +52,10 @@ mongodb(uri, collection, structure[, oid_columns]) | `structure` | この関数から返される ClickHouse テーブルのスキーマ。 | | `oid_columns` | WHERE 句で `oid` として扱う列をカンマ区切りで指定したリスト。デフォルトは `_id`。 | - ## 返される値 {#returned_value} 元の MongoDB テーブルと同じ列を持つテーブルオブジェクトです。 - - ## 例 {#examples} MongoDB データベース `test` に `my_collection` という名前のコレクションが定義されており、そこにいくつかのドキュメントを挿入するとします。 @@ -106,7 +98,6 @@ SELECT * FROM mongodb( ) ``` - ## 関連項目 {#related} - [`MongoDB` テーブルエンジン](engines/table-engines/integrations/mongodb.md) diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md index 914054746d8..055c71fa27d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md @@ -7,21 +7,16 @@ title: 'mysql' doc_type: 'reference' --- - - # mysql テーブル関数 {#mysql-table-function} リモートの MySQL サーバー上のデータに対して、`SELECT` および `INSERT` クエリを実行できるようにします。 - - ## 構文 {#syntax} ```sql mysql({host:port, database, table, user, password[, replace_query, on_duplicate_clause] | named_collection[, option=value [,..]]}) ``` - ## 引数 {#arguments} | Argument | Description | @@ -52,7 +47,6 @@ SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'us SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password'); ``` - ## 返される値 {#returned_value} 元の MySQL テーブルと同じカラムを持つテーブルオブジェクト。 @@ -65,8 +59,6 @@ MySQL の一部のデータ型は、異なる ClickHouse の型にマッピン `INSERT` クエリでは、テーブル関数 `mysql(...)` と、カラム名リスト付きのテーブル名を区別するために、キーワード `FUNCTION` または `TABLE FUNCTION` を必ず使用する必要があります。以下の例を参照してください。 ::: - - ## 例 {#examples} MySQL のテーブル: @@ -150,7 +142,6 @@ SELECT * FROM mysql('host:port', 'database', 'table', 'user', 'password') WHERE id > (SELECT max(id) FROM mysql_copy); ``` - ## 関連項目 {#related} - [MySQL テーブルエンジン](../../engines/table-engines/integrations/mysql.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md index 85e7c867785..00848a14ec7 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md @@ -7,33 +7,24 @@ title: 'null' doc_type: 'reference' --- - - # null テーブル関数 
{#null-table-function} 指定した構造を持つ一時テーブルを、[Null](../../engines/table-engines/special/null.md) テーブルエンジンで作成します。`Null` エンジンの特性に従い、テーブルのデータは破棄され、クエリの実行が完了すると同時にテーブル自体も即座に削除されます。この関数は、テストやデモンストレーション用のクエリを作成しやすくするために使用されます。 - - ## 構文 {#syntax} ```sql null('structure') ``` - ## 引数 {#argument} - `structure` — 列とその型のリスト。[String](../../sql-reference/data-types/string.md)。 - - ## 返される値 {#returned_value} 指定された構造を持つ一時的な `Null` エンジンのテーブル。 - - ## 例 {#example} `null` 関数を使用したクエリ: @@ -50,7 +41,6 @@ INSERT INTO t SELECT * FROM numbers_mt(1000000000); DROP TABLE IF EXISTS t; ``` - ## 関連項目 {#related} - [Null テーブルエンジン](../../engines/table-engines/special/null.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md index 12c74b83b16..1da9daf2adf 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md @@ -7,14 +7,10 @@ title: 'odbc' doc_type: 'reference' --- - - # odbc テーブル関数 {#odbc-table-function} [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity) 経由で接続されたテーブルを返します。 - - ## 構文 {#syntax} ```sql @@ -23,7 +19,6 @@ odbc(データソース, 外部テーブル) odbc(名前付きコレクション) ``` - ## 引数 {#arguments} | Argument | Description | @@ -38,8 +33,6 @@ ODBC 接続を安全に実装するために、ClickHouse は別プログラム 外部テーブルのうち値が `NULL` のフィールドは、基になるデータ型のデフォルト値に変換されます。たとえば、リモートの MySQL テーブルフィールドが `INT NULL` 型の場合、0(ClickHouse の `Int32` データ型におけるデフォルト値)に変換されます。 - - ## 使用例 {#usage-example} **ODBC を介してローカルの MySQL インストールからデータを取得する** @@ -117,7 +110,6 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') └────────┴──────────────┴───────┴────────────────┘ ``` - ## 関連項目 {#see-also} - [ODBC 辞書](/sql-reference/dictionaries#dbms) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md index cfb37a2cbfe..4aeac952e79 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md @@ -9,15 +9,12 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # paimon テーブル関数 {#paimon-table-function} Amazon S3、Azure、HDFS、またはローカルに保存された Apache [Paimon](https://paimon.apache.org/) テーブルに対して、読み取り専用のテーブルライクなインターフェースを提供します。 - - ## 構文 {#syntax} ```sql @@ -32,7 +29,6 @@ paimonHDFS(path_to_table, [,format] [,compression_method]) paimonLocal(path_to_table, [,format] [,compression_method]) ``` - ## 引数 {#arguments} 引数の説明は、それぞれのテーブル関数 `s3`、`azureBlobStorage`、`HDFS`、`file` における引数の説明と同一です。 @@ -42,8 +38,6 @@ paimonLocal(path_to_table, [,format] [,compression_method]) 指定された Paimon テーブルからデータを読み取るための、指定された構造を持つテーブルが返されます。 - - ## 名前付きコレクションの定義 {#defining-a-named-collection} 次の例は、URL と認証情報を保存するための名前付きコレクションの設定方法を示しています。 @@ -67,13 +61,10 @@ SELECT * FROM paimonS3(paimon_conf, filename = 'test_table') DESCRIBE paimonS3(paimon_conf, filename = 'test_table') ``` - ## エイリアス {#aliases} テーブル関数 `paimon` は、現在 `paimonS3` のエイリアスになっています。 - - ## 仮想列 {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 @@ -82,8 +73,6 @@ DESCRIBE paimonS3(paimon_conf, filename = 'test_table') - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL` となります。 - `_etag` — ファイルの etag。型: `LowCardinality(String)`。etag が不明な場合、値は `NULL` となります。 - - ## サポートされるデータ型 {#data-types-supported} | Paimon データ型 | ClickHouse データ型 @@ -106,8 +95,6 @@ DESCRIBE paimonS3(paimon_conf, filename = 'test_table') |ARRAY |Array | |MAP |Map | - - ## サポートされるパーティション {#partition-supported} Paimon のパーティションキーでサポートされるデータ型: * `CHAR` @@ -125,8 +112,6 @@ Paimon のパーティションキーでサポートされるデータ型: * `FLOAT` * `DOUBLE` - - ## 関連項目 {#see-also} * [Paimon 
クラスターテーブル関数](/sql-reference/table-functions/paimonCluster.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md index 092aaa458f8..e0b27157472 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md @@ -9,7 +9,6 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # paimonCluster テーブル関数 {#paimoncluster-table-function} @@ -18,8 +17,6 @@ import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; 指定したクラスタ内の複数のノードで、Apache [Paimon](https://paimon.apache.org/) のファイルを並列処理できます。イニシエーターはクラスタ内のすべてのノードへの接続を確立し、各ファイルの処理を動的に割り当てます。ワーカーノードは、処理すべき次のタスクをイニシエーターに問い合わせて処理を行います。この処理は、すべてのタスクが完了するまで繰り返されます。 - - ## 構文 {#syntax} ```sql @@ -30,7 +27,6 @@ paimonAzureCluster(cluster_name, connection_string|storage_account_url, containe paimonHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) ``` - ## 引数 {#arguments} - `cluster_name` — リモートおよびローカルサーバーへのアドレスと接続パラメータのセットを構成するために使用されるクラスタ名。 @@ -40,8 +36,6 @@ paimonHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) 指定された Paimon テーブル内のクラスタからデータを読み取るための、指定した構造を持つテーブルが返されます。 - - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md index 933aa3c6c51..8d47b94ae08 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md @@ -7,21 +7,16 @@ title: 'postgresql' doc_type: 'reference' --- - - # postgresql 
テーブル関数 {#postgresql-table-function} リモートの PostgreSQL サーバー上に保存されたデータに対して、`SELECT` および `INSERT` クエリを実行できます。 - - ## 構文 {#syntax} ```sql postgresql({host:port, database, table, user, password[, schema, [, on_conflict]] | named_collection[, option=value [,..]]}) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -36,8 +31,6 @@ postgresql({host:port, database, table, user, password[, schema, [, on_conflict] 引数は [named collections](operations/named-collections.md) を使用して渡すこともできます。この場合、`host` と `port` は個別に指定する必要があります。この方法を本番環境で使用することを推奨します。 - - ## 返される値 {#returned_value} 元の PostgreSQL テーブルと同じ列を持つテーブルオブジェクト。 @@ -46,8 +39,6 @@ postgresql({host:port, database, table, user, password[, schema, [, on_conflict] `INSERT` クエリで、テーブル関数 `postgresql(...)` と、列名リストを伴うテーブル名の指定とを区別するには、キーワード `FUNCTION` または `TABLE FUNCTION` を使用する必要があります。以下の例を参照してください。 ::: - - ## 実装の詳細 {#implementation-details} PostgreSQL 側での `SELECT` クエリは、読み取り専用の PostgreSQL トランザクション内で `COPY (SELECT ...) TO STDOUT` として実行され、各 `SELECT` クエリの後にコミットされます。 @@ -78,7 +69,6 @@ SELECT name FROM postgresql(`postgres1:5431|postgres2:5432`, 'postgres_database' PostgreSQL の辞書ソースで、レプリカの優先度指定をサポートします。マップ内の数値が大きいほど優先度は低くなります。最も高い優先度は `0` です。 - ## 例 {#examples} PostgreSQL のテーブルの例: @@ -157,7 +147,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgrsql_user', 'password', 'nice.schema'); ``` - ## 関連 {#related} - [PostgreSQL テーブルエンジン](../../engines/table-engines/integrations/postgresql.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md index 3981203330f..cd8d7fe444f 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md @@ -7,14 +7,10 @@ title: 'prometheusQuery' doc_type: 
'reference' --- - - # prometheusQuery テーブル関数 {#prometheusquery-table-function} TimeSeries テーブルのデータを使用して Prometheus のクエリを評価します。 - - ## 構文 {#syntax} ```sql @@ -23,7 +19,6 @@ prometheusQuery(db_name.time_series_table, 'promql_query', evaluation_time) prometheusQuery('time_series_table', 'promql_query', evaluation_time) ``` - ## Arguments {#arguments} - `db_name` - TimeSeries テーブルが存在するデータベースの名前。 @@ -31,8 +26,6 @@ prometheusQuery('time_series_table', 'promql_query', evaluation_time) - `promql_query` - [PromQL 構文](https://prometheus.io/docs/prometheus/latest/querying/basics/) で記述されたクエリ。 - `evaluation_time` - 評価時刻のタイムスタンプ。クエリを現在時刻で評価するには、`evaluation_time` に `now()` を使用します。 - - ## 返される値 {#returned_value} この関数は、パラメータ `promql_query` に渡されたクエリの結果型に応じて、返される列が異なります。 @@ -44,8 +37,6 @@ prometheusQuery('time_series_table', 'promql_query', evaluation_time) | scalar | scalar ValueType | prometheusQuery(mytable, '1h30m') | | string | string String | prometheusQuery(mytable, '"abc"') | - - ## 例 {#example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md index db30cc1ae7e..6ba2477a7ca 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md @@ -7,14 +7,10 @@ title: 'prometheusQueryRange' doc_type: 'reference' --- - - # prometheusQuery テーブル関数 {#prometheusquery-table-function} 複数の評価時刻にわたって、TimeSeries テーブルのデータを使用して Prometheus クエリを評価します。 - - ## 構文 {#syntax} ```sql @@ -23,7 +19,6 @@ prometheusQueryRange(db_name.time_series_table, 'promql_query', start_time, end_ prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, step) ``` - ## 引数 {#arguments} - `db_name` - TimeSeries テーブルが存在するデータベースの名前。 @@ -33,8 +28,6 @@ 
prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, - `end_time` - 評価範囲の終了時刻。 - `step` - `start_time` から `end_time` まで(両端を含む)評価時刻を反復する際に使用されるステップ間隔。 - - ## 戻り値 {#returned_value} この関数は、引数 `promql_query` に渡されたクエリの結果の型に応じて、異なる列を返します。 @@ -46,8 +39,6 @@ prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, | scalar | scalar ValueType | prometheusQuery(mytable, '1h30m') | | string | string String | prometheusQuery(mytable, '"abc"') | - - ## 使用例 {#example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md index 2569ac28606..aad81737b24 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md @@ -7,21 +7,16 @@ title: 'redis' doc_type: 'reference' --- - - # redis テーブル関数 {#redis-table-function} このテーブル関数により、ClickHouse を [Redis](https://redis.io/) と統合できます。 - - ## 構文 {#syntax} ```sql redis(host:port, key, structure[, db_index[, password[, pool_size]]]) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -39,14 +34,10 @@ redis(host:port, key, structure[, db_index[, password[, pool_size]]]) 現在のところ、`redis` テーブル関数では [Named collections](/operations/named-collections.md) はサポートされていません。 - - ## 返される値 {#returned_value} Redis のキーをキー列とし、その他のカラムをまとめて 1 つの Redis の値として格納するテーブルオブジェクトです。 - - ## 使用例 {#usage-example} Redis からの読み込み: @@ -68,7 +59,6 @@ INSERT INTO TABLE FUNCTION redis( 'key String, v1 String, v2 UInt32') values ('1', '1', 1); ``` - ## 関連項目 {#related} - [`Redis` テーブルエンジン](/engines/table-engines/integrations/redis.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md index 022f1fe5e40..0b63c486e80 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md @@ -7,16 +7,12 @@ title: 'remote, remoteSecure' doc_type: 'reference' --- - - # remote, remoteSecure テーブル関数 {#remote-remotesecure-table-function} テーブル関数 `remote` は、[Distributed](../../engines/table-engines/special/distributed.md) テーブルを作成することなく、オンデマンドでリモートサーバーにアクセスできるようにします。テーブル関数 `remoteSecure` は、セキュアな接続を使用する点を除き `remote` と同じです。 どちらの関数も `SELECT` および `INSERT` クエリで使用できます。 - - ## 構文 {#syntax} ```sql @@ -28,7 +24,6 @@ remoteSecure(addresses_expr, [db.table, user [, password], sharding_key]) remoteSecure(named_collection[, option=value [,..]]) ``` - ## パラメータ {#parameters} | 引数 | 説明 | @@ -42,14 +37,10 @@ remoteSecure(named_collection[, option=value [,..]]) 引数は [named collections](operations/named-collections.md)(名前付きコレクション)を使用して渡すこともできます。 - - ## 戻り値 {#returned-value} リモートサーバー上にあるテーブルです。 - - ## 使用方法 {#usage} テーブル関数 `remote` および `remoteSecure` はリクエストごとに接続を再確立するため、代わりに `Distributed` テーブルを使用することを推奨します。また、ホスト名が設定されている場合には名前解決が行われ、複数のレプリカに対して処理を行う際のエラーはカウントされません。大量のクエリを処理する場合は、常に事前に `Distributed` テーブルを作成し、`remote` テーブル関数は使用しないでください。 @@ -81,7 +72,6 @@ localhost example01-01-1,example01-02-1 ``` - ## 例 {#examples} ### リモートサーバーからデータを取得する: {#selecting-data-from-a-remote-server} @@ -171,7 +161,6 @@ remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD') SELECT * from imdb.actors ``` - ## グロブ {#globs-in-addresses} 中括弧 `{ }` 内のパターンは、シャードの集合を生成し、レプリカを指定するために使用されます。複数の中括弧のペアがある場合、それぞれの集合の直積が生成されます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md index 852a4dfce41..336d5b516c0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md @@ -11,7 +11,6 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # s3 テーブル関数 {#s3-table-function} [Amazon S3](https://aws.amazon.com/s3/) および [Google Cloud Storage](https://cloud.google.com/storage/) 上のファイルに対して、SELECT/INSERT を行うためのテーブル状のインターフェイスを提供します。このテーブル関数は [hdfs 関数](../../sql-reference/table-functions/hdfs.md) と似ていますが、S3 固有の機能を備えています。 @@ -20,8 +19,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; `s3 table function` を [`INSERT INTO...SELECT`](../../sql-reference/statements/insert-into#inserting-the-results-of-select) とともに使用する場合、データはストリーミング方式で読み取りおよび挿入されます。S3 から連続的にブロックが読み込まれて宛先テーブルにプッシュされる間、メモリ上には少数のデータブロックしか存在しません。 - - ## 構文 {#syntax} ```sql @@ -72,13 +69,10 @@ and not ~~[https://storage.cloud.google.com](https://storage.cloud.google.com)~~ | `no_sign_request` | デフォルトでは無効です。 | | `expiration_window_seconds` | デフォルト値は 120 です。 | - ## 返される値 {#returned_value} 指定した構造を持ち、指定したファイル内のデータを読み書きするためのテーブル。 - - ## 例 {#examples} S3 上のファイル `https://datasets-documentation.s3.eu-west-3.amazonaws.com/aapl_stock.csv` に格納されたテーブルから先頭の 5 行を選択します。 @@ -133,7 +127,6 @@ FROM s3( ::: - ## 使用方法 {#usage} S3 上に、次の URI の複数のファイルがあるとします: @@ -216,7 +209,6 @@ SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bu SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip'); ``` - 注意。サーバー構成ファイルで独自の URL マッパーを指定することもできます。例: ```sql @@ -252,7 +244,6 @@ SELECT count(*) FROM s3(creds, url='https://s3-object-url.csv') ``` - ## パーティション化書き込み {#partitioned-write} ### パーティション戦略 {#partition-strategy} @@ -299,7 +290,6 @@ INSERT INTO TABLE FUNCTION その結果、データは異なるバケットにある 3 つのファイル `my_bucket_1/file.csv`、`my_bucket_10/file.csv`、`my_bucket_20/file.csv` に書き込まれます。 - ## 
公開バケットへのアクセス {#accessing-public-buckets} ClickHouse は、さまざまなソースから認証情報を取得しようとします。 @@ -316,7 +306,6 @@ FROM s3( LIMIT 5; ``` - ## S3 認証情報の使用 (ClickHouse Cloud) {#using-s3-credentials-clickhouse-cloud} 非公開バケットの場合、`aws_access_key_id` および `aws_secret_access_key` を関数に渡すことができます。例えば次のようにします。 @@ -337,7 +326,6 @@ SELECT count() FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.co その他の例は[こちら](/cloud/data-sources/secure-s3#access-your-s3-bucket-with-the-clickhouseaccess-role)でご覧いただけます。 - ## アーカイブの操作 {#working-with-archives} S3 上に次の URI を持つ複数のアーカイブファイルがあるとします: @@ -363,13 +351,10 @@ TAR ZIP および TAR アーカイブは、サポートされている任意のストレージロケーションからアクセスできますが、7Z アーカイブは ClickHouse がインストールされているローカルファイルシステムからのみ読み取り可能です。 ::: - ## データの挿入 {#inserting-data} 行は新しいファイルにのみ挿入できます。マージサイクルやファイル分割処理は存在しません。一度ファイルが書き込まれると、その後の挿入は失敗します。詳細は[こちら](/integrations/s3#inserting-data)を参照してください。 - - ## 仮想カラム {#virtual-columns} - `_path` — ファイルへのパス。型: `LowCardinality(String)`。アーカイブの場合は、`"{path_to_archive}::{path_to_file_inside_archive}"` という形式でアーカイブ内のファイルパスを示します。 @@ -377,8 +362,6 @@ ZIP および TAR アーカイブは、サポートされている任意のス - `_size` — ファイルサイズ(バイト単位)。型: `Nullable(UInt64)`。ファイルサイズが不明な場合、この値は `NULL` になります。アーカイブの場合は、アーカイブ内のファイルの非圧縮ファイルサイズを示します。 - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、この値は `NULL` になります。 - - ## use_hive_partitioning setting {#hive-style-partitioning} これは、読み込み時に ClickHouse が Hive スタイルでパーティション分割されたファイルをパースするためのヒントです。書き込みには影響しません。読み込みと書き込みを対称に(整合するように)したい場合は、`partition_strategy` 引数を使用します。 @@ -391,7 +374,6 @@ ZIP および TAR アーカイブは、サポートされている任意のス SELECT * FROM s3('s3://data/path/date=*/country=*/code=*/*.parquet') WHERE date > '2020-01-01' AND country = 'Netherlands' AND code = 42; ``` - ## リクエスタ支払いバケットへのアクセス {#accessing-requester-pays-buckets} リクエスタ支払いバケットにアクセスするには、すべてのリクエストでヘッダー `x-amz-request-payer = requester` を渡す必要があります。これは、パラメータ `headers('x-amz-request-payer' = 'requester')` を s3 関数に渡すことで実現します。たとえば次のように指定します。 @@ -410,15 +392,12 @@ FROM 
s3('https://coiled-datasets-rp.s3.us-east-1.amazonaws.com/1trc/measurements Peak memory usage: 192.27 KiB. ``` - ## ストレージ設定 {#storage-settings} - [s3_truncate_on_insert](operations/settings/settings.md#s3_truncate_on_insert) - 挿入前にファイルを truncate(内容を消去)できるようにします。デフォルトでは無効です。 - [s3_create_new_file_on_insert](operations/settings/settings.md#s3_create_new_file_on_insert) - フォーマットにサフィックスがある場合、挿入ごとに新しいファイルを作成できるようにします。デフォルトでは無効です。 - [s3_skip_empty_files](operations/settings/settings.md#s3_skip_empty_files) - 読み取り時に空のファイルをスキップできるようにします。デフォルトで有効です。 - - ## ネストされた Avro スキーマ {#nested-avro-schemas} **ネストされたレコード** を含み、かつファイル間で構造が異なる Avro ファイル(例: 一部のファイルではネストされたオブジェクト内に追加のフィールドがある)を読み込む際、ClickHouse は次のようなエラーを返す場合があります: @@ -447,7 +426,6 @@ FROM s3('https://bucket-name/*.avro', 'Avro') SETTINGS schema_inference_mode='union'; ``` - ## 関連項目 {#related} - [S3 エンジン](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md index 2b8bf967f9c..8a0969a8331 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md @@ -7,16 +7,12 @@ title: 's3Cluster' doc_type: 'reference' --- - - # s3Cluster テーブル関数 {#s3cluster-table-function} これは [s3](sql-reference/table-functions/s3.md) テーブル関数の拡張です。 指定したクラスタ内の多数のノードで、[Amazon S3](https://aws.amazon.com/s3/) および [Google Cloud Storage](https://cloud.google.com/storage/) 上のファイルを並列処理できます。イニシエーター側では、クラスタ内のすべてのノードへの接続を確立し、S3 ファイルパス中のアスタリスクを展開し、各ファイルを動的に割り当てます。ワーカーノード側では、処理すべき次のタスクをイニシエーターに問い合わせて、そのタスクを処理します。すべてのタスクが完了するまで、これを繰り返します。 - - ## 構文 {#syntax} ```sql @@ -24,7 +20,6 @@ s3Cluster(cluster_name, url[, NOSIGN | access_key_id, secret_access_key,[session s3Cluster(cluster_name, named_collection[, option=value [,..]]) ``` - ## 引数 
{#arguments} | Argument | Description | @@ -49,14 +44,10 @@ s3Cluster(cluster_name, named_collection[, option=value [,..]]) | `no_sign_request` | デフォルトで無効です。 | | `expiration_window_seconds` | デフォルト値は 120 です。 | - - ## 返される値 {#returned_value} 指定した構造を持ち、指定したファイル内のデータを読み書きするためのテーブル。 - - ## 例 {#examples} 次の例では、`cluster_simple` クラスター内のすべてのノードを使用して、`/root/data/clickhouse` および `/root/data/database/` ディレクトリ内のすべてのファイルからデータを選択します。 @@ -91,19 +82,14 @@ SELECT count(*) FROM s3Cluster( ) ``` - ## プライベートバケットとパブリックバケットへのアクセス {#accessing-private-and-public-buckets} ユーザーは、`s3` 関数について[こちら](/sql-reference/table-functions/s3#accessing-public-buckets)で説明されているのと同様の方法を利用できます。 - - ## パフォーマンス最適化 {#optimizing-performance} `s3` 関数のパフォーマンスを最適化する方法の詳細は、[詳細ガイド](/integrations/s3/performance) を参照してください。 - - ## 関連項目 {#related} - [S3 エンジン](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md index ad12e944337..37b7b146693 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md @@ -7,34 +7,25 @@ title: 'sqlite' doc_type: 'reference' --- - - # sqlite テーブル関数 {#sqlite-table-function} [SQLite](../../engines/database-engines/sqlite.md) データベースに保存されているデータに対してクエリを実行できます。 - - ## 構文 {#syntax} ```sql sqlite('db_path', 'table_name') ``` - ## 引数 {#arguments} - `db_path` — SQLite データベースファイルへのパス。[String](../../sql-reference/data-types/string.md)。 - `table_name` — SQLite データベース内のテーブル名。[String](../../sql-reference/data-types/string.md)。 - - ## 戻り値 {#returned_value} - 元の `SQLite` テーブルと同じ列を持つテーブルオブジェクト。 - - ## 例 {#example} クエリ: @@ -53,7 +44,6 @@ SELECT * FROM sqlite('sqlite.db', 'table1') ORDER BY col2; └───────┴──────┘ ``` - ## 関連項目 {#related} - 
[SQLite](../../engines/table-engines/integrations/sqlite.md) テーブルエンジン diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md index bd575ebf5ec..26f988dd540 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md @@ -7,15 +7,11 @@ title: 'timeSeriesSelector' doc_type: 'reference' --- - - # timeSeriesSelector テーブル関数 {#timeseriesselector-table-function} `TimeSeries` テーブルから、セレクタによってフィルタされ、指定された区間内のタイムスタンプを持つ時系列データを読み取ります。 この関数は [range selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#range-vector-selectors) に類似していますが、[instant selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#instant-vector-selectors) を実装するためにも使用されます。 - - ## 構文 {#syntax} ```sql @@ -24,7 +20,6 @@ timeSeriesSelector(db_name.time_series_table, 'instant_query', min_time, max_tim timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) ``` - ## 引数 {#arguments} - `db_name` - TimeSeries テーブルが存在するデータベース名。 @@ -33,8 +28,6 @@ timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) - `min_time` - 開始タイムスタンプ(開始時刻を含む)。 - `max_time` - 終了タイムスタンプ(終了時刻を含む)。 - - ## 返される値 {#returned_value} この関数は 3 つの列を返します: @@ -44,8 +37,6 @@ timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) 返されるデータの順序は特に保証されません。 - - ## 例 {#example} ```sql diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md index 9965cac46eb..1a70097b9ec 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md +++ 
b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md @@ -10,22 +10,18 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # url テーブル関数 {#url-table-function} `url` 関数は、指定された `format` および `structure` を使用して、`URL` からテーブルを作成します。 `url` 関数は、[URL](../../engines/table-engines/special/url.md) テーブル内のデータに対する `SELECT` および `INSERT` クエリで使用できます。 - - ## 構文 {#syntax} ```sql url(URL [,format] [,structure] [,headers]) ``` - ## パラメータ {#parameters} | Parameter | Description | @@ -35,14 +31,10 @@ url(URL [,format] [,structure] [,headers]) | `structure` | `'UserID UInt64, Name String'` 形式のテーブル構造。カラム名と型を決定します。型: [String](../../sql-reference/data-types/string.md)。 | | `headers` | `'headers('key1'='value1', 'key2'='value2')'` 形式のヘッダー。HTTP 呼び出しで使用するヘッダーを設定できます。 | - - ## 戻り値 {#returned_value} 指定された形式と構造を持ち、定義された `URL` からのデータを含むテーブル。 - - ## 例 {#examples} `String` 列と [UInt32](../../sql-reference/data-types/int-uint.md) 型の列を含むテーブルの先頭 3 行を、[CSV](/interfaces/formats/CSV) 形式で応答する HTTP サーバーから取得します。 @@ -59,14 +51,11 @@ INSERT INTO FUNCTION url('http://127.0.0.1:8123/?query=INSERT+INTO+test_table+FO SELECT * FROM test_table; ``` - ## URL のグロブ {#globs-in-url} 波かっこ `{ }` 内のパターンは、シャードの集合を生成するか、フェイルオーバーアドレスを指定するために使用されます。サポートされるパターンの種類と例については、[remote](remote.md#globs-in-addresses) 関数の説明を参照してください。 パターン内の文字 `|` はフェイルオーバーアドレスを指定するために使用されます。これらはパターン内で列挙された順序どおりに試行されます。生成されるアドレス数は、[glob_expansion_max_elements](../../operations/settings/settings.md#glob_expansion_max_elements) 設定によって制限されます。 - - ## 仮想カラム {#virtual-columns} - `_path` — `URL` へのパス。型: `LowCardinality(String)`。 @@ -75,8 +64,6 @@ SELECT * FROM test_table; - `_time` — ファイルの最終更新時刻。型: `Nullable(DateTime)`。時刻が不明な場合、値は `NULL` になります。 - `_headers` — HTTP レスポンスヘッダー。型: `Map(LowCardinality(String), LowCardinality(String))`。 - - ## use_hive_partitioning 設定 {#hive-style-partitioning} `use_hive_partitioning` の値が 1 
の場合、ClickHouse はパス内の Hive 形式のパーティショニング(`/name=value/`)を検出し、クエリ内でパーティション列を仮想列として使用できるようにします。これらの仮想列はパーティションパス内の名前と同じですが、先頭に `_` が付きます。 @@ -89,20 +76,15 @@ Hive 形式のパーティショニングで作成された仮想列を使用す SELECT * FROM url('http://data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## ストレージ設定 {#storage-settings} - [engine_url_skip_empty_files](/operations/settings/settings.md#engine_url_skip_empty_files) - 読み込み時に空のファイルをスキップします。デフォルトでは無効です。 - [enable_url_encoding](/operations/settings/settings.md#enable_url_encoding) - URI 内のパスのデコード/エンコードを有効/無効にします。デフォルトでは有効です。 - - ## 権限 {#permissions} `url` 関数には `CREATE TEMPORARY TABLE` 権限が必要です。そのため、[readonly](/operations/settings/permissions-for-queries#readonly) = 1 に設定されているユーザーでは動作しません。少なくとも `readonly` = 2 が必要です。 - - ## 関連項目 {#related} - [仮想列](/engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md index 6d1e8cf154b..b54bdc240ac 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md @@ -7,21 +7,16 @@ title: 'urlCluster' doc_type: 'reference' --- - - # urlCluster テーブル関数 {#urlcluster-table-function} 指定したクラスタ内の複数ノードで、URL から取得したファイルを並列処理できます。イニシエーター側では、クラスタ内のすべてのノードへの接続を確立し、URL のファイルパス中のアスタリスクを展開して、各ファイルを動的に割り当てます。ワーカーノードでは、処理すべき次のタスクをイニシエーターに問い合わせ、そのタスクを処理します。これは、すべてのタスクが完了するまで繰り返されます。 - - ## 構文 {#syntax} ```sql urlCluster(cluster_name, URL, format, structure) ``` - ## 引数 {#arguments} | 引数 | 説明 | @@ -31,14 +26,10 @@ urlCluster(cluster_name, URL, format, structure) | `format` | データの[フォーマット](/sql-reference/formats)。型: [String](../../sql-reference/data-types/string.md)。 | | `structure` | `'UserID UInt64, Name String'` 
の形式のテーブル構造。カラム名と型を決定する。型: [String](../../sql-reference/data-types/string.md)。 | - - ## 戻り値 {#returned_value} 指定されたフォーマットと構造を持ち、定義された `URL` から取得したデータを含むテーブル。 - - ## 例 {#examples} `String` 列と [UInt32](../../sql-reference/data-types/int-uint.md) 型の列を含むテーブルについて、[CSV](/interfaces/formats/CSV) 形式で応答する HTTP サーバー経由で先頭 3 行を取得します。 @@ -65,14 +56,11 @@ if __name__ == "__main__": SELECT * FROM urlCluster('cluster_simple','http://127.0.0.1:12345', CSV, 'column1 String, column2 UInt32') ``` - ## URL のグロブ {#globs-in-url} 波括弧 `{ }` 内のパターンは、シャード集合の生成やフェイルオーバーアドレスの指定に使用されます。サポートされているパターンの型と例については、[remote](remote.md#globs-in-addresses) 関数の説明を参照してください。 パターン内の文字 `|` はフェイルオーバーアドレスを指定するために使用されます。これらはパターン内で列挙された順に試行されます。生成されるアドレスの数は、[glob_expansion_max_elements](../../operations/settings/settings.md#glob_expansion_max_elements) 設定によって制限されます。 - - ## 関連項目 {#related} - [HDFS エンジン](/engines/table-engines/integrations/hdfs) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md index f12d1fea999..9b838f77553 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md @@ -8,8 +8,6 @@ title: 'values' doc_type: 'reference' --- - - # Values テーブル関数 {#values-table-function} `Values` テーブル関数を使用すると、一時的なストレージを作成し、その列を値で埋めることができます。簡単なテストやサンプルデータの生成に便利です。 @@ -18,8 +16,6 @@ doc_type: 'reference' Values は大文字小文字を区別しない関数です。つまり、`VALUES` と `values` のどちらも有効です。 ::: - - ## 構文 {#syntax} `VALUES` テーブル関数の基本構文は次のとおりです。 @@ -39,7 +35,6 @@ VALUES( ) ``` - ## 引数 {#arguments} - `column1_name Type1, ...`(省略可)。列名と型を指定する[String](/sql-reference/data-types/string) 型。 @@ -52,14 +47,10 @@ VALUES( 各値は新しい行として扱われます。詳細は[例](#examples)セクションを参照してください。 ::: - - ## 戻り値 {#returned-value} - 指定された値を含む一時テーブルを返します。 - - ## 使用例 {#examples} ```sql title="Query" @@ -196,7 +187,6 @@ FROM 
VALUES( └──────────┘ ``` - ## 関連項目 {#see-also} - [Values 形式](/interfaces/formats/Values) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md index 6001ccee62e..4cadf027fdc 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md @@ -7,33 +7,24 @@ title: 'view' doc_type: 'reference' --- - - # view テーブル関数 {#view-table-function} 副問い合わせをテーブルに変換します。この関数はビューを実装します([CREATE VIEW](/sql-reference/statements/create/view) を参照)。生成されるテーブルはデータを保存せず、指定された `SELECT` クエリのみを保持します。テーブルから読み出すとき、ClickHouse はこのクエリを実行し、結果から不要なカラムをすべて削除します。 - - ## 構文 {#syntax} ```sql view(subquery) ``` - ## 引数 {#arguments} - `subquery` — `SELECT` クエリ。 - - ## 戻り値 {#returned_value} - テーブル - - ## 例 {#examples} 入力テーブル: @@ -74,7 +65,6 @@ SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name)); SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)); ``` - ## 関連項目 {#related} - [View テーブルエンジン](/engines/table-engines/special/view/) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md index bdfea8de189..f54fdc628c0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md @@ -9,15 +9,12 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # ytsaurus テーブル関数 {#ytsaurus-table-function} このテーブル関数を使用すると、YTsaurus クラスターからデータを読み込むことができます。 - - ## 構文 {#syntax} ```sql @@ -30,7 +27,6 @@ YTsaurus テーブル関数の使用を有効にするには、[allow_experi `set allow_experimental_ytsaurus_table_function = 1` コマンドを実行します。 ::: - ## 引数 {#arguments} - 
`http_proxy_url` — YTsaurus の HTTP プロキシの URL。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/transactions.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/transactions.md index ad6fda58cd2..a85ca39f976 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/transactions.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/transactions.md @@ -8,11 +8,8 @@ doc_type: 'guide' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # トランザクション(ACID)対応 {#transactional-acid-support} - - ## ケース 1: MergeTree* ファミリーの 1 つのテーブルの 1 つのパーティションへの INSERT {#case-1-insert-into-one-partition-of-one-table-of-the-mergetree-family} 挿入される行が 1 つのブロックとしてまとめて挿入される場合(注を参照)、これはトランザクション特性(ACID)を満たします: @@ -22,35 +19,25 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - 永続性 (Durable): 成功した INSERT は、クライアントに応答する前にファイルシステムに書き込まれます。これは単一レプリカまたは複数レプリカ(`insert_quorum` 設定で制御)に対して行われ、ClickHouse は OS に対してストレージメディア上のファイルシステムデータの同期を要求できます(`fsync_after_insert` 設定で制御)。 - マテリアライズドビューが関与している場合、1 つのステートメントで複数のテーブルに対する INSERT が可能です(クライアントからの INSERT は、関連するマテリアライズドビューを持つテーブルに対して行われます)。 - - ## ケース 2: MergeTree* ファミリーの 1 つのテーブルに対する、複数パーティションへの INSERT {#case-2-insert-into-multiple-partitions-of-one-table-of-the-mergetree-family} 上記のケース 1 と同様ですが、次の点が異なります: - テーブルに多数のパーティションがあり、INSERT が多くのパーティションにまたがる場合、各パーティションへの挿入はそれぞれ独立したトランザクションとして扱われます - - ## ケース 3: MergeTree* ファミリーの 1 つの分散テーブルへの INSERT {#case-3-insert-into-one-distributed-table-of-the-mergetree-family} 上のケース 1 と同様ですが、次の点が異なります: - Distributed テーブルへの INSERT は全体としてはトランザクションとして扱われませんが、各シャードへの挿入はトランザクションとして扱われます - - ## ケース 4: Buffer テーブルの使用 {#case-4-using-a-buffer-table} - Buffer テーブルへの INSERT 操作では、アトミック性 (Atomicity)、分離性 (Isolation)、一貫性 (Consistency)、永続性 (Durability) のいずれも保証されません - - ## ケース5: async_insert の使用 {#case-5-using-async_insert} 上記のケース1と同様ですが、次の点が異なります: - `async_insert` 
が有効で、`wait_for_async_insert` が 1(デフォルト)に設定されている場合にはアトミック性が保証されますが、`wait_for_async_insert` が 0 に設定されている場合にはアトミック性は保証されません。 - - ## Notes {#notes} - クライアントからあるデータフォーマットで挿入された行は、次の場合に 1 つのブロックにまとめられます: - 挿入フォーマットが行ベース(CSV、TSV、Values、JSONEachRow など)の場合で、データが `max_insert_block_size` 行(デフォルトでは約 1 000 000 行)未満、または並列パースを使用する場合(デフォルトで有効)には `min_chunk_bytes_for_parallel_parsing` バイト(デフォルトでは 10 MB)未満であるとき @@ -63,8 +50,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - ACID における「一貫性」は分散システムのセマンティクスを対象としていません。分散システムの一貫性については https://jepsen.io/consistency を参照してください。これは(`select_sequential_consistency` などの)別の設定によって制御されます - この説明では、複数テーブルやマテリアライズドビュー、複数の SELECT などに対してフル機能のトランザクションを提供する新しいトランザクション機能は扱っていません(次の「Transactions, Commit, and Rollback」のセクションを参照してください) - - ## トランザクション、コミット、ロールバック {#transactions-commit-and-rollback} @@ -204,7 +189,6 @@ ENGINE = MergeTree ORDER BY n ``` - ```response Ok. ``` @@ -322,7 +306,6 @@ is_readonly: 1 state: RUNNING ``` - ## 詳細情報 {#more-details} より包括的なテスト内容や進捗の最新情報については、この [meta issue](https://github.com/ClickHouse/ClickHouse/issues/48794) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md index e80b89ecab2..ac7c921c183 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md @@ -7,15 +7,11 @@ title: 'ウィンドウ関数' doc_type: 'reference' --- - - # ウィンドウ関数 {#window-functions} ウィンドウ関数を使用すると、現在の行と関連する行の集合を対象に計算を実行できます。 実行できる計算の一部は集約関数で行えるものと似ていますが、ウィンドウ関数では行が 1 つの結果行にグループ化されないため、各行は個別の行として返されます。 - - ## 標準ウィンドウ関数 {#standard-window-functions} ClickHouse は、ウィンドウおよびウィンドウ関数を定義するための標準的な文法をサポートしています。以下の表は、各機能が現在サポートされているかどうかを示します。 @@ -36,8 +32,6 @@ ClickHouse は、ウィンドウおよびウィンドウ関数を定義するた | `lag/lead(value, offset)` | ✅
次のいずれかの回避策も使用できます:
1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`、または `lead` の場合は `following` を使用します。
2) ウィンドウフレームを考慮する、類似の `lagInFrame/leadInFrame` を使用します。`lag/lead` と同じ動作を得るには、`rows between unbounded preceding and unbounded following` を使用します。 | | ntile(buckets) | ✅
次のようにウィンドウを指定します: (partition by x order by y rows between unbounded preceding and unbounded following)。 | - - ## ClickHouse固有のウィンドウ関数 {#clickhouse-specific-window-functions} 以下のClickHouse固有のウィンドウ関数も提供されています: @@ -51,7 +45,6 @@ ClickHouse は、ウィンドウおよびウィンドウ関数を定義するた - 1行目:`0` - $i$行目:${\text{metric}_i - \text{metric}_{i-1} \over \text{timestamp}_i - \text{timestamp}_{i-1}} * \text{interval}$ - ## 構文 {#syntax} ```text @@ -97,7 +90,6 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column] * [`lagInFrame(x)`](./lagInFrame.md) - 順序付けられたフレーム内で、現在の行から指定された物理オフセットだけ前の行で評価された値を返します。 * [`leadInFrame(x)`](./leadInFrame.md) - 順序付けられたフレーム内で、現在の行から指定されたオフセットだけ後ろの行で評価された値を返します。 - ## 例 {#examples} ウィンドウ関数をどのように利用できるか、いくつかの例を見ていきます。 @@ -196,7 +188,6 @@ SELECT FROM salaries; ``` - ```text ┌─player──────────┬─salary─┬─team──────────────────────┬─teamMax─┬───diff─┐ │ Charles Juarez │ 190000 │ New Coreystad Archdukes │ 190000 │ 0 │ @@ -280,7 +271,6 @@ ORDER BY └──────────┴───────┴───────┴──────────────┘ ``` - ```sql -- 短縮形 - 境界式なし、ORDER BY なし -- `ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING` と同等 @@ -355,7 +345,6 @@ ORDER BY └──────────┴───────┴───────┴────────────────────┴──────────────┘ ``` - ```sql -- フレームはパーティションの先頭から現在行までに制限されるが、順序は降順 SELECT @@ -451,7 +440,6 @@ ORDER BY value ASC; ``` - ┌─part_key─┬─value─┬─order─┬─frame_values─┬─rn_1─┬─rn_2─┬─rn_3─┬─rn_4─┐ │ 1 │ 1 │ 1 │ [5,4,3,2,1] │ 5 │ 5 │ 5 │ 2 │ │ 1 │ 2 │ 2 │ [5,4,3,2] │ 4 │ 4 │ 4 │ 2 │ @@ -520,7 +508,6 @@ ORDER BY value ASC; ``` - ┌─frame_values_1─┬─second_value─┐ │ [1] │ ᴺᵁᴸᴸ │ │ [1,2] │ 2 │ @@ -532,7 +519,6 @@ ORDER BY ``` ``` - ## 実例 {#real-world-examples} 以下は、現場でよくある課題を解決する例です。 @@ -646,7 +632,6 @@ CREATE TABLE sensors ENGINE = Memory; ``` - insert into sensors values('cpu_temp', '2020-01-01 00:00:00', 87), ('cpu_temp', '2020-01-01 00:00:01', 77), ('cpu_temp', '2020-01-01 00:00:02', 93), @@ -725,7 +710,6 @@ CREATE TABLE sensors ENGINE = Memory; ``` - insert into sensors 
values('ambient_temp', '2020-01-01 00:00:00', 16), ('ambient_temp', '2020-01-01 12:00:00', 16), ('ambient_temp', '2020-01-02 11:00:00', 9), @@ -769,7 +753,6 @@ ORDER BY └──────────────┴─────────────────────┴───────┴─────────────────────────┘ ```` - ## 参考 {#references} ### GitHub Issues {#github-issues} @@ -804,8 +787,6 @@ https://dev.mysql.com/doc/refman/8.0/en/window-functions-usage.html https://dev.mysql.com/doc/refman/8.0/en/window-functions-frames.html - - ## 関連コンテンツ {#related-content} - ブログ: [ClickHouse における時系列データの扱い方](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md b/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md index 1c2d05ceed4..c2572191dc0 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md @@ -19,14 +19,10 @@ title: 'レッスン - デバッグインサイト' description: '遅いクエリ、メモリエラー、接続問題、設定の問題など、ClickHouse でよく発生する問題に対する代表的な解決策をまとめています。' --- - - # ClickHouse の運用: コミュニティによるデバッグインサイト {#clickhouse-operations-community-debugging-insights} *このガイドは、コミュニティミートアップから得られた知見を集約したコレクションの一部です。より多くの実運用に基づく解決策や知見については、[特定の問題別に閲覧](./community-wisdom.md)できます。* *高い運用コストにお悩みですか?[コスト最適化](./cost-optimization.md)に関するコミュニティのインサイトガイドを参照してください。* - - ## 重要なシステムテーブル {#essential-system-tables} これらのシステムテーブルは、本番環境でのデバッグに不可欠です。 @@ -86,7 +82,6 @@ GROUP BY database, table ORDER BY count() DESC; ``` - ## 本番環境でよく起きる問題 {#common-production-issues} ### ディスク容量の問題 {#disk-space-problems} @@ -126,7 +121,6 @@ WHERE is_done = 0; まずは小規模なデータセットでスキーマ変更を検証してください。 - ## メモリとパフォーマンス {#memory-and-performance} ### 外部集約 {#external-aggregation} @@ -170,7 +164,6 @@ Distributed テーブルを使用する場合は、一時データの蓄積を * [カスタムパーティションキー](/engines/table-engines/mergetree-family/custom-partitioning-key) - ## クイックリファレンス {#quick-reference} | 課題 | 検知 | 
解決策 | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md b/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md index e120e1d2586..ff216b1c596 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md @@ -21,24 +21,18 @@ title: '実践から学ぶマテリアライズドビュー' description: 'マテリアライズドビューの実例と、発生しうる問題およびその解決策' --- - - # マテリアライズドビュー: 諸刃の剣になり得る理由 {#materialized-views-the-double-edged-sword} *このガイドは、コミュニティミートアップで得られた知見をまとめたコレクションの一部です。より実践的なソリューションや知見については、[特定の問題別に閲覧](./community-wisdom.md)できます。* *大量のパーツがデータベースのパフォーマンスを低下させていませんか?[Too Many Parts](./too-many-parts.md) コミュニティインサイトガイドを参照してください。* *[マテリアライズドビュー](/materialized-views) についてさらに詳しく学びましょう。* - - ## 10倍ストレージアンチパターン {#storage-antipattern} **実際の本番環境で発生した問題:** *「マテリアライズドビューを使っていました。生ログテーブルは約20GBでしたが、そのログテーブルを元にしたビューが190GBまで膨れ上がり、生テーブルのほぼ10倍のサイズになってしまいました。これは、属性ごとに1行ずつ作成しており、各ログが最大で10個の属性を持つ可能性があったために起きました。」* **ルール:** `GROUP BY` によって削減される行数よりも多くの行が生成される場合、それはマテリアライズドビューではなく、高コストなインデックスを作っていることになります。 - - ## 本番環境マテリアライズドビューの健全性検証 {#mv-health-validation} このクエリを使うと、マテリアライズドビューを作成する前に、それがデータを圧縮するのか、あるいは肥大化させてしまうのかを予測できます。実際のテーブルとカラムに対して実行し、「190GB への肥大化」シナリオを回避してください。 @@ -62,7 +56,6 @@ WHERE your_filter_conditions; -- aggregation_ratioが10%未満の場合、良好な圧縮率が得られます ``` - ## マテリアライズドビューが問題になるとき {#mv-problems} **監視すべき警告サイン:** @@ -73,7 +66,5 @@ WHERE your_filter_conditions; `system.query_log` を使用してクエリ実行時間の推移を追跡することで、マテリアライズドビューを追加する前後の挿入パフォーマンスを比較できます。 - - ## 動画資料 {#video-sources} - [ClickHouse at CommonRoom - Kirill Sapchuk](https://www.youtube.com/watch?v=liTgGiTuhJE) - 「マテリアライズドビューに過度に熱狂した」事例と「20GB から 190GB への爆発的増加」事例の出典 \ No newline at end of file diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md b/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md 
index 44d8446b688..1425093bcab 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md @@ -21,15 +21,11 @@ title: '事例集 - パフォーマンス最適化' description: 'パフォーマンス最適化戦略の実践的な実例集' --- - - # パフォーマンス最適化: コミュニティで検証された手法 {#performance-optimization} *このガイドは、コミュニティミートアップから得られた知見をまとめたコレクションの一部です。より実践的な解決策や知見については、[問題別のトピック](./community-wisdom.md)を参照してください。* *マテリアライズドビューでお困りですか?[Materialized Views](./materialized-views.md) に関するコミュニティの知見をまとめたガイドをご覧ください。* *クエリが遅く、さらに多くの例が必要な場合は、[Query Optimization](/optimize/query-optimization) ガイドも参照してください。* - - ## カーディナリティの低い順に並べる {#cardinality-ordering} ClickHouse のプライマリインデックスは、カーディナリティの低いカラムを先頭に配置することで最も効率よく動作し、大きなデータチャンクを効果的にスキップできます。キーの後半にカーディナリティの高いカラムを配置することで、それらのチャンク内でのきめ細かなソートが可能になります。異なる値の数が少ないカラム(status、category、country など)から始め、異なる値の数が多いカラム(user_id、timestamp、session_id など)で終わるようにしてください。 @@ -37,8 +33,6 @@ ClickHouse のプライマリインデックスは、カーディナリティの - [プライマリキーの選び方](/best-practices/choosing-a-primary-key) - [プライマリインデックス](/primary-indexes) - - ## 時間の粒度は重要 {#time-granularity} ORDER BY 句でタイムスタンプを使用する場合は、カーディナリティと精度のトレードオフを考慮してください。マイクロ秒精度のタイムスタンプは非常に高いカーディナリティ(ほぼ 1 行につき 1 つのユニークな値)を生み出し、ClickHouse のスパースなプライマリインデックスの有効性を低下させます。一方、タイムスタンプを丸めるとカーディナリティを低く抑えられ、より効果的なインデックススキップが可能になりますが、その代わりに時間ベースのクエリの精度が失われます。 @@ -68,7 +62,6 @@ FROM github.github_events WHERE created_at >= '2024-01-01'; ``` - ## 個々のクエリに注目し、平均値に頼らない {#focus-on-individual-queries-not-averages} ClickHouse のパフォーマンスをデバッグする際は、クエリ時間の平均値やシステム全体のメトリクスに頼らないでください。代わりに、特定のクエリがなぜ遅いのかを特定します。システム全体としては平均パフォーマンスが良好でも、個々のクエリがメモリ枯渇、不適切なフィルタリング、高カーディナリティな処理などの影響を受けている場合があります。 @@ -77,8 +70,6 @@ ClickHouse の CTO である Alexey によれば、*「正しいやり方は、 クエリが遅いときは、平均値だけを見て終わりにしてはいけません。「なぜこの特定のクエリは遅かったのか?」と問い、実際のリソース使用パターンを確認してください。 - - ## メモリと行スキャン {#memory-and-row-scanning} Sentry は開発者を第一に考えたエラートラッキングプラットフォームで、400 万人以上の開発者からの数十億件のイベントを毎日処理しています。Sentry の重要な知見は次のとおりです: 
*「この特定の状況でメモリ使用量を決定づけるのは、グルーピングキーのカーディナリティである」* — 高カーディナリティの集約は、行スキャンではなくメモリ枯渇によってパフォーマンスを低下させます。 @@ -95,7 +86,6 @@ WHERE cityHash64(user_id) % 10 = 0 -- 常に同一の10%のユーザー これにより、すべてのクエリで同じユーザーが現れ、期間をまたいでも一貫した結果が得られます。重要なポイントは、`cityHash64()` が同じ入力に対して常に同じハッシュ値を生成することです。そのため、`user_id = 12345` は常に同じ値にハッシュされ、そのユーザーは 10% サンプルに必ず含まれるか、あるいはまったく含まれないかのどちらかになり、クエリ間で出たり消えたりすることがなくなります。 - ## Sentry のビットマスク最適化 {#bit-mask-optimization} 高カーディナリティ列(URL など)で集約を行う場合、各ユニーク値ごとに個別の集約状態がメモリ上に作成されるため、メモリ枯渇を引き起こす可能性があります。Sentry の解決策は、実際の URL 文字列でグループ化する代わりに、ビットマスクに変換されるブール式でグループ化することです。 @@ -139,7 +129,6 @@ LIMIT 20 Sentry のエンジニアリングチームによると、「これらの重いクエリは 10 倍以上高速になり、メモリ使用量は 100 倍少なくなりました(しかも、より重要なことに、上限が決まっています)。最大規模のお客様でもリプレイ検索時にエラーが発生しなくなり、メモリ不足を心配することなく、あらゆる規模のお客様をサポートできるようになりました。」 - ## 動画資料 {#video-sources} - [Lost in the Haystack - Optimizing High Cardinality Aggregations](https://www.youtube.com/watch?v=paK84-EUJCA) - Sentry 本番環境でのメモリ最適化に関する知見 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md b/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md index 424af9052e7..388b874aef7 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md @@ -21,14 +21,10 @@ title: 'レッスン - パーツ数が多すぎる問題' description: 'Too Many Parts の解決策と防止策' --- - - # パーツが多すぎる問題 {#the-too-many-parts-problem} *このガイドは、コミュニティミートアップから得られた知見をまとめたコレクションの一部です。より実践的なソリューションやインサイトについては、[問題別に閲覧](./community-wisdom.md)できます。* *さらにパフォーマンス最適化のヒントが必要な場合は、[パフォーマンス最適化](./performance-optimization.md) に関するコミュニティインサイトガイドを参照してください。* - - ## 問題の理解 {#understanding-the-problem} ClickHouse は深刻なパフォーマンス低下を防ぐため、「Too many parts」エラーを発生させます。小さな part が多いと、さまざまな問題を引き起こします。クエリ処理時に読み込み・マージすべきファイルが増えることによるクエリパフォーマンスの低下、各 part ごとにメモリ上でメタデータを保持する必要があることによるメモリ使用量の増加、小さなデータブロックは圧縮効率が低いため圧縮率が悪化すること、より多くのファイルハンドルとシーク処理が必要になることによる I/O 
オーバーヘッドの増加、そしてマージスケジューラの負荷増大によるバックグラウンドマージの低速化です。 @@ -38,8 +34,6 @@ ClickHouse は深刻なパフォーマンス低下を防ぐため、「Too many - [Parts](/parts) - [Parts システムテーブル](/operations/system-tables/parts) - - ## 問題を早期に把握する {#recognize-parts-problem} このクエリは、すべてのアクティブなテーブルに対してパーツ数とサイズを分析することで、テーブルのフラグメンテーションを監視します。マージの最適化が必要となる可能性がある、サイズが大きすぎる/小さすぎるパーツを持つテーブルを特定します。クエリパフォーマンスに影響が出る前にフラグメンテーションの問題を検出できるよう、これを定期的に実行してください。 @@ -76,7 +70,6 @@ ORDER BY total_parts DESC LIMIT 20; ``` - ## 動画リソース {#video-sources} - [ClickHouse における高速・並列・一貫性のある非同期 INSERT](https://www.youtube.com/watch?v=AsMPEfN5QtM) - ClickHouse チームメンバーが非同期 INSERT と「too many parts」問題について解説 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md b/i18n/jp/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md index 572caf88734..e7a926b2cff 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md @@ -6,23 +6,18 @@ description: 'clickhouse-static-files-disk-uploader ユーティリティにつ doc_type: 'guide' --- - - # clickhouse-static-files-disk-uploader {#clickhouse-static-files-disk-uploader} 指定した ClickHouse テーブルのメタデータを含むデータディレクトリを出力します。このメタデータを使用して、`web` ディスクをバックエンドとする読み取り専用データセットに基づいた ClickHouse テーブルを別のサーバー上に作成できます。 このツールをデータ移行に使用しないでください。代わりに、[`BACKUP` および `RESTORE` コマンド](/operations/backup)を使用してください。 - - ## 使い方 {#usage} ```bash $ clickhouse static-files-disk-uploader [args] ``` - ## コマンド {#commands} |Command|Description| @@ -34,8 +29,6 @@ $ clickhouse static-files-disk-uploader [args] |`--url [url]`|`test` モード用の Web サーバーの URL| |`--output-dir [dir]`|テストモード以外でファイルを出力するディレクトリ| - - ## 指定したテーブルのメタデータパスを取得する {#retrieve-metadata-path-for-the-specified-table} `clickhouse-static-files-disk-uploader` を使用する場合、対象とするテーブルのメタデータパスを取得する必要があります。 @@ -60,7 +53,6 @@ SELECT data_paths 
└───────────────────────────────────────────────────────┘ ``` - ## 出力テーブルのメタデータディレクトリをローカルファイルシステム上に書き出す {#output-table-metadata-directory-to-the-local-filesystem} ターゲット出力ディレクトリ `output` と指定したメタデータパスを使用して、次のコマンドを実行します。 @@ -75,7 +67,6 @@ $ clickhouse static-files-disk-uploader --output-dir output --metadata-path ./st データパス:「/Users/john/store/bcc/bccc1cfd-d43d-43cf-a5b6-1cda8178f1ee」、出力先パス:「output」 ``` - ## テーブルメタデータディレクトリを外部の URL に出力する {#output-table-metadata-directory-to-an-external-url} この手順は、`--test-mode` フラグを追加する点を除き、データディレクトリをローカルファイルシステムに出力する場合と同様です。出力ディレクトリを指定する代わりに、`--url` フラグを使用してターゲット URL を指定する必要があります。 @@ -86,7 +77,6 @@ $ clickhouse static-files-disk-uploader --output-dir output --metadata-path ./st $ clickhouse static-files-disk-uploader --test-mode --url http://nginx:80/test1 --metadata-path ./store/bcc/bccc1cfd-d43d-43cf-a5b6-1cda8178f1ee/ ``` - ## テーブルメタデータディレクトリを使用して ClickHouse テーブルを作成する {#using-the-table-metadata-directory-to-create-a-clickhouse-table} テーブルメタデータディレクトリを取得したら、それを使用して別のサーバー上に ClickHouse テーブルを作成できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/tutorial.md b/i18n/jp/docusaurus-plugin-content-docs/current/tutorial.md index 9100506b549..dd3d99c638b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/tutorial.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/tutorial.md @@ -9,12 +9,8 @@ show_related_blogs: true doc_type: 'guide' --- - - # 高度なチュートリアル {#advanced-tutorial} - - ## Overview {#overview} ニューヨーク市のタクシーサンプルデータセットを使用して、ClickHouseでデータを取り込み、クエリする方法を学習します。 @@ -25,7 +21,6 @@ doc_type: 'guide' - ## 新しいテーブルを作成する {#create-a-new-table} New York City のタクシーデータセットには、数百万件のタクシー乗車に関する詳細が含まれており、チップ額、通行料、支払い種別などのカラムがあります。このデータを保存するためのテーブルを作成します。 @@ -89,8 +84,6 @@ New York City のタクシーデータセットには、数百万件のタクシ ORDER BY pickup_datetime; ``` - - ## データセットを追加する {#add-the-dataset} テーブルを作成したので、S3 内の CSV ファイルからニューヨーク市タクシーデータを追加します。 @@ -159,8 +152,6 @@ New York City のタクシーデータセットには、数百万件のタクシ このクエリは 1,999,657 行を返すはずです。 - - ## データの分析 
{#analyze-the-data} データを分析するためにいくつかのクエリを実行します。以下の例を参照するか、独自のSQLクエリを試してください。 @@ -267,8 +258,6 @@ New York City のタクシーデータセットには、数百万件のタクシ 期待される出力

- - ```response ┌──────────────avg_tip─┬───────────avg_fare─┬──────avg_passenger─┬──count─┬─trip_minutes─┐ │ 1.9600000381469727 │ 8 │ 1 │ 1 │ 27511 │ @@ -340,8 +329,6 @@ New York City のタクシーデータセットには、数百万件のタクシ

- - 7. LaGuardia または JFK 空港への乗車データを取得します: ```sql SELECT @@ -382,8 +369,6 @@ New York City のタクシーデータセットには、数百万件のタクシ

- - ## 辞書を作成する {#create-a-dictionary} 辞書は、メモリ内に保存されるキーと値のペアのマッピングです。詳細については [Dictionaries](/sql-reference/dictionaries/index.md) を参照してください。 @@ -467,7 +452,6 @@ LAYOUT(HASHED_ARRAY()) ORDER BY total DESC ``` - このクエリは、LaGuardia か JFK のいずれかの空港で終了するタクシー乗車の件数を区ごとに集計します。結果は次のようになり、乗車地点の地区が不明な乗車がかなり多いことに注目してください。 ```response @@ -484,7 +468,6 @@ LAYOUT(HASHED_ARRAY()) 7 rows in set. Elapsed: 0.019 sec. Processed 2.00 million rows, 4.00 MB (105.70 million rows/s., 211.40 MB/s.) ``` - ## 結合を実行する {#perform-a-join} `taxi_zone_dictionary`と`trips`テーブルを結合するクエリを記述します。 @@ -537,7 +520,6 @@ LAYOUT(HASHED_ARRAY())
- ## 次のステップ {#next-steps} ClickHouse についてさらに学ぶには、以下のドキュメントを参照してください: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md index 71ca71ad56f..ce406028cd8 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md @@ -21,20 +21,16 @@ import img_history from '@site/static/images/use-cases/AI_ML/AIChat/5_history.pn import img_result_actions from '@site/static/images/use-cases/AI_ML/AIChat/6_result_actions.png'; import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_editor.png'; - # ClickHouse Cloud で AI チャットを使用する {#using-ai-chat-in-clickhouse-cloud} > このガイドでは、ClickHouse Cloud コンソールで AI チャット機能を有効にして利用する方法を説明します。 - ## 前提条件 {#prerequisites} 1. AI 機能が有効になっている ClickHouse Cloud の組織へのアクセス権が必要です(利用できない場合は、組織管理者またはサポートに連絡してください)。 - - ## AI Chat パネルを開く {#open-panel} 1. ClickHouse Cloud サービスにアクセスします。 @@ -43,8 +39,6 @@ import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_ed - - ## 初回利用時のデータ利用に関する同意 {#consent} 1. 初回利用時に、データの利用方法とサードパーティ LLM サブプロセッサーについて説明する同意ダイアログが表示されます。 @@ -52,8 +46,6 @@ import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_ed - - ## チャットモードを選択する {#modes} AI Chat では現在、次のモードをサポートしています: @@ -65,30 +57,22 @@ AI Chat では現在、次のモードをサポートしています: - - ## メッセージを作成して送信する {#compose} 1. 質問を入力してください(例:「ユーザーごとの日次イベントを集計するマテリアライズドビューを作成して」)。 2. Enter を押して送信します(改行する場合は Shift + Enter を押します)。 3. モデルが処理を実行している間は、「Stop」ボタンをクリックして中断できます。 - - ## 「Agent」の思考ステップを理解する {#thinking-steps} Agent モードでは、展開可能な中間的な「思考」や計画のステップが表示されることがあります。これらは、アシスタントがどのように回答を生成しているかを可視化するためのものです。必要に応じて折りたたんだり展開したりしてください。 - - ## 新しいチャットを開始する {#new-chats} 現在のコンテキストをクリアして新しいセッションを開始するには、「New Chat」ボタンをクリックします。 - - ## チャット履歴の表示 {#history} 1. 
画面下部のセクションに、最近のチャットが一覧表示されます。 @@ -97,8 +81,6 @@ Agent モードでは、展開可能な中間的な「思考」や計画のス - - ## 生成された SQL の扱い方 {#sql-actions} アシスタントが SQL を返したら、次の手順を実行します。 @@ -111,8 +93,6 @@ Agent モードでは、展開可能な中間的な「思考」や計画のス - - ## 応答を停止または中断する {#interrupt} 応答に時間がかかりすぎる場合や、意図から外れてしまった場合は、次の手順を実行します。 @@ -120,8 +100,6 @@ Agent モードでは、展開可能な中間的な「思考」や計画のス 1. 「停止」ボタンをクリックします(処理中のみ表示されます)。 2. メッセージは中断されたものとしてマークされます。その後、プロンプトを調整して再送信できます。 - - ## キーボードショートカット {#shortcuts} | 操作 | ショートカット | diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md index efe6e65d37d..e55523f4360 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md @@ -22,14 +22,12 @@ import img5 from '@site/static/images/use-cases/AI_ML/MCP/5connected_mcp_claude. import img6 from '@site/static/images/use-cases/AI_ML/MCP/6slash_mcp_claude.png'; import img7 from '@site/static/images/use-cases/AI_ML/MCP/7usage_mcp.png'; - # ClickHouse Cloud リモート MCP サーバーを有効にする {#enabling-the-clickhouse-cloud-remote-mcp-server} > このガイドでは、ClickHouse Cloud リモート MCP サーバーの有効化と使用方法について説明します。この例では MCP クライアントとして Claude Code を使用しますが、MCP をサポートする LLM クライアントであればどれでも使用できます。 - ## ClickHouse Cloud サービスでリモート MCP サーバーを有効化する {#enable-remote-mcp-server} 1. ClickHouse Cloud サービスに接続し、「Connect」ボタンをクリックして、そのサービスのリモート MCP サーバーを有効にします @@ -44,7 +42,6 @@ import img7 from '@site/static/images/use-cases/AI_ML/MCP/7usage_mcp.png'; https://mcp.clickhouse.cloud/mcp ``` - ## Claude Code に ClickHouse MCP サーバーを追加する {#add-clickhouse-mcp-server-claude-code} 1. 
作業ディレクトリで次のコマンドを実行して、ClickHouse Cloud MCP サーバーの設定を Claude Code に追加します。この例では、Claude Code の設定内で MCP サーバーに `clickhouse_cloud` という名前を付けています。 @@ -71,7 +68,6 @@ claude mcp add --transport http clickhouse_cloud https://mcp.clickhouse.cloud/mc [user@host ~/Documents/repos/mcp_test] $ claude ``` - ## OAuth を使用して ClickHouse Cloud に認証する {#authenticate-via-oauth} 1. 最初のセッションでは Claude Code がブラウザウィンドウを開きます。そうでない場合は、Claude Code で `/mcp` コマンドを実行し、`clickhouse_cloud` MCP サーバーを選択して接続を開始することもできます。 @@ -82,8 +78,6 @@ claude mcp add --transport http clickhouse_cloud https://mcp.clickhouse.cloud/mc - - ## Claude CodeからClickHouse Cloud Remote MCPサーバーを使用する {#use-rempte-mcp-from-claude-code} 1. Claude CodeでリモートMCPサーバーが接続されていることを確認してください diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md index 355d23d68d8..7402a0f1cb4 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md @@ -18,7 +18,6 @@ import FindMCPServers from '@site/static/images/use-cases/AI_ML/MCP/find-mcp-ser import MCPPermission from '@site/static/images/use-cases/AI_ML/MCP/mcp-permission.png'; import ClaudeConversation from '@site/static/images/use-cases/AI_ML/MCP/claude-conversation.png'; - # Claude DesktopでClickHouse MCPサーバーを使用する {#using-clickhouse-mcp-server-with-claude-desktop} > 本ガイドでは、uvを使用してClaude DesktopにClickHouse MCPサーバーを設定し、 @@ -37,20 +36,15 @@ import ClaudeConversation from '@site/static/images/use-cases/AI_ML/MCP/claude-c - ## uv をインストールする {#install-uv} このガイドの手順に従うには、[uv](https://docs.astral.sh/uv/) をインストールする必要があります。 uv を使用したくない場合は、別のパッケージマネージャーを使用するように MCP サーバーの設定を更新する必要があります。 - - ## Claude Desktop のダウンロード {#download-claude-desktop} [Claude Desktop のウェブサイト](https://claude.ai/desktop) からダウンロードできる Claude Desktop アプリもインストールする必要があります。 - - ## 
ClickHouse MCP サーバーの設定 {#configure-clickhouse-mcp-server} Claude Desktop のインストールが完了したら、次は [ClickHouse MCP サーバー](https://github.com/ClickHouse/mcp-clickhouse) を設定します。 @@ -113,7 +107,6 @@ MCP mcp-clickhouse: spawn uv ENOENT その場合は、`uv` へのフルパスを指定するように `command` を更新する必要があります。たとえば Cargo 経由でインストールした場合は、`/Users/<username>/.cargo/bin/uv` となります。 ::: - ## Using ClickHouse MCP server {#using-clickhouse-mcp-server} Claude Desktopを再起動後、`Search and tools`アイコンをクリックすることでClickHouse MCPサーバーを確認できます: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md index 9562550995a..18534de9d45 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md @@ -15,7 +15,6 @@ import Link from '@docusaurus/Link'; import Image from '@theme/IdealImage'; import LibreInterface from '@site/static/images/use-cases/AI_ML/MCP/librechat.png'; - # LibreChatでClickHouse MCPサーバーを使用する {#using-clickhouse-mcp-server-with-librechat} > 本ガイドでは、Dockerを使用してLibreChatとClickHouse MCPサーバーをセットアップし、 @@ -23,7 +22,6 @@ import LibreInterface from '@site/static/images/use-cases/AI_ML/MCP/librechat.pn - ## Docker をインストールする {#install-docker} LibreChat と MCP サーバーを実行するには Docker が必要です。Docker を入手するには、次の手順に従ってください。 @@ -34,8 +32,6 @@ LibreChat と MCP サーバーを実行するには Docker が必要です。Doc
詳細については、[Docker のドキュメント](https://docs.docker.com/get-docker/)を参照してください。 - - ## LibreChat リポジトリをクローンする {#clone-librechat-repo} ターミナル(コマンドプロンプトや PowerShell など)を開き、次のコマンドを使用して LibreChat リポジトリをクローンします。 @@ -45,7 +41,6 @@ git clone https://github.com/danny-avila/LibreChat.git cd LibreChat ``` - ## .env ファイルの作成と編集 {#create-and-edit-env-file} サンプル構成ファイルを `.env.example` から `.env` にコピーします。 @@ -56,7 +51,6 @@ cp .env.example .env お好みのテキストエディタで `.env` ファイルを開きます。OpenAI、Anthropic、AWS Bedrock など、代表的な LLM プロバイダーごとのセクションが用意されています。例えば次のようになります。 - ```text title=".venv" #============# # Anthropic # {#anthropic} @@ -73,7 +67,6 @@ ANTHROPIC_API_KEY=user_provided API キーがない場合は、Ollama のようなローカル LLM を使用できます。セットアップ方法は後ほどステップ「[Install Ollama](#add-local-llm-using-ollama)」で説明します。ここでは .env ファイルは変更せず、そのまま次の手順に進んでください。 ::: - ## librechat.yaml ファイルを作成する {#create-librechat-yaml-file} 新しい `librechat.yaml` ファイルを作成するには、以下のコマンドを実行します。 @@ -84,7 +77,6 @@ cp librechat.example.yaml librechat.yaml これにより、LibreChat のメインの[設定ファイル](https://www.librechat.ai/docs/configuration/librechat_yaml)が作成されます。 - ## Docker Compose に ClickHouse MCP サーバーを追加する {#add-clickhouse-mcp-server-to-docker-compose} 次に、LLM が @@ -137,7 +129,6 @@ services: /> - ## librechat.yaml で MCP サーバーを構成する {#configure-mcp-server-in-librechat-yaml} `librechat.yaml` を開き、ファイルの末尾に次の設定を追記します。 @@ -163,7 +154,6 @@ socialLogins: ['github', 'google', 'discord', 'openid', 'facebook', 'apple', 'sa socialLogins: [] ``` - ## Ollama を使用してローカル LLM を追加する(オプション) {#add-local-llm-using-ollama} ### Ollama をインストールする {#install-ollama} @@ -203,7 +193,6 @@ custom: modelDisplayLabel: "Ollama" ``` - ## すべてのサービスを起動する {#start-all-services} LibreChat プロジェクトディレクトリのルートで、次のコマンドを実行してサービスを起動します。 @@ -214,7 +203,6 @@ docker compose up すべてのサービスが完全に起動するまで待ちます。 - ## ブラウザでLibreChatを開く {#open-librechat-in-browser} すべてのサービスが起動したら、ブラウザを開いて`http://localhost:3080/`にアクセスします。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md index 8c66b7a71df..85e2011186b 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md @@ -18,7 +18,6 @@ import Conversation from '@site/static/images/use-cases/AI_ML/MCP/allm_conversat import MCPServers from '@site/static/images/use-cases/AI_ML/MCP/allm_mcp-servers.png'; import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png'; - # AnythingLLMでClickHouse MCPサーバーを使用する {#using-clickhouse-mcp-server-with-anythingllm} > 本ガイドでは、Dockerを使用してClickHouse MCPサーバーと[AnythingLLM](https://anythingllm.com/)をセットアップし、 @@ -26,7 +25,6 @@ import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png' - ## Docker をインストールする {#install-docker} LibreChat と MCP サーバーを実行するには Docker が必要です。Docker を入手するには、次の手順を実行します。 @@ -37,8 +35,6 @@ LibreChat と MCP サーバーを実行するには Docker が必要です。Doc
詳細については、[Docker のドキュメント](https://docs.docker.com/get-docker/)を参照してください。 - - ## AnythingLLM の Docker イメージをプルする {#pull-anythingllm-docker-image} 次のコマンドを実行して、AnythingLLM の Docker イメージをローカル環境にプルします。 @@ -47,7 +43,6 @@ LibreChat と MCP サーバーを実行するには Docker が必要です。Doc docker pull anythingllm/anythingllm ``` - ## ストレージの場所を設定する {#setup-storage-location} ストレージ用のディレクトリを作成し、環境ファイルを初期化します。 @@ -58,7 +53,6 @@ mkdir -p $STORAGE_LOCATION && \ touch "$STORAGE_LOCATION/.env" ``` - ## MCP サーバー設定ファイルの設定 {#configure-mcp-server-config-file} `plugins` ディレクトリを作成します。 @@ -96,7 +90,6 @@ mkdir -p "$STORAGE_LOCATION/plugins" ご利用の ClickHouse Cloud サービスの [ホスト名、ユーザー名、パスワード](https://clickhouse.com/docs/getting-started/quick-start/cloud#connect-with-your-app) を使用して行えます。 - ## AnythingLLM の Docker コンテナを起動する {#start-anythingllm-docker-container} 次のコマンドを実行して、AnythingLLM の Docker コンテナを起動します。 @@ -113,7 +106,6 @@ mintplexlabs/anythingllm 起動したら、ブラウザで `http://localhost:3001` にアクセスします。 利用したいモデルを選択し、API キーを入力します。 - ## MCP サーバーの起動を待つ {#wait-for-mcp-servers-to-start-up} UI 左下にあるツールアイコンをクリックします。 @@ -125,8 +117,6 @@ UI 左下にあるツールアイコンをクリックします。 - - ## AnythingLLM で ClickHouse MCP Server とチャットする {#chat-with-clickhouse-mcp-server-with-anythingllm} これでチャットを開始する準備が整いました。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md index 8704815d71c..afc953c9d5e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md @@ -25,7 +25,6 @@ import AddConnection from '@site/static/images/use-cases/AI_ML/MCP/7_add_connect import OpenAIModels from '@site/static/images/use-cases/AI_ML/MCP/8_openai_models_more.png'; import Conversation from '@site/static/images/use-cases/AI_ML/MCP/9_conversation.png'; - # Open WebUIでClickHouse MCPサーバーを使用する {#using-clickhouse-mcp-server-with-open-webui} > 
本ガイドでは、[Open WebUI](https://github.com/open-webui/open-webui)とClickHouse MCPサーバーをセットアップし、 @@ -33,14 +32,11 @@ import Conversation from '@site/static/images/use-cases/AI_ML/MCP/9_conversation - ## uv のインストール {#install-uv} このガイドに従うには、[uv](https://docs.astral.sh/uv/) をインストールする必要があります。 uv を使用したくない場合は、代わりのパッケージマネージャを使用するように MCP サーバーの設定を更新する必要があります。 - - ## Open WebUI を起動する {#launch-open-webui} Open WebUI を起動するには、次のコマンドを実行してください。 @@ -51,7 +47,6 @@ uv run --with open-webui open-webui serve ブラウザで [http://localhost:8080/](http://localhost:8080/) を開き、UI を表示します。 - ## ClickHouse MCP Server を構成する {#configure-clickhouse-mcp-server} ClickHouse MCP Server をセットアップするには、MCP Server を OpenAPI エンドポイントに変換する必要があります。 @@ -93,7 +88,6 @@ uvx mcpo --port 8000 -- uv run --with mcp-clickhouse --python 3.10 mcp-clickhous - ## OpenAI を設定する {#configure-openai} デフォルトでは、Open WebUI は Ollama モデルと連携して動作しますが、OpenAI 互換のエンドポイントも追加できます。 @@ -109,8 +103,6 @@ uvx mcpo --port 8000 -- uv run --with mcp-clickhouse --python 3.10 mcp-clickhous - - ## Open WebUI で ClickHouse MCP Server とチャットする {#chat-to-clickhouse-mcp-server} これで対話できるようになり、必要に応じて Open WebUI が MCP Server を呼び出します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md index 5641695116d..b85eec085e6 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md @@ -14,14 +14,12 @@ import {CardHorizontal} from '@clickhouse/click-ui/bundled' import Link from '@docusaurus/Link'; import Image from '@theme/IdealImage'; - # ClickHouse MCP サーバーを Ollama と連携して利用する {#using-clickhouse-mcp-server-with-ollama} > 本ガイドでは、ClickHouse MCP サーバーを Ollama と組み合わせて使用する方法を説明します。 - ## Ollama のインストール {#install-ollama} Ollama は、大規模言語モデル (LLM) をローカル環境で実行するためのライブラリです。 @@ -93,7 +91,6 @@ ollama show qwen3 この出力から、デフォルトの qwen3 
モデルにはおよそ80億個のパラメータがあることが分かります。 - ## MCPHost をインストールする {#install-mcphost} この記事の執筆時点(2025 年 7 月)では、Ollama を MCP サーバーで使用するためのネイティブな機能はありません。 @@ -108,7 +105,6 @@ go install github.com/mark3labs/mcphost@latest バイナリは `~/go/bin` にインストールされるため、そのディレクトリが PATH に含まれていることを確認する必要があります。 - ## ClickHouse MCP サーバーの設定 {#configure-clickhouse-mcp-server} MCPHost を使用して、YAML または JSON ファイルで MCP サーバーを構成できます。 @@ -157,7 +153,6 @@ export CLICKHOUSE_PASSWORD="" 理論上は、MCP 構成ファイル内の `environment` キーの下にこれらの変数を指定できるはずですが、実際にはその方法では動作しないことが分かっています。 ::: - ## MCPHost の実行 {#running-mcphost} ClickHouse MCP サーバーの設定が完了したら、次のコマンドで MCPHost を起動できます: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md index eea8c15c91a..f24709b7b9e 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md @@ -26,14 +26,12 @@ import ToolsCalled from '@site/static/images/use-cases/AI_ML/MCP/8_janai_tools_c import ToolsCalledExpanded from '@site/static/images/use-cases/AI_ML/MCP/9_janai_tools_called_expanded.png'; import Result from '@site/static/images/use-cases/AI_ML/MCP/10_janai_result.png'; - # Jan.ai で ClickHouse MCP Server を使用する {#using-clickhouse-mcp-server-with-janai} > このガイドでは、ClickHouse MCP Server を [Jan.ai](https://jan.ai/docs) と組み合わせて使用する方法について説明します。 - ## Jan.ai をインストールする {#install-janai} Jan.ai は、100% オフラインで動作するオープンソースの ChatGPT の代替ツールです。 @@ -41,8 +39,6 @@ Jan.ai は、100% オフラインで動作するオープンソースの ChatGPT ネイティブアプリなので、ダウンロードが完了したらすぐに起動できます。 - - ## Jan.ai に LLM を追加する {#add-llm-to-janai} 設定メニューからモデルを有効化できます。 @@ -51,8 +47,6 @@ OpenAI を有効にするには、以下のように API キーを入力する - - ## MCP サーバーを有効化する {#enable-mcp-servers} 本記事の執筆時点では、MCP Servers は Jan.ai における実験的な機能です。 @@ -62,8 +56,6 @@ OpenAI を有効にするには、以下のように API キーを入力する トグルをオンにすると、左側のメニューに `MCP Servers` が表示されます。 - - ## ClickHouse MCP Server を構成する 
{#configure-clickhouse-mcp-server} `MCP Servers` メニューをクリックすると、接続可能な MCP サーバーの一覧が表示されます。 @@ -84,8 +76,6 @@ ClickHouse MCP Server をインストールするには、`+` アイコンをク - - ## Jan.ai で ClickHouse MCP サーバーとチャットする {#chat-to-clickhouse-mcp-server} ClickHouse に保存されているデータについて、会話してみましょう。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md index e7b898b225d..f143fdecc49 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Agno と ClickHouse MCP Server を使用して AI エージェントを構築する方法 {#how-to-build-an-ai-agent-with-agno-and-the-clickhouse-mcp-server} このガイドでは、[Agno](https://github.com/agno-agi/agno) を使って、[ClickHouse の MCP Server](https://github.com/ClickHouse/mcp-clickhouse) を介して [ClickHouse の SQL playground](https://sql.clickhouse.com/) と対話できる AI エージェントを構築する方法を説明します。 @@ -20,8 +18,6 @@ doc_type: 'guide' このサンプルは、[examples リポジトリ](https://github.com/ClickHouse/examples/blob/main/ai/mcp/agno/agno.ipynb) にノートブックとして用意されています。 ::: - - ## 前提条件 {#prerequisites} - システムにPythonがインストールされていること @@ -32,7 +28,6 @@ doc_type: 'guide' - ## ライブラリをインストールする {#install-libraries} 以下のコマンドを実行して Agno ライブラリをインストールします。 @@ -43,7 +38,6 @@ pip install -q agno pip install -q ipywidgets ``` - ## 認証情報の設定 {#setup-credentials} 次に、Anthropic API キーを指定する必要があります: @@ -74,7 +68,6 @@ env = { } ``` - ## MCPサーバーとAgnoエージェントの初期化 {#initialize-mcp-and-agent} ClickHouse MCPサーバーをClickHouse SQLプレイグラウンドに接続するように設定し、 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md index d87e18c2de4..a8bcc2e39c5 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Chainlit と ClickHouse MCP Server を使って AI エージェントを構築する方法 {#how-to-build-an-ai-agent-with-chainlit-and-the-clickhouse-mcp-server} このガイドでは、強力なチャットインターフェース用フレームワークである Chainlit と @@ -20,14 +18,10 @@ ClickHouse Model Context Protocol (MCP) Server を組み合わせて、対話型 AI アプリケーション向けの会話型インターフェースを構築でき、ClickHouse MCP Server により、 高性能なカラム型データベースである ClickHouse とのシームレスな統合が可能になります。 - - ## 前提条件 {#prerequisites} - Anthropic API キーが必要です - [`uv`](https://docs.astral.sh/uv/getting-started/installation/) がインストールされている必要があります - - ## 基本的な Chainlit アプリ {#basic-chainlit-app} 次を実行すると、基本的なチャットアプリの例を確認できます。 @@ -38,7 +32,6 @@ uv run --with anthropic --with chainlit chainlit run chat_basic.py -w -h 次に、`http://localhost:8000` にアクセスします - ## ClickHouse MCP Server を追加する {#adding-clickhouse-mcp-server} ClickHouse MCP Server を追加すると、さらに面白くなります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md index 938676c4262..d2ce060b4cf 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Claude Agent SDK と ClickHouse MCP Server を使って AI エージェントを構築する方法 {#how-to-build-an-ai-agent-with-claude-agent-sdk-and-the-clickhouse-mcp-server} このガイドでは、[Claude Agent SDK](https://docs.claude.com/en/api/agent-sdk/overview) を使って、[ClickHouse の SQL playground](https://sql.clickhouse.com/) と [ClickHouse MCP Server](https://github.com/ClickHouse/mcp-clickhouse) 
を経由して対話できる AI エージェントを構築する方法を説明します。 @@ -20,8 +18,6 @@ doc_type: 'guide' この例は、[examples リポジトリ](https://github.com/ClickHouse/examples/blob/main/ai/mcp/claude-agent/claude-agent.ipynb) 内のノートブックとして参照できます。 ::: - - ## 前提条件 {#prerequisites} - システムにPythonがインストールされていること @@ -32,7 +28,6 @@ doc_type: 'guide' - ## ライブラリをインストールする {#install-libraries} 以下のコマンドを実行して、Claude Agent SDK をインストールします。 @@ -43,7 +38,6 @@ pip install -q claude-agent-sdk pip install -q ipywidgets ``` - ## 資格情報の設定 {#setup-credentials} 次に、Anthropic の API キーを指定する必要があります。 @@ -69,7 +63,6 @@ env = { } ``` - ## MCP ServerとClaude Agent SDKエージェントの初期化 {#initialize-mcp-and-agent} ClickHouse MCP ServerをClickHouse SQLプレイグラウンドに接続するよう設定し、 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md index b44521e7021..e9794a4838d 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # CopilotKit と ClickHouse MCP Server を使用して AI エージェントを構築する方法 {#how-to-build-an-ai-agent-with-copilotkit-and-the-clickhouse-mcp-server} これは、ClickHouse に保存されているデータを利用してエージェント型アプリケーションを構築する方法の例です。[ClickHouse MCP Server](https://github.com/ClickHouse/mcp-clickhouse) を使用して ClickHouse からデータをクエリし、そのデータに基づいてグラフを生成します。 @@ -22,15 +20,11 @@ doc_type: 'guide' このサンプルのコードは [examples リポジトリ](https://github.com/ClickHouse/examples/edit/main/ai/mcp/copilotkit) にあります。 ::: - - ## 前提条件 {#prerequisites} - `Node.js >= 20.14.0` - `uv >= 0.1.0` - - ## 依存関係をインストールする {#install-dependencies} `git clone https://github.com/ClickHouse/examples` を実行してプロジェクトをローカル環境にクローンし、 @@ -39,8 +33,6 @@ doc_type: 'guide' このセクションはスキップし、スクリプト `./install.sh` を実行して依存関係をインストールします。 依存関係を手動でインストールしたい場合は、以下の手順に従ってください。 - 
- ## 依存関係を手動でインストールする {#install-dependencies-manually} 1. 依存関係をインストールします: @@ -64,20 +56,15 @@ uv sync uv add fastmcp ``` - ## アプリケーションを構成する {#configure-the-application} `env.example` ファイルを `.env` としてコピーし、`ANTHROPIC_API_KEY` を指定するように編集します。 - - ## 独自の LLM を使用する {#use-your-own-llm} Anthropic 以外の LLM プロバイダーを使用したい場合は、Copilotkit ランタイムの設定を変更して、別の LLM アダプターを利用できます。 サポートされているプロバイダーの一覧は[こちら](https://docs.copilotkit.ai/guides/bring-your-own-llm)です。 - - ## 独自の ClickHouse クラスターを使用する {#use-your-own-clickhouse-cluster} デフォルトでは、このサンプルは @@ -90,8 +77,6 @@ Anthropic 以外の LLM プロバイダーを使用したい場合は、Copilotk - `CLICKHOUSE_PASSWORD` - `CLICKHOUSE_SECURE` - - # アプリケーションを実行する {#run-the-application} `npm run dev` を実行して、開発サーバーを起動します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md index 7bcd061d54c..1efefbea467 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # ClickHouse MCP Server を使用して LlamaIndex AI エージェントを構築する方法 {#how-to-build-a-llamaindex-ai-agent-using-clickhouse-mcp-server} このガイドでは、[ClickHouse の SQL Playground](https://sql.clickhouse.com/) と対話できるようにするために、[ClickHouse MCP Server](https://github.com/ClickHouse/mcp-clickhouse) を利用した [LlamaIndex](https://docs.llamaindex.ai) AI エージェントの構築方法を説明します。 @@ -20,8 +18,6 @@ doc_type: 'guide' このサンプルは、[examples リポジトリ](https://github.com/ClickHouse/examples/blob/main/ai/mcp/llamaindex/llamaindex.ipynb) 内のノートブックとして利用できます。 ::: - - ## 前提条件 {#prerequisites} - システムにPythonがインストールされていること @@ -32,7 +28,6 @@ doc_type: 'guide' - ## ライブラリのインストール {#install-libraries} 次のコマンドを実行して、必要なライブラリをインストールします。 @@ -42,7 +37,6 @@ pip install -q --upgrade pip pip install -q llama-index 
clickhouse-connect llama-index-llms-anthropic llama-index-tools-mcp ``` - ## 資格情報の設定 {#setup-credentials} 次に、Anthropic の API キーを設定する必要があります。 @@ -61,7 +55,6 @@ Anthropic の API キーを持っておらず、別の LLM プロバイダーを 認証情報の設定方法については [LlamaIndex「LLMs」ドキュメント](https://docs.llamaindex.ai/en/stable/examples/) を参照してください。 ::: - ## MCP Server を初期化する {#initialize-mcp-and-agent} 次に、ClickHouse MCP Server を構成し、ClickHouse SQL playground を接続先として指定します。 @@ -92,7 +85,6 @@ mcp_tool_spec = McpToolSpec( ) ``` - tools = await mcp_tool_spec.to_tool_list_async() ```` @@ -110,7 +102,6 @@ agent_worker = FunctionCallingAgentWorker.from_tools( agent = AgentRunner(agent_worker) ```` - ## LLM を初期化する {#initialize-llm} 次のコードで Claude Sonnet 4.0 モデルを初期化します。 @@ -120,7 +111,6 @@ from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-sonnet-4-0") ``` - ## エージェントの実行 {#run-agent} 最後に、エージェントに質問することができます: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md index 37dab0ed552..ac49e480613 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # ClickHouse MCP Server を使用して OpenAI エージェントを構築する方法 {#how-to-build-an-openai-agent-using-clickhouse-mcp-server} このガイドでは、[ClickHouse SQL playground](https://sql.clickhouse.com/) と対話できる [ClickHouse MCP Server](https://github.com/ClickHouse/mcp-clickhouse) を利用して [OpenAI](https://github.com/openai/openai-agents-python) エージェントを構築する手順を説明します。 @@ -20,8 +18,6 @@ doc_type: 'guide' この例は、[examples リポジトリ](https://github.com/ClickHouse/examples/blob/main/ai/mcp/openai-agents/openai-agents.ipynb) 内のノートブックとして提供されています。 ::: - - ## 前提条件 {#prerequisites} - 
システムにPythonがインストールされている必要があります。 @@ -32,7 +28,6 @@ doc_type: 'guide' - ## ライブラリをインストールする {#install-libraries} 次のコマンドを実行して、必要なライブラリをインストールします。 @@ -42,7 +37,6 @@ pip install -q --upgrade pip pip install -q openai-agents ``` - ## 認証情報のセットアップ {#setup-credentials} 次に、OpenAI API キーを指定する必要があります。 @@ -56,7 +50,6 @@ os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI APIキーを入力:") OpenAI APIキーを入力: ········ ``` - ## MCP Server と OpenAI エージェントの初期化 {#initialize-mcp-and-agent} ここでは、ClickHouse MCP Server を ClickHouse SQL playground を参照するように設定し、 @@ -155,7 +148,6 @@ async with MCPServerStdio( simple_render_chunk(chunk) ``` - ```response title="Response" Running: What's the biggest GitHub project so far in 2025? 🔧 Tool: list_databases({}) diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md index 57f68c86033..f230c839e3a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # ClickHouse MCP Server を使用して PydanticAI エージェントを構築する方法 {#how-to-build-a-pydanticai-agent-using-clickhouse-mcp-server} このガイドでは、[ClickHouse の MCP Server](https://github.com/ClickHouse/mcp-clickhouse) を使って [ClickHouse の SQL playground](https://sql.clickhouse.com/) と対話できる [PydanticAI](https://ai.pydantic.dev/mcp/client/#__tabbed_1_1) エージェントを構築する方法を学びます。 @@ -20,8 +18,6 @@ doc_type: 'guide' この例は、[examples リポジトリ](https://github.com/ClickHouse/examples/blob/main/ai/mcp/pydanticai/pydantic.ipynb) にあるノートブックとして提供されています。 ::: - - ## 前提条件 {#prerequisites} - システムにPythonがインストールされていること @@ -32,7 +28,6 @@ doc_type: 'guide' - ## ライブラリをインストールする {#install-libraries} 次のコマンドを実行して、必要なライブラリをインストールします。 @@ -43,7 +38,6 @@ pip install -q 
"pydantic-ai-slim[mcp]" pip install -q "pydantic-ai-slim[anthropic]" # 別のLLMプロバイダーを使用する場合は適切なパッケージに置き換えてください ``` - ## 資格情報の設定 {#setup-credentials} 次に、Anthropic の API キーを指定する必要があります。 @@ -74,7 +68,6 @@ env = { } ``` - ## MCP Server と PydanticAI エージェントの初期化 {#initialize-mcp} 次に、ClickHouse MCP Server を設定し、ClickHouse SQL playground を参照するようにします。 @@ -97,7 +90,6 @@ server = MCPServerStdio( agent = Agent('anthropic:claude-sonnet-4-0', mcp_servers=[server]) ``` - ## エージェントに質問する {#ask-agent} 最後に、エージェントに質問できます: diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md index b0b963b84d8..4e0ca4b94aa 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # ClickHouse MCP Server を使用して SlackBot エージェントを構築する方法 {#how-to-build-a-slackbot-agent-using-clickhouse-mcp-server} このガイドでは、[SlackBot](https://slack.com/intl/en-gb/help/articles/202026038-An-introduction-to-Slackbot) エージェントの構築方法を解説します。 @@ -22,8 +20,6 @@ doc_type: 'guide' このサンプルのコードは [examples リポジトリ](https://github.com/ClickHouse/examples/blob/main/ai/mcp/slackbot/README.md) で参照できます。 ::: - - ## 前提条件 {#prerequisites} - [`uv`](https://docs.astral.sh/uv/getting-started/installation/)をインストールしておく必要があります @@ -32,15 +28,12 @@ doc_type: 'guide' - ## Slack アプリを作成する {#create-a-slack-app} 1. [slack.com/apps](https://slack.com/apps) にアクセスし、`Create New App` をクリックします。 2. `From scratch` を選択し、アプリに名前を付けます。 3. 
Slack ワークスペースを選択します。 - - ## ワークスペースにアプリをインストールする {#install-the-app-to-your-workspace} 次に、前の手順で作成したアプリをワークスペースに追加します。 @@ -48,8 +41,6 @@ Slack のドキュメントにある 「[Slack ワークスペースにアプリを追加する](https://slack.com/intl/en-gb/help/articles/202035138-Add-apps-to-your-Slack-workspace)」 の手順に従ってください。 - - ## Slack アプリの設定を行う {#configure-slack-app-settings} - `App Home` に移動する @@ -75,8 +66,6 @@ Slack のドキュメントにある - `message:im` - 変更を保存する - - ## 環境変数を追加する (`.env`) {#add-env-vars} プロジェクトのルートに `.env` ファイルを作成し、以下の環境変数を定義します。 @@ -95,7 +84,6 @@ CLICKHOUSE_SECURE=true 必要に応じて、ClickHouse の変数を調整し、ご自身の ClickHouse サーバーまたは ClickHouse Cloud インスタンスを使用することもできます。 - ## ボットの使用 {#using-the-bot} 1. **ボットを起動:** diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md index 665f09cca00..be457696527 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Streamlit を使って ClickHouse をバックエンドにした AI エージェントを構築する方法 {#how-to-build-a-clickhouse-backed-ai-agent-with-streamlit} このガイドでは、[Streamlit](https://streamlit.io/) を使用して、[ClickHouse の SQL playground](https://sql.clickhouse.com/) に対して [ClickHouse MCP Server](https://github.com/ClickHouse/mcp-clickhouse) と [Agno](https://github.com/agno-agi/agno) を通じて対話できる、Web ベースの AI エージェントを構築する方法を説明します。 @@ -21,8 +19,6 @@ doc_type: 'guide' このサンプルのソースコードは、[examples リポジトリ](https://github.com/ClickHouse/examples/tree/main/ai/mcp/streamlit)で確認できます。 ::: - - ## 前提条件 {#prerequisites} - システムにPythonがインストールされている必要があります。 @@ -33,7 +29,6 @@ doc_type: 'guide' - ## ライブラリのインストール {#install-libraries} 次のコマンドを実行して、必要なライブラリをインストールします。 @@ -42,7 +37,6 @@ doc_type: 'guide' pip install streamlit agno ipywidgets ``` - ## ユーティリティファイルを作成 
{#create-utilities} 2つのユーティリティ関数を含む `utils.py` ファイルを作成します。1つ目は、 @@ -71,7 +65,6 @@ def apply_styles():
""", unsafe_allow_html=True) ``` - ## 認証情報のセットアップ {#setup-credentials} Anthropic の API キーを環境変数として設定してください。 @@ -85,7 +78,6 @@ Anthropic の API キーを持っておらず、別の LLM プロバイダーを 資格情報の設定手順については [Agno「Integrations」ドキュメント](https://docs.agentops.ai/v2/integrations/ag2) を参照してください。 ::: - ## 必要なライブラリをインポートする {#import-libraries} まずメインの Streamlit アプリケーションファイル(例: `app.py`)を作成し、次のインポート文を追加します。 @@ -110,7 +102,6 @@ import threading from queue import Queue ``` - ## エージェントのストリーミング関数を定義する {#define-agent-function} [ClickHouse の SQL Playground](https://sql.clickhouse.com/) に接続し、レスポンスをストリーミングするメインのエージェント関数を追加します。 @@ -161,7 +152,6 @@ async def stream_clickhouse_agent(message): yield chunk.content ``` - ## 同期用ラッパー関数を追加する {#add-wrapper-functions} Streamlit で非同期ストリーミング処理を扱うためのヘルパー関数を追加します。 @@ -184,7 +174,6 @@ async def _agent_stream_to_queue(message, queue): queue.put(chunk) ``` - ## Streamlit インターフェイスを作成 {#create-interface} Streamlit の UI コンポーネントとチャット機能を追加します。 @@ -214,7 +203,6 @@ if prompt := st.chat_input("何かご質問はありますか?"): st.session_state.messages.append({"role": "assistant", "content": response}) ``` - ## アプリケーションの実行 {#run-application} ClickHouse AIエージェントWebアプリケーションを起動するには、ターミナルから以下のコマンドを実行します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md index 0abaa9e8a26..ba7dea1e240 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md @@ -20,7 +20,6 @@ ClickHouse は複数のカタログ(Unity、Glue、Polaris など)との統 Glue は多くの異なるテーブル形式をサポートしていますが、この統合でサポートされるのは Iceberg テーブルのみです。 ::: - ## AWS で Glue を構成する {#configuring} Glue カタログに接続するには、カタログのリージョンを特定し、アクセスキーとシークレットキーを指定する必要があります。 @@ -43,7 +42,6 @@ SETTINGS aws_secret_access_key = '' ``` - ## ClickHouse から Glue データカタログをクエリする {#query-glue-catalog} 接続が確立できたので、Glue に対してクエリを実行できるようになりました。 @@ -80,7 +78,6 @@ ClickHouse 
は複数のネームスペースをサポートしていないため SHOW CREATE TABLE `iceberg-benchmark.hitsiceberg`; ``` - ```sql title="Response" ┌─statement───────────────────────────────────────────────┐ 1.│ CREATE TABLE glue.`iceberg-benchmark.hitsiceberg` │ diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md index e84741e2f95..65e64f63197 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md @@ -32,7 +32,6 @@ Lakekeeper は Apache Iceberg 向けのオープンソース REST カタログ `SET allow_experimental_database_iceberg = 1;` ::: - ## ローカル開発環境のセットアップ {#local-development-setup} ローカルでの開発やテストには、Lakekeeper のコンテナ化環境を使用できます。この方法は、学習、プロトタイピング、および開発環境に最適です。 @@ -230,7 +229,6 @@ docker-compose logs -f Lakekeeper のセットアップでは、まずサンプルデータを Iceberg テーブルにロードしておく必要があります。ClickHouse を通じてクエリを実行する前に、環境でテーブルが作成され、データが投入されていることを必ず確認してください。テーブルが利用可能かどうかは、使用している特定の docker-compose セットアップやサンプルデータ読み込み用スクリプトに依存します。 ::: - ### ローカルの Lakekeeper カタログへの接続 {#connecting-to-local-lakekeeper-catalog} ClickHouse コンテナに接続します。 @@ -249,7 +247,6 @@ ENGINE = DataLakeCatalog('http://lakekeeper:8181/catalog', 'minio', 'ClickHouse_ SETTINGS catalog_type = 'rest', storage_endpoint = 'http://minio:9002/warehouse-rest', warehouse = 'demo' ``` - ## ClickHouse を使用して Lakekeeper カタログテーブルをクエリする {#querying-lakekeeper-catalog-tables-using-clickhouse} 接続が確立したので、Lakekeeper カタログ経由でクエリを実行できます。例えば次のとおりです。 @@ -333,7 +330,6 @@ SHOW CREATE TABLE `default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## データレイクから ClickHouse へのデータ取り込み {#loading-data-from-your-data-lake-into-clickhouse} Lakekeeper カタログのデータを ClickHouse に取り込む必要がある場合は、まずローカルの ClickHouse テーブルを作成します。 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md index 86cd6457293..8f0250ced4a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md @@ -34,7 +34,6 @@ Nessie は、次の機能を備えたデータレイク向けのオープンソ `SET allow_experimental_database_iceberg = 1;` ::: - ## ローカル開発環境のセットアップ {#local-development-setup} ローカル開発およびテストには、コンテナ化された Nessie セットアップを利用できます。このアプローチは、学習、プロトタイピング、開発用途に最適です。 @@ -148,7 +147,6 @@ docker-compose logs -f Nessie のセットアップではインメモリのバージョンストアを使用し、最初にサンプルデータを Iceberg テーブルにロードしておく必要があります。ClickHouse からクエリを実行する前に、その環境でテーブルが作成され、データが投入されていることを必ず確認してください。 ::: - ### ローカル Nessie カタログへの接続 {#connecting-to-local-nessie-catalog} ClickHouse コンテナに接続します。 @@ -167,7 +165,6 @@ ENGINE = DataLakeCatalog('http://nessie:19120/iceberg', 'admin', 'password') SETTINGS catalog_type = 'rest', storage_endpoint = 'http://minio:9002/my-bucket', warehouse = 'warehouse' ``` - ## ClickHouse を使用した Nessie カタログテーブルのクエリ実行 {#querying-nessie-catalog-tables-using-clickhouse} 接続が確立できたので、Nessie カタログ経由でクエリを実行し始めることができます。たとえば次のとおりです。 @@ -251,7 +248,6 @@ SHOW CREATE TABLE `default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## データレイクから ClickHouse へのデータ読み込み {#loading-data-from-your-data-lake-into-clickhouse} Nessie カタログから ClickHouse にデータをロードする必要がある場合は、まずローカルの ClickHouse テーブルを作成することから始めます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md index c610f445723..1c8894c07d4 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md @@ -23,7 
+23,6 @@ Microsoft OneLake は、レイクハウス向けに複数のテーブル形式 `SET allow_database_iceberg = 1;` ::: - ## OneLake の要件の収集 {#gathering-requirements} Microsoft Fabric でテーブルをクエリする前に、次の情報を収集する必要があります。 @@ -43,7 +42,6 @@ Microsoft Fabric でテーブルをクエリする前に、次の情報を収集 SET allow_database_iceberg=1 ``` - ### OneLakeに接続する {#connect-onelake} ```sql @@ -59,7 +57,6 @@ onelake_client_id = '', onelake_client_secret = '' ``` - ## ClickHouse を使用した OneLake へのクエリ実行 {#querying-onelake-using-clickhouse} 接続が確立できたので、これで OneLake に対してクエリを実行できます。 @@ -120,7 +117,6 @@ ClickHouse は複数のネームスペースをサポートしていないため テーブルの DDL を確認するには: - ```sql SHOW CREATE TABLE onelake_catalog.`year_2017.green_tripdata_2017` @@ -155,7 +151,6 @@ Query id: 8bd5bd8e-83be-453e-9a88-32de12ba7f24 └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## データレイクから ClickHouse へのデータ読み込み {#loading-data-from-onelake-into-clickhouse} OneLake から ClickHouse にデータを読み込む必要がある場合は、次の手順を実行します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md index 7a41e20283f..f617aaedaa9 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md @@ -33,7 +33,6 @@ REST Catalog は Iceberg カタログ向けの標準化された API 仕様で `SET allow_experimental_database_iceberg = 1;` ::: - ## ローカル開発環境のセットアップ {#local-development-setup} ローカルでの開発およびテストには、コンテナ化した REST カタログのセットアップを使用できます。この方法は、学習やプロトタイピング、開発環境に最適です。 @@ -90,7 +89,6 @@ docker-compose logs -f REST カタログをセットアップするには、まずサンプルデータを Iceberg テーブルにロードしておく必要があります。ClickHouse 経由でクエリを実行する前に、Spark 環境でテーブルが作成され、データが投入済みであることを必ず確認してください。テーブルが利用可能かどうかは、使用している特定の docker-compose セットアップおよびサンプルデータ読み込みスクリプトに依存します。 ::: - ### ローカルの REST カタログへの接続 {#connecting-to-local-rest-catalog} ClickHouse コンテナに接続します。 
@@ -112,7 +110,6 @@ SETTINGS warehouse = 'demo' ``` - ## ClickHouse を使用した REST カタログテーブルのクエリ実行 {#querying-rest-catalog-tables-using-clickhouse} 接続が確立できたので、REST カタログ経由でクエリを実行できます。例えば、次のようになります。 @@ -196,7 +193,6 @@ SHOW CREATE TABLE `default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## データレイク内のデータを ClickHouse に読み込む {#loading-data-from-your-data-lake-into-clickhouse} REST カタログから ClickHouse にデータを読み込む必要がある場合は、まずローカルテーブルを作成します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md index 3010daad03f..f593673a315 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md @@ -29,7 +29,6 @@ Databricks はレイクハウス向けに複数のデータ形式をサポート `SET allow_experimental_database_unity_catalog = 1;` ::: - ## Databricks での Unity の構成 {#configuring-unity-in-databricks} ClickHouse が Unity カタログと連携できるようにするには、Unity Catalog を外部リーダーとの連携を許可するように構成しておく必要があります。これは、[「Unity Catalog への外部データ アクセスを有効にする」](https://docs.databricks.com/aws/en/external-access/admin) ガイドに従うことで実現できます。 @@ -54,7 +53,6 @@ ENGINE = DataLakeCatalog('https://.cloud.databricks.com/api/2.1/un SETTINGS warehouse = 'CATALOG_NAME', catalog_credential = '', catalog_type = 'unity' ``` - ### Iceberg を読み込む {#read-iceberg} ```sql @@ -64,7 +62,6 @@ SETTINGS catalog_type = 'rest', catalog_credential = ':); ``` - ## データレイクから ClickHouse へのデータの読み込み {#loading-data-from-your-data-lake-into-clickhouse} Databricks から ClickHouse にデータを読み込む必要がある場合は、まずローカルの ClickHouse テーブルを作成します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md index ca620afd0db..1d2238e48dd 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md @@ -19,7 +19,6 @@ import observability_23 from '@site/static/images/use-cases/observability/observ import observability_24 from '@site/static/images/use-cases/observability/observability-24.png'; import Image from '@theme/IdealImage'; - # 観測性のための Grafana と ClickHouse の利用 {#using-grafana-and-clickhouse-for-observability} Grafana は、ClickHouse における観測性データの推奨可視化ツールです。これは、Grafana 向け公式 ClickHouse プラグインを使用して実現します。インストール手順は[こちら](/integrations/grafana)を参照してください。 @@ -55,7 +54,6 @@ SELECT Timestamp as timestamp, Body as body, SeverityText as level, TraceId as t クエリビルダーは、ユーザーがSQLを記述せずにクエリを簡単に変更できる手段を提供します。キーワードを含むログの検索を含めたフィルタリングは、クエリビルダーから実行できます。より複雑なクエリを記述したいユーザーは、SQLエディタに切り替えることができます。適切なカラムが返され、かつ Query Type として `logs` が選択されていれば、結果はログとしてレンダリングされます。ログのレンダリングに必要なカラムは[こちら](https://grafana.com/developers/plugin-tools/tutorials/build-a-logs-data-source-plugin#logs-data-frame-format)に記載されています。 - ### ログからトレースへ {#logs-to-traces} ログにトレース ID が含まれている場合、ユーザーは特定のログ行から対応するトレースへ遷移できると便利です。 @@ -85,7 +83,6 @@ WHERE ( Timestamp >= $__fromTime AND Timestamp <= $__toTime ) より複雑なクエリを記述したいユーザーは、`SQL Editor` に切り替えることができます。 - ### トレースの詳細を表示する {#view-trace-details} 上記のとおり、トレース ID はクリック可能なリンクとして表示されます。トレース ID をクリックすると、ユーザーは `View Trace` リンクから関連するスパンを表示できます。これにより(OTel のカラムを前提として)、必要な構造でスパンを取得するための次のクエリが実行され、結果はウォーターフォール形式でレンダリングされます。 @@ -120,7 +117,6 @@ LIMIT 1000 - ### トレースからログへの遷移 {#traces-to-logs} ログにトレース ID が含まれている場合、ユーザーはトレースから関連するログへ遷移できます。ログを表示するには、トレース ID をクリックして `View Logs` を選択します。これにより、デフォルトの OTel 列を前提として次のクエリが実行されます。 @@ -135,7 +131,6 @@ ORDER BY timestamp ASC LIMIT 1000 - ## ダッシュボード {#dashboards} ユーザーは ClickHouse データソースを利用して、Grafana 上にダッシュボードを構築できます。詳細については、特に [マクロの概念](https://github.com/grafana/clickhouse-datasource?tab=readme-ov-file#macros) や 
[変数](https://grafana.com/docs/grafana/latest/dashboards/variables/) について、Grafana および ClickHouse の [データソースドキュメント](https://github.com/grafana/clickhouse-datasource) を参照することを推奨します。 @@ -165,7 +160,6 @@ LIMIT 100000 - ### マルチラインチャート {#multi-line-charts} 次の条件を満たすクエリでは、自動的にマルチラインチャートがレンダリングされます。 @@ -191,7 +185,6 @@ LIMIT 100000 - ### 地理データの可視化 {#visualizing-geo-data} 前のセクションでは、IP 辞書を用いて地理座標を付与し、オブザーバビリティデータを拡充する方法について説明しました。`latitude` と `longitude` の列がある前提で、`geohashEncode` 関数を使うことで、オブザーバビリティデータを可視化に利用できます。これにより、Grafana の Geo Map チャートと互換性のあるジオハッシュが生成されます。以下に、クエリと可視化の例を示します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md index 0d245991f34..08db502faaf 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md @@ -16,7 +16,6 @@ import observability_8 from '@site/static/images/use-cases/observability/observa import observability_9 from '@site/static/images/use-cases/observability/observability-9.png'; import Image from '@theme/IdealImage'; - # データ収集のための OpenTelemetry の統合 {#integrating-opentelemetry-for-data-collection} あらゆるオブザーバビリティソリューションには、ログやトレースを収集してエクスポートする手段が必要です。この目的のために、ClickHouse は [OpenTelemetry (OTel) プロジェクト](https://opentelemetry.io/) の利用を推奨します。 @@ -111,7 +110,6 @@ Collector は、ログを収集するために 2 つの主要な receiver を提 可能な場合は構造化ログを採用し、JSON(例:ndjson)形式でログを出力することを推奨します。これにより、後続のログ処理が簡略化されます。具体的には、[Collector processors](https://opentelemetry.io/docs/collector/configuration/#processors) を使用して ClickHouse に送信する前、あるいは挿入時にマテリアライズドビューを用いて処理する際の負荷を軽減できます。構造化ログを利用することで、後段の処理に必要なリソースを節約でき、最終的には ClickHouse ソリューションで必要となる CPU を削減できます。 - ### 例 {#example} 例として、構造化(JSON)および非構造化のログデータセットをそれぞれ約 1,000 
万行分用意しており、以下のリンクからダウンロードできます。 @@ -165,7 +163,6 @@ service: 構造化ログを使用している場合、出力されるメッセージは次の形式になります。 - ```response LogRecord #98 ObservedTimestamp: 2024-06-19 13:21:16.414259 +0000 UTC @@ -205,7 +202,6 @@ Operators はログ処理の最も基本的な単位です。各 operator は、 ローカルまたは Kubernetes のログファイルを収集する必要があるユーザーには、[filelog receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/filelogreceiver/README.md#configuration)で利用可能な設定オプションと、[オフセット](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver#offset-tracking)および[複数行ログのパース処理](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver#example---multiline-logs-parsing)の扱いについて理解しておくことを推奨します。 - ## Kubernetes ログの収集 {#collecting-kubernetes-logs} Kubernetes ログの収集については、[OpenTelemetry のドキュメントガイド](https://opentelemetry.io/docs/kubernetes/) を推奨します。[Kubernetes Attributes Processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) を使用すると、ポッドのメタデータでログおよびメトリクスを拡充できます。これにより、例えばラベルのような動的メタデータが生成され、`ResourceAttributes` 列に保存されます。ClickHouse は現在、この列に対して `Map(String, String)` 型を使用しています。この型の取り扱いや最適化の詳細については、[Using Maps](/use-cases/observability/schema-design#using-maps) および [Extracting from maps](/use-cases/observability/schema-design#extracting-from-maps) を参照してください。 @@ -278,7 +274,6 @@ Attributes: トレースメッセージの完全なスキーマは [こちら](https://opentelemetry.io/docs/concepts/signals/traces/) で公開されています。ユーザーの皆さまには、このスキーマに十分に精通しておくことを強く推奨します。 - ## 処理 - フィルタリング、変換、およびエンリッチ {#processing---filtering-transforming-and-enriching} 先ほどのログイベントのタイムスタンプ設定の例で示したように、ユーザーは必然的にイベントメッセージをフィルタリング、変換し、エンリッチしたくなります。これは OpenTelemetry の複数の機能を利用することで実現できます。 @@ -339,7 +334,6 @@ service: ./otelcol-contrib --config config-unstructured-logs-with-processor.yaml ``` - ## ClickHouse へのエクスポート {#exporting-to-clickhouse} エクスポーターは、1 つ以上のバックエンドまたは送信先にデータを送信します。エクスポーターには、プル型とプッシュ型があります。イベントを ClickHouse に送信するには、プッシュ型の [ClickHouse 
exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md) を使用する必要があります。 @@ -402,7 +396,6 @@ service: 以下の主な設定を確認してください: - * **pipelines** - 上記の設定では、[pipelines](https://opentelemetry.io/docs/collector/configuration/#pipelines) の利用が重要です。これはレシーバー、プロセッサー、エクスポーターのセットで構成されており、ログ用とトレース用にそれぞれ 1 つずつ定義されています。 * **endpoint** - ClickHouse との通信は `endpoint` パラメーターで設定します。接続文字列 `tcp://localhost:9000?dial_timeout=10s&compress=lz4&async_insert=1` により、通信は TCP 経由で行われます。トラフィック切り替えなどの理由で HTTP を利用したい場合は、この接続文字列を[こちら](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md#configuration-options)で説明されている方法に従って変更してください。ユーザー名とパスワードをこの接続文字列内で指定できる、より完全な接続設定の詳細は[こちら](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md#configuration-options)に記載されています。 @@ -430,7 +423,6 @@ $GOBIN/telemetrygen traces --otlp-insecure --traces 300 起動したら、次のような簡単なクエリでログイベントが存在することを確認します。 - ```sql SELECT * FROM otel_logs @@ -491,7 +483,6 @@ Links.TraceState: [] Links.Attributes: [] ``` - ## 既定のスキーマ {#out-of-the-box-schema} デフォルトでは、ClickHouse exporter はログとトレースの両方に対して出力先のログテーブルを作成します。これは設定 `create_schema` によって無効化できます。さらに、前述の設定により、ログテーブルおよびトレーステーブルの名前は、デフォルト値である `otel_logs` および `otel_traces` から変更できます。 @@ -540,7 +531,6 @@ SETTINGS ttl_only_drop_parts = 1 このスキーマについて、いくつか重要な注意点があります。 - - デフォルトでは、テーブルは `PARTITION BY toDate(Timestamp)` によって日付でパーティション分割されます。これにより、有効期限切れのデータを効率的に削除できます。 - TTL は `TTL toDateTime(Timestamp) + toIntervalDay(3)` によって設定されており、collector の設定で指定した値に対応します。[`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts) は、含まれるすべての行が有効期限切れになった場合にのみ、そのパーツ全体を削除することを意味します。これは、パーツ内の行単位で削除する(高コストな delete を伴う)よりも効率的です。常にこの設定にすることを推奨します。詳細は [Data management with TTL](/observability/managing-data#data-management-with-ttl-time-to-live) を参照してください。 - テーブルは標準的な [`MergeTree` 
engine](/engines/table-engines/mergetree-family/mergetree) を使用します。これはログおよびトレースに推奨されており、通常変更する必要はありません。 @@ -593,7 +583,6 @@ SETTINGS ttl_only_drop_parts = 1 ユーザーには、自動スキーマ作成を無効化し、テーブルを手動で作成することを推奨します。これにより、主キーおよびセカンダリキーを変更できるほか、クエリパフォーマンスを最適化するための追加カラムを導入することが可能になります。詳細については [Schema design](/use-cases/observability/schema-design) を参照してください。 - ## 挿入の最適化 {#optimizing-inserts} collector 経由で Observability データを ClickHouse に挿入する際に、高い挿入パフォーマンスと強い一貫性保証の両方を得るには、いくつかの簡単なルールに従う必要があります。OTel collector を正しく構成すれば、これらのルールに従うことは容易になります。これにより、初めて ClickHouse を使用する際にユーザーが遭遇しがちな[一般的な問題](https://clickhouse.com/blog/common-getting-started-issues-with-clickhouse)も回避できます。 @@ -692,7 +681,6 @@ service: exporters: [otlp] ``` - [clickhouse-gateway-config.yaml](https://www.otelbin.io/#config=receivers%3A*N__otlp%3A*N____protocols%3A*N____grpc%3A*N____endpoint%3A_0.0.0.0%3A4317*N*Nprocessors%3A*N__batch%3A*N____timeout%3A_5s*N____send*_batch*_size%3A_10000*N*Nexporters%3A*N__clickhouse%3A*N____endpoint%3A_tcp%3A%2F%2Flocalhost%3A9000*Qdial*_timeout*E10s*Acompress*Elz4*N____ttl%3A_96h*N____traces*_table*_name%3A_otel*_traces*N____logs*_table*_name%3A_otel*_logs*N____create*_schema%3A_true*N____timeout%3A_10s*N____database%3A_default*N____sending*_queue%3A*N____queue*_size%3A_10000*N____retry*_on*_failure%3A*N____enabled%3A_true*N____initial*_interval%3A_5s*N____max*_interval%3A_30s*N____max*_elapsed*_time%3A_300s*N*Nservice%3A*N__pipelines%3A*N____logs%3A*N______receivers%3A_%5Botlp%5D*N______processors%3A_%5Bbatch%5D*N______exporters%3A_%5Bclickhouse%5D%7E\&distro=otelcol-contrib%7E\&distroVersion=v0.103.1%7E) ```yaml @@ -740,7 +728,6 @@ service: より大規模なゲートウェイベースのアーキテクチャの管理と、そこから得られた知見の例としては、この[ブログ記事](https://clickhouse.com/blog/building-a-logging-platform-with-clickhouse-and-saving-millions-over-datadog)を参照してください。 - ### Kafka の追加 {#adding-kafka} ここまでのアーキテクチャでは、メッセージキューとして Kafka を使用していないことに気づくかもしれません。 diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md index 59a33d0a224..0dbf0c7f4a9 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md @@ -11,7 +11,6 @@ import observability_1 from '@site/static/images/use-cases/observability/observa import observability_2 from '@site/static/images/use-cases/observability/observability-2.png'; import Image from '@theme/IdealImage'; - # オブザーバビリティのための ClickHouse 活用 {#using-clickhouse-for-observability} ## はじめに {#introduction} @@ -86,7 +85,6 @@ Observability のユースケースには、ログ、トレース、メトリク ClickHouse はメトリクスデータの保存にも使用できますが、この柱については、Prometheus データフォーマットや PromQL のサポートなどの機能がまだ開発途上であり、ClickHouse における成熟度はそれほど高くありません。 ::: - ### 分散トレーシング {#distributed-tracing} 分散トレーシングは、オブザーバビリティにおける重要な機能です。分散トレース(単にトレースとも呼ばれます)は、システム内を流れるリクエストの経路を表現します。リクエストはエンドユーザーまたはアプリケーションから送信され、システム全体へと広がり、一般的にはマイクロサービス間の一連の処理フローとして現れます。このシーケンスを記録し、その後に発生するイベント同士を相関付けることで、アーキテクチャの複雑さやサーバーレスであるかどうかに関わらず、オブザーバビリティの利用者や SRE はアプリケーションフロー内の問題を診断できるようになります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md index f8fdf020dba..c9318706c67 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md @@ -10,7 +10,6 @@ doc_type: 'guide' import observability_14 from '@site/static/images/use-cases/observability/observability-14.png'; import Image from '@theme/IdealImage'; - # データ管理 {#managing-data} Observability 用の ClickHouse 
デプロイメントでは、必然的に大規模なデータセットを扱うことになり、それらを適切に管理する必要があります。ClickHouse には、データ管理を支援するためのさまざまな機能が用意されています。 @@ -78,7 +77,6 @@ WHERE `table` = 'otel_logs' 古いデータの保存用に、`otel_logs_archive` という別のテーブルを用意しておくこともできます。データはパーティション単位で効率的にこのテーブルへ移動でき(メタデータの変更だけで済みます)、処理されます。 - ```sql CREATE TABLE otel_logs_archive AS otel_logs --アーカイブテーブルにデータを移動 @@ -145,7 +143,6 @@ ORDER BY c DESC この機能は、設定 [`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts) を有効にした場合に TTL によって活用されます。詳細については [TTL(Time to Live)によるデータ管理](#data-management-with-ttl-time-to-live) を参照してください。 ::: - ### アプリケーション {#applications} 上記では、データをパーティション単位で効率的に移動および操作できることを示しました。実際には、ユーザーが Observability のユースケースでパーティション操作を最も頻繁に活用するのは、おそらく次の 2 つのシナリオです。 @@ -193,7 +190,6 @@ TTL は即時には適用されず、上記のとおりスケジュールに基 **Important: [`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts) の使用を推奨します**(デフォルトのスキーマで適用されます)。この設定が有効な場合、そのパーツ内のすべての行が期限切れになったときに、ClickHouse はパーツ全体を削除します。パーツ全体を削除することで(`ttl_only_drop_parts=0` の場合にリソース集約的なミューテーションによって行われる、TTL 対象行の部分的なクリーンアップではなく)、`merge_with_ttl_timeout` を短く設定しても、システムパフォーマンスへの影響を抑えることができます。データを TTL で有効期限を設定している単位(例: 日)と同じ単位でパーティション分割している場合、各パーツには自然とその定義された間隔のデータのみが含まれるようになります。これにより、`ttl_only_drop_parts=1` を効率的に適用できるようになります。 - ### カラムレベルの TTL {#column-level-ttl} 上記の例では、テーブルレベルでデータの有効期限を設定しています。データはカラムレベルでも有効期限を設定できます。データが古くなるにつれ、調査における価値が、その保持に必要なリソースコストに見合わないカラムを削除するために活用できます。例えば、挿入時にまだ抽出されていない新しい動的メタデータ(例: 新しい Kubernetes ラベル)が追加される可能性に備えて、`Body` カラムを保持しておくことを推奨します。一定期間、例えば 1 か月が経過した後、この追加メタデータが有用ではないことが明らかになるかもしれません。その場合、`Body` カラムを保持し続ける価値は限定的です。 @@ -215,7 +211,6 @@ ORDER BY (ServiceName, Timestamp) 列レベルの TTL を指定する場合は、ユーザーが独自にスキーマを定義する必要があります。これは OTel collector では指定できません。 ::: - ## データの再圧縮 {#recompressing-data} 通常、オブザーバビリティ向けデータセットには `ZSTD(1)` を推奨していますが、ユーザーは別の圧縮アルゴリズムや、より高い圧縮レベル(例: 
`ZSTD(3)`)を試すこともできます。スキーマ作成時にこれを指定できるだけでなく、一定期間経過後に圧縮方式を変更するように設定することも可能です。これは、あるコーデックや圧縮アルゴリズムが圧縮率を向上させる一方で、クエリ性能を低下させる場合に有効です。このトレードオフは、クエリ頻度の低い古いデータには許容できる一方で、調査で頻繁に利用される最新データには適さない場合があります。 @@ -254,7 +249,6 @@ TTL Timestamp + INTERVAL 4 DAY RECOMPRESS CODEC(ZSTD(3)) TTL の設定に関する詳細と例については[こちら](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes)を参照してください。テーブルやカラムに TTL を追加・変更する方法の例は[こちら](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl)を参照してください。TTL によって hot-warm アーキテクチャのようなストレージ階層を実現する方法については、[ストレージ階層](#storage-tiers)を参照してください。 - ## ストレージ階層 {#storage-tiers} ClickHouse では、異なるディスク上にストレージ階層を作成できます。たとえば、ホット/最新データを SSD 上に、古いデータを S3 に配置します。このアーキテクチャにより、調査で参照される頻度が低く、クエリの SLA 要件がより緩い古いデータについて、より低コストなストレージを利用できます。 @@ -351,7 +345,6 @@ LIMIT 5 今後取り込まれるすべてのデータにこの値が挿入されるようにするため、以下のように `ALTER TABLE` 構文を使用してマテリアライズドビューを変更します。 - ```sql ALTER TABLE otel_logs_mv MODIFY QUERY @@ -378,7 +371,6 @@ FROM otel_logs 以降の行では、挿入時に `Size` 列に値が設定されます。 - ### 新しいテーブルを作成する {#create-new-tables} 上記の手順の代替として、ユーザーは新しいスキーマを持つ新しいターゲットテーブルを作成するだけでも問題ありません。いずれのマテリアライズドビューも、上記の `ALTER TABLE MODIFY QUERY` を使用して新しいテーブルを参照するように変更できます。このアプローチでは、ユーザーは `otel_logs_v3` のようにテーブルをバージョン管理できます。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md index 4b162170bad..0d5eb91903a 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md @@ -13,7 +13,6 @@ import observability_12 from '@site/static/images/use-cases/observability/observ import observability_13 from '@site/static/images/use-cases/observability/observability-13.png'; import Image from '@theme/IdealImage'; - # オブザーバビリティ向けスキーマ設計 
{#designing-a-schema-for-observability} ログおよびトレースに対しては、常に独自のスキーマを作成することを推奨します。その理由は次のとおりです。 @@ -82,7 +81,6 @@ LIMIT 5 構造化ログの JSON 解析は、一般的に ClickHouse で実行することを推奨します。ClickHouse が最速の JSON 解析実装であると確信しています。ただし、ユーザーがログを他の送信先にも送信したい場合や、このロジックを SQL 側に持たせたくない場合があることも理解しています。 ::: - ```sql SELECT path(JSONExtractString(Body, 'request_path')) AS path, count() AS c FROM otel_logs @@ -156,7 +154,6 @@ LIMIT 5 ユーザーは、[こちら](/observability/integrating-opentelemetry#processing---filtering-transforming-and-enriching) で説明しているように、OTel collector の processors や operators を使用して処理を行うこともできます。多くの場合、ユーザーは、ClickHouse の方が collector の processors よりも大幅にリソース効率が高く、高速であると感じるでしょう。すべてのイベント処理を SQL で行うことの主なデメリットは、ソリューションが ClickHouse に密結合してしまうことです。例えば、ユーザーは処理済みログを OTel collector から S3 などの別の宛先に送信したい場合があります。 ::: - ### マテリアライズドカラム {#materialized-columns} マテリアライズドカラムは、他のカラムから構造を抽出するための最もシンプルな手段を提供します。この種のカラムの値は常に挿入時に計算され、INSERT クエリで明示的に指定することはできません。 @@ -224,7 +221,6 @@ Peak memory usage: 3.16 MiB. マテリアライズドカラムは、デフォルトでは `SELECT *` の結果には含まれません。これは、`SELECT *` の結果を常に INSERT 文を使ってそのままテーブルに挿入できるという不変性を保つためです。この挙動は `asterisk_include_materialized_columns=1` を設定することで変更でき、Grafana ではデータソース設定(`Additional Settings -> Custom Settings`)の中でこの設定を有効化できます。 ::: - ## マテリアライズドビュー {#materialized-views} [マテリアライズドビュー](/materialized-views) は、ログおよびトレースに対して SQL によるフィルタリングや変換を適用する、より強力な手段を提供します。 @@ -268,7 +264,6 @@ Null テーブルエンジンは強力な最適化機構で、`/dev/null` のよ 次のクエリを見てみましょう。これは行を保持したい形式に変換し、`LogAttributes` からすべてのカラムを抽出します(これはコレクターが `json_parser` オペレーターを使って設定したものと仮定します)。さらに、いくつかの単純な条件と[これらのカラム](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext)の定義に基づいて `SeverityText` と `SeverityNumber` を設定します。この例では、`TraceId`、`SpanId`、`TraceFlags` などのカラムは無視し、値が設定されることが分かっているカラムだけを選択しています。 - ```sql SELECT Body, @@ -354,7 +349,6 @@ ORDER BY (ServiceName, Timestamp) スキーマを大幅に変更している点に注目してください。実際には、ユーザーは保持しておきたいトレース用のカラムや、`ResourceAttributes` カラム(通常は Kubernetes のメタデータを含みます)も持っていることが多いでしょう。Grafana はトレース関連のカラムを活用して、ログとトレース間のリンク機能を提供できます。詳しくは 
[「Grafana の利用」](/observability/grafana) を参照してください。 ::: - 以下では、`otel_logs` テーブルに対して上記の SELECT を実行し、その結果を `otel_logs_v2` に送るマテリアライズドビュー `otel_logs_mv` を作成します。 ```sql @@ -417,7 +411,6 @@ SeverityNumber: 9 `Body` カラムから JSON 関数を使って列を抽出することで構成される、同等のマテリアライズドビューを次に示します。 - ```sql CREATE MATERIALIZED VIEW otel_logs_mv TO otel_logs_v2 AS SELECT Body, @@ -440,7 +433,6 @@ SELECT Body, FROM otel_logs ``` - ### 型に注意 {#beware-types} 上記のマテリアライズドビューは、特に `LogAttributes` マップを使用する場合に、暗黙の型変換に依存しています。ClickHouse は多くの場合、抽出された値を対象テーブルの型に自動的にキャストし、記述すべき構文を減らすことができます。ただし、ビューが正しく動作することを確認するため、ビューの `SELECT` 文と、同じスキーマを持つ対象テーブルに対する [`INSERT INTO`](/sql-reference/statements/insert-into) 文を組み合わせて常にテストすることを推奨します。これにより、型が正しく扱われていることを確認できます。特に次のケースには注意してください: @@ -495,7 +487,6 @@ groupArrayDistinctArray(mapKeys(LogAttributes)): ['remote_user','run_time','requ Map 型の列名でドットを使用することは推奨しません。将来的にその使用を非推奨とする可能性があります。代わりに `_` を使用してください。 ::: - ## エイリアスの使用 {#using-aliases} Map 型へのクエリは通常のカラムへのクエリよりも低速です — ["Accelerating queries"](#accelerating-queries) を参照してください。さらに、構文がより複雑で、ユーザーがクエリを記述する際に煩雑になりがちです。この後者の問題に対処するため、ALIAS カラムの使用を推奨します。 @@ -573,7 +564,6 @@ LIMIT 5 デフォルトでは、`SELECT *` は ALIAS 列を除外します。この動作は、`asterisk_include_alias_columns=1` を設定することで無効にできます。 ::: - ## 型の最適化 {#optimizing-types} 型最適化に関する[ClickHouse の一般的なベストプラクティス](/data-modeling/schema-design#optimizing-types)は、本ユースケースにも適用されます。 @@ -694,7 +684,6 @@ LIMIT 4; 4 rows in set. Elapsed: 0.259 sec. 
``` - :::note 上記のクエリでは多くの処理が行われています。詳しく知りたい方は、この優れた[解説](https://clickhouse.com/blog/geolocating-ips-in-clickhouse-and-grafana#using-bit-functions-to-convert-ip-ranges-to-cidr-notation)を参照してください。そうでない場合は、上記のクエリが IP レンジに対して CIDR を計算していると理解してください。 ::: @@ -775,7 +764,6 @@ SELECT dictGet('ip_trie', ('country_code', 'latitude', 'longitude'), CAST('85.24 元のログデータセットに戻ると、上記を利用してログを国別に集計できます。以下では、先ほどのマテリアライズドビューの結果として得られたスキーマを使用しており、そこには抽出済みの `RemoteAddress` 列が含まれていることを前提としています。 - ```sql SELECT dictGet('ip_trie', 'country_code', tuple(RemoteAddress)) AS country, formatReadableQuantity(count()) AS num_requests @@ -833,7 +821,6 @@ ORDER BY (ServiceName, Timestamp) 上記の国情報と座標を利用することで、国ごとのグルーピングやフィルタリングにとどまらない可視化が可能になります。活用のヒントについては、「[地理情報データの可視化](/observability/grafana#visualizing-geo-data)」を参照してください。 - ### 正規表現辞書の使用(User-Agent の解析) {#using-regex-dictionaries-user-agent-parsing} [User-Agent 文字列](https://en.wikipedia.org/wiki/User_agent)の解析は、典型的な正規表現の問題であり、ログおよびトレースベースのデータセットで一般的に求められる処理です。ClickHouse は Regular Expression Tree Dictionary を使用して、User-Agent を効率的に解析する機能を提供します。 @@ -929,7 +916,6 @@ LAYOUT(regexp_tree); これらの辞書を読み込んだら、サンプルの User-Agent 文字列を指定して、この新しい辞書抽出機能をテストできます。 - ```sql WITH 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:127.0) Gecko/20100101 Firefox/127.0' AS user_agent SELECT @@ -1006,7 +992,6 @@ ORDER BY (ServiceName, Timestamp, Status) コレクターを再起動し、これまでの手順どおりに構造化ログを取り込んだら、新たに抽出された Device、Browser、Os の各列に対してクエリを実行できるようになります。 - ```sql SELECT Device, Browser, Os FROM otel_logs_v2 @@ -1024,7 +1009,6 @@ Os: ('Other','0','0','0') これらのユーザーエージェント列では Tuple を使用している点に注意してください。Tuple は、階層があらかじめ分かっている複雑な構造に対して推奨されます。サブカラムも、異なる型を許容しつつ、通常のカラムと同等のパフォーマンスを発揮します(Map のキーとは異なります)。 ::: - ### さらに詳しく {#further-reading} 辞書のさらなる例や詳細については、以下の記事を参照してください。 @@ -1111,7 +1095,6 @@ FINAL 1行のセット。経過時間: 0.039秒 ``` - ここでは、クエリ結果を保存することで、`otel_logs` の 1,000 万行から 113 行まで行数を効果的に削減しました。ここで重要なのは、新しいログが `otel_logs` テーブルに挿入されると、それぞれの時間帯に対応する新しい値が `bytes_per_hour` に書き込まれ、バックグラウンドで非同期に自動マージされる点です。1 時間あたり 1 
行のみを保持することで、`bytes_per_hour` は常に小さく、かつ最新の状態に保たれます。 行のマージは非同期で行われるため、ユーザーがクエリした時点では、1 時間あたり複数行が存在する可能性があります。クエリ時に未マージの行も必ずマージされるようにするには、次の 2 つの選択肢があります。 @@ -1165,7 +1148,6 @@ LIMIT 5 この高速化効果は、より大きなデータセットやより複雑なクエリではさらに大きくなる可能性があります。サンプルについては[こちら](https://github.com/ClickHouse/clickpy)を参照してください。 ::: - #### さらに複雑な例 {#a-more-complex-example} 上記の例では、[SummingMergeTree](/engines/table-engines/mergetree-family/summingmergetree) を使って、1時間ごとの単純なカウントを集計しています。単純な合計を超える統計を計算するには、別のターゲットテーブルエンジン、すなわち [AggregatingMergeTree](/engines/table-engines/mergetree-family/aggregatingmergetree) が必要です。 @@ -1244,7 +1226,6 @@ ORDER BY Hour DESC ここでは `FINAL` ではなく `GROUP BY` を使用していることに注意してください。 - ### 高速なルックアップのためのマテリアライズドビュー(インクリメンタル)の活用 {#using-materialized-views-incremental--for-fast-lookups} ユーザーは、`WHERE` 句や集約句で頻繁に使用されるカラムを含むように ClickHouse の並び替えキーを選択する際、自身のアクセスパターンを考慮する必要があります。Observability のユースケースでは、単一のカラム集合には収まらない多様なアクセスパターンが存在するため、これは制約になり得ます。この点は、デフォルトの OTel スキーマに組み込まれている例で示すのが最も分かりやすいでしょう。トレース用のデフォルトスキーマを考えてみましょう。 @@ -1316,7 +1297,6 @@ WHERE TraceId != '' GROUP BY TraceId ``` - このビューにより、テーブル `otel_traces_trace_id_ts` には各トレースの最小および最大タイムスタンプが必ず保持されるようになります。このテーブルは `TraceId` で並べ替えられているため、これらのタイムスタンプを効率的に取得できます。これらのタイムスタンプの範囲は、メインの `otel_traces` テーブルをクエリする際に利用できます。より具体的には、トレースをその ID で取得する際、Grafana は次のクエリを使用します。 ```sql @@ -1350,7 +1330,6 @@ LIMIT 1000 同様の手法は、同種のアクセスパターンにも適用できます。類似の例を Data Modeling の[こちら](/materialized-view/incremental-materialized-view#lookup-table)で説明しています。 - ### プロジェクションの使用 {#using-projections} ClickHouseプロジェクションを使用すると、1つのテーブルに対して複数の`ORDER BY`句を指定できます。 @@ -1460,7 +1439,6 @@ Peak memory usage: 27.85 MiB. 
上記の例では、先ほどのクエリで使用した列をプロジェクションで指定しています。これにより、指定したこれらの列のみがプロジェクションの一部としてディスク上に保存され、`Status` で並べ替えられます。代わりにここで `SELECT *` を使用した場合は、すべての列が保存されます。これは、任意の列の組み合わせを用いる、より多くのクエリがプロジェクションの恩恵を受けられる一方で、追加のストレージ使用量が発生することを意味します。ディスク容量と圧縮率の測定方法については、「[テーブルサイズと圧縮の測定](#measuring-table-size--compression)」を参照してください。 - ### Secondary/data skipping indices {#secondarydata-skipping-indices} ClickHouse でどれだけプライマリキーを適切にチューニングしても、一部のクエリではテーブル全体のスキャンがどうしても必要になる場合があります。これはマテリアライズドビュー(および一部のクエリに対するプロジェクション)を用いることで軽減できますが、これらには追加のメンテナンスが必要であり、ユーザーがその存在を把握したうえで積極的に利用しなければなりません。従来のリレーショナルデータベースではセカンダリインデックスでこれを解決しますが、ClickHouse のようなカラム指向データベースでは非効率です。その代わり、ClickHouse では「スキップインデックス」を使用し、一致する値が存在しない大きなデータチャンクをデータベースがスキップできるようにすることで、クエリ性能を大幅に向上させます。 @@ -1616,7 +1594,6 @@ WHERE Referer LIKE '%ultra%' ブルームフィルターは一般的に、フィルターのサイズが対象カラム自体より小さい場合にのみ高速になります。フィルターのほうが大きい場合は、パフォーマンス向上はほとんど見込めません。次のクエリを使用して、フィルターのサイズとカラムのサイズを比較してください。 - ```sql SELECT name, @@ -1654,7 +1631,6 @@ Bloom フィルターには、かなりのチューニングが必要になる セカンダリスキップインデックスの詳細については、[こちら](/optimize/skipping-indexes#skip-index-functions) を参照してください。 - ### マップからの抽出 {#extracting-from-maps} `Map` 型は OTel のスキーマで広く使われています。この型では値とキーが同じ型である必要があり、Kubernetes のラベルなどのメタデータに適しています。`Map` 型のサブキーをクエリするときは、親カラム全体が読み込まれることに注意してください。マップに多くのキーが含まれている場合、キーが個別のカラムとして存在している場合と比べてディスクから読み込むデータ量が増えるため、クエリに大きなオーバーヘッドが発生する可能性があります。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md index a9f97756be2..48020539bff 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md @@ -14,15 +14,14 @@ import hyperdx_26 from '@site/static/images/use-cases/observability/hyperdx-26.p ClickStack の各コンポーネントには、以下の設定オプションがあります。 - ## 設定の変更 {#modifying-settings} ### Docker {#docker} -[All in 
One](/use-cases/observability/clickstack/deployment/all-in-one)、[HyperDX Only](/use-cases/observability/clickstack/deployment/hyperdx-only)、または [Local Mode](/use-cases/observability/clickstack/deployment/local-mode-only) を使用している場合は、希望する設定値を環境変数として渡すだけです。例: +[All in One](/use-cases/observability/clickstack/deployment/all-in-one)、[HyperDX Only](/use-cases/observability/clickstack/deployment/hyperdx-only)、または [Local Mode](/use-cases/observability/clickstack/deployment/local-mode-only) を使用している場合は、環境変数で必要な設定値を指定するだけで構いません。例: ```shell -docker run -e HYPERDX_LOG_LEVEL='debug' -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e HYPERDX_LOG_LEVEL='debug' -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` @@ -43,7 +42,6 @@ services: # ... その他の設定 ``` - ### Helm {#helm} #### 値のカスタマイズ(任意) {#customizing-values} @@ -97,7 +95,6 @@ ingress: value: abc ``` - ## HyperDX {#hyperdx} ### データソース設定 {#datasource-settings} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md index 815dfb6c2ab..fb79c11dce2 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md @@ -31,7 +31,6 @@ ClickStack はイベントの可視化をサポートしており、HyperDX に 可視化は、トレース、メトリクス、ログ、または任意のユーザー定義のワイドイベントスキーマに基づいて作成できます。 - ## 可視化の作成 {#creating-visualizations} HyperDX の **Chart Explorer** インターフェイスを使用すると、メトリクス、トレース、ログを時間経過とともに可視化でき、データ分析用の簡易な可視化をすばやく作成できます。このインターフェイスは、ダッシュボード作成時にも再利用されます。以下のセクションでは、Chart Explorer を使用して可視化を作成する手順を説明します。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md 
index b750be7c2a3..8bbfcd2af34 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md @@ -23,7 +23,6 @@ import hyperdx_logs from '@site/static/images/use-cases/observability/hyperdx-lo このオプションには認証が含まれており、ダッシュボード、アラート、保存済み検索をセッションやユーザーをまたいで保持できます。 - ### 適した用途 {#suitable-for} * デモ @@ -35,21 +34,25 @@ import hyperdx_logs from '@site/static/images/use-cases/observability/hyperdx-lo -### Docker でデプロイする {#deploy-with-docker} +### Docker を使ってデプロイする {#deploy-with-docker} -次のコマンドで OpenTelemetry コレクター(ポート 4317 および 4318)と HyperDX UI(ポート 8080)を起動します。 +次のコマンドで、OpenTelemetry コレクター(ポート 4317 および 4318)と HyperDX UI(ポート 8080)を起動します。 ```shell -docker run -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` +:::note Image Name Update +ClickStack のコンテナイメージは現在 `clickhouse/clickstack-*`(以前は `docker.hyperdx.io/hyperdx/*`)として公開されています。 +::: + ### HyperDX UI にアクセスする {#navigate-to-hyperdx-ui} [http://localhost:8080](http://localhost:8080) にアクセスして HyperDX UI を開きます。 要件を満たすユーザー名とパスワードを指定して、ユーザーを作成します。 -`Create` をクリックすると、組み込みの ClickHouse インスタンス用のデータソースが作成されます。 +`Create` をクリックすると、統合済みの ClickHouse インスタンス向けのデータソースが作成されます。 @@ -57,13 +60,13 @@ docker run -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hype ### データを取り込む {#ingest-data} -データを取り込む方法については、「[Ingesting data](/use-cases/observability/clickstack/ingesting-data)」を参照してください。 +データの取り込みについては、「[Ingesting data](/use-cases/observability/clickstack/ingesting-data)」を参照してください。 ## データと設定の永続化 {#persisting-data-and-settings} -コンテナの再起動後もデータと設定を保持するには、前述の docker コマンドを変更し、パス `/data/db`、`/var/lib/clickhouse`、`/var/log/clickhouse-server` をマウントするようにします。例えば、次のようになります。 +コンテナ再起動後もデータと設定を保持するには、上記の docker コマンドを変更して 
`/data/db`、`/var/lib/clickhouse`、`/var/log/clickhouse-server` のパスをマウントするようにします。例えば次のようにします: ```shell # ディレクトリの存在を確認 {#ensure-directories-exist} @@ -76,7 +79,7 @@ docker run \ -v "$(pwd)/.volumes/db:/data/db" \ -v "$(pwd)/.volumes/ch_data:/var/lib/clickhouse" \ -v "$(pwd)/.volumes/ch_logs:/var/log/clickhouse-server" \ - docker.hyperdx.io/hyperdx/hyperdx-all-in-one + clickhouse/clickstack-all-in-one:latest ``` @@ -89,18 +92,18 @@ docker run \ ## ポートのカスタマイズ {#customizing-ports-deploy} -HyperDX Local が使用するアプリケーション (8080) や API (8000) のポートをカスタマイズする必要がある場合は、適切なポートをポートフォワーディングし、いくつかの環境変数を設定するように `docker run` コマンドを変更する必要があります。 +HyperDX Local が使用するアプリケーション用 (8080) または API 用 (8000) のポートをカスタマイズする必要がある場合は、適切なポートを転送し、いくつかの環境変数を設定するように `docker run` コマンドを変更する必要があります。 -OpenTelemetry のポートは、ポートフォワーディングのフラグを変更するだけでカスタマイズできます。たとえば、OpenTelemetry の HTTP ポートを 4999 に変更するには、`-p 4318:4318` を `-p 4999:4318` に置き換えます。 +OpenTelemetry のポートは、ポート転送フラグを変更するだけでカスタマイズできます。たとえば、OpenTelemetry の HTTP ポートを 4999 に変更するには、`-p 4318:4318` を `-p 4999:4318` に置き換えます。 ```shell -docker run -p 8080:8080 -p 4317:4317 -p 4999:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -p 8080:8080 -p 4317:4317 -p 4999:4318 clickhouse/clickstack-all-in-one:latest ``` -## ClickHouse Cloud の使用 {#using-clickhouse-cloud} +## ClickHouse Cloud を使用する {#using-clickhouse-cloud} -このディストリビューションは ClickHouse Cloud と組み合わせて使用できます。ローカルの ClickHouse インスタンスも引き続きデプロイされますが(使用はされません)、環境変数 `CLICKHOUSE_ENDPOINT`、`CLICKHOUSE_USER`、`CLICKHOUSE_PASSWORD` を設定することで、OTel collector が ClickHouse Cloud インスタンスを使用するように構成できます。 +このディストリビューションは ClickHouse Cloud で使用できます。ローカルの ClickHouse インスタンスは引き続きデプロイされますが(使用されません)、環境変数 `CLICKHOUSE_ENDPOINT`、`CLICKHOUSE_USER`、`CLICKHOUSE_PASSWORD` を設定することで、OTel collector が ClickHouse Cloud インスタンスを使用するように構成できます。 例: @@ -109,22 +112,22 @@ export CLICKHOUSE_ENDPOINT= export CLICKHOUSE_USER= export CLICKHOUSE_PASSWORD= -docker run -e CLICKHOUSE_ENDPOINT=${CLICKHOUSE_ENDPOINT} -e CLICKHOUSE_USER=default -e 
CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD} -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e CLICKHOUSE_ENDPOINT=${CLICKHOUSE_ENDPOINT} -e CLICKHOUSE_USER=default -e CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD} -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` -`CLICKHOUSE_ENDPOINT` には、ポート `8443` を含む ClickHouse Cloud の HTTPS エンドポイントを指定します。例えば `https://mxl4k3ul6a.us-east-2.aws.clickhouse.com:8443` のようになります。 +`CLICKHOUSE_ENDPOINT` には、ポート `8443` を含む ClickHouse Cloud の HTTPS エンドポイントを指定します。例としては `https://mxl4k3ul6a.us-east-2.aws.clickhouse.com:8443` のようになります。 -HyperDX UI に接続したら、[`Team Settings`](http://localhost:8080/team) に移動し、ClickHouse Cloud サービスへの接続を作成し、その後で必要なソースの設定を行います。フローの一例については[こちら](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection)を参照してください。 +HyperDX UI に接続したら、[`Team Settings`](http://localhost:8080/team) に移動し、ClickHouse Cloud サービスへの接続を作成してから、必要なソースを追加します。手順の一例については[こちら](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection)を参照してください。 -## OpenTelemetry collector の設定 {#configuring-collector} +## OpenTelemetry Collector の設定 {#configuring-collector} -必要に応じて OTel collector の設定を変更できます。詳細は ["設定の変更"](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration) を参照してください。 +必要に応じて OTel collector の設定を変更できます。詳細は「[設定の変更](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration)」を参照してください。 -例えば、次のように設定します。 +例: ```shell -docker run -e OTEL_AGENT_FEATURE_GATE_ARG='--feature-gates=clickhouse.json' -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e OTEL_AGENT_FEATURE_GATE_ARG='--feature-gates=clickhouse.json' -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md index 72736b74e34..93ea60836db 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md @@ -33,7 +33,6 @@ Docker Compose は、デフォルトの `otel-collector` セットアップに これらのポートにより、多様なテレメトリソースとの連携が可能になり、OpenTelemetry collector はさまざまなインジェスト要件に対応できる本番運用向けの構成になります。 - ### 適しているケース {#suitable-for} * ローカルでのテスト @@ -117,7 +116,6 @@ HYPERDX_OPAMP_PORT=4320 HYPERDX_OTEL_EXPORTER_CLICKHOUSE_DATABASE=default ``` - ### OpenTelemetry collector の設定 {#configuring-collector} 必要に応じて OTel collector の設定を変更できます。設定の変更方法の詳細は、["Modifying configuration"](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration) を参照してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md index 3609f6e7548..2544ea79ffe 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md @@ -34,7 +34,6 @@ helm install my-clickstack clickstack/clickstack \ --set otel.opampServerUrl="http://my-clickstack-clickstack-app.default.svc.cluster.local:4320" ``` - ### GKE に関するその他の考慮事項 {#other-gke-considerations} ```yaml @@ -53,7 +52,6 @@ clickhouse: - "10.0.0.0/8" # その他の設定用のフォールバック ``` - ## Amazon EKS {#amazon-eks} EKS にデプロイする場合は、次の一般的な構成を検討してください。 @@ -79,7 +77,6 @@ hyperdx: enabled: true ``` - ## Azure AKS {#azure-aks} AKS 
にデプロイする場合: @@ -97,7 +94,6 @@ clickhouse: - "10.0.0.0/8" ``` - ## 本番環境向けクラウド デプロイメント チェックリスト {#production-cloud-deployment-checklist} 任意のクラウドプロバイダー上の本番環境に ClickStack をデプロイする前に、次を実施してください。 @@ -127,7 +123,6 @@ hyperdx: memory: 4Gi ``` - ### 高可用性 {#high-availability} ```yaml @@ -148,7 +143,6 @@ hyperdx: topologyKey: kubernetes.io/hostname ``` - ### 永続ストレージ {#persistent-storage} データを保持できるよう、PersistentVolume(永続ボリューム)が適切に構成されていることを確認します。 @@ -167,7 +161,6 @@ clickhouse: * **EKS**: `gp3` または `io2` * **AKS**: `managed-premium` または `managed-csi` - ### ブラウザ互換性に関する注意事項 {#browser-compatibility-notes} HTTP のみでデプロイしている場合(開発/テスト用途)、一部のブラウザでは「セキュアコンテキスト」の要件により crypto API のエラーが表示されることがあります。本番環境向けのデプロイメントでは、必ずイングレス構成を利用し、適切な TLS 証明書付きの HTTPS を使用してください。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md index 7cf33a41328..9886fd82d20 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md @@ -34,14 +34,12 @@ hyperdx: helm upgrade my-clickstack clickstack/clickstack -f values.yaml ``` - ### 方法 2:`--set` フラグを指定した Helm upgrade による更新 {#api-key-set-flag} ```shell helm upgrade my-clickstack clickstack/clickstack --set hyperdx.apiKey="your-api-key-here" ``` - ### 変更を反映するためにポッドを再起動する {#restart-pods} API キーを更新したら、新しい設定を反映するためにポッドを再起動します。 @@ -54,7 +52,6 @@ kubectl rollout restart deployment my-clickstack-clickstack-app my-clickstack-cl このチャートは、API キーを含む Kubernetes Secret(`-app-secrets`)を自動的に作成します。外部 Secret を使用する場合を除き、追加の Secret 設定は不要です。 ::: - ## シークレット管理 {#secret-management} API キーやデータベース認証情報などの機密データを扱う場合は、Kubernetes の Secret リソースを使用してください。 @@ -83,7 +80,6 @@ data: kubectl apply -f secrets.yaml 
``` - ### カスタムシークレットの作成 {#creating-a-custom-secret} Kubernetes のカスタムシークレットを手動で作成します。 @@ -93,7 +89,6 @@ kubectl create secret generic hyperdx-secret \ --from-literal=API_KEY=my-secret-api-key ``` - ### values.yaml で Secret を参照する {#referencing-a-secret} ```yaml @@ -105,7 +100,6 @@ hyperdx: key: API_KEY ``` - ## イングレスのセットアップ {#ingress-setup} ドメイン名経由で HyperDX の UI と API を公開するには、`values.yaml` でイングレスを有効にします。 @@ -124,7 +118,6 @@ hyperdx: `hyperdx.frontendUrl` はイングレスのホスト名と一致させ、プロトコルを含めて設定してください(例: `https://hyperdx.yourdomain.com`)。これにより、生成されるすべてのリンク、クッキー、およびリダイレクトが正しく動作します。 ::: - ### TLS (HTTPS) の有効化 {#enabling-tls} デプロイメントを HTTPS で保護するには、次の手順を実行します。 @@ -149,7 +142,6 @@ hyperdx: tlsSecretName: "hyperdx-tls" ``` - ### イングレス設定の例 {#example-ingress-configuration} 参考として、生成されるイングレスリソースは次のようになります。 @@ -181,7 +173,6 @@ spec: secretName: hyperdx-tls ``` - ### よくあるイングレスの落とし穴 {#common-ingress-pitfalls} **パスとリライトの設定:** @@ -207,7 +198,6 @@ spec: kubectl -n ingress-nginx get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath="{.items[0].spec.containers[0].image}" ``` - ## OTel collector のイングレス {#otel-collector-ingress} OTel collector のエンドポイント(traces、metrics、logs)をイングレス経由で公開する必要がある場合は、`additionalIngresses` 設定を使用します。これは、クラスター外からテレメトリデータを送信する場合や、OTel collector 用にカスタムドメインを使用する場合に便利です。 @@ -244,7 +234,6 @@ hyperdx: OTEL collector を外部公開する必要がない場合は、この設定を省略できます。ほとんどのユーザーにとっては、通常のイングレス設定だけで十分です。 ::: - ## イングレスのトラブルシューティング {#troubleshooting-ingress} **イングレスリソースを確認する:** @@ -282,7 +271,6 @@ curl -I https://hyperdx.yourdomain.com/_next/static/chunks/main-xxxx.js * 設定変更後は、ブラウザキャッシュと CDN/プロキシキャッシュをクリアして、古いアセットが配信されるのを避ける - ## 値のカスタマイズ {#customizing-values} `--set` フラグを使用して設定値をカスタマイズできます。 @@ -322,7 +310,6 @@ hyperdx: helm install my-clickstack clickstack/clickstack -f values.yaml ``` - ## 次のステップ {#next-steps} - [デプロイメントオプション](/docs/use-cases/observability/clickstack/deployment/helm-deployment-options) - 外部システムおよび最小構成でのデプロイメント diff --git 
a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md index 79b4343b613..7b3b3b79163 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md @@ -56,7 +56,6 @@ hyperdx: helm install my-clickstack clickstack/clickstack -f values-external-clickhouse.yaml ``` - ### オプション 2: 外部シークレット(本番環境で推奨) {#external-clickhouse-secret} 認証情報を Helm の設定から分離しておきたい本番環境でのデプロイでは、次のようにします。 @@ -174,7 +173,6 @@ hyperdx: ClickHouse Cloud への接続手順の全体像については、[「ClickHouse Cloud 接続の作成」](/docs/use-cases/observability/clickstack/getting-started#create-a-cloud-connection) を参照してください。 - ## 外部 OTel collector {#external-otel-collector} 既存の OTel collector インフラストラクチャがある場合は: @@ -194,7 +192,6 @@ helm install my-clickstack clickstack/clickstack -f values-external-otel.yaml イングレスを介して OTel collector のエンドポイントを公開する手順については、[Ingress 設定](/docs/use-cases/observability/clickstack/deployment/helm-configuration#otel-collector-ingress)を参照してください。 - ## 最小限のデプロイメント {#minimal-deployment} 既存のインフラストラクチャがある組織の場合は、HyperDX のみをデプロイします。 @@ -233,7 +230,6 @@ hyperdx: helm install my-clickstack clickstack/clickstack -f values-minimal.yaml ``` - ## 次のステップ {#next-steps} - [Configuration Guide](/docs/use-cases/observability/clickstack/deployment/helm-configuration) - API キー、シークレット、イングレスのセットアップ diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md index 1260018cfae..99f6f6fb8e5 100644 --- 
a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md @@ -36,7 +36,6 @@ HyperDX 用の Helm チャートは [こちら](https://github.com/hyperdxio/hel * TLS およびイングレスの設定 * シークレット管理および認証設定 - ### 適した用途 {#suitable-for} * 検証・PoC @@ -262,7 +261,6 @@ helm install my-clickstack clickstack/clickstack -f values.yaml シークレットベースの設定、外部 OTel collector、または最小構成で本番環境にデプロイする場合は、[Deployment Options](/docs/use-cases/observability/clickstack/deployment/helm-deployment-options) ガイドを参照してください。 ::: - ## 本番環境向けの注意事項 デフォルトでは、このチャートは ClickHouse と OTel collector もインストールするようになっています。ただし、本番環境では ClickHouse と OTel collector は別々に管理することが推奨されます。 @@ -283,7 +281,6 @@ helm install my-clickstack clickstack/clickstack \ * [Cloud Deployments](/docs/use-cases/observability/clickstack/deployment/helm-cloud) - クラウド固有の設定と本番環境チェックリスト ::: - ## タスク設定 {#task-configuration} デフォルトでは、アラートを発火させる必要があるかどうかをチェックする 1 つのタスクが、CronJob としてチャート内に設定されています。以下はその設定オプションです。 @@ -308,7 +305,6 @@ helm upgrade my-clickstack clickstack/clickstack -f values.yaml helm search repo clickstack ``` - ## ClickStack のアンインストール デプロイメントを削除するには: @@ -319,23 +315,20 @@ helm uninstall my-clickstack これにより、そのリリースに関連するすべてのリソースは削除されますが、永続データ(存在する場合)は残る可能性があります。 - ## トラブルシューティング {#troubleshooting} -### ログの確認 +### ログの確認 {#customizing-values} ```shell kubectl logs -l app.kubernetes.io/name=clickstack ``` - -### インストール失敗時のデバッグ +### インストール失敗時のデバッグ {#using-secrets} ```shell helm install my-clickstack clickstack/clickstack --debug --dry-run ``` - ### デプロイメントの検証 ```shell @@ -379,7 +372,6 @@ helm install my-clickstack clickstack/clickstack \ --set "otel.env[0].value=--feature-gates=clickhouse.json" ``` - ## 関連ドキュメント {#related-documentation} ### デプロイメントガイド {#deployment-guides} diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md 
b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md index 5394ec9638e..997de410287 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md @@ -38,7 +38,6 @@ ClickHouse Cloud を初めて利用する場合は、 このモードでは、データのインジェストは完全にユーザーに委ねられます。独自にホストした OpenTelemetry コレクター、クライアントライブラリからの直接インジェスト、Kafka や S3 などの ClickHouse ネイティブなテーブルエンジン、ETL パイプライン、あるいは ClickHouse Cloud のマネージドインジェストサービスである ClickPipes を使用して、ClickHouse Cloud にデータを取り込むことができます。このアプローチは、ClickStack を運用するうえで最もシンプルかつ高パフォーマンスな方法を提供します。 - ### 適したユースケース {#suitable-for} このデプロイメントパターンは、次のようなシナリオに最適です。 diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md index 84d917c777d..a566bd1f063 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md @@ -23,7 +23,6 @@ HyperDX はスタックの他のコンポーネントとは独立して使用で このモードでは、データのインジェストは完全にユーザー側の管理となります。独自にホストした OpenTelemetry collector、クライアントライブラリからの直接インジェスト、ClickHouse ネイティブのテーブルエンジン(Kafka や S3 など)、ETL パイプライン、あるいは ClickPipes のようなマネージドインジェストサービスを使用して、データを ClickHouse に取り込むことができます。このアプローチは最大限の柔軟性を提供し、すでに ClickHouse を運用していて、その上に HyperDX をレイヤーとして重ねて可視化、検索、アラートを実現したいチームに適しています。 - ### 適用対象 {#suitable-for} - 既存の ClickHouse ユーザー diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md index f9c8e030689..3a07833132e 100644 
--- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md @@ -23,7 +23,6 @@ import JSONSupport from '@site/i18n/jp/docusaurus-plugin-content-docs/current/us **ただし、この HyperDX のディストリビューションではユーザー認証は無効になっています** - ### 適した用途 {#suitable-for} * デモ @@ -35,33 +34,33 @@ import JSONSupport from '@site/i18n/jp/docusaurus-plugin-content-docs/current/us
- ### Docker を使用してデプロイする + ### Docker でデプロイする - ローカルモードでは、HyperDX UI がポート 8080 で動作します。 + ローカルモードでは、HyperDX UI がポート 8080 で起動します。 ```shell - docker run -p 8080:8080 docker.hyperdx.io/hyperdx/hyperdx-local + docker run -p 8080:8080 clickhouse/clickstack-local:latest ``` ### HyperDX UI にアクセスする - [http://localhost:8080](http://localhost:8080) にアクセスして HyperDX UI を開きます。 + HyperDX UI にアクセスするには、[http://localhost:8080](http://localhost:8080) を開きます。 - **このデプロイモードでは認証が有効になっていないため、ユーザーアカウントの作成を求められることはありません。** + **このデプロイモードでは認証が有効になっていないため、ユーザー作成画面は表示されません。** - ClickHouse Cloud など、ご自身の外部 ClickHouse クラスターに接続します。 + 外部の ClickHouse クラスター(例: ClickHouse Cloud)に接続します。 - + - ソースを作成し、デフォルト値はすべてそのまま保持したうえで、`Table` フィールドに `otel_logs` を設定します。その他の設定は自動検出されるため、`Save New Source` をクリックできます。 + ソースを作成し、すべてのデフォルト値はそのままにして、`Table` フィールドに `otel_logs` を入力します。その他の設定は自動検出されるはずなので、`Save New Source` をクリックします。 - + -ローカルモード専用イメージを使用する場合、ユーザーは `BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true` パラメーターだけを設定すればよく、例えば次のように指定します。 +ローカルモード専用イメージの場合は、`BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true` パラメーターを設定するだけで十分です(例: 環境変数として設定)。 ```shell -docker run -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 docker.hyperdx.io/hyperdx/hyperdx-local +docker run -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 clickhouse/clickstack-local:latest ``` diff --git a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md index d98b03e38ae..10b9a4c3843 100644 --- a/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md +++ b/i18n/jp/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md @@ -23,7 +23,6 @@ import dashboard_kubernetes from '@site/static/images/use-cases/observability/hy - - ## Бенчмарки производительности {#performance-benchmarks} chDB демонстрирует 
исключительную производительность в самых разных сценариях: - - * **[ClickBench встраиваемых движков](https://benchmark.clickhouse.com/#eyJzeXN0ZW0iOnsiQXRoZW5hIChwYXJ0aXRpb25lZCkiOnRydWUsIkF0aGVuYSAoc2luZ2xlKSI6dHJ1ZSwiQXVyb3JhIGZvciBNeVNRTCI6dHJ1ZSwiQXVyb3JhIGZvciBQb3N0Z3JlU1FMIjp0cnVlLCJCeXRlSG91c2UiOnRydWUsImNoREIiOnRydWUsIkNpdHVzIjp0cnVlLCJjbGlja2hvdXNlLWxvY2FsIChwYXJ0aXRpb25lZCkiOnRydWUsImNsaWNraG91c2UtbG9jYWwgKHNpbmdsZSkiOnRydWUsIkNsaWNrSG91c2UiOnRydWUsIkNsaWNrSG91c2UgKHR1bmVkKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoenN0ZCkiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQiOnRydWUsIkNsaWNrSG91c2UgKHdlYikiOnRydWUsIkNyYXRlREIiOnRydWUsIkRhdGFiZW5kIjp0cnVlLCJEYXRhRnVzaW9uIChzaW5nbGUpIjp0cnVlLCJBcGFjaGUgRG9yaXMiOnRydWUsIkRydWlkIjp0cnVlLCJEdWNrREIgKFBhcnF1ZXQpIjp0cnVlLCJEdWNrREIiOnRydWUsIkVsYXN0aWNzZWFyY2giOnRydWUsIkVsYXN0aWNzZWFyY2ggKHR1bmVkKSI6ZmFsc2UsIkdyZWVucGx1bSI6dHJ1ZSwiSGVhdnlBSSI6dHJ1ZSwiSHlkcmEiOnRydWUsIkluZm9icmlnaHQiOnRydWUsIktpbmV0aWNhIjp0cnVlLCJNYXJpYURCIENvbHVtblN0b3JlIjp0cnVlLCJNYXJpYURCIjpmYWxzZSwiTW9uZXREQiI6dHJ1ZSwiTW9uZ29EQiI6dHJ1ZSwiTXlTUUwgKE15SVNBTSkiOnRydWUsIk15U1FMIjp0cnVlLCJQaW5vdCI6dHJ1ZSwiUG9zdGdyZVNRTCI6dHJ1ZSwiUG9zdGdyZVNRTCAodHVuZWQpIjpmYWxzZSwiUXVlc3REQiAocGFydGl0aW9uZWQpIjp0cnVlLCJRdWVzdERCIjp0cnVlLCJSZWRzaGlmdCI6dHJ1ZSwiU2VsZWN0REIiOnRydWUsIlNpbmdsZVN0b3JlIjp0cnVlLCJTbm93Zmxha2UiOnRydWUsIlNRTGl0ZSI6dHJ1ZSwiU3RhclJvY2tzIjp0cnVlLCJUaW1lc2NhbGVEQiAoY29tcHJlc3Npb24pIjp0cnVlLCJUaW1lc2NhbGVEQiI6dHJ1ZX0sInR5cGUiOnsic3RhdGVsZXNzIjpmYWxzZSwibWFuYWdlZCI6ZmFsc2UsIkphdmEiOmZhbHNlLCJjb2x1bW4tb3JpZW50ZWQiOmZhbHNlLCJDKysiOmZhbHNlLCJNeVNRTCBjb21wYXRpYmxlIjpmYWxzZSwicm93LW9yaWVudGVkIjpmYWxzZSwiQyI6ZmFsc2UsIlBvc3RncmVTUUwgY29tcGF0aWJsZSI6ZmFsc2UsIkNsaWNrSG91c2UgZGVyaXZhdGl2ZSI6ZmFsc2UsImVtYmVkZGVkIjp0cnVlLCJzZXJ2ZXJsZXNzIjpmYWxzZSwiUnVzdCI6ZmFsc2UsInNlYXJjaCI6ZmFsc2UsImRvY3VtZW50IjpmYWxzZSwidGltZS1zZXJpZXMiOmZhbHNlfSwibWFjaGluZSI6eyJzZXJ2ZXJsZXNzIjp0cnVlLCIxNmFjdSI6dHJ1ZSwiTCI6dHJ1ZSwiTSI6dHJ1ZSwiUyI6dHJ1ZSwiWFMiOnRydWUsImM2YS5tZXRhbCwgNTAwZ2IgZ3AyIjp0cnVlLCJjNmEuNHhsYXJnZSwgNTAw
Z2IgZ3AyIjp0cnVlLCJjNS40eGxhcmdlLCA1MDBnYiBncDIiOnRydWUsIjE2IHRocmVhZHMiOnRydWUsIjIwIHRocmVhZHMiOnRydWUsIjI0IHRocmVhZHMiOnRydWUsIjI4IHRocmVhZHMiOnRydWUsIjMwIHRocmVhZHMiOnRydWUsIjQ4IHRocmVhZHMiOnRydWUsIjYwIHRocmVhZHMiOnRydWUsIm01ZC4yNHhsYXJnZSI6dHJ1ZSwiYzVuLjR4bGFyZ2UsIDIwMGdiIGdwMiI6dHJ1ZSwiYzZhLjR4bGFyZ2UsIDE1MDBnYiBncDIiOnRydWUsImRjMi44eGxhcmdlIjp0cnVlLCJyYTMuMTZ4bGFyZ2UiOnRydWUsInJhMy40eGxhcmdlIjp0cnVlLCJyYTMueGxwbHVzIjp0cnVlLCJTMjQiOnRydWUsIlMyIjp0cnVlLCIyWEwiOnRydWUsIjNYTCI6dHJ1ZSwiNFhMIjp0cnVlLCJYTCI6dHJ1ZX0sImNsdXN0ZXJfc2l6ZSI6eyIxIjp0cnVlLCIyIjp0cnVlLCI0Ijp0cnVlLCI4Ijp0cnVlLCIxNiI6dHJ1ZSwiMzIiOnRydWUsIjY0Ijp0cnVlLCIxMjgiOnRydWUsInNlcnZlcmxlc3MiOnRydWUsInVuZGVmaW5lZCI6dHJ1ZX0sIm1ldHJpYyI6ImhvdCIsInF1ZXJpZXMiOlt0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlXX0=)** — комплексное сравнение производительности * **[Производительность обработки DataFrame](https://colab.research.google.com/drive/1FogLujJ_-ds7RGurDrUnK-U0IW8a8Qd0)** - сравнительный анализ с другими библиотеками DataFrame * **[DataFrame 
Benchmark](https://benchmark.clickhouse.com/#eyJzeXN0ZW0iOnsiQWxsb3lEQiI6dHJ1ZSwiQWxsb3lEQiAodHVuZWQpIjp0cnVlLCJBdGhlbmEgKHBhcnRpdGlvbmVkKSI6dHJ1ZSwiQXRoZW5hIChzaW5nbGUpIjp0cnVlLCJBdXJvcmEgZm9yIE15U1FMIjp0cnVlLCJBdXJvcmEgZm9yIFBvc3RncmVTUUwiOnRydWUsIkJ5Q29uaXR5Ijp0cnVlLCJCeXRlSG91c2UiOnRydWUsImNoREIgKERhdGFGcmFtZSkiOnRydWUsImNoREIgKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiY2hEQiI6dHJ1ZSwiQ2l0dXMiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQgKGF3cykiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQgKGF6dXJlKSI6dHJ1ZSwiQ2xpY2tIb3VzZSBDbG91ZCAoZ2NwKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoZGF0YSBsYWtlLCBwYXJ0aXRpb25lZCkiOnRydWUsIkNsaWNrSG91c2UgKGRhdGEgbGFrZSwgc2luZ2xlKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoUGFycXVldCwgcGFydGl0aW9uZWQpIjp0cnVlLCJDbGlja0hvdXNlIChQYXJxdWV0LCBzaW5nbGUpIjp0cnVlLCJDbGlja0hvdXNlICh3ZWIpIjp0cnVlLCJDbGlja0hvdXNlIjp0cnVlLCJDbGlja0hvdXNlICh0dW5lZCkiOnRydWUsIkNsaWNrSG91c2UgKHR1bmVkLCBtZW1vcnkpIjp0cnVlLCJDbG91ZGJlcnJ5Ijp0cnVlLCJDcmF0ZURCIjp0cnVlLCJDcnVuY2h5IEJyaWRnZSBmb3IgQW5hbHl0aWNzIChQYXJxdWV0KSI6dHJ1ZSwiRGF0YWJlbmQiOnRydWUsIkRhdGFGdXNpb24gKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiRGF0YUZ1c2lvbiAoUGFycXVldCwgc2luZ2xlKSI6dHJ1ZSwiQXBhY2hlIERvcmlzIjp0cnVlLCJEcnVpZCI6dHJ1ZSwiRHVja0RCIChEYXRhRnJhbWUpIjp0cnVlLCJEdWNrREIgKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiRHVja0RCIjp0cnVlLCJFbGFzdGljc2VhcmNoIjp0cnVlLCJFbGFzdGljc2VhcmNoICh0dW5lZCkiOmZhbHNlLCJHbGFyZURCIjp0cnVlLCJHcmVlbnBsdW0iOnRydWUsIkhlYXZ5QUkiOnRydWUsIkh5ZHJhIjp0cnVlLCJJbmZvYnJpZ2h0Ijp0cnVlLCJLaW5ldGljYSI6dHJ1ZSwiTWFyaWFEQiBDb2x1bW5TdG9yZSI6dHJ1ZSwiTWFyaWFEQiI6ZmFsc2UsIk1vbmV0REIiOnRydWUsIk1vbmdvREIiOnRydWUsIk1vdGhlcmR1Y2siOnRydWUsIk15U1FMIChNeUlTQU0pIjp0cnVlLCJNeVNRTCI6dHJ1ZSwiT3hsYSI6dHJ1ZSwiUGFuZGFzIChEYXRhRnJhbWUpIjp0cnVlLCJQYXJhZGVEQiAoUGFycXVldCwgcGFydGl0aW9uZWQpIjp0cnVlLCJQYXJhZGVEQiAoUGFycXVldCwgc2luZ2xlKSI6dHJ1ZSwiUGlub3QiOnRydWUsIlBvbGFycyAoRGF0YUZyYW1lKSI6dHJ1ZSwiUG9zdGdyZVNRTCAodHVuZWQpIjpmYWxzZSwiUG9zdGdyZVNRTCI6dHJ1ZSwiUXVlc3REQiAocGFydGl0aW9uZWQpIjp0cnVlLCJRdWVzdERCIjp0cnVlLCJSZWRzaGlmdCI6dHJ1ZSwiU2luZ2xlU3RvcmUiOnRydWUsIlNub3dmbGFrZSI6dHJ1ZSw
iU1FMaXRlIjp0cnVlLCJTdGFyUm9ja3MiOnRydWUsIlRhYmxlc3BhY2UiOnRydWUsIlRlbWJvIE9MQVAgKGNvbHVtbmFyKSI6dHJ1ZSwiVGltZXNjYWxlREIgKGNvbXByZXNzaW9uKSI6dHJ1ZSwiVGltZXNjYWxlREIiOnRydWUsIlVtYnJhIjp0cnVlfSwidHlwZSI6eyJDIjpmYWxzZSwiY29sdW1uLW9yaWVudGVkIjpmYWxzZSwiUG9zdGdyZVNRTCBjb21wYXRpYmxlIjpmYWxzZSwibWFuYWdlZCI6ZmFsc2UsImdjcCI6ZmFsc2UsInN0YXRlbGVzcyI6ZmFsc2UsIkphdmEiOmZhbHNlLCJDKysiOmZhbHNlLCJNeVNRTCBjb21wYXRpYmxlIjpmYWxzZSwicm93LW9yaWVudGVkIjpmYWxzZSwiQ2xpY2tIb3VzZSBkZXJpdmF0aXZlIjpmYWxzZSwiZW1iZWRkZWQiOmZhbHNlLCJzZXJ2ZXJsZXNzIjpmYWxzZSwiZGF0YWZyYW1lIjp0cnVlLCJhd3MiOmZhbHNlLCJhenVyZSI6ZmFsc2UsImFuYWx5dGljYWwiOmZhbHNlLCJSdXN0IjpmYWxzZSwic2VhcmNoIjpmYWxzZSwiZG9jdW1lbnQiOmZhbHNlLCJzb21ld2hhdCBQb3N0Z3JlU1FMIGNvbXBhdGlibGUiOmZhbHNlLCJ0aW1lLXNlcmllcyI6ZmFsc2V9LCJtYWNoaW5lIjp7IjE2IHZDUFUgMTI4R0IiOnRydWUsIjggdkNQVSA2NEdCIjp0cnVlLCJzZXJ2ZXJsZXNzIjp0cnVlLCIxNmFjdSI6dHJ1ZSwiYzZhLjR4bGFyZ2UsIDUwMGdiIGdwMiI6dHJ1ZSwiTCI6dHJ1ZSwiTSI6dHJ1ZSwiUyI6dHJ1ZSwiWFMiOnRydWUsImM2YS5tZXRhbCwgNTAwZ2IgZ3AyIjp0cnVlLCIxOTJHQiI6dHJ1ZSwiMjRHQiI6dHJ1ZSwiMzYwR0IiOnRydWUsIjQ4R0IiOnRydWUsIjcyMEdCIjp0cnVlLCI5NkdCIjp0cnVlLCJkZXYiOnRydWUsIjcwOEdCIjp0cnVlLCJjNW4uNHhsYXJnZSwgNTAwZ2IgZ3AyIjp0cnVlLCJBbmFseXRpY3MtMjU2R0IgKDY0IHZDb3JlcywgMjU2IEdCKSI6dHJ1ZSwiYzUuNHhsYXJnZSwgNTAwZ2IgZ3AyIjp0cnVlLCJjNmEuNHhsYXJnZSwgMTUwMGdiIGdwMiI6dHJ1ZSwiY2xvdWQiOnRydWUsImRjMi44eGxhcmdlIjp0cnVlLCJyYTMuMTZ4bGFyZ2UiOnRydWUsInJhMy40eGxhcmdlIjp0cnVlLCJyYTMueGxwbHVzIjp0cnVlLCJTMiI6dHJ1ZSwiUzI0Ijp0cnVlLCIyWEwiOnRydWUsIjNYTCI6dHJ1ZSwiNFhMIjp0cnVlLCJYTCI6dHJ1ZSwiTDEgLSAxNkNQVSAzMkdCIjp0cnVlLCJjNmEuNHhsYXJnZSwgNTAwZ2IgZ3AzIjp0cnVlfSwiY2x1c3Rlcl9zaXplIjp7IjEiOnRydWUsIjIiOnRydWUsIjQiOnRydWUsIjgiOnRydWUsIjE2Ijp0cnVlLCIzMiI6dHJ1ZSwiNjQiOnRydWUsIjEyOCI6dHJ1ZSwic2VydmVybGVzcyI6dHJ1ZX0sIm1ldHJpYyI6ImhvdCIsInF1ZXJpZXMiOlt0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx
0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlXX0=)** - - - - ## О chDB {#about-chdb} - Прочитайте полную историю создания проекта chDB в [блоге](https://clickhouse.com/blog/chdb-embedded-clickhouse-rocket-engine-on-a-bicycle) @@ -89,8 +72,6 @@ chDB демонстрирует исключительную производи - Попробуйте chDB прямо в браузере, используя [примеры codapi](https://antonz.org/trying-chdb/) - Дополнительные примеры см. по адресу https://github.com/chdb-io/chdb/tree/main/examples - - ## Лицензия {#license} chDB распространяется по лицензии Apache версии 2.0. Дополнительную информацию см. в файле [LICENSE](https://github.com/chdb-io/chdb/blob/main/LICENSE.txt). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/bun.md b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/bun.md index 8d69a2e0f18..878a9ea9378 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/bun.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/bun.md @@ -48,7 +48,7 @@ bun run build chDB-bun поддерживает два режима выполнения запросов: эфемерные запросы для одноразовых операций и постоянные сеансы для сохранения состояния базы данных. -### Эфемерные запросы +### Эфемерные запросы {#persistent-sessions} Для простых одноразовых запросов, которые не требуют сохранения состояния: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/c.md b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/c.md index 5bb28c8be5e..d1f85d418b7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/c.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/c.md @@ -7,14 +7,10 @@ keywords: ['chdb', 'c', 'cpp', 'embedded', 'clickhouse', 'sql', 'olap', 'api'] doc_type: 'guide' --- - - # chDB для C и C++ {#chdb-for-c-and-c} chDB предоставляет родной C/C++ API для встраивания функциональности ClickHouse непосредственно в ваши приложения. 
API поддерживает как простые запросы, так и расширенные возможности, такие как постоянные соединения и потоковая передача результатов запросов. - - ## Установка {#installation} ### Шаг 1: Установите libchdb {#install-libchdb} @@ -37,13 +33,11 @@ curl -sL https://lib.chdb.io | bash Скомпилируйте и скомпонуйте ваше приложение с chDB: - ```bash # Компиляция на C {#c-compilation} gcc -o myapp myapp.c -lchdb ``` - # Компиляция на C++ {#c-compilation} g++ -o myapp myapp.cpp -lchdb @@ -51,7 +45,6 @@ g++ -o myapp myapp.cpp -lchdb ``` ``` - ## Примеры на C {#c-examples} ### Базовое подключение и выполнение запросов {#basic-connection-queries} @@ -194,7 +187,6 @@ int main() { chdb_destroy_query_result(json_result); ``` - // Форматированный вывод chdb_result* pretty_result = chdb_query(*conn, query, "Pretty"); printf("Pretty Result:\n%.*s\n\n", @@ -209,7 +201,6 @@ return 0; ``` ``` - ## Пример на C++ {#cpp-example} ```cpp @@ -293,7 +284,6 @@ int main() { } ``` - ## Лучшие практики обработки ошибок {#error-handling} ```c @@ -341,7 +331,6 @@ cleanup: } ``` - ## Репозиторий GitHub {#github-repository} - **Основной репозиторий**: [chdb-io/chdb](https://github.com/chdb-io/chdb) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/go.md b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/go.md index c680f270206..8b945b33eaf 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/go.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/go.md @@ -7,14 +7,10 @@ keywords: ['chdb', 'go', 'golang', 'embedded', 'clickhouse', 'sql', 'olap'] doc_type: 'guide' --- - - # chDB для Go {#chdb-for-go} chDB-go предоставляет биндинги Go к chDB, позволяя выполнять запросы к ClickHouse напрямую в ваших Go‑приложениях без каких-либо внешних зависимостей. 
- - ## Установка {#installation} ### Шаг 1: Установите libchdb {#install-libchdb} @@ -39,26 +35,20 @@ go install github.com/chdb-io/chdb-go@latest go get github.com/chdb-io/chdb-go ``` - ## Использование {#usage} ### Интерфейс командной строки {#cli} chDB-go предоставляет утилиту командной строки для быстрых запросов: - - ```bash # Простой запрос {#simple-query} ./chdb-go "SELECT 123" ``` - # Интерактивный режим {#interactive-mode} ./chdb-go - - # Интерактивный режим с постоянным хранилищем данных {#interactive-mode-with-persistent-storage} ./chdb-go --path /tmp/chdb @@ -242,15 +232,12 @@ func main() { } ``` - **Преимущества потокового выполнения запросов:** - **Эффективное использование памяти** - Обрабатывайте большие наборы данных, не загружая их полностью в память - **Обработка в реальном времени** - Начинайте обработку данных, как только поступит первый фрагмент - **Поддержка отмены** - Можно отменять длительные запросы с помощью `Cancel()` - **Обработка ошибок** - Проверяйте наличие ошибок во время потоковой обработки с помощью `Error()` - - ## Документация по API {#api-documentation} chDB-go предоставляет как высокоуровневый, так и низкоуровневый API: @@ -258,8 +245,6 @@ chDB-go предоставляет как высокоуровневый, так - **[Документация по высокоуровневому API](https://github.com/chdb-io/chdb-go/blob/main/chdb.md)** — рекомендуется для большинства сценариев использования - **[Документация по низкоуровневому API](https://github.com/chdb-io/chdb-go/blob/main/lowApi.md)** — для более сложных сценариев, требующих детального управления - - ## Системные требования {#requirements} - Go 1.21 или новее diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md index 6f814a346c2..95aedbe5e44 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md @@ -7,21 +7,16 @@ keywords: 
['chdb', 'nodejs', 'javascript', 'встраиваемая', 'clickhou doc_type: 'guide' --- - - # chDB для Node.js {#chdb-for-nodejs} chDB-node предоставляет биндинги chDB для Node.js, позволяя выполнять запросы к ClickHouse непосредственно в ваших Node.js-приложениях без каких-либо внешних зависимостей. - - ## Установка {#installation} ```bash npm install chdb ``` - ## Использование {#usage} chDB-node поддерживает два режима выполнения запросов: автономные запросы для простых операций и сеансовые запросы для сохранения состояния базы данных. @@ -146,7 +141,6 @@ try { } ``` - ## Обработка ошибок {#error-handling} Всегда правильно обрабатывайте ошибки при работе с chDB: @@ -192,7 +186,6 @@ function safeSessionQuery() { safeSessionQuery(); ``` - ## Репозиторий на GitHub {#github-repository} - **Репозиторий на GitHub**: [chdb-io/chdb-node](https://github.com/chdb-io/chdb-node) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/python.md b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/python.md index 6faa008f7dd..738de4e18d2 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/python.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/python.md @@ -18,7 +18,6 @@ doc_type: 'guide' pip install chdb ``` - ## Использование {#usage} ### Интерфейс командной строки {#command-line-interface} @@ -33,7 +32,6 @@ python3 -m chdb "SELECT 1, 'abc'" Pretty python3 -m chdb "SELECT version()" JSON ``` - ### Основы работы с Python {#basic-python-usage} ```python @@ -49,7 +47,6 @@ print(f"Прочитано байт: {result.bytes_read()}") print(f"Время выполнения: {result.elapsed()} секунд") ``` - ### API на основе подключений (рекомендуется) {#connection-based-api} Для более эффективного управления ресурсами и повышения производительности: @@ -84,7 +81,6 @@ cur.close() conn.close() ``` - ## Методы ввода данных {#data-input} ### Файловые источники данных {#file-based-data-sources} @@ -118,7 +114,6 @@ result = chdb.query(""" """, 
'Pretty') ``` - ### Примеры форматов вывода {#output-format-examples} ```python @@ -139,7 +134,6 @@ pretty_result = chdb.query('SELECT * FROM system.numbers LIMIT 3', 'Pretty') print(pretty_result) ``` - ### Операции с DataFrame {#dataframe-operations} #### Устаревший API DataFrame {#legacy-dataframe-api} @@ -164,7 +158,6 @@ summary = result_df.query('SELECT b, sum(a) FROM __table__ GROUP BY b') print(summary) ``` - #### Табличный движок Python (рекомендуется) {#python-table-engine-recommended} ```python @@ -212,7 +205,6 @@ chdb.query(""" """).show() ``` - ### Сеансы с сохранением состояния {#stateful-sessions} Сессии поддерживают состояние запросов между несколькими операциями, что позволяет реализовывать сложные рабочие процессы: @@ -267,7 +259,6 @@ print(result) sess.close() # Необязательно — автоматически закрывается при удалении объекта ``` - ### Расширенные возможности сессий {#advanced-session-features} ```python @@ -288,7 +279,6 @@ result = sess.query(""" См. также: [test_stateful.py](https://github.com/chdb-io/chdb/blob/main/tests/test_stateful.py). - ### Интерфейс Python DB-API 2.0 {#python-db-api-20} Стандартный интерфейс доступа к базе данных для совместимости с существующими приложениями на Python: @@ -337,7 +327,6 @@ cursor.executemany( ) ``` - ### Пользовательские функции (UDF) {#user-defined-functions} Расширяйте SQL с помощью пользовательских функций, написанных на Python: @@ -378,7 +367,6 @@ result = query(""" print(result) ``` - #### Продвинутые UDF с пользовательскими типами возвращаемых значений {#advanced-udf-custom-return-types} ```python @@ -413,7 +401,6 @@ result = query(""" print(result) ``` - #### Рекомендации по использованию UDF {#udf-best-practices} 1. 
**Stateless Functions**: функции UDF должны быть чистыми, без побочных эффектов @@ -449,7 +436,6 @@ query(""" """) ``` - ### Потоковая обработка запросов {#streaming-queries} Обрабатывайте большие наборы данных при фиксированном объёме потребляемой памяти: @@ -520,7 +506,6 @@ stream.close() sess.close() ``` - ### Табличный движок Python {#python-table-engine} #### Выполнение запросов к DataFrame в Pandas {#query-pandas-dataframes} @@ -578,7 +563,6 @@ window_result = chdb.query(""" print(window_result) ``` - #### Пользовательские источники данных с PyReader {#custom-data-sources-pyreader} Реализуйте пользовательские ридеры данных для специализированных источников данных: @@ -686,7 +670,6 @@ complex_json = chdb.query(""" print(complex_json) ```` - ## Производительность и оптимизация {#performance-optimization} ### Бенчмарки {#benchmarks} @@ -772,7 +755,6 @@ stream.close() sess.close() ``` - ## Репозиторий GitHub {#github-repository} - **Основной репозиторий**: [chdb-io/chdb](https://github.com/chdb-io/chdb) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/rust.md b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/rust.md index d3a3b27fff4..11352a6b1ff 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/rust.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/chdb/install/rust.md @@ -7,14 +7,10 @@ keywords: ['chdb', 'embedded', 'clickhouse-lite', 'rust', 'install', 'ffi', 'bin doc_type: 'guide' --- - - # chDB для Rust {#chdb-for-rust} chDB-rust предоставляет экспериментальные привязки FFI (Foreign Function Interface) для chDB, позволяющие выполнять запросы к ClickHouse непосредственно в ваших Rust-приложениях без каких-либо внешних зависимостей. 
- - ## Установка {#installation} ### Установка libchdb {#install-libchdb} @@ -25,7 +21,6 @@ chDB-rust предоставляет экспериментальные прив curl -sL https://lib.chdb.io | bash ``` - ## Использование {#usage} chDB для Rust предоставляет как статический, так и состояние-сохраняющий режимы выполнения запросов. @@ -114,7 +109,6 @@ fn main() -> Result<(), Box> { } ``` - ## Сборка и тестирование {#building-testing} ### Сборка проекта {#build-the-project} @@ -137,7 +131,6 @@ cargo test * `tempdir` (v0.3.7) - работа с временными каталогами в тестах * `thiserror` (v1) - утилиты для обработки ошибок - ## Обработка ошибок {#error-handling} chDB Rust предоставляет всестороннюю обработку ошибок с помощью перечисления `Error`: @@ -164,7 +157,6 @@ match execute("SELECT 1", None) { } ``` - ## Репозиторий на GitHub {#github-repository} Репозиторий проекта на GitHub доступен по адресу [chdb-io/chdb-rust](https://github.com/chdb-io/chdb-rust). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md index 3f793e62c20..31d9bbb5366 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md @@ -7,8 +7,6 @@ keywords: ['тарифные планы облака', 'тарифные пла doc_type: 'reference' --- - - # Уровни ClickHouse Cloud {#clickhouse-cloud-tiers} В ClickHouse Cloud доступно несколько уровней. @@ -17,8 +15,6 @@ doc_type: 'reference' **Краткое описание уровней в облаке:** - -
@@ -224,8 +220,6 @@ doc_type: 'reference' - - ## Basic {#basic} - Экономичный вариант, поддерживающий развертывания с одной репликой. @@ -236,8 +230,6 @@ doc_type: 'reference' Вы можете перейти на уровень Scale или Enterprise, чтобы масштабировать эти сервисы. ::: - - ## Scale {#scale} Предназначен для рабочих нагрузок, которым требуются повышенные SLA (развертывания с 2 и более репликами), масштабируемость и расширенная безопасность. @@ -248,8 +240,6 @@ doc_type: 'reference' - Опции [гибкого масштабирования](/manage/scaling) (масштабирование вверх/вниз, внутрь/наружу). - [Настраиваемые резервные копии](/cloud/manage/backups/configurable-backups) - - ## Enterprise {#enterprise} Предназначен для крупномасштабных, критически важных развертываний со строгими требованиями к безопасности и соответствию нормативам. @@ -268,8 +258,6 @@ doc_type: 'reference' Сервисы с одной репликой во всех трех тарифах предусмотрены фиксированного размера (`8 GiB`, `12 GiB`). ::: - - ## Переход на другой тарифный план {#upgrading-to-a-different-tier} Вы всегда можете перейти с тарифа Basic на Scale или с Scale на Enterprise. Понижение тарифного плана потребует отключения премиальных возможностей. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md index ff58ddf4543..e19a9788bf8 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md @@ -14,29 +14,22 @@ import insights_recent from '@site/static/images/cloud/sqlconsole/insights_recen import insights_drilldown from '@site/static/images/cloud/sqlconsole/insights_drilldown.png'; import insights_query_info from '@site/static/images/cloud/sqlconsole/insights_query_info.png'; - # Аналитика запросов {#query-insights} Возможность **Query Insights** упрощает работу со встроенным журналом запросов ClickHouse за счёт различных визуализаций и таблиц. Таблица `system.query_log` в ClickHouse является ключевым источником информации для оптимизации запросов, отладки и мониторинга общего состояния и производительности кластера. - - ## Обзор запросов {#query-overview} После выбора сервиса пункт навигации **Monitoring** в левой боковой панели разворачивается и отображает новый подпункт **Query insights**. Щелчок по этому пункту открывает новую страницу Query insights: - - ## Метрики верхнего уровня {#top-level-metrics} Статистические блоки в верхней части отображают базовые сводные метрики запросов за выбранный период времени. Ниже представлены три графика временных рядов, показывающие объем запросов, задержку и уровень ошибок с разбивкой по типам запросов (select, insert, other) за выбранный временной интервал. 
График задержки можно дополнительно настроить для отображения задержек p50, p90 и p99: - - ## Недавние запросы {#recent-queries} Под основными метриками отображается таблица записей журнала запросов (сгруппированных по нормализованному хэшу запроса и пользователю) за выбранный временной интервал: @@ -45,8 +38,6 @@ import insights_query_info from '@site/static/images/cloud/sqlconsole/insights_q Недавние запросы можно фильтровать и сортировать по любому из доступных полей. Таблицу также можно настроить для отображения или скрытия дополнительных полей, таких как таблицы, а также задержки p90 и p99. - - ## Детальный разбор запроса {#query-drill-down} При выборе запроса в таблице недавних запросов открывается всплывающая панель, содержащая метрики и информацию, относящиеся к выбранному запросу: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md index 17700003ef5..48d0319215d 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md @@ -20,13 +20,10 @@ import dashboards_9 from '@site/static/images/cloud/dashboards/9_dashboards.png' import dashboards_10 from '@site/static/images/cloud/dashboards/10_dashboards.png'; import dashboards_11 from '@site/static/images/cloud/dashboards/11_dashboards.png'; - # Панели мониторинга {#dashboards} Функция панелей мониторинга в SQL Console позволяет собирать и совместно использовать визуализации на основе сохранённых запросов. Начните с сохранения и визуализации запросов, добавления их визуализаций на панель и сделайте панель интерактивной с помощью параметров запросов. 
- - ## Базовые понятия {#core-concepts} ### Общий доступ к запросам {#query-sharing} @@ -39,14 +36,10 @@ import dashboards_11 from '@site/static/images/cloud/dashboards/11_dashboards.pn Вы можете включать и отключать поле ввода параметра запроса через боковую панель фильтров **Global**, выбрав тип «filter» в настройках визуализации. Также вы можете включать и отключать ввод параметра запроса, связав его с другим объектом (например, таблицей) на панели. См. раздел «[configure a filter](/cloud/manage/dashboards#configure-a-filter)» в кратком руководстве по быстрому старту ниже. - - ## Быстрый старт {#quick-start} Создадим панель мониторинга для нашего сервиса ClickHouse, используя системную таблицу [query\_log](/operations/system-tables/query_log). - - ## Быстрый старт {#quick-start-1} ### Создание сохранённого запроса {#create-a-saved-query} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md index dd713b455d5..65e70d03da7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md @@ -16,7 +16,6 @@ import scaling_configure from '@site/static/images/cloud/manage/scaling-configur import scaling_memory_allocation from '@site/static/images/cloud/manage/scaling-memory-allocation.png'; import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' - # Автоматическое масштабирование {#automatic-scaling} Масштабирование — это возможность изменять доступные ресурсы в соответствии с запросами клиентов. 
Сервисы уровней Scale и Enterprise (со стандартным профилем 1:4) могут масштабироваться горизонтально через программные вызовы API или путем изменения настроек в пользовательском интерфейсе для регулирования системных ресурсов. Эти сервисы также могут **автоматически масштабироваться** вертикально в соответствии с потребностями приложений. @@ -27,8 +26,6 @@ import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' Уровни Scale и Enterprise поддерживают как сервисы с одной репликой, так и с несколькими репликами, тогда как уровень Basic поддерживает только сервисы с одной репликой. Сервисы с одной репликой имеют фиксированный размер и не допускают вертикального или горизонтального масштабирования. Пользователи могут перейти на уровень Scale или Enterprise, чтобы масштабировать свои сервисы. ::: - - ## Как работает масштабирование в ClickHouse Cloud {#how-scaling-works-in-clickhouse-cloud} В настоящее время ClickHouse Cloud поддерживает вертикальное автомасштабирование и ручное горизонтальное масштабирование для сервисов уровня Scale. @@ -89,8 +86,6 @@ import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' Однако вертикальное масштабирование таких сервисов возможно по обращению в службу поддержки. ::: - - ## Ручное горизонтальное масштабирование {#manual-horizontal-scaling} @@ -129,8 +124,6 @@ import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' - - ## Автоматический переход в режим ожидания {#automatic-idling} На странице **Settings** вы также можете выбрать, разрешать ли автоматический переход сервиса в режим ожидания, когда он неактивен, как показано на изображении выше (то есть когда сервис не выполняет никаких пользовательских запросов). Автоматический переход в режим ожидания снижает стоимость работы сервиса, так как с вас не взимается плата за вычислительные ресурсы, пока сервис приостановлен. 
@@ -144,8 +137,6 @@ import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' Используйте автоматический переход в режим ожидания только в том случае, если в вашей задаче допустима задержка перед ответом на запросы, поскольку при приостановке сервиса подключения к нему будут завершаться по таймауту. Автоматический переход в режим ожидания оптимален для сервисов, которые используются редко и где задержка может быть терпима. Он не рекомендуется для сервисов, обеспечивающих клиентские функции, которыми часто пользуются. ::: - - ## Обработка всплесков нагрузки {#handling-bursty-workloads} Если в ближайшее время ожидается всплеск нагрузки, вы можете использовать diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md index 6f3d55e8f5f..b6a61377a8b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md @@ -6,15 +6,11 @@ keywords: ['собственное облако', 'byoc', 'частное обл doc_type: 'reference' --- - - # Варианты развертывания ClickHouse {#clickhouse-deployment-options} ClickHouse предоставляет широкий спектр вариантов развертывания, чтобы удовлетворить разнообразные требования клиентов, предлагая разные уровни контроля, соответствия нормативным требованиям и операционных накладных расходов. В этом документе описаны различные типы развертывания, что позволяет пользователям выбрать оптимальное решение, соответствующее их архитектурным предпочтениям, регуляторным обязательствам и стратегиям управления ресурсами. - - ## ClickHouse Cloud {#clickhouse-cloud} ClickHouse Cloud — это полностью управляемый облачный сервис, который обеспечивает мощь и скорость ClickHouse без операционной сложности самостоятельного управления. 
@@ -24,24 +20,18 @@ ClickHouse Cloud берет на себя все аспекты подготов Подробнее см. в разделе [ClickHouse Cloud](/getting-started/quick-start/cloud). - - ## Собственное облако (Bring Your Own Cloud) {#byoc} ClickHouse Bring Your Own Cloud (BYOC) позволяет организациям развертывать и управлять ClickHouse в собственной облачной среде, при этом используя управляемый сервисный слой. Этот вариант занимает промежуточное положение между полностью управляемым сервисом ClickHouse Cloud и полным контролем при самостоятельном управлении развертываниями. С ClickHouse BYOC пользователи сохраняют контроль над своими данными, инфраструктурой и политиками безопасности, соблюдая специфические требования по соответствию и нормативному регулированию, одновременно передавая ClickHouse эксплуатационные задачи, такие как установка обновлений и патчей, мониторинг и масштабирование. Эта модель предоставляет гибкость развертывания в частном облаке с преимуществами управляемого сервиса, что делает ее подходящей для крупномасштабных развертываний для предприятий с жесткими требованиями к безопасности, управлению и локализации данных. Подробнее см. в разделе [Bring Your Own Cloud](/cloud/reference/byoc/overview). - - ## ClickHouse Private {#clickhouse-private} ClickHouse Private — это версия ClickHouse для самостоятельного развёртывания, использующая ту же проприетарную технологию, которая лежит в основе ClickHouse Cloud. Этот вариант обеспечивает максимально возможный уровень контроля, что делает его оптимальным для организаций с жёсткими требованиями к соответствию, сетевому взаимодействию и безопасности, а также для команд, обладающих достаточной экспертизой для управления собственной инфраструктурой. Решение получает регулярные обновления и улучшения, тщательно протестированные в среде ClickHouse Cloud, имеет насыщенную дорожную карту по развитию функциональности и поддерживается нашей командой экспертов. Подробнее о [ClickHouse Private](/cloud/infrastructure/clickhouse-private). 
- - ## ClickHouse Government {#clickhouse-government} ClickHouse Government — это самостоятельно развертываемая версия ClickHouse, разработанная для удовлетворения уникальных и строгих требований государственных учреждений и организаций государственного сектора, которым необходимы изолированные и аккредитованные среды. Этот вариант развертывания предоставляет высокозащищенную, соответствующую нормативным требованиям и изолированную среду, с акцентом на обеспечение соответствия стандарту FIPS 140-3 с использованием OpenSSL, дополнительное усиление безопасности системы и управление уязвимостями. Он использует мощные возможности ClickHouse Cloud, одновременно интегрируя специализированные функции и конфигурации для удовлетворения специфических эксплуатационных требований и требований безопасности государственных структур. С ClickHouse Government ведомства могут выполнять высокопроизводительную аналитику над конфиденциальными данными в контролируемой и аккредитованной инфраструктуре при поддержке экспертов, услуги которых адаптированы под потребности государственного сектора. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md index a84e315d428..681a0ee9e52 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md @@ -8,7 +8,6 @@ doc_type: 'guide' import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; - # Маршрутизация с учетом реплик {#replica-aware-routing} @@ -27,8 +26,6 @@ import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; Обратите внимание, что исходное имя хоста по-прежнему будет использовать балансировку нагрузки `LEAST_CONNECTION`, которая является алгоритмом маршрутизации по умолчанию. 
- - ## Ограничения маршрутизации с учетом реплик {#limitations-of-replica-aware-routing} ### Маршрутизация с учетом реплик не гарантирует изоляцию {#replica-aware-routing-does-not-guarantee-isolation} @@ -39,8 +36,6 @@ import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; Клиентам необходимо вручную добавить DNS-запись, чтобы разрешение имен работало для нового шаблона имени хоста. При неправильном использовании это может привести к неравномерной нагрузке на сервер. - - ## Настройка маршрутизации с учётом реплик {#configuring-replica-aware-routing} Чтобы включить маршрутизацию с учётом реплик, пожалуйста, обратитесь в [нашу службу поддержки](https://clickhouse.com/support/program). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md index 6c52db49f23..4864db536f1 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md @@ -7,8 +7,6 @@ description: 'Описывает компонент Shared Catalog и движо doc_type: 'reference' --- - - # Общий каталог и общий движок базы данных {#shared-catalog-and-shared-database-engine} **Доступно исключительно в ClickHouse Cloud (и в облачных сервисах партнёров первого уровня)** @@ -24,8 +22,6 @@ Shared Catalog **не реплицирует сами таблицы**, но о - MySQL - DataLakeCatalog - - ## Архитектура и хранение метаданных {#architecture-and-metadata-storage} Все метаданные и история DDL‑запросов в Shared Catalog хранятся централизованно в ZooKeeper. Ничего не сохраняется на локальные диски. 
Такая архитектура обеспечивает: @@ -34,8 +30,6 @@ Shared Catalog **не реплицирует сами таблицы**, но о - Отсутствие состояния у вычислительных узлов (stateless) - Быстрый и надёжный запуск реплик - - ## Shared database engine {#shared-database-engine} **Shared database engine** работает совместно с Shared Catalog для управления базами данных, таблицы которых используют **stateless‑движки таблиц**, такие как `SharedMergeTree`. Эти движки таблиц не записывают постоянное состояние на диск и совместимы с динамическими вычислительными средами. @@ -69,8 +63,6 @@ Shared database engine развивает и улучшает поведение - **Централизованное, версионируемое состояние метаданных** Shared Catalog хранит единый источник истины в ZooKeeper. Когда реплика запускается, она получает последнее состояние и применяет разницу (diff), чтобы достичь согласованности. Во время выполнения запроса система может дождаться, пока другие реплики достигнут как минимум требуемой версии метаданных для обеспечения корректности. - - ## Использование в ClickHouse Cloud {#usage-in-clickhouse-cloud} Для конечных пользователей использование Shared Catalog и движка базы данных Shared не требует какой-либо дополнительной настройки. Создание базы данных осуществляется как обычно: @@ -81,7 +73,6 @@ CREATE DATABASE my_database; ClickHouse Cloud автоматически назначает базам данных движок базы данных Shared. Любые таблицы, созданные в такой базе данных с использованием stateless-движков, автоматически получают все преимущества репликации и координации, обеспечиваемых Shared Catalog. 
- ## Краткое содержание {#summary} Shared Catalog и движок базы данных Shared обеспечивают: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md index b44cbf73550..8f9e45da698 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md @@ -11,7 +11,6 @@ import shared_merge_tree from '@site/static/images/cloud/reference/shared-merge- import shared_merge_tree_2 from '@site/static/images/cloud/reference/shared-merge-tree-2.png'; import Image from '@theme/IdealImage'; - # Движок таблиц SharedMergeTree {#sharedmergetree-table-engine} Семейство движков таблиц SharedMergeTree — это облачный (cloud‑native) аналог движков ReplicatedMergeTree, оптимизированный для работы поверх общего хранилища (например, Amazon S3, Google Cloud Storage, MinIO, Azure Blob Storage). Для каждого конкретного типа движка MergeTree существует соответствующий SharedMergeTree, то есть ReplacingSharedMergeTree заменяет ReplacingReplicatedMergeTree. @@ -34,8 +33,6 @@ import Image from '@theme/IdealImage'; В отличие от ReplicatedMergeTree, SharedMergeTree не требует, чтобы реплики напрямую обменивались данными друг с другом. Вместо этого весь обмен происходит через общее хранилище и clickhouse-keeper. SharedMergeTree реализует асинхронную репликацию без лидера и использует clickhouse-keeper для координации и хранения метаданных. Это означает, что метаданные не нужно реплицировать при изменении масштаба сервиса. Это приводит к более быстрой репликации, выполнению мутаций, слияниям и операциям масштабирования. SharedMergeTree поддерживает сотни реплик для каждой таблицы, что делает возможным динамическое масштабирование без шардов. 
В ClickHouse Cloud используется подход распределённого выполнения запросов для задействования большего количества вычислительных ресурсов на один запрос. - - ## Интроспекция {#introspection} Большинство системных таблиц, используемых для интроспекции ReplicatedMergeTree, доступны и для SharedMergeTree, за исключением `system.replication_queue` и `system.replicated_fetches`, так как в SharedMergeTree нет репликации данных и метаданных. Однако для этих двух таблиц в SharedMergeTree есть соответствующие альтернативы. @@ -48,8 +45,6 @@ import Image from '@theme/IdealImage'; Эта таблица является альтернативой `system.replicated_fetches` для SharedMergeTree. Она содержит информацию о текущих выполняющихся операциях выборки (fetches) первичных ключей и контрольных сумм в память. - - ## Включение SharedMergeTree {#enabling-sharedmergetree} `SharedMergeTree` включён по умолчанию. @@ -103,7 +98,6 @@ ENGINE = SharedReplacingMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica ORDER BY key ``` - ## Настройки {#settings} Поведение некоторых настроек значительно изменилось: @@ -112,8 +106,6 @@ ORDER BY key - `insert_quorum_parallel` -- все вставки в SharedMergeTree являются кворумными вставками (записываются в общее хранилище), поэтому эта настройка не требуется при использовании движка таблицы SharedMergeTree. - `select_sequential_consistency` -- не требует кворумных вставок, приведёт к дополнительной нагрузке на clickhouse-keeper при выполнении запросов `SELECT` - - ## Согласованность {#consistency} SharedMergeTree обеспечивает более сильные (lightweight) гарантии согласованности, чем ReplicatedMergeTree. При вставке в SharedMergeTree вам не нужно указывать настройки, такие как `insert_quorum` или `insert_quorum_parallel`. Вставки являются кворумными, то есть метаданные будут сохранены в ClickHouse-Keeper, и эти метаданные реплицируются как минимум на кворум узлов ClickHouse-Keeper. 
Каждая реплика в вашем кластере будет асинхронно получать новую информацию из ClickHouse-Keeper. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md index 37b419ec8d1..e3d15a64704 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md @@ -15,11 +15,8 @@ import compute_7 from '@site/static/images/cloud/reference/compute-compute-7.png import compute_8 from '@site/static/images/cloud/reference/compute-compute-8.png'; import Image from '@theme/IdealImage'; - # Хранилища {#warehouses} - - ## Что такое разделение вычислительных ресурсов (compute-compute)? {#what-is-compute-compute-separation} Разделение вычислительных ресурсов (compute-compute) доступно для тарифов Scale и Enterprise. @@ -51,8 +48,6 @@ _Рис. 2 — разделение вычислительных ресурсо Можно создавать дополнительные сервисы, которые совместно используют те же данные, что и ваши существующие сервисы, или настроить полностью новую конфигурацию с несколькими сервисами, совместно использующими одни и те же данные. - - ## Что такое warehouse? {#what-is-a-warehouse} В ClickHouse Cloud _warehouse_ — это набор сервисов, которые используют одни и те же данные. @@ -75,8 +70,6 @@ _Рис. 3 — Пример warehouse_ Вы можете сортировать сервисы по warehouse, к которому они относятся. - - ## Контроль доступа {#access-controls} ### Учетные данные базы данных {#database-credentials} @@ -116,8 +109,6 @@ _Рис. 6 — Сервисы с правами чтения-записи и т 2. В настоящее время обновляемые материализованные представления выполняются на всех сервисах в рамках warehouse, включая сервисы только для чтения. В будущем это поведение будет изменено, и они будут выполняться только на сервисах с правами чтения-записи. 
::: - - ## Масштабирование {#scaling} Каждый сервис в Warehouse можно настроить под вашу нагрузку по следующим параметрам: @@ -126,13 +117,9 @@ _Рис. 6 — Сервисы с правами чтения-записи и т - Нужно ли автоматически масштабировать сервис - Нужно ли переводить сервис в режим простоя при отсутствии активности (не может быть применено к первому сервису в группе — см. раздел **Ограничения**) - - ## Изменения в поведении {#changes-in-behavior} После того как для сервиса включён режим compute-compute (создан хотя бы один вторичный сервис), вызов функции `clusterAllReplicas()` с именем кластера `default` будет использовать только реплики того сервиса, в котором она была вызвана. Это означает, что если два сервиса подключены к одному и тому же набору данных и `clusterAllReplicas(default, system, processes)` вызывается из сервиса 1, будут показаны только процессы, запущенные на сервисе 1. При необходимости вы по-прежнему можете вызвать, например, `clusterAllReplicas('all_groups.default', system, processes)`, чтобы обратиться ко всем репликам. - - ## Ограничения {#limitations} 1. **Основной сервис всегда должен быть запущен и не может переходить в режим простоя (это ограничение будет снято через некоторое время после GA).** Во время private preview и некоторое время после GA основной сервис (обычно это существующий сервис, который вы хотите расширить, добавив другие сервисы) всегда будет работать, и для него будет отключена настройка простоя. Вы не сможете остановить или перевести в режим простоя основной сервис, если существует хотя бы один вторичный сервис. Как только все вторичные сервисы будут удалены, вы снова сможете остановить или перевести в режим простоя исходный сервис. @@ -154,22 +141,17 @@ SETTINGS distributed_ddl_task_timeout=0 7. **В настоящее время действует мягкий лимит — не более 5 сервисов на один warehouse.** Обратитесь в службу поддержки, если вам нужно использовать более 5 сервисов в одном warehouse. 
- ## Цены {#pricing} Стоимость вычислительных ресурсов одинакова для всех сервисов в хранилище (основном и дополнительном). Хранение данных тарифицируется только один раз — оно включено в первый (исходный) сервис. Воспользуйтесь калькулятором на странице [Pricing](https://clickhouse.com/pricing), который поможет оценить стоимость в зависимости от размера вашей нагрузки и выбранного тарифа. - - ## Резервные копии {#backups} - Поскольку все сервисы в одном warehouse используют одно и то же хранилище, резервные копии создаются только на основном (первичном) сервисе. Таким образом, в резервную копию попадают данные всех сервисов в этом warehouse. - Если вы восстанавливаете резервную копию с основного сервиса warehouse, она будет развёрнута как полностью новый сервис, не связанный с существующим warehouse. Затем вы можете добавить дополнительные сервисы к этому новому сервису сразу после завершения восстановления. - - ## Использование хранилищ {#using-warehouses} ### Создание хранилища {#creating-a-warehouse} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md index 119938da5de..4d128775e2a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md @@ -8,12 +8,8 @@ doc_type: 'reference' keywords: ['ClickHouse Cloud', 'обзор API', 'облачный API', 'REST API', 'программный доступ'] --- - - # API ClickHouse Cloud {#clickhouse-cloud-api} - - ## Обзор {#overview} ClickHouse Cloud API — это REST API, предназначенный для разработчиков и упрощающий управление @@ -23,8 +19,6 @@ ClickHouse Cloud API — это REST API, предназначенный для [Узнайте, как создать свой первый API-ключ и начать использовать ClickHouse Cloud API.](/cloud/manage/openapi) - - ## Конечная 
точка и интерфейс Swagger (OpenAPI) {#swagger-openapi-endpoint-and-ui} API ClickHouse Cloud построен на основе открытой [спецификации OpenAPI](https://www.openapis.org/), @@ -40,14 +34,10 @@ API ClickHouse Cloud построен на основе открытой [спе Это повлияет на объекты, возвращаемые запросами `POST`, `GET` и `PATCH` для сервисов. Поэтому любой код, который использует этот API, может потребовать доработки для корректной обработки этих изменений. ::: - - ## Лимиты {#rate-limits} Для разработчиков установлено ограничение — не более 100 API‑ключей на организацию. Каждый API‑ключ имеет лимит в 10 запросов за 10 секунд. Если вы хотите увеличить количество API‑ключей или число запросов в 10‑секундное окно для вашей организации, свяжитесь с support@clickhouse.com. - - ## Провайдер Terraform {#terraform-provider} Официальный провайдер Terraform для ClickHouse позволяет использовать [инфраструктуру как код (Infrastructure as Code)](https://www.redhat.com/en/topics/automation/what-is-infrastructure-as-code-iac) @@ -65,16 +55,12 @@ API ClickHouse Cloud построен на основе открытой [спе Теперь вы также сможете указывать поле `num_replicas` как свойство ресурса сервиса. ::: - - ## Terraform и OpenAPI: новое ценообразование и настройки реплик {#terraform-and-openapi-new-pricing---replica-settings-explained} Число реплик, с которым создаётся каждый сервис, по умолчанию равно 3 для тарифов Scale и Enterprise и 1 для тарифа Basic. Для тарифов Scale и Enterprise его можно изменить, передав поле `numReplicas` в запросе на создание сервиса. Значение поля `numReplicas` для первого сервиса в хранилище должно быть от 2 до 20. Сервисы, которые создаются в уже существующем хранилище, могут иметь не менее 1 реплики. - - ## Поддержка {#support} Мы рекомендуем сначала обратиться в [наш канал в Slack](https://clickhouse.com/slack), чтобы получить оперативную поддержку. 
Если diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md index d2ec22f1d4a..7acae6b1934 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md @@ -14,7 +14,6 @@ import image_04 from '@site/static/images/cloud/manage/openapi4.png'; import image_05 from '@site/static/images/cloud/manage/openapi5.png'; import Image from '@theme/IdealImage'; - # Управление ключами API {#managing-api-keys} ClickHouse Cloud предоставляет API на основе OpenAPI, который позволяет программно управлять вашей учетной записью и различными параметрами ваших сервисов. @@ -68,7 +67,6 @@ $ curl --user $KEY_ID:$KEY_SECRET https://api.clickhouse.cloud/v1/organizations - ## Конечные точки {#endpoints} Подробную информацию о конечных точках API см. в [справочнике по API](https://clickhouse.com/docs/cloud/manage/api/swagger). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md index 9aff1f7d657..07016c2d848 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md @@ -90,7 +90,6 @@ import postman17 from '@site/static/images/cloud/manage/postman/postman17.png'; - ## Тестирование возможностей ClickHouse Cloud API {#test-the-clickhouse-cloud-api-functionalities} ### Тест «GET list of available organizations» {#test-get-list-of-available-organizations} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md index d5067c125ef..80e5d028e12 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md @@ -15,7 +15,6 @@ import enroll_fast_release from '@site/static/images/cloud/manage/enroll_fast_re import scheduled_upgrades from '@site/static/images/cloud/manage/scheduled_upgrades.png'; import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled_upgrade_window.png'; - # Обновления {#upgrades} С ClickHouse Cloud вам не нужно заботиться о патчах и обновлениях. Мы регулярно внедряем обновления, которые включают исправления, новые функции и улучшения производительности. Полный список новых возможностей в ClickHouse смотрите в нашем [Cloud changelog](/whats-new/cloud). @@ -26,8 +25,6 @@ import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled В рамках этого изменения исторические данные системных таблиц будут сохраняться максимум 30 дней как часть событий обновления. 
Кроме того, любые данные системных таблиц старше 19 декабря 2024 года для сервисов на AWS или GCP и старше 14 января 2025 года для сервисов на Azure не будут сохранены при миграции на новые уровни организации. ::: - - ## Совместимость версий {#version-compatibility} Когда вы создаёте сервис, настройка [`compatibility`](/operations/settings/settings#compatibility) устанавливается на самую актуальную версию ClickHouse, доступную в ClickHouse Cloud на момент первоначального развёртывания сервиса. @@ -36,16 +33,12 @@ import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled Вы не можете управлять значением настройки `compatibility` по умолчанию на уровне сервиса. Вам необходимо [обратиться в службу поддержки](https://clickhouse.com/support/program), если вы хотите изменить версию, установленную по умолчанию в настройке `compatibility` для вашего сервиса. Однако вы можете переопределить настройку `compatibility` на уровне пользователя, роли, профиля, запроса или сеанса, используя стандартные механизмы настройки ClickHouse, такие как `SET compatibility = '22.3'` в сеансе или `SETTINGS compatibility = '22.3'` в запросе. - - ## Режим обслуживания {#maintenance-mode} Иногда нам может потребоваться обновить ваш сервис, и для этого может понадобиться временно отключить некоторые функции, например масштабирование или автоматический перевод в режим простоя. В редких случаях нам может потребоваться вмешаться в работу сервиса, который испытывает проблемы, и вернуть его в корректное состояние. Во время такого обслуживания на странице сервиса будет отображаться баннер с надписью _«Выполняется обслуживание»_. В этот период вы, возможно, всё ещё сможете выполнять запросы к сервису. С вас не будет взиматься плата за время, пока сервис находится на обслуживании. _Режим обслуживания_ — редкий режим и его не следует путать с плановыми обновлениями сервиса. 
- - ## Каналы релизов (график обновлений) {#release-channels-upgrade-schedule} Пользователи могут задавать график обновлений своего сервиса ClickHouse Cloud, подписываясь на конкретный канал релизов. Доступно три канала релизов, и пользователь может настроить день и время недели для обновлений с помощью функции **scheduled upgrades** (плановых обновлений). @@ -112,8 +105,6 @@ import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled - Переход на более медленный канал не приведёт к откату версии сервиса; он останется на текущей версии до появления более новой версии в этом канале, то есть при переходе с обычного на медленный или с быстрого на обычный или медленный. ::: - - ## Плановые обновления {#scheduled-upgrades} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/06_security.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/06_security.md index 2d50f9c7e9c..dedcd06e939 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/06_security.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/06_security.md @@ -7,16 +7,12 @@ doc_type: 'reference' keywords: ['безопасность', 'безопасность в облаке', 'управление доступом', 'соответствие нормативным требованиям', 'защита данных'] --- - - # Безопасность ClickHouse Cloud {#clickhouse-cloud-security} В этом документе подробно рассматриваются варианты обеспечения безопасности и рекомендуемые практики для защиты организации и сервисов ClickHouse. ClickHouse уделяет особое внимание предоставлению защищённых решений на основе аналитической базы данных, поэтому защита данных и обеспечение целостности сервиса являются приоритетом. Представленная здесь информация охватывает различные методы, призванные помочь пользователям в обеспечении безопасности их сред ClickHouse. 
- - ## Аутентификация в облачной консоли {#cloud-console-auth} ### Аутентификация по паролю {#password-auth} @@ -49,8 +45,6 @@ ClickHouse Cloud поддерживает социальную аутентиф Подробнее об [аутентификации API](/cloud/manage/openapi). - - ## Аутентификация в базе данных {#database-auth} ### Аутентификация по паролю базы данных {#db-password-auth} @@ -65,8 +59,6 @@ ClickHouse Cloud поддерживает социальную аутентиф Узнайте больше об [аутентификации через SSH](/cloud/security/manage-database-users#database-ssh). - - ## Контроль доступа {#access-control} ### Ролевое управление доступом в консоли (RBAC) {#console-rbac} @@ -81,8 +73,6 @@ ClickHouse Cloud поддерживает назначение ролей для Узнайте больше о [грантах пользователям базы данных](/cloud/security/manage-database-users#database-permissions). - - ## Сетевая безопасность {#network-security} ### IP-фильтры {#ip-filters} @@ -97,8 +87,6 @@ ClickHouse Cloud поддерживает назначение ролей для Подробнее о [приватном подключении](/cloud/security/connectivity/private-networking). - - ## Шифрование {#encryption} ### Шифрование на уровне хранилища {#storage-encryption} @@ -119,8 +107,6 @@ ClickHouse Cloud по умолчанию шифрует данные в сост Узнайте подробнее о [ключах шифрования, управляемых клиентом](/cloud/security/cmek#customer-managed-encryption-keys-cmek). - - ## Аудит и ведение журналов {#auditing-logging} ### Журнал аудита консоли {#console-audit-log} @@ -141,8 +127,6 @@ ClickHouse Cloud по умолчанию шифрует данные в сост Подробнее о [руководстве по безопасности BYOC (BYOC Security Playbook)](/cloud/security/audit-logging/byoc-security-playbook). 
- - ## Соответствие требованиям {#compliance} ### Отчёты по безопасности и соответствию требованиям {#compliance-reports} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md index c70f17f2a18..238639547bb 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md @@ -28,7 +28,6 @@ import Image from '@theme/IdealImage'; так и в ClickHouse Cloud. В этой статье мы покажем, как использовать расширенную панель мониторинга в ClickHouse Cloud. - ## Доступ к расширенной панели мониторинга {#accessing-the-advanced-dashboard} Перейдите к расширенной панели мониторинга: @@ -38,8 +37,6 @@ ClickHouse Cloud. - - ## Доступ к встроенной расширенной панели мониторинга {#accessing-the-native-advanced-dashboard} К встроенной расширенной панели мониторинга можно получить доступ, перейдя в: @@ -58,8 +55,6 @@ ClickHouse Cloud. - - ## Готовые визуализации {#out-of-box-visualizations} Базовые графики в Advanced Dashboard предназначены для обеспечения @@ -96,8 +91,6 @@ ClickHouse Cloud. | OS CPU Usage (Userspace) | Использование CPU для выполнения кода в пространстве пользователя | | OS CPU Usage (Kernel) | Использование CPU для выполнения кода ядра | - - ## Особенности ClickHouse Cloud {#clickhouse-cloud-specific} ClickHouse Cloud хранит данные в объектном хранилище типа S3. Мониторинг этого уровня может помочь вовремя обнаруживать проблемы. 
@@ -116,8 +109,6 @@ ClickHouse Cloud хранит данные в объектном хранили | Network receive bytes/sec | Отслеживает текущую скорость входящего сетевого трафика | | Concurrent network connections | Отслеживает количество текущих одновременных сетевых соединений | - - ## Определение проблем с помощью расширенной панели мониторинга {#identifying-issues-with-the-advanced-dashboard} Наличие такого представления в реальном времени о состоянии вашего сервиса ClickHouse значительно помогает @@ -219,7 +210,6 @@ read_rows: 150957260 tables: ['default.amazon_reviews_no_pk'] ``` - Строка 2: ────── type: QueryFinish diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md index d666191d393..7fb78c9557a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md @@ -15,7 +15,6 @@ import prometheus_grafana_metrics_explorer from '@site/static/images/integration import prometheus_datadog from '@site/static/images/integrations/prometheus-datadog.png'; import Image from '@theme/IdealImage'; - # Интеграция с Prometheus {#prometheus-integration} Данная возможность поддерживает интеграцию с [Prometheus](https://prometheus.io/) для мониторинга сервисов ClickHouse Cloud. Доступ к метрикам Prometheus предоставляется через endpoint [ClickHouse Cloud API](/cloud/manage/api/api-overview), который позволяет пользователям безопасно подключаться и экспортировать метрики в свой сборщик метрик Prometheus. Эти метрики можно интегрировать с дашбордами, например, Grafana и Datadog, для визуализации. 
@@ -59,7 +58,6 @@ export SERVICE_ID= curl --silent --user $KEY_ID:$KEY_SECRET https://api.clickhouse.cloud/v1/organizations/$ORG_ID/services/$SERVICE_ID/prometheus?filtered_metrics=true ``` - ### Пример ответа {#sample-response} ```response @@ -187,7 +185,6 @@ scrape_configs: Обратите внимание, что для корректного заполнения метки `instance` параметр конфигурации `honor_labels` должен быть установлен в значение `true`. Кроме того, в приведённом выше примере `filtered_metrics` установлено в `true`, однако это значение следует настраивать в соответствии с предпочтениями пользователя. - ## Интеграция с Grafana {#integrating-with-grafana} Существует два основных способа интеграции с Grafana: @@ -260,7 +257,6 @@ prometheus.remote_write "metrics_service" { Обратите внимание: чтобы метка `instance` заполнялась корректно, параметр конфигурации `honor_labels` должен иметь значение `true`. - ### Самостоятельно управляемая Grafana с Alloy {#grafana-self-managed-with-alloy} Пользователи, развернувшие Grafana самостоятельно, могут найти инструкции по установке агента Alloy [здесь](https://grafana.com/docs/alloy/latest/get-started/install/). Мы предполагаем, что Alloy настроен на отправку метрик Prometheus в нужное целевое назначение. Компонент `prometheus.scrape` ниже настраивает Alloy на опрос конечной точки ClickHouse Cloud Endpoint. Мы предполагаем, что `prometheus.remote_write` получает собранные метрики. При необходимости измените значение ключа `forward_to` на нужное целевое назначение. @@ -293,7 +289,6 @@ prometheus.scrape "clickhouse_cloud" { Обратите внимание, что параметр конфигурации `honor_labels` должен быть установлен в значение `true`, чтобы метка `instance` заполнялась корректно. - ## Интеграция с Datadog {#integrating-with-datadog} Вы можете использовать агент [Datadog Agent](https://docs.datadoghq.com/agent/?tab=Linux) и [интеграцию OpenMetrics](https://docs.datadoghq.com/integrations/openmetrics/) для сбора метрик с endpoint ClickHouse Cloud. 
Ниже приведён простой пример конфигурации для этого агента и интеграции. Обратите внимание, что, возможно, имеет смысл отбирать только те метрики, которые представляют для вас наибольший интерес. Приведённый ниже универсальный пример будет экспортировать тысячи комбинаций «метрика–экземпляр», которые Datadog будет рассматривать как пользовательские метрики. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md index 1c4b849e09d..2886d1d0a30 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md @@ -17,7 +17,6 @@ import endpoints_monitoring from '@site/static/images/cloud/sqlconsole/endpoints import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # Настройка конечных точек API для запросов {#setting-up-query-api-endpoints} Возможность **Query API Endpoints** позволяет создавать конечные точки API непосредственно из любого сохранённого SQL-запроса в консоли ClickHouse Cloud. Вы сможете обращаться к конечным точкам API по HTTP для выполнения своих сохранённых запросов без необходимости подключаться к вашему сервису ClickHouse Cloud через нативный драйвер. 
@@ -135,7 +134,6 @@ GET /query-endpoints/{queryEndpointId}/run POST /query-endpoints/{queryEndpointId}/run ``` - ### HTTP-методы {#http-methods} | Метод | Сценарий использования | Параметры | @@ -251,7 +249,6 @@ POST /query-endpoints/{queryEndpointId}/run SELECT database, name AS num_tables FROM system.tables LIMIT 3; ``` - #### Версия 1 {#version-1} @@ -428,7 +425,6 @@ SELECT name, database FROM system.tables WHERE match(name, {tableNameRegex: Stri - ### Запрос с массивом в переменных запроса, вставляющий данные в таблицу {#request-with-array-in-the-query-variables-that-inserts-data-into-a-table} **SQL таблицы:** @@ -492,7 +488,6 @@ INSERT INTO default.t_arr VALUES ({arr: Array(Array(Array(UInt32)))}); - ### Запрос с настройкой ClickHouse `max_threads`, равной 8 {#request-with-clickhouse-settings-max_threads-set-to-8} **SQL для конечной точки Query API:** @@ -539,7 +534,6 @@ SELECT * FROM system.tables; - ### Выполнить запрос и разобрать ответ как поток` {#request-and-parse-the-response-as-a-stream} **SQL для конечной точки Query API:** @@ -610,7 +604,6 @@ SELECT name, database FROM system.tables; - ### Вставка потока из файла в таблицу {#insert-a-stream-from-a-file-into-a-table} Создайте файл `./samples/my_first_table_2024-07-11.csv` со следующим содержимым: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md index 85c28be3302..a2707ce3dd4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md @@ -17,21 +17,16 @@ import backup_usage from '@site/static/images/cloud/manage/backup-usage.png'; import backup_restore from '@site/static/images/cloud/manage/backup-restore.png'; import backup_service_provisioning from 
'@site/static/images/cloud/manage/backup-service-provisioning.png'; - # Просмотр и восстановление резервных копий {#review-and-restore-backups} В этом руководстве рассматривается, как работают резервные копии в ClickHouse Cloud, какие параметры доступны для настройки резервного копирования вашего сервиса и как выполнить восстановление из резервной копии. - - ## Список статусов резервных копий {#backup-status-list} Резервные копии вашего сервиса будут создаваться по заданному расписанию — будь то ежедневное расписание по умолчанию или [пользовательское расписание](/cloud/manage/backups/configurable-backups), выбранное вами. Все доступные резервные копии можно просмотреть на вкладке сервиса **Backups**. Здесь вы можете увидеть статус резервной копии, продолжительность её создания, а также размер. Вы также можете восстановить конкретную резервную копию, используя столбец **Actions**. - - ## Понимание стоимости резервного копирования {#understanding-backup-cost} Согласно стандартной политике, ClickHouse Cloud предусматривает ежедневное создание резервной копии с периодом хранения 24 часа. Выбор расписания, предполагающего хранение большего объема данных или более частое создание резервных копий, может привести к дополнительным затратам на хранение резервных копий. @@ -50,8 +45,6 @@ import backup_service_provisioning from '@site/static/images/cloud/manage/backup Имейте в виду, что оценочная стоимость резервного копирования будет меняться по мере роста объема данных в сервисе. ::: - - ## Восстановление резервной копии {#restore-a-backup} Резервные копии восстанавливаются в новый сервис ClickHouse Cloud, а не в существующий сервис, с которого была создана резервная копия. 
@@ -64,8 +57,6 @@ import backup_service_provisioning from '@site/static/images/cloud/manage/backup - - ## Работа с восстановленным сервисом {#working-with-your-restored-service} После восстановления резервной копии у вас будет два похожих сервиса: **исходный сервис**, который требовалось восстановить, и новый **восстановленный сервис**, восстановленный из резервной копии исходного сервиса. @@ -147,7 +138,6 @@ FROM remoteSecure('source-hostname', db, table, 'exporter', 'password-here') После успешной записи данных в исходный сервис обязательно проверьте их в этом сервисе. После проверки данных удалите новый сервис. - ## Восстановление удалённых таблиц {#undeleting-or-undropping-tables} Команда `UNDROP` поддерживается в ClickHouse Cloud через [Shared Catalog](https://clickhouse.com/docs/cloud/reference/shared-catalog). @@ -169,13 +159,10 @@ SYNC SETTINGS max_table_size_to_drop=2000000000000 -- увеличивает о Устаревшие тарифные планы: для клиентов на таких планах стандартные ежедневные резервные копии с хранением в течение 24 часов включены в стоимость хранилища. ::: - ## Настраиваемые резервные копии {#configurable-backups} Если вы хотите задать другое расписание резервного копирования, чем используется по умолчанию, ознакомьтесь с разделом [Настраиваемые резервные копии](/cloud/manage/backups/configurable-backups). - - ## Экспорт резервных копий в собственный облачный аккаунт {#export-backups-to-your-own-cloud-account} Инструкции для пользователей, которые хотят экспортировать резервные копии в собственный облачный аккаунт, см. [здесь](/cloud/manage/backups/export-backups-to-own-cloud-account). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md index 85751d66768..39847516590 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md @@ -9,7 +9,6 @@ doc_type: 'landing-page' import TableOfContents from '@site/i18n/ru/docusaurus-plugin-content-docs/current/best-practices/_snippets/_table_of_contents.md'; - # Лучшие практики использования ClickHouse Cloud {#best-practices-in-clickhouse-cloud} В этом разделе представлены рекомендации, которые помогут максимально эффективно использовать ClickHouse Cloud. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md index eac29556e7a..babbe7a1ce4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md @@ -91,7 +91,6 @@ GRANT user_role TO user_1 GRANT user_role TO user_2 ``` - Теперь вы можете подключиться под пользователем `user_1` и выполнить простой запрос SELECT. Будут возвращены только строки из первого тенанта. ```sql @@ -108,7 +107,6 @@ FROM events └───────────┴──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## Отдельные таблицы {#separate-tables} В этом подходе данные каждого арендатора хранятся в отдельной таблице в той же базе данных, что устраняет необходимость в специальном поле для идентификации арендаторов. 
Доступ пользователей ограничивается с помощью [оператора GRANT](/sql-reference/statements/grant), благодаря чему каждый пользователь может получить доступ только к таблицам, содержащим данные соответствующих арендаторов. @@ -201,7 +199,6 @@ FROM default.events_tenant_1 └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## Раздельные базы данных {#separate-databases} Данные каждого арендатора хранятся в отдельной базе данных в одном и том же сервисе ClickHouse. @@ -286,7 +283,6 @@ GRANT SELECT ON tenant_1.events TO user_1 GRANT SELECT ON tenant_2.events TO user_2 ``` - Теперь вы можете подключиться под пользователем `user_1` и выполнить простой SELECT-запрос к таблице `events` соответствующей базы данных. Будут возвращены только строки первого арендатора. ```sql @@ -303,7 +299,6 @@ FROM tenant_1.events └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## Разделение вычислительных ресурсов {#compute-compute-separation} Три описанных выше подхода также можно дополнительно изолировать с помощью [Warehouses](/cloud/reference/warehouses#what-is-a-warehouse). Данные хранятся в общем объектном хранилище, но каждый арендатор может иметь собственный вычислительный сервис благодаря [разделению вычислительных ресурсов](/cloud/reference/warehouses#what-is-compute-compute-separation) с разным соотношением CPU/Memory. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md index ed476f2167f..fb9f29b3259 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md @@ -7,14 +7,10 @@ keywords: ['ClickHouse Cloud', 'совместимость'] doc_type: 'guide' --- - - # Руководство по совместимости с ClickHouse Cloud {#clickhouse-cloud-compatibility-guide} Это руководство даёт общее представление о функциональных возможностях и особенностях эксплуатации ClickHouse Cloud. Хотя ClickHouse Cloud основан на дистрибутиве ClickHouse с открытым исходным кодом, возможны некоторые различия в архитектуре и реализации. В качестве вводного материала может быть полезно прочитать эту запись в блоге о том, [как мы создали ClickHouse Cloud](https://clickhouse.com/blog/building-clickhouse-cloud-from-scratch-in-a-year). - - ## Архитектура ClickHouse Cloud {#clickhouse-cloud-architecture} ClickHouse Cloud значительно упрощает эксплуатацию и снижает затраты на использование ClickHouse в больших масштабах. Нет необходимости заранее задавать размер развертывания, настраивать репликацию для высокой доступности, вручную шардировать данные, масштабировать серверы при росте нагрузки или уменьшать их, когда вы ими не пользуетесь — мы берем это на себя. @@ -26,8 +22,6 @@ ClickHouse Cloud значительно упрощает эксплуатаци - Плавная гибернация для периодических рабочих нагрузок включена по умолчанию. Мы автоматически приостанавливаем вычислительные ресурсы после периода простоя и прозрачно запускаем их снова при поступлении нового запроса, так что вам не приходится платить за неиспользуемые ресурсы. 
- Расширенные параметры масштабирования позволяют задать максимальный порог автомасштабирования для дополнительного контроля затрат или минимальный порог автомасштабирования для резервирования вычислительных ресурсов для приложений с особыми требованиями к производительности. - - ## Возможности {#capabilities} ClickHouse Cloud предоставляет доступ к отобранному набору возможностей из открытой дистрибуции ClickHouse. В таблицах ниже описаны некоторые функции, которые в данный момент отключены в ClickHouse Cloud. @@ -107,8 +101,6 @@ ClickHouse Cloud поддерживает HTTPS, нативные интерфе [Именованные коллекции](/operations/named-collections) в настоящее время не поддерживаются в ClickHouse Cloud. - - ## Операционные значения по умолчанию и особенности {#operational-defaults-and-considerations} Ниже приведены настройки по умолчанию для сервисов ClickHouse Cloud. В некоторых случаях эти параметры зафиксированы, чтобы обеспечить корректную работу сервиса, а в других — могут быть изменены. @@ -132,8 +124,6 @@ ClickHouse Cloud оптимизирован для переменных нагр ### Расширенное администрирование безопасности {#advanced-security-administration} В рамках создания сервиса ClickHouse мы создаем базу данных по умолчанию и пользователя по умолчанию, который имеет широкие права доступа к этой базе данных. Этот начальный пользователь может создавать дополнительных пользователей и назначать им права на эту базу данных. Помимо этого, возможность включения в базе данных таких механизмов безопасности, как аутентификация по Kerberos, LDAP или SSL с использованием сертификатов X.509, в настоящее время не поддерживается. - - ## Дорожная карта {#roadmap} Мы внедряем поддержку исполняемых UDF в ClickHouse Cloud и оцениваем спрос на ряд других функций. Если у вас есть предложения или вы хотите запросить добавление конкретной функции, пожалуйста, [отправьте запрос здесь](https://console.clickhouse.cloud/support). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md index 845a6ad82d0..f706ceb2ee1 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md @@ -14,7 +14,6 @@ import s3_output from '@site/static/images/cloud/security/secures3_output.jpg'; В этой статье показано, как клиенты ClickHouse Cloud могут использовать ролевой доступ для аутентификации в Amazon Simple Storage Service (S3) и безопасного доступа к своим данным. - ## Введение {#introduction} Прежде чем переходить к настройке безопасного доступа к S3, важно понять, как это работает. Ниже приведён обзор того, как сервисы ClickHouse могут получать доступ к приватным S3‑бакетам, принимая на себя роль в аккаунте AWS клиента. @@ -130,7 +129,6 @@ import s3_output from '@site/static/images/cloud/security/secures3_output.jpg'; 4 - После создания скопируйте новый **IAM Role Arn**. Он понадобится для доступа к вашему S3-бакету. - ## Доступ к бакету S3 с ролью ClickHouseAccess {#access-your-s3-bucket-with-the-clickhouseaccess-role} В ClickHouse Cloud появилась новая возможность указывать параметр `extra_credentials` в S3 table function. Ниже приведён пример того, как выполнить запрос, используя только что созданную роль, скопированную выше. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md index b2ef0f235df..92450dbfa11 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md @@ -16,7 +16,6 @@ import byoc_subnet_1 from '@site/static/images/cloud/reference/byoc-subnet-1.png import byoc_subnet_2 from '@site/static/images/cloud/reference/byoc-subnet-2.png'; import byoc_s3_endpoint from '@site/static/images/cloud/reference/byoc-s3-endpoint.png' - ## Процесс подключения {#onboarding-process} Клиенты могут инициировать процесс подключения, связавшись с [нами](https://clickhouse.com/cloud/bring-your-own-cloud). Клиентам необходимо иметь отдельную учетную запись AWS и знать регион, который они будут использовать. В настоящее время пользователи могут запускать сервисы BYOC только в тех регионах, которые поддерживаются в ClickHouse Cloud. @@ -83,7 +82,6 @@ module "clickhouse_onboarding" {
-
@@ -169,8 +167,6 @@ module "clickhouse_onboarding" { Необязательно: после проверки работоспособности peering вы можете запросить отключение публичного балансировщика нагрузки для ClickHouse BYOC. - - ## Процесс обновления {#upgrade-process} Мы регулярно обновляем программное обеспечение, включая обновления версии базы данных ClickHouse, ClickHouse Operator, EKS и других компонентов. @@ -181,8 +177,6 @@ module "clickhouse_onboarding" { Окна обслуживания не распространяются на устранение уязвимостей и исправления, связанные с безопасностью. Они выполняются как внеплановые обновления; мы заблаговременно согласуем подходящее время, чтобы минимизировать влияние на эксплуатацию. ::: - - ## Роли IAM для CloudFormation {#cloudformation-iam-roles} ### Начальная (bootstrap) роль IAM {#bootstrap-iam-role} @@ -217,8 +211,6 @@ module "clickhouse_onboarding" { Наконец, **`data-plane-mgmt`** позволяет компоненту Control Plane ClickHouse Cloud синхронизировать необходимые пользовательские ресурсы (Custom Resources), такие как `ClickHouseCluster` и Istio Virtual Service/Gateway. 
- - ## Границы сети {#network-boundaries} В этом разделе рассматриваются различные типы сетевого трафика в клиентский BYOC VPC и из него: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md index ae2d3cd32e2..91a7c85eb69 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md @@ -7,8 +7,6 @@ keywords: ['готовность к промышленной эксплуата doc_type: 'guide' --- - - # Руководство по подготовке ClickHouse Cloud к промышленной эксплуатации {#production-readiness} Для организаций, которые прошли руководство по быстрому старту и уже имеют активный сервис с поступающими данными @@ -23,8 +21,6 @@ doc_type: 'guide' - Проверить процедуры резервного копирования и задокументировать процессы восстановления после аварий ::: - - ## Введение {#introduction} У вас уже успешно работает ClickHouse Cloud для ваших рабочих нагрузок. Теперь вам нужно довести развертывание до уровня, соответствующего корпоративным стандартам промышленной эксплуатации — будь то в результате аудита на соответствие требованиям, инцидента в продакшене из‑за непроверенного запроса или требований ИТ по интеграции с корпоративными системами. @@ -41,8 +37,6 @@ doc_type: 'guide' Это руководство проведёт вас по каждой из этих областей и поможет перейти от работоспособного развертывания ClickHouse Cloud к системе, готовой для корпоративной эксплуатации. - - ## Стратегия окружений {#environment-strategy} Создавайте отдельные окружения, чтобы безопасно тестировать изменения до того, как они повлияют на рабочие нагрузки в продуктиве. Большинство инцидентов в продуктиве происходит из‑за нетестированных запросов или изменений конфигурации, развернутых напрямую в продуктивных системах. 
@@ -57,8 +51,6 @@ doc_type: 'guide' **Размер окружений**: Подбирайте размер staging‑сервиса так, чтобы он был максимально близок к характеристикам нагрузки в продуктиве. Тестирование на существенно меньшей инфраструктуре может не выявить конфликтов за ресурсы или проблем с масштабированием. Используйте наборы данных, репрезентативные для продуктива, за счет периодического обновления данных или генерации синтетических данных. Рекомендации по выбору размера staging‑окружения и корректному масштабированию сервисов приведены в документации [Рекомендации по выбору размера и аппаратному обеспечению](/guides/sizing-and-hardware-recommendations) и [Масштабирование в ClickHouse Cloud](/manage/scaling). Эти материалы содержат практические советы по выбору объема памяти, CPU и хранилища, а также детали о вариантах вертикального и горизонтального масштабирования, которые помогут сопоставить staging‑окружение с продуктивными рабочими нагрузками. - - ## Частные сети {#private-networking} [Частные сети](/cloud/security/connectivity/private-networking) в ClickHouse Cloud позволяют подключать ваши сервисы ClickHouse непосредственно к виртуальной сети в облаке, гарантируя, что данные не проходят через общедоступный интернет. Это особенно важно для организаций с жесткими требованиями по безопасности или соответствию нормативам, а также для тех, кто запускает приложения в приватных подсетях. @@ -71,8 +63,6 @@ ClickHouse Cloud поддерживает частные сети с помощ Если вам нужны дополнительные технические подробности или пошаговые инструкции по настройке, в связанной документации для каждого провайдера приведены развернутые руководства. - - ## Корпоративная аутентификация и управление пользователями {#enterprise-authentication} Переход от консольного управления пользователями к интеграции с корпоративной системой аутентификации является критически важным для готовности к промышленной эксплуатации. 
@@ -103,8 +93,6 @@ ClickHouse Cloud в настоящее время не поддерживает Узнайте больше об [управлении доступом в ClickHouse Cloud](/cloud/security/cloud_access_management) и [настройке SAML SSO](/cloud/security/saml-setup). - - ## Инфраструктура как код и автоматизация {#infrastructure-as-code} Управление ClickHouse Cloud с использованием подхода «инфраструктура как код» и автоматизации на основе API обеспечивает согласованность, контроль версий и воспроизводимость конфигурации развертывания. @@ -148,7 +136,6 @@ provider "clickhouse" { Аутентификация в API использует тот же подход на основе токенов, что и Terraform. Для полного справочника по API и примеров интеграции см. документацию [ClickHouse Cloud API](/cloud/manage/api/api-overview). - ## Мониторинг и операционная интеграция {#monitoring-integration} Подключение ClickHouse Cloud к существующей мониторинговой инфраструктуре обеспечивает наблюдаемость и проактивное обнаружение проблем. @@ -179,7 +166,6 @@ scrape_configs: Для полноценной настройки, включая подробную конфигурацию Prometheus/Grafana и расширенные правила оповещения, см. [руководство по наблюдаемости в ClickHouse Cloud](/use-cases/observability/cloud-monitoring#prometheus). - ## Обеспечение непрерывности бизнеса и интеграция со службой поддержки {#business-continuity} Наличие процедур проверки резервных копий и интеграции со службой поддержки гарантирует, что ваше развертывание ClickHouse Cloud сможет восстанавливаться после инцидентов и получать помощь при необходимости. @@ -206,8 +192,6 @@ ClickHouse Cloud предоставляет автоматические рез Узнайте больше о [резервном копировании и восстановлении в ClickHouse Cloud](/cloud/manage/backups/overview) и [сервисах поддержки](/about-us/support). 
- - ## Следующие шаги {#next-steps} После внедрения интеграций и процедур, описанных в этом руководстве, перейдите к разделу [обзор ресурсов Cloud](/cloud/get-started/cloud/resource-tour), чтобы ознакомиться с руководствами по [мониторингу](/cloud/get-started/cloud/resource-tour#monitoring), [безопасности](/cloud/get-started/cloud/resource-tour#security) и [оптимизации затрат](/cloud/get-started/cloud/resource-tour#cost-optimization). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md index e1f6dc345b5..7e699eb5228 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md @@ -16,7 +16,6 @@ import step_5 from '@site/static/images/cloud/guides/sql_console/service_level_a import step_6 from '@site/static/images/cloud/guides/sql_console/service_level_access/6_service_settings.png' import step_7 from '@site/static/images/cloud/guides/sql_console/service_level_access/7_service_settings.png' - # Настройка назначений ролей для SQL-консоли {#configuring-sql-console-role-assignments} > В этом руководстве описано, как настроить назначения ролей для SQL-консоли, которые определяют глобальные права доступа к консоли и функции, доступные пользователю в консоли Cloud. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md index bc0a20548b5..86746e8437e 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md @@ -16,7 +16,6 @@ import user_grant_permissions_options from '@site/static/images/cloud/security/c Пользователи SQL-консоли создаются для каждого сеанса и аутентифицируются с использованием X.509-сертификатов, которые автоматически обновляются. Пользователь удаляется при завершении сеанса. При формировании списков доступа для аудита перейдите на вкладку Settings для соответствующего сервиса в консоли и учитывайте доступ через SQL-консоль помимо пользователей, уже существующих в базе данных. Если настроены пользовательские роли, доступ пользователя отражается в роли, имя которой оканчивается на имя пользователя. - ## Пользователи и роли консоли SQL {#sql-console-users-and-roles} Базовые роли консоли SQL могут быть назначены пользователям с правами Service Read Only и Service Admin. Для получения дополнительной информации см. раздел [Manage SQL Console Role Assignments](/cloud/guides/sql-console/manage-sql-console-role-assignments). В этом руководстве показано, как создать настраиваемую роль для пользователя консоли SQL. @@ -52,8 +51,6 @@ GRANT database_developer TO `sql-console-role:my.user@domain.com`; - - ## Аутентификация в базе данных {#database-authentication} ### Идентификатор пользователя базы данных и пароль {#database-user-id--password} @@ -80,7 +77,6 @@ CREATE USER userName IDENTIFIED WITH sha256_hash BY 'hash'; Подробное пошаговое руководство с примерами см. 
в статье [How to connect to ClickHouse Cloud using SSH keys](/knowledgebase/how-to-connect-to-ch-cloud-using-ssh-keys) в нашей базе знаний. - ## Права доступа к базе данных {#database-permissions} Настройте следующие параметры в сервисах и базах данных с помощью SQL-оператора [GRANT](/sql-reference/statements/grant). @@ -161,7 +157,6 @@ GRANT default_role to userID; e. Нажмите на ссылку с числом пользователей, имеющих доступ к базе данных `There are # users with access to this service.`, чтобы просмотреть список пользователей. - ## Пользователи warehouse {#warehouse-users} Пользователи warehouse являются общими для всех сервисов в пределах одного и того же warehouse. Для получения дополнительной информации ознакомьтесь с разделом [контроль доступа к warehouse](/cloud/reference/warehouses#access-controls). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md index f58ccef59de..dd57d2b24cf 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md @@ -15,7 +15,6 @@ import samlAzureApp from '@site/static/images/cloud/security/saml-azure-app.png' import samlAzureClaims from '@site/static/images/cloud/security/saml-azure-claims.png'; import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' - # Настройка SAML SSO {#saml-sso-setup} @@ -24,16 +23,12 @@ ClickHouse Cloud поддерживает единый вход (SSO) с исп В настоящее время мы поддерживаем SSO, инициируемый со стороны поставщика услуг (service provider-initiated), несколько организаций с использованием отдельных подключений и just-in-time‑подготовку учётных записей (provisioning). 
Мы пока не поддерживаем систему управления идентификацией между доменами (SCIM) или сопоставление атрибутов. - - ## Прежде чем начать {#before-you-begin} Вам потребуются права администратора в вашем IdP и роль **Admin** в вашей организации ClickHouse Cloud. После настройки подключения в вашем IdP свяжитесь с нами, предоставив информацию, указанную в процедуре ниже, чтобы завершить процесс. Мы рекомендуем, помимо SAML-подключения, настроить **прямую ссылку на вашу организацию**, чтобы упростить процесс входа. Каждый IdP реализует это по-своему. Ниже описано, как сделать это для вашего IdP. - - ## Как настроить ваш IdP {#how-to-configure-your-idp} ### Шаги {#steps} @@ -148,8 +143,6 @@ ClickHouse Cloud поддерживает единый вход (SSO) с исп - -
{" "} @@ -257,7 +250,6 @@ ClickHouse Cloud поддерживает единый вход (SSO) с исп | App attributes | email | 12. Нажмите **Finish**. - 14. Чтобы включить приложение, нажмите **OFF** для всех и измените настройку на **ON** для всех. Доступ также можно ограничить группами или организационными подразделениями, выбрав соответствующие параметры в левой части экрана.
@@ -342,7 +334,6 @@ Azure (Microsoft) SAML также может называться Azure Active D - ## Как это работает {#how-it-works} ### Управление пользователями с помощью SAML SSO {#user-management-with-saml-sso} @@ -357,8 +348,6 @@ Azure (Microsoft) SAML также может называться Azure Active D ClickHouse Cloud поддерживает мультиорганизационное SSO, предоставляя отдельное подключение для каждой организации. Используйте прямую ссылку (`https://console.clickhouse.cloud/?connection={organizationid}`) для входа в каждую соответствующую организацию. Перед входом в другую организацию обязательно выйдите из текущей. - - ## Дополнительная информация {#additional-information} Безопасность — наш главный приоритет, когда речь идет об аутентификации. По этой причине при реализации SSO мы приняли несколько решений, о которых вам необходимо знать. @@ -367,8 +356,6 @@ ClickHouse Cloud поддерживает мультиорганизационн - **Мы не связываем автоматически учетные записи с SSO и без него.** В вашем списке пользователей ClickHouse вы можете видеть несколько учетных записей для одних и тех же пользователей, даже если они используют один и тот же адрес электронной почты. 
- - ## Устранение распространённых проблем {#troubleshooting-common-issues} | Ошибка | Причина | Решение | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md index 6c97cb9e13c..3eb2bb975f1 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md @@ -11,7 +11,6 @@ import Image from '@theme/IdealImage'; import ip_filtering_after_provisioning from '@site/static/images/cloud/security/ip-filtering-after-provisioning.png'; import ip_filter_add_single_ip from '@site/static/images/cloud/security/ip-filter-add-single-ip.png'; - ## Настройка IP-фильтров {#setting-ip-filters} Списки IP-доступа фильтруют трафик к сервисам ClickHouse или по API-ключам, указывая, с каких исходных адресов разрешено подключение. Эти списки настраиваются отдельно для каждого сервиса и для каждого API-ключа. Списки можно задать при создании сервиса или API-ключа, а также изменить позднее. @@ -20,16 +19,12 @@ import ip_filter_add_single_ip from '@site/static/images/cloud/security/ip-filte Если вы пропустите создание списка IP-доступа для сервиса ClickHouse Cloud, к этому сервису не будет разрешён никакой трафик. Если для списков IP-доступа сервисов ClickHouse установлено значение `Allow from anywhere`, ваш сервис может периодически переводиться из неактивного состояния в активное интернет-сканерами и краулерами, которые ищут публичные IP-адреса, что может привести к незначительным непредвиденным расходам. ::: - - ## Подготовка {#prepare} Прежде чем начать, соберите IP-адреса или диапазоны, которые необходимо добавить в список доступа. Учтите удалённых сотрудников, места дежурств, VPN и т.п. 
Интерфейс управления списком доступа по IP принимает как отдельные адреса, так и записи в нотации CIDR. Нотация Classless Inter-domain Routing (CIDR) позволяет указывать диапазоны IP-адресов, меньшие, чем традиционные размеры масок подсетей классов A, B или C (/8, /16 или /24). [ARIN](https://account.arin.net/public/cidrCalculator) и ряд других организаций предоставляют калькуляторы CIDR при необходимости. Если вы хотите получить дополнительную информацию о нотации CIDR, ознакомьтесь с RFC [Classless Inter-domain Routing (CIDR)](https://www.rfc-editor.org/rfc/rfc4632.html). - - ## Создание или изменение списка доступа по IP-адресам {#create-or-modify-an-ip-access-list} :::note Применимо только к подключениям вне PrivateLink @@ -90,8 +85,6 @@ import ip_filter_add_single_ip from '@site/static/images/cloud/security/ip-filte Чтобы применить внесённые изменения, нажмите **Save**. - - ## Проверка {#verification} После создания фильтра убедитесь, что можно подключиться к сервису из разрешённого диапазона, и что подключения из‑вне этого диапазона блокируются. Для проверки можно использовать простую команду `curl`: @@ -118,7 +111,6 @@ curl https://.clickhouse.cloud:8443 Ok. 
``` - ## Ограничения {#limitations} - Списки доступа по IP-адресам на данный момент поддерживают только IPv4 diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md index 03ab2a836b2..59dfd8c15ca 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md @@ -18,7 +18,6 @@ import pe_remove_private_endpoint from '@site/static/images/cloud/security/pe-re import aws_private_link_pe_filters from '@site/static/images/cloud/security/aws-privatelink-pe-filters.png'; import aws_private_link_ped_nsname from '@site/static/images/cloud/security/aws-privatelink-pe-dns-name.png'; - # AWS PrivateLink {#aws-privatelink} @@ -69,14 +68,10 @@ ClickHouse Cloud поддерживает [межрегиональный Privat Примеры Terraform см. [здесь](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/). - - ## Важные замечания {#considerations} ClickHouse пытается группировать ваши сервисы, чтобы повторно использовать одну и ту же опубликованную [конечную точку сервиса](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html#endpoint-service-overview) в пределах региона AWS. Однако такая группировка не гарантируется, особенно если вы распределяете свои сервисы между несколькими организациями ClickHouse. Если у вас уже настроен PrivateLink для других сервисов в вашей организации ClickHouse, во многих случаях вы можете пропустить большинство шагов благодаря такой группировке и перейти сразу к финальному шагу: добавьте «Endpoint ID» ClickHouse в список разрешённых для сервиса ClickHouse. 
- - ## Предварительные требования для этого процесса {#prerequisites} Перед началом вам потребуется: @@ -84,8 +79,6 @@ ClickHouse пытается группировать ваши сервисы, ч 1. Ваша учётная запись AWS. 1. [API-ключ ClickHouse](/cloud/manage/openapi) с необходимыми правами для создания и управления частными конечными точками на стороне ClickHouse. - - ## Шаги {#steps} Следуйте этим шагам, чтобы подключить сервисы ClickHouse Cloud через AWS PrivateLink. @@ -178,7 +171,6 @@ jq .result #### Вариант 2: AWS CloudFormation {#option-2-aws-cloudformation} - Далее необходимо создать VPC Endpoint, используя `Service name`console или `endpointServiceId`API, полученные на шаге [Obtain Endpoint "Service name" ](#obtain-endpoint-service-info). Убедитесь, что вы используете соответствующие идентификаторы подсетей (subnet IDs), группы безопасности (security groups) и идентификатор VPC (VPC ID). @@ -281,7 +273,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ -d @pl_config.json | jq ``` - Чтобы удалить идентификатор конечной точки из списка разрешённых конечных точек: ```bash @@ -343,7 +334,6 @@ jq .result В этом примере соединение по имени хоста со значением `privateDnsHostname` будет маршрутизировано через PrivateLink, а соединение по имени хоста со значением `endpointServiceId` — через интернет. 
- ## Устранение неполадок {#troubleshooting} ### Несколько PrivateLink в одном регионе {#multiple-privatelinks-in-one-region} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md index 7daf5a64401..f62a92663a3 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md @@ -21,7 +21,6 @@ import gcp_pe_remove_private_endpoint from '@site/static/images/cloud/security/g import gcp_privatelink_pe_filters from '@site/static/images/cloud/security/gcp-privatelink-pe-filters.png'; import gcp_privatelink_pe_dns from '@site/static/images/cloud/security/gcp-privatelink-pe-dns.png'; - # Private Service Connect {#private-service-connect} @@ -50,16 +49,12 @@ Private Service Connect (PSC) — это сетевая возможность G 1. Добавьте идентификатор конечной точки (Endpoint ID) в сервис ClickHouse Cloud. 1. Добавьте идентификатор конечной точки (Endpoint ID) в список разрешённых подключений (allow list) сервиса ClickHouse. - - ## Внимание {#attention} ClickHouse пытается группировать ваши сервисы, чтобы повторно использовать один и тот же опубликованный [PSC endpoint](https://cloud.google.com/vpc/docs/private-service-connect) в пределах региона GCP. Однако такая группировка не гарантируется, особенно если вы распределяете свои сервисы между несколькими организациями ClickHouse. 
Если у вас уже настроен PSC для других сервисов в вашей организации ClickHouse, вы часто можете пропустить большинство шагов благодаря этой группировке и перейти непосредственно к заключительному шагу: [добавить «Endpoint ID» в список разрешённых сервисов ClickHouse](#add-endpoint-id-to-services-allow-list). Примеры Terraform доступны [здесь](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/). - - ## Прежде чем начать {#before-you-get-started} :::note @@ -98,7 +93,6 @@ jq ".result[] | select (.region==\"${REGION:?}\" and .provider==\"${PROVIDER:?}\ * Вы можете [создать новый ключ](/cloud/manage/openapi) или использовать уже существующий. ::: - ## Получите подключение сервиса GCP (service attachment) и имя DNS для Private Service Connect {#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect} ### Вариант 1: консоль ClickHouse Cloud {#option-1-clickhouse-cloud-console} @@ -125,7 +119,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud Запомните значения `endpointServiceId` и `privateDnsHostname`. Они понадобятся на следующих шагах. - ## Создание конечной точки сервиса {#create-service-endpoint} :::important @@ -218,7 +211,6 @@ output "psc_connection_id" { используйте значение `endpointServiceId`API или `Service name`console из шага [Obtain GCP service attachment for Private Service Connect](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect) ::: - ## Задайте приватное DNS-имя для конечной точки {#set-private-dns-name-for-endpoint} :::note @@ -227,8 +219,6 @@ output "psc_connection_id" { Необходимо указать DNS-имя, полученное на шаге [Obtain GCP service attachment for Private Service Connect](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect), на IP-адрес конечной точки GCP Private Service Connect. Это гарантирует, что сервисы и компоненты внутри вашей VPC/сети смогут корректно разрешать это имя. 
- - ## Добавление Endpoint ID в организацию ClickHouse Cloud {#add-endpoint-id-to-clickhouse-cloud-organization} ### Вариант 1: консоль ClickHouse Cloud {#option-1-clickhouse-cloud-console-1} @@ -288,7 +278,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}" -d @pl_config_org.json ``` - ## Добавить «Endpoint ID» в список разрешённых для сервиса ClickHouse {#add-endpoint-id-to-services-allow-list} Необходимо добавить Endpoint ID в список разрешённых для каждого экземпляра сервиса, который должен быть доступен через Private Service Connect. @@ -343,7 +332,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" -d @pl_config.json | jq ``` - ## Доступ к экземпляру с использованием Private Service Connect {#accessing-instance-using-private-service-connect} Каждый сервис с включённым Private Link имеет две конечные точки: публичную и приватную. Для подключения по Private Link необходимо использовать приватную конечную точку `privateDnsHostname`, значение которой берётся из раздела [Obtain GCP service attachment for Private Service Connect](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect). @@ -371,7 +359,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud В этом примере подключение к хосту `xxxxxxx.yy-xxxxN.p.gcp.clickhouse.cloud` будет выполняться через Private Service Connect, тогда как `xxxxxxx.yy-xxxxN.gcp.clickhouse.cloud` будет использовать соединение через интернет. 
- ## Устранение неполадок {#troubleshooting} ### Проверка конфигурации DNS {#test-dns-setup} @@ -404,7 +391,6 @@ DNS_NAME — используйте `privateDnsHostname` из шага [Obta openssl s_client -connect ${DNS_NAME}:9440 ``` - ```response # highlight-next-line {#highlight-next-line} CONNECTED(00000003) @@ -447,7 +433,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X GET -H "Content-Type: appl Чтобы настроить такое подключение, задайте правила межсетевого экрана (firewall) в вашем GCP VPC так, чтобы разрешить подключения из ClickHouse Cloud к вашему внутреннему/приватному сервису базы данных. Ознакомьтесь со [стандартными исходящими (egress) IP-адресами для регионов ClickHouse Cloud](/manage/data-sources/cloud-endpoints-api), а также с [доступными статическими IP-адресами](https://api.clickhouse.cloud/static-ips.json). - ## Дополнительная информация {#more-information} Для получения более подробной информации см. [cloud.google.com/vpc/docs/configure-private-service-connect-services](https://cloud.google.com/vpc/docs/configure-private-service-connect-services). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md index e78a2b3261d..b38bba16657 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md @@ -27,7 +27,6 @@ import azure_pe_remove_private_endpoint from '@site/static/images/cloud/security import azure_privatelink_pe_filter from '@site/static/images/cloud/security/azure-privatelink-pe-filter.png'; import azure_privatelink_pe_dns from '@site/static/images/cloud/security/azure-privatelink-pe-dns.png'; - # Azure Private Link {#azure-private-link} @@ -54,16 +53,12 @@ Azure поддерживает межрегиональное подключен В ClickHouse Cloud для Azure PrivateLink фильтрация была переведена с использования resourceGUID на фильтры по Resource ID. Вы всё ещё можете использовать resourceGUID, так как он сохраняет обратную совместимость, но мы рекомендуем перейти на фильтры по Resource ID. Для миграции просто создайте новую конечную точку (endpoint) с использованием Resource ID, привяжите её к сервису и удалите старую конечную точку, основанную на resourceGUID. ::: - - ## Внимание {#attention} ClickHouse пытается сгруппировать ваши сервисы, чтобы повторно использовать одну и ту же опубликованную [службу Private Link](https://learn.microsoft.com/en-us/azure/private-link/private-link-service-overview) в пределах региона Azure. Однако такая группировка не гарантируется, особенно если вы распределяете свои сервисы между несколькими организациями ClickHouse. 
Если у вас уже настроен Private Link для других сервисов в вашей организации ClickHouse, вы в большинстве случаев можете пропустить основную часть шагов благодаря этой группировке и перейти сразу к последнему шагу: [добавить идентификатор ресурса Private Endpoint в список разрешённых для ваших сервисов](#add-private-endpoint-id-to-services-allow-list). Примеры Terraform можно найти в репозитории [Terraform Provider для ClickHouse](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/). - - ## Получение псевдонима подключения Azure для Private Link {#obtain-azure-connection-alias-for-private-link} ### Вариант 1: консоль ClickHouse Cloud {#option-1-clickhouse-cloud-console} @@ -109,7 +104,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud Сохраните значение `endpointServiceId`. Оно потребуется на следующем шаге. - ## Создание приватной конечной точки в Azure {#create-private-endpoint-in-azure} :::important @@ -216,7 +210,6 @@ resource "azurerm_private_endpoint" "example_clickhouse_cloud" { Идентификатор ресурса частной конечной точки доступен в портале Azure. Откройте частную конечную точку, созданную на предыдущем шаге, и нажмите **JSON View**: - В разделе «Свойства» найдите поле `id` и скопируйте его значение: @@ -229,8 +222,6 @@ resource "azurerm_private_endpoint" "example_clickhouse_cloud" { - - ## Настройка DNS для Private Link {#setting-up-dns-for-private-link} Вам необходимо создать зону Private DNS (`${location_code}.privatelink.azure.clickhouse.cloud`) и подключить её к вашей виртуальной сети (VNet), чтобы получить доступ к ресурсам через Private Link. @@ -310,7 +301,6 @@ nslookup xxxxxxxxxx.westus3.privatelink.azure.clickhouse.cloud. 
Адрес: 10.0.0.4 ``` - ## Добавление идентификатора ресурса Private Endpoint в организацию ClickHouse Cloud {#add-the-private-endpoint-id-to-your-clickhouse-cloud-organization} ### Вариант 1: консоль ClickHouse Cloud {#option-1-clickhouse-cloud-console-1} @@ -379,7 +369,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}" -d @pl_config_org.json ``` - ## Добавьте идентификатор ресурса частной конечной точки (Private Endpoint Resource ID) в allow list вашего сервиса (или сервисов) {#add-private-endpoint-id-to-services-allow-list} По умолчанию сервис ClickHouse Cloud недоступен по соединению Private Link, даже если соединение Private Link одобрено и установлено. Необходимо явно добавить идентификатор ресурса частной конечной точки (Private Endpoint Resource ID) для каждого сервиса, который должен быть доступен через Private Link. @@ -443,7 +432,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" -d @pl_config.json | jq ``` - ## Доступ к вашему сервису ClickHouse Cloud с использованием Private Link {#access-your-clickhouse-cloud-service-using-private-link} Каждый сервис с включённым Private Link имеет публичную и приватную конечную точку. Для подключения через Private Link необходимо использовать приватную конечную точку — это `privateDnsHostname`API или `DNS name`console, полученные на шаге [Получение псевдонима подключения Azure для Private Link](#obtain-azure-connection-alias-for-private-link). @@ -486,7 +474,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud Используйте `privateDnsHostname` для подключения к вашей службе ClickHouse Cloud через Private Link. 
- ## Устранение неполадок {#troubleshooting} ### Проверка настроек DNS {#test-dns-setup} @@ -525,7 +512,6 @@ OpenSSL должен успешно подключиться (см. CONNECTED в openssl s_client -connect abcd.westus3.privatelink.azure.clickhouse.cloud:9440 ``` - ```response # highlight-next-line {#highlight-next-line} CONNECTED(00000003) @@ -564,7 +550,6 @@ INSTANCE_ID=<идентификатор инстанса> curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X GET -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" | jq .result.privateEndpointIds ``` - ## Дополнительная информация {#more-information} Для получения дополнительной информации об Azure Private Link посетите [azure.microsoft.com/en-us/products/private-link](https://azure.microsoft.com/en-us/products/private-link). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md index 89a9e95cac2..f3fc18e333c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md @@ -11,11 +11,8 @@ import Image from '@theme/IdealImage'; import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' import cmek_performance from '@site/static/images/_snippets/cmek-performance.png'; - # Шифрование данных {#data-encryption} - - ## Шифрование на уровне хранилища {#storage-encryption} ClickHouse Cloud по умолчанию использует шифрование данных в состоянии покоя с использованием управляемых облачным провайдером ключей AES‑256. 
Для получения дополнительной информации см.: @@ -23,8 +20,6 @@ ClickHouse Cloud по умолчанию использует шифровани - [Шифрование данных в состоянии покоя по умолчанию в GCP](https://cloud.google.com/docs/security/encryption/default-encryption) - [Шифрование данных в состоянии покоя в службе хранилища Azure](https://learn.microsoft.com/en-us/azure/storage/common/storage-service-encryption) - - ## Шифрование на уровне базы данных {#database-encryption} @@ -111,16 +106,12 @@ TDE необходимо включить при создании сервиса #### Служба опроса ключей KMS {#kms-key-poller} - - При использовании CMEK валидность предоставленного ключа KMS проверяется каждые 10 минут. Если ключ KMS становится недоступным, служба ClickHouse будет остановлена. Чтобы возобновить работу службы, восстановите доступ к ключу KMS, следуя инструкциям в данном руководстве, а затем перезапустите службу. ### Резервное копирование и восстановление {#backup-and-restore} Резервные копии шифруются тем же ключом, что и связанная служба. При восстановлении зашифрованной резервной копии создаётся зашифрованный экземпляр, который использует тот же ключ KMS, что и исходный экземпляр. При необходимости вы можете выполнить ротацию ключа KMS после восстановления; дополнительные сведения см. в разделе [Ротация ключей](#key-rotation). - - ## Производительность {#performance} Шифрование базы данных использует встроенную в ClickHouse [виртуальную файловую систему для шифрования данных](/operations/storing-data#encrypted-virtual-file-system) для шифрования и защиты ваших данных. 
Для этой функции используется алгоритм `AES_256_CTR`, который, как ожидается, приведёт к снижению производительности на 5–15 % в зависимости от нагрузки: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md index d26da1fc8c4..42d17c8c086 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md @@ -7,8 +7,6 @@ doc_type: 'guide' keywords: ['ведение журнала аудита', 'журналы базы данных', 'соответствие требованиям', 'безопасность', 'мониторинг'] --- - - # Журнал аудита базы данных {#database-audit-log} ClickHouse по умолчанию предоставляет журналы аудита базы данных. На этой странице основное внимание уделяется журналам, связанным с безопасностью. Для получения дополнительной информации о данных, записываемых системой, см. документацию по [системным таблицам](/operations/system-tables/overview). @@ -17,8 +15,6 @@ ClickHouse по умолчанию предоставляет журналы а Информация записывается непосредственно в системные таблицы и по умолчанию хранится в течение 30 дней. Этот период может быть дольше или короче и зависит от частоты слияний в системе. Пользователи могут предпринять дополнительные меры для более длительного хранения логов или экспорта логов в систему управления информацией и событиями безопасности (SIEM) для долгосрочного хранения. Подробности ниже. ::: - - ## Журналы, связанные с безопасностью {#security-relevant-logs} ClickHouse записывает события базы данных, связанные с безопасностью, в первую очередь в журналы сеансов и запросов. 
@@ -53,13 +49,10 @@ FROM clusterAllReplicas('default', system.query_log) WHERE user=’compromised_account’ ``` - ## Сохранение данных журналов в сервисах {#reatining-log-data-within-services} Клиенты, которым требуется более долгосрочное хранение или повышенная надежность журналов, могут использовать материализованные представления для достижения этих целей. Дополнительные сведения о том, что такое материализованные представления, каковы их преимущества и как их реализовать, см. в наших видеоматериалах и документации по [материализованным представлениям](/materialized-views). - - ## Экспорт журналов {#exporting-logs} Системные журналы можно записывать или экспортировать в хранилище в разных форматах, совместимых с SIEM-системами. Для получения дополнительной информации ознакомьтесь с нашей документацией по [табличным функциям](/sql-reference/table-functions). Наиболее распространённые способы: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md index 7b77b82d765..8a7943806bd 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md @@ -7,20 +7,14 @@ doc_type: 'guide' keywords: ['byoc', 'security', 'playbook', 'best practices', 'compliance'] --- - - # Руководство по безопасности BYOC {#byoc-security-playbook} ClickHouse предоставляет услугу Bring Your Own Cloud (BYOC) в рамках модели совместной ответственности в области безопасности, описание которой можно загрузить из нашего Trust Center по адресу https://trust.clickhouse.com. Приведённая ниже информация предоставляется клиентам BYOC в качестве примеров того, как можно выявлять потенциальные события, связанные с безопасностью. 
Клиентам следует учитывать эту информацию в контексте собственной программы безопасности, чтобы определить, будут ли полезны дополнительные механизмы обнаружения и оповещения. - - ## Потенциально скомпрометированные учетные данные ClickHouse {#compromised-clickhouse-credentials} См. документацию по [журналу аудита базы данных](/cloud/security/audit-logging/database-audit-log) для примеров запросов по обнаружению атак с использованием учетных данных и запросов для расследования вредоносной активности. - - ## Атака отказа в обслуживании на уровне приложения {#application-layer-dos-attack} Существует множество методов проведения атаки отказа в обслуживании (DoS). Если атака нацелена на вывод из строя экземпляра ClickHouse с помощью специальной полезной нагрузки, восстановите систему в рабочее состояние или перезагрузите её и ограничьте доступ, чтобы вернуть контроль. Используйте следующий запрос для просмотра [system.crash_log](/operations/system-tables/crash_log) и получения дополнительной информации об атаке. @@ -30,15 +24,12 @@ SELECT * FROM clusterAllReplicas('default',system.crash_log) ``` - ## Скомпрометированные роли AWS, созданные ClickHouse {#compromised-clickhouse-created-aws-roles} ClickHouse использует предварительно созданные роли для обеспечения работы системных функций. В этом разделе предполагается, что заказчик использует AWS с CloudTrail и имеет доступ к журналам CloudTrail. Если инцидент может быть результатом скомпрометированной роли, просмотрите действия в CloudTrail и CloudWatch, связанные с ролями и действиями ClickHouse IAM. Обратитесь к стеку [CloudFormation](/cloud/reference/byoc/onboarding/aws#cloudformation-iam-roles) или модулю Terraform, предоставленным в рамках первоначальной настройки, чтобы получить список ролей IAM. - - ## Несанкционированный доступ к кластеру EKS {#unauthorized-access-eks-cluster} ClickHouse BYOC работает внутри EKS. 
В этом разделе предполагается, что клиент использует в AWS сервисы CloudTrail и CloudWatch и имеет доступ к журналам. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md index 3a7a6da8ef5..b1826567fb5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md @@ -15,7 +15,6 @@ import rta_3 from '@site/static/images/cloud/onboard/discover/use_cases/3_rta.pn - - ## ClickHouse Cloud в качестве назначения {#clickhouse-cloud-destination} См. официальную документацию на сайте Fivetran: @@ -51,8 +46,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - [Обзор назначения ClickHouse](https://fivetran.com/docs/destinations/clickhouse) - [Руководство по настройке назначения ClickHouse](https://fivetran.com/docs/destinations/clickhouse/setup-guide) - - ## Свяжитесь с нами {#contact-us} Если у вас есть вопросы или предложения по новой функциональности, пожалуйста, создайте [запрос в службу поддержки](/about-us/support). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md index 1398184be7d..b6ad72f6d2b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md @@ -30,7 +30,6 @@ import nifi14 from '@site/static/images/integrations/data-ingestion/etl-tools/ni import nifi15 from '@site/static/images/integrations/data-ingestion/etl-tools/nifi_15.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение Apache NiFi к ClickHouse {#connect-apache-nifi-to-clickhouse} @@ -42,27 +41,20 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - ## Соберите сведения о подключении {#1-gather-your-connection-details} - - ## Загрузите и запустите Apache NiFi {#2-download-and-run-apache-nifi} Для нового развертывания скачайте двоичный файл с https://nifi.apache.org/download.html и запустите NiFi командой `./bin/nifi.sh start` - - ## Загрузите драйвер ClickHouse JDBC {#3-download-the-clickhouse-jdbc-driver} 1. Перейдите на страницу релизов драйвера ClickHouse JDBC на GitHub и найдите последнюю версию JDBC-драйвера 2. В выбранной версии релиза нажмите «Show all xx assets» и найдите JAR-файл, содержащий ключевое слово `shaded` или `all`, например `clickhouse-jdbc-0.5.0-all.jar` 3. Поместите JAR-файл в каталог, доступный Apache NiFi, и запомните абсолютный путь к нему - - ## Добавьте службу контроллера `DBCPConnectionPool` и настройте её свойства {#4-add-dbcpconnectionpool-controller-service-and-configure-its-properties} 1. 
Чтобы настроить Controller Service в Apache NiFi, перейдите на страницу NiFi Flow Configuration, нажав кнопку с иконкой шестерёнки @@ -107,8 +99,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## Чтение из таблицы с помощью процессора `ExecuteSQL` {#5-read-from-a-table-using-the-executesql-processor} 1. Добавьте процессор `ExecuteSQL` вместе с соответствующими входящими и последующими процессорами @@ -134,8 +124,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## Запись в таблицу с использованием процессоров `MergeRecord` и `PutDatabaseRecord` {#6-write-to-a-table-using-mergerecord-and-putdatabaserecord-processor} 1. Для записи нескольких строк в одной операции вставки необходимо сначала объединить несколько записей в одну. Это можно сделать с помощью процессора `MergeRecord` diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md index 302eb98e87d..1d4badd8122 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md @@ -13,7 +13,6 @@ import Image from '@theme/IdealImage'; import GCS_examine_bucket_1 from '@site/static/images/integrations/data-ingestion/s3/GCS-examine-bucket-1.png'; import GCS_examine_bucket_2 from '@site/static/images/integrations/data-ingestion/s3/GCS-examine-bucket-2.png'; - # Интеграция Google Cloud Storage с ClickHouse {#integrate-google-cloud-storage-with-clickhouse} :::note @@ -22,8 +21,6 @@ import GCS_examine_bucket_2 from '@site/static/images/integrations/data-ingestio В ClickHouse GCS рассматривается как привлекательное решение для хранения данных для пользователей, стремящихся разделить хранение и вычисления. Для этого реализована поддержка использования GCS в качестве хранилища для движка MergeTree. 
Это позволит пользователям использовать масштабируемость и экономические преимущества GCS, а также производительность вставки и выполнения запросов движка MergeTree. - - ## MergeTree с хранилищем в GCS {#gcs-backed-mergetree} ### Создание диска {#creating-a-disk} @@ -140,7 +137,6 @@ import GCS_examine_bucket_2 from '@site/static/images/integrations/data-ingestio Полный список настроек, относящихся к этому описанию диска, можно найти [здесь](/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3). - ### Создание таблицы {#creating-a-table} Если вы настроили диск на использование бакета с правом записи, вы сможете создать таблицу, как в примере ниже. Ради краткости мы используем подмножество столбцов набора данных NYC taxi и отправляем данные напрямую в таблицу на базе GCS: @@ -189,7 +185,6 @@ SELECT passenger_count, avg(tip_amount) AS avg_tip, avg(total_amount) AS avg_amo Дополнительные сведения о настройке потоков см. в разделе [Оптимизация производительности](../s3/index.md#s3-optimizing-performance). - ## Использование Google Cloud Storage (GCS) {#gcs-multi-region} :::tip @@ -257,8 +252,6 @@ ClickHouse Keeper для работы требует двух узлов, поэ - Скопируйте файл на место (`/etc/clickhouse-keeper/keeper_config.xml` на каждый из серверов Keeper) - Отредактируйте `server_id` на каждой машине в соответствии с ее номером записи в `raft_configuration` - - ```xml title=/etc/clickhouse-keeper/keeper_config.xml @@ -352,7 +345,6 @@ ClickHouse Keeper для работы требует двух узлов, поэ * Отредактируйте файл, указав ваши имена хостов, и убедитесь, что они корректно разрешаются с узлов серверов ClickHouse - ```xml title=/etc/clickhouse-server/config.d/remote-servers.xml @@ -452,7 +444,6 @@ sudo systemctl status clickhouse-keeper Отправляйте команды в ClickHouse Keeper с помощью `netcat`. Например, команда `mntr` возвращает состояние кластера ClickHouse Keeper. 
Если выполнить эту команду на каждом узле Keeper, вы увидите, что один из них является лидером, а два других — ведомыми: - ```bash echo mntr | nc localhost 9181 ``` @@ -561,7 +552,6 @@ is_broken: 0 cache_path: ``` - 3 строки в наборе. Прошло: 0,002 сек. ```` diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md index 27254572a12..78753c990c0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md @@ -10,7 +10,6 @@ keywords: ['Google Dataflow ClickHouse', 'Dataflow ClickHouse integration', 'Apa import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Интеграция Google Dataflow с ClickHouse {#integrating-google-dataflow-with-clickhouse} @@ -22,8 +21,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - [Java runner](#1-java-runner) - [Готовые шаблоны](#2-predefined-templates) - - ## Java runner {#1-java-runner} [Java runner](./java-runner) позволяет реализовывать пользовательские конвейеры Dataflow с использованием интеграции Apache Beam SDK `ClickHouseIO`. Такой подход обеспечивает полную гибкость и контроль над логикой конвейера, позволяя адаптировать ETL‑процесс под конкретные требования. Однако этот вариант требует знаний программирования на Java и знакомства с фреймворком Apache Beam. @@ -33,8 +30,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - Оптимален для сложных или нетривиальных сценариев использования. - Требует написания кода и понимания API Beam. 
- - ## Предопределённые шаблоны {#2-predefined-templates} ClickHouse предлагает [предопределённые шаблоны](./templates), разработанные для конкретных сценариев использования, например импорта данных из BigQuery в ClickHouse. Эти шаблоны готовы к использованию и упрощают процесс интеграции, что делает их отличным выбором для пользователей, предпочитающих решение без написания кода. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md index 83e9ac6257b..14a7bfb9adf 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md @@ -10,15 +10,12 @@ keywords: ['Dataflow Java Runner', 'Google Dataflow ClickHouse', 'Apache Beam Ja import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Java-раннер Dataflow {#dataflow-java-runner} Java-раннер Dataflow позволяет выполнять пользовательские конвейеры Apache Beam в службе Dataflow в Google Cloud. Такой подход обеспечивает максимальную гибкость и хорошо подходит для сложных ETL‑процессов. - - ## Как это работает {#how-it-works} 1. 
**Реализация конвейера** diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md index b688aea96f5..05127912bdc 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md @@ -10,30 +10,23 @@ keywords: ['google dataflow', 'gcp', 'конвейер данных', 'шабл import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Шаблоны Google Dataflow {#google-dataflow-templates} Шаблоны Google Dataflow предоставляют удобный способ запускать готовые к использованию конвейеры обработки данных без необходимости писать собственный код. Эти шаблоны предназначены для упрощения распространённых задач обработки данных и построены на основе [Apache Beam](https://beam.apache.org/), используя коннекторы, такие как `ClickHouseIO`, для бесшовной интеграции с базами данных ClickHouse. Запуская эти шаблоны на Google Dataflow, вы можете обеспечить высокомасштабируемую распределённую обработку данных при минимальных затратах усилий. - - ## Зачем использовать шаблоны Dataflow? {#why-use-dataflow-templates} - **Простота использования**: Шаблоны устраняют необходимость писать код, предоставляя предварительно настроенные конвейеры обработки данных, адаптированные под конкретные сценарии. - **Масштабируемость**: Dataflow обеспечивает эффективное масштабирование вашего конвейера, обрабатывая большие объёмы данных за счёт распределённой обработки. - **Экономичность**: Вы платите только за фактически потреблённые ресурсы и можете оптимизировать затраты на выполнение конвейера. - - ## Как запускать шаблоны Dataflow {#how-to-run-dataflow-templates} На данный момент официальный шаблон ClickHouse доступен через консоль Google Cloud, CLI или REST API Dataflow. 
Подробные пошаговые инструкции см. в руководстве [Google Dataflow Run Pipeline From a Template Guide](https://cloud.google.com/dataflow/docs/templates/provided-templates). - - ## Список шаблонов ClickHouse {#list-of-clickhouse-templates} * [BigQuery в ClickHouse](./templates/bigquery-to-clickhouse) * [GCS в ClickHouse](https://github.com/ClickHouse/DataflowTemplates/issues/3) (скоро) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md index 3f3ef364b30..40338b3770a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md @@ -18,7 +18,6 @@ import dataflow_extended_template_form from '@site/static/images/integrations/da import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # Шаблон Dataflow BigQuery to ClickHouse {#dataflow-bigquery-to-clickhouse-template} Шаблон BigQuery to ClickHouse представляет собой пакетный конвейер обработки данных, который выполняет приём данных из таблицы BigQuery в таблицу ClickHouse. @@ -26,16 +25,12 @@ import TabItem from '@theme/TabItem'; - - ## Требования к конвейеру {#pipeline-requirements} * Исходная таблица BigQuery должна существовать. * Целевая таблица ClickHouse должна существовать. * Хост ClickHouse должен быть доступен с рабочих машин Dataflow. - - ## Параметры шаблона {#template-parameters}
@@ -60,14 +55,10 @@ import TabItem from '@theme/TabItem'; | `queryTempDataset` | Укажите существующий набор данных, в котором будет создана временная таблица для хранения результатов запроса. Например, `temp_dataset`. | | | | `KMSEncryptionKey` | При чтении из BigQuery с использованием источника-запроса используйте этот ключ Cloud KMS для шифрования всех создаваемых временных таблиц. Например, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. | | | - - :::note Значения по умолчанию для всех параметров `ClickHouseIO` описаны в разделе [коннектора Apache Beam `ClickHouseIO`](/integrations/apache-beam#clickhouseiowrite-parameters) ::: - - ## Схема исходных и целевых таблиц {#source-and-target-tables-schema} Чтобы эффективно загрузить набор данных BigQuery в ClickHouse, конвейер выполняет процесс вывода схемы столбцов, состоящий из следующих этапов: @@ -81,8 +72,6 @@ import TabItem from '@theme/TabItem'; При этом ваш набор данных BigQuery (таблица или результат запроса) должен иметь точно такие же имена столбцов, как и целевая таблица ClickHouse. ::: - - ## Отображение типов данных {#data-types-mapping} Типы BigQuery преобразуются на основе определения вашей таблицы ClickHouse. Поэтому в приведённой выше таблице перечислено @@ -98,8 +87,6 @@ import TabItem from '@theme/TabItem'; | [**Numeric - Integer Types**](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric_types) | [**Integer Types**](../../../sql-reference/data-types/int-uint) | В BigQuery все типы Int (`INT`, `SMALLINT`, `INTEGER`, `BIGINT`, `TINYINT`, `BYTEINT`) являются синонимами типа `INT64`. Рекомендуется задать в ClickHouse корректный размер целочисленного типа, так как шаблон будет преобразовывать столбец на основе заданного типа столбца (`Int8`, `Int16`, `Int32`, `Int64`). Шаблон также будет преобразовывать беззнаковые целочисленные типы, если они используются в таблице ClickHouse (`UInt8`, `UInt16`, `UInt32`, `UInt64`). 
| | [**Numeric - Float Types**](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric_types) | [**Float Types**](../../../sql-reference/data-types/float) | Поддерживаемые типы ClickHouse: `Float32` и `Float64`. | - - ## Запуск шаблона {#running-the-template} Шаблон BigQuery to ClickHouse доступен для выполнения через Google Cloud CLI. @@ -188,12 +175,8 @@ job: Перейдите на вкладку [Dataflow Jobs](https://console.cloud.google.com/dataflow/jobs) в Google Cloud Console, чтобы отслеживать статус задания. Там вы найдете подробную информацию о задании, включая прогресс и возможные ошибки: - - - - ## Устранение неполадок {#troubleshooting} ### Ошибка Memory limit (total) exceeded (code 241) {#code-241-dbexception-memory-limit-total-exceeded} @@ -203,8 +186,6 @@ job: * Увеличьте ресурсы экземпляра: обновите сервер ClickHouse до более мощного экземпляра с большим объёмом памяти, чтобы справляться с нагрузкой на обработку данных. * Уменьшите размер пакета: настройте размер пакета в конфигурации задания Dataflow так, чтобы отправлять меньшие порции данных в ClickHouse, снижая потребление памяти на пакет. Эти изменения помогают сбалансировать использование ресурсов во время ингестии данных. - - ## Исходный код шаблона {#template-source-code} Исходный код шаблона доступен в форке репозитория [DataflowTemplates](https://github.com/ClickHouse/DataflowTemplates) от ClickHouse. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md index 24d00e76ecb..0ddee54200f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md @@ -15,7 +15,6 @@ integration: import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import Image from '@theme/IdealImage'; - # Интеграция Confluent Cloud с ClickHouse {#integrating-confluent-cloud-with-clickhouse}
@@ -30,15 +29,11 @@ import Image from '@theme/IdealImage';
- - ## Предварительные требования {#prerequisites} Предполагается, что вы знакомы со следующим: * [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md) * Confluent Cloud - - ## Официальный коннектор Kafka от ClickHouse для Confluent Cloud {#the-official-kafka-connector-from-clickhouse-with-confluent-cloud} #### Создание топика {#create-a-topic} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md index 6195318f331..aed29e83993 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md @@ -12,7 +12,6 @@ import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import AddCustomConnectorPlugin from '@site/static/images/integrations/data-ingestion/kafka/confluent/AddCustomConnectorPlugin.png'; - # Интеграция платформы Confluent с ClickHouse {#integrating-confluent-platform-with-clickhouse}
@@ -27,15 +26,11 @@ import AddCustomConnectorPlugin from '@site/static/images/integrations/data-inge
- - ## Предварительные требования {#prerequisites} Мы исходим из того, что вы знакомы со следующим: * [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md) * Платформой Confluent и [пользовательскими коннекторами (Custom Connectors)](https://docs.confluent.io/cloud/current/connectors/bring-your-connector/overview.html). - - ## Официальный коннектор Kafka от ClickHouse для Confluent Platform {#the-official-kafka-connector-from-clickhouse-with-confluent-platform} ### Установка на Confluent Platform {#installing-on-confluent-platform} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md index ce81f40c017..e0bb991b8d7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md @@ -13,14 +13,10 @@ integration: - category: 'data_ingestion' --- - - # Интеграция Kafka с ClickHouse {#integrating-kafka-with-clickhouse} [Apache Kafka](https://kafka.apache.org/) — это распределённая платформа потоковой передачи событий с открытым исходным кодом, используемая тысячами компаний для высокопроизводительных конвейеров данных, потоковой аналитики, интеграции данных и критически важных для бизнеса приложений. ClickHouse предоставляет несколько способов **чтения из** и **записи в** Kafka и другие брокеры, совместимые с Kafka API (например, Redpanda, Amazon MSK). - - ## Доступные варианты {#available-options} Выбор подходящего варианта для вашего сценария использования зависит от нескольких факторов, включая тип развертывания ClickHouse, направление потока данных и эксплуатационные требования. @@ -91,8 +87,6 @@ Kafka Connect — это open source-фреймворк, который рабо #### Начало работы {#kafka-table-engine-getting-started} - - Чтобы начать работу с движком таблиц Kafka, см. 
[справочную документацию](./kafka-table-engine.md). ### Выбор варианта {#choosing-an-option} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md index 1395a258649..76b824b9d7c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md @@ -10,7 +10,6 @@ keywords: ['Приёмник ClickHouse Kafka Connect', 'коннектор Kafk import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # ClickHouse Kafka Connect Sink {#clickhouse-kafka-connect-sink} :::note @@ -92,7 +91,6 @@ schemas.enable=false Полная таблица параметров конфигурации: - | Property Name | Description | Default Value | |-------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| | `hostname` (Required) | Имя хоста или IP-адрес сервера | N/A | @@ -124,8 +122,6 @@ schemas.enable=false ### Целевые таблицы {#target-tables} - - ClickHouse Connect Sink читает сообщения из топиков Kafka и записывает их в соответствующие таблицы. ClickHouse Connect Sink записывает данные в уже существующие таблицы. Пожалуйста, убедитесь, что целевая таблица с подходящей схемой создана в ClickHouse до начала вставки данных в неё. Для каждого топика требуется отдельная целевая таблица в ClickHouse. Имя целевой таблицы должно совпадать с именем исходного топика. 
@@ -205,7 +201,6 @@ ClickHouse Connect Sink читает сообщения из топиков Kafk Коннектор может потреблять данные из нескольких топиков. - ```json { "name": "clickhouse-connect", @@ -340,7 +335,6 @@ com.clickhouse:type=ClickHouseKafkaConnector,name=SinkTask{id} * `byte-rate`: Средняя скорость отправки данных в байтах в секунду * `compression-rate`: Достигнутый коэффициент сжатия - **Метрики на уровне партиций:** - `records-sent-total`: Общее количество записей, отправленных в партицию - `bytes-sent-total`: Общее количество байт, отправленных в партицию @@ -430,8 +424,6 @@ com.clickhouse:type=ClickHouseKafkaConnector,name=SinkTask{id} - Стандартные настройки коннектора уже удовлетворяют требованиям по пропускной способности - Ваш кластер ClickHouse без труда справляется с входящей нагрузкой - - #### Понимание потока данных {#understanding-the-data-flow} Перед началом настройки важно понять, как данные проходят через коннектор: @@ -467,24 +459,17 @@ Kafka Connect (фреймворк) выбирает сообщения из то Для оптимальной работы с ClickHouse ориентируйтесь на более крупные пакеты: - - ```properties # Увеличить количество записей за один опрос {#increase-the-number-of-records-per-poll} consumer.max.poll.records=5000 ``` - # Увеличить размер выборки раздела до 5 МБ {#increase-the-partition-fetch-size-5-mb} consumer.max.partition.fetch.bytes=5242880 - - # Необязательно: увеличьте минимальный размер получаемых данных, чтобы дожидаться большего объёма (1 МБ) {#optional-increase-minimum-fetch-size-to-wait-for-more-data-1-mb} consumer.fetch.min.bytes=1048576 - - # Необязательно: уменьшите время ожидания, если задержка критична {#optional-reduce-wait-time-if-latency-is-critical} consumer.fetch.max.wait.ms=300 @@ -581,7 +566,6 @@ consumer.fetch.max.wait.ms=300 При использовании `exactlyOnce=true` с асинхронными вставками: - ```json { "config": { @@ -685,7 +669,6 @@ SETTINGS **Распространённые проблемы с производительностью**: - | Симптом | Возможная причина | Решение | | 
--------------------------------- | ------------------------------------ | ------------------------------------------------------------------ | | Большое отставание consumer'а | Слишком маленькие батчи | Увеличьте `max.poll.records`, включите async inserts | @@ -783,7 +766,6 @@ SETTINGS * `UnknownHostException` — выбрасывается, когда не удаётся разрешить имя хоста. * `IOException` — выбрасывается, когда возникает проблема с сетью. - #### "Все мои данные пустые/нули" {#all-my-data-is-blankzeroes} Скорее всего, поля в ваших данных не соответствуют полям в таблице — это особенно часто встречается с CDC (фиксацией изменений данных) и форматом Debezium. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md index 6a808838d22..4da7513d3a7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md @@ -10,7 +10,6 @@ keywords: ['kafka', 'kafka connect', 'jdbc', 'интеграция', 'конве import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # JDBC connector {#jdbc-connector} :::note @@ -54,8 +53,6 @@ JDBC Connector распространяется под [Confluent Community Lice Следующие параметры важны для использования JDBC-коннектора с ClickHouse. Полный список параметров можно найти [здесь](https://docs.confluent.io/kafka-connect-jdbc/current/sink-connector/index.html): - - * `_connection.url_` - должен иметь формат `jdbc:clickhouse://<clickhouse host>:<clickhouse http port>/<target database>` * `connection.user` - пользователь с правами записи в целевую базу данных * `table.name.format` - таблица ClickHouse, в которую выполняется вставка данных. Таблица должна уже существовать. 
@@ -84,8 +81,6 @@ JDBC Connector распространяется под [Confluent Community Lice Убедитесь, что таблица создана, удалив её, если она уже существует из предыдущих примеров. Ниже приведён пример, совместимый с уменьшенным набором данных GitHub. Обратите внимание на отсутствие каких-либо типов Array или Map, которые в настоящее время не поддерживаются: - - ```sql CREATE TABLE github ( @@ -150,7 +145,6 @@ SELECT count() FROM default.github; ### Рекомендуемые дополнительные материалы {#recommended-further-reading} - * [Параметры конфигурации приёмника Kafka (Kafka Sink Configuration Parameters)](https://docs.confluent.io/kafka-connect-jdbc/current/sink-connector/sink_config_options.html#sink-config-options) * [Подробный разбор Kafka Connect – JDBC Source Connector (Kafka Connect Deep Dive – JDBC Source Connector)](https://www.confluent.io/blog/kafka-connect-deep-dive-jdbc-source-connector) * [Подробный разбор Kafka Connect JDBC Sink: работа с первичными ключами (Kafka Connect JDBC Sink deep-dive: Working with Primary Keys)](https://rmoff.net/2021/03/12/kafka-connect-jdbc-sink-deep-dive-working-with-primary-keys/) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md index 026bb8616cb..11452a58d38 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md @@ -6,12 +6,8 @@ slug: /integrations/data-ingestion/kafka/kafka-table-engine-named-collections doc_type: 'guide' --- - - # Интеграция ClickHouse с Kafka с использованием именованных коллекций {#integrating-clickhouse-with-kafka-using-named-collections} - - ## Введение {#introduction} В этом руководстве мы рассмотрим, как 
подключить ClickHouse к Kafka с использованием именованных коллекций. Использование файла конфигурации для именованных коллекций дает несколько преимуществ: @@ -21,8 +17,6 @@ doc_type: 'guide' Это руководство было проверено на Apache Kafka 3.4.1 и ClickHouse 24.5.1. - - ## Предварительные условия {#assumptions} В этом документе предполагается, что у вас уже есть: @@ -30,8 +24,6 @@ doc_type: 'guide' 2. Развернутый и запущенный кластер ClickHouse. 3. Базовые знания SQL и опыт работы с конфигурациями ClickHouse и Kafka. - - ## Предварительные требования {#prerequisites} Убедитесь, что пользователь, создающий именованную коллекцию, обладает необходимыми правами доступа: @@ -45,7 +37,6 @@ doc_type: 'guide' См. руководство [User Management Guide](./../../../guides/sre/user-management/index.md) для получения подробной информации о включении управления доступом. - ## Конфигурация {#configuration} Добавьте следующий раздел в файл конфигурации ClickHouse `config.xml`: @@ -106,7 +97,6 @@ doc_type: 'guide' 3. Раздел внутри `` содержит расширенные параметры конфигурации Kafka. Дополнительные параметры смотрите в [документации по конфигурации librdkafka](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md). 4. В этом примере используется протокол безопасности `SASL_SSL` и механизм `PLAIN`. При необходимости скорректируйте эти параметры в соответствии с конфигурацией вашего кластера Kafka. - ## Создание таблиц и баз данных {#creating-tables-and-databases} Создайте необходимые базы данных и таблицы в вашем кластере ClickHouse. Если вы запускаете ClickHouse на отдельном узле, опустите часть SQL-команды, относящуюся к кластеру, и используйте любой другой движок вместо `ReplicatedMergeTree`. 
@@ -193,7 +183,6 @@ SELECT FROM second_kafka_table; ``` - ## Проверка настройки {#verifying-the-setup} Теперь в ваших кластерах Kafka должны появиться соответствующие группы потребителей: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md index 890620dfe97..15a12942ab5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md @@ -28,16 +28,14 @@ import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/curr -> Примечание: Политика, показанная в видео, является избыточно разрешительной и предназначена только для быстрого начала работы. См. ниже рекомендации по настройке IAM по принципу наименьших привилегий. - - +> Примечание: политика доступа, показанная в видео, является слишком разрешительной и предназначена только для быстрого начала работы. См. ниже рекомендации по настройке IAM по принципу наименьших привилегий. ## Предварительные требования {#prerequisites} -Мы предполагаем: -* вы знакомы с [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md), Amazon MSK и MSK Connectors. Рекомендуем руководство Amazon MSK [Getting Started guide](https://docs.aws.amazon.com/msk/latest/developerguide/getting-started.html) и [MSK Connect guide](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect.html). -* брокер MSK доступен из публичной сети. См. раздел [Public Access](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html) в Developer Guide. +Мы предполагаем: +* вы знакомы с [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md), +* вы знакомы с Amazon MSK и коннекторами MSK. 
Рекомендуем руководства Amazon MSK: [Getting Started guide](https://docs.aws.amazon.com/msk/latest/developerguide/getting-started.html) и [MSK Connect guide](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect.html). ## Официальный коннектор Kafka от ClickHouse для Amazon MSK {#the-official-kafka-connector-from-clickhouse-with-amazon-msk} @@ -184,4 +182,4 @@ consumer.max.partition.fetch.bytes=1048576 1. **Проверьте подключение (краткий чек‑лист):** 1. В среде коннектора убедитесь, что разрешается DNS-имя bootstrap для MSK и выполняется подключение по TLS к порту брокера. 1. Установите TLS-подключение к ClickHouse на порт 9440 (или 8443 для HTTPS). - 1. Если используются сервисы AWS (Glue/Secrets Manager), разрешите исходящий трафик к их конечным точкам. + 1. Если используются сервисы AWS (Glue/Secrets Manager), разрешите исходящий трафик к их конечным точкам. \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md index cd84f937b1a..790901b31c7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md @@ -25,7 +25,6 @@ import HardwareSize from '@site/static/images/integrations/data-ingestion/s3/har Прежде чем настраивать число потоков и размеры блоков для улучшения производительности вставки, мы рекомендуем сначала разобраться в механизме вставки данных в S3. Если вы уже знакомы с этим механизмом или хотите получить только краткие рекомендации, переходите сразу к нашему примеру [ниже](/integrations/s3/performance#example-dataset). 
- ## Механизм вставки (одиночный узел) {#insert-mechanics-single-node} На производительность и использование ресурсов механизма вставки данных ClickHouse (для одиночного узла), помимо конфигурации оборудования, влияют два основных фактора: **размер блока вставки** и **параллелизм вставки**. @@ -81,7 +80,6 @@ ClickHouse будет постоянно [сливать части](https://cli ② Объедините загруженные блоки в памяти в один более крупный блок. ``` - ③ Запишите объединённый блок в новый part на диск. Перейдите к шагу ① @@ -118,13 +116,10 @@ ClickHouse будет постоянно [сливать части](https://cli Для функции и таблицы s3 параллельная загрузка отдельного файла определяется значениями [max_download_threads](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_download_threads) и [max_download_buffer_size](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_download_buffer_size). Файлы будут загружаться параллельно только в том случае, если их размер больше, чем `2 * max_download_buffer_size`. По умолчанию значение `max_download_buffer_size` установлено в 10MiB. В некоторых случаях вы можете безопасно увеличить размер этого буфера до 50 MB (`max_download_buffer_size=52428800`), чтобы гарантировать, что каждый файл загружается одним потоком. Это может сократить время, которое каждый поток тратит на обращения к S3, и таким образом уменьшить время ожидания S3. Кроме того, для файлов, слишком маленьких для параллельного чтения, для увеличения пропускной способности ClickHouse автоматически предзагружает данные, предварительно читая такие файлы асинхронно. 
- ## Измерение производительности {#measuring-performance} Оптимизация производительности запросов с использованием табличных функций S3 необходима как при выполнении запросов к данным «на месте», то есть при ad‑hoc‑запросах, когда используются только вычислительные ресурсы ClickHouse, а данные остаются в S3 в исходном формате, так и при вставке данных из S3 в таблицу ClickHouse на движке MergeTree. Если не указано иное, следующие рекомендации применимы к обоим сценариям. - - ## Влияние размеров аппаратной конфигурации {#impact-of-hardware-size} @@ -137,14 +132,10 @@ ClickHouse будет постоянно [сливать части](https://cli и, следовательно, общую пропускную способность приёма данных. - - ## Локальность региона {#region-locality} Убедитесь, что ваши бакеты находятся в том же регионе, что и экземпляры ClickHouse. Эта несложная оптимизация может значительно повысить производительность (пропускную способность), особенно если вы разворачиваете экземпляры ClickHouse в инфраструктуре AWS. - - ## Форматы {#formats} ClickHouse может читать файлы, хранящиеся в бакетах S3, в [поддерживаемых форматах](/interfaces/formats#formats-overview) с помощью функции `s3` и движка `S3`. При чтении «сырых» файлов некоторые из этих форматов имеют определённые преимущества: @@ -155,8 +146,6 @@ ClickHouse может читать файлы, хранящиеся в баке * Каждый формат сжатия имеет свои преимущества и недостатки, обычно балансируя уровень сжатия и скорость, смещая приоритет в сторону производительности сжатия или распаковки. При сжатии «сырых» файлов, таких как CSV или TSV, lz4 обеспечивает самую быструю распаковку, жертвуя степенью сжатия. Gzip, как правило, сжимает лучше, но ценой немного более медленного чтения. Xz идёт ещё дальше, обычно обеспечивая наилучшее сжатие при самой медленной скорости сжатия и распаковки. При экспорте Gz и lz4 обеспечивают сопоставимую скорость сжатия. Соотнесите это со скоростью вашего сетевого подключения. 
Любой выигрыш от более быстрой распаковки или сжатия легко будет нивелирован медленным подключением к вашим бакетам S3. * Для форматов, таких как Native или Parquet, сжатие обычно не оправдывает накладные расходы. Любая экономия места, скорее всего, будет минимальной, поскольку эти форматы по своей сути компактны. Время, затраченное на сжатие и распаковку, редко компенсирует время сетевой передачи, особенно учитывая, что S3 глобально доступен с высокой пропускной способностью сети. - - ## Пример набора данных {#example-dataset} Чтобы продемонстрировать дальнейший потенциал оптимизации, в качестве примера мы будем использовать [публикации из набора данных Stack Overflow](/data-modeling/schema-design#stack-overflow-dataset), оптимизируя как производительность запросов, так и скорость вставки этих данных. @@ -202,7 +191,6 @@ FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow При чтении результатов запросов начальный запрос может казаться более медленным, чем повторный запуск того же запроса. Это можно объяснить как собственным кэшированием S3, так и [ClickHouse Schema Inference Cache](/operations/system-tables/schema_inference_cache). Этот кэш сохраняет выведенную схему для файлов, что позволяет пропустить этап вывода схемы при последующих обращениях и, таким образом, сократить время выполнения запроса. ::: - ## Использование потоков для чтения {#using-threads-for-reads} Производительность чтения из S3 масштабируется линейно с количеством ядер при условии, что вас не ограничивает пропускная способность сети или локальный ввод-вывод. Увеличение числа потоков также приводит к дополнительным затратам по памяти, о которых пользователям следует знать. Следующие параметры можно изменить для потенциального улучшения пропускной способности чтения: @@ -249,7 +237,6 @@ SETTINGS max_threads = 64 Пиковое использование памяти: 639.99 МиБ. 
``` - ## Настройка потоков и размера блоков для вставок {#tuning-threads-and-block-size-for-inserts} Чтобы достичь максимальной производительности ингестии, необходимо выбрать (1) размер блока вставки и (2) соответствующий уровень параллелизма вставки на основе (3) количества доступных ядер CPU и объёма доступной RAM. Вкратце: @@ -287,7 +274,6 @@ FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow Как видно, настройка этих параметров повысила скорость вставки более чем на `33%`. Предлагаем читателю попытаться ещё больше повысить производительность одиночного узла. - ## Масштабирование по ресурсам и узлам {#scaling-with-resources-and-nodes} Масштабирование по ресурсам и узлам применяется как к запросам на чтение, так и к запросам на вставку. @@ -365,7 +351,6 @@ FROM s3Cluster('default', 'https://datasets-documentation.s3.eu-west-3.amazonaws 0 rows in set. Elapsed: 171.202 sec. Processed 59.82 million rows, 24.03 GB (349.41 тыс. строк/с., 140.37 МБ/с.) ``` - Читатели заметят, что при чтении файлов улучшилась производительность запросов `SELECT`, но не операций `INSERT`. По умолчанию, хотя операции чтения распределяются с использованием `s3Cluster`, вставки выполняются на инициирующем узле. Это означает, что сами чтения выполняются на каждом узле, но полученные строки будут направлены на инициирующий узел для дальнейшего распределения. В сценариях с высокой пропускной способностью это может стать узким местом. Чтобы решить эту проблему, установите параметр `parallel_distributed_insert_select` для функции `s3cluster`. Установка значения `parallel_distributed_insert_select=2` гарантирует, что `SELECT` и `INSERT` будут выполняться на каждом шарде из/в базовую таблицу движка Distributed на каждом узле. @@ -382,7 +367,6 @@ Peak memory usage: 11.75 GiB. Как и ожидалось, это снижает производительность вставки в три раза. 
- ## Дополнительная настройка {#further-tuning} ### Отключение дедупликации {#disable-de-duplication} @@ -416,7 +400,6 @@ SETTINGS parallel_distributed_insert_select = 2, min_insert_block_size_rows = 0, 0 строк в наборе. Прошло: 49.688 сек. Обработано 59.82 млн строк, 24.03 ГБ (1.20 млн строк/сек., 483.66 МБ/сек.) ``` - ## Прочие заметки {#misc-notes} * При ограниченном объёме памяти рассмотрите возможность уменьшить значение `max_insert_delayed_streams_for_parallel_write` при вставке данных в S3. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md index 0b170aec089..552b3cdf2af 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md @@ -25,15 +25,12 @@ import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение Astrato к ClickHouse {#connecting-astrato-to-clickhouse} Astrato использует технологию Pushdown SQL для прямого выполнения запросов к ClickHouse Cloud или локальным развертываниям ClickHouse. Это означает, что вы можете получать доступ ко всем необходимым данным, опираясь на ведущую в отрасли производительность ClickHouse. 
- - ## Необходимые данные для подключения {#connection-data-required} При настройке подключения к данным вам потребуются: @@ -44,8 +41,6 @@ Astrato использует технологию Pushdown SQL для прямо - - ## Создание подключения данных к ClickHouse {#creating-the-data-connection-to-clickhouse} - В боковой панели выберите **Data** и перейдите на вкладку **Data Connection** @@ -75,8 +70,6 @@ Astrato использует технологию Pushdown SQL для прямо Если создаётся дубликат, к имени источника данных добавляется метка времени (timestamp). ::: - - ## Создание семантической модели / представления данных {#creating-a-semantic-model--data-view} В редакторе представления данных (Data View) вы увидите все свои таблицы и схемы (Schemas) в ClickHouse. Выберите нужные, чтобы начать. @@ -93,8 +86,6 @@ Astrato использует технологию Pushdown SQL для прямо - - ## Создание дашборда {#creating-a-dashboard} Всего за несколько шагов вы можете построить свой первый график в Astrato. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md index 0cb3299b9a6..0c9ad03aa42 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md @@ -22,15 +22,12 @@ import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/curr import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import Image from '@theme/IdealImage'; - # Подключение Chartbrew к ClickHouse {#connecting-chartbrew-to-clickhouse} [Chartbrew](https://chartbrew.com) — это платформа визуализации данных, которая позволяет пользователям создавать дашборды и мониторить данные в режиме реального 
времени. Она поддерживает различные источники данных, включая ClickHouse, и предоставляет интерфейс без необходимости писать код для создания графиков и отчётов. - - ## Цель {#goal} В этом руководстве вы подключите Chartbrew к ClickHouse, выполните SQL-запрос и создадите визуализацию. В конце ваша панель мониторинга может выглядеть примерно так: @@ -41,14 +38,10 @@ import Image from '@theme/IdealImage'; Если у вас нет набора данных для работы, вы можете добавить один из примеров. В этом руководстве используется набор данных [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md). ::: - - ## 1. Соберите параметры подключения {#1-gather-your-connection-details} - - ## 2. Подключение Chartbrew к ClickHouse {#2-connect-chartbrew-to-clickhouse} 1. Войдите в [Chartbrew](https://chartbrew.com/login) и перейдите на вкладку **Connections**. @@ -72,8 +65,6 @@ import Image from '@theme/IdealImage'; - - ## 3. Создайте набор данных и выполните SQL-запрос {#3-create-a-dataset-and-run-a-sql-query} 1. Нажмите кнопку **Create dataset** или перейдите на вкладку **Datasets**, чтобы создать набор данных. @@ -100,7 +91,6 @@ ORDER BY year; После того как данные будут получены, нажмите **Configure dataset**, чтобы настроить параметры визуализации. - ## 4. Создание визуализации {#4-create-a-visualization} 1. Определите метрику (числовое значение) и размерность (категориальное значение) для визуализации. @@ -114,8 +104,6 @@ ORDER BY year; - - ## 5. Автоматизация обновления данных {#5-automate-data-updates} Чтобы панель мониторинга всегда отображала актуальные данные, вы можете запланировать автоматическое обновление: @@ -126,8 +114,6 @@ ORDER BY year; - - ## Дополнительные материалы {#learn-more} Более подробную информацию можно найти в статье в блоге о [Chartbrew и ClickHouse](https://chartbrew.com/blog/visualizing-clickhouse-data-with-chartbrew-a-step-by-step-guide/). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md index d350188126f..8a220f15bce 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md @@ -18,7 +18,6 @@ import databrain_06 from '@site/static/images/integrations/data-visualization/da import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение Databrain к ClickHouse {#connecting-databrain-to-clickhouse} @@ -31,16 +30,12 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; В этом руководстве пошагово описано, как подключить Databrain к вашему инстансу ClickHouse. - - ## Предварительные требования {#pre-requisites} - База данных ClickHouse, развернутая как в вашей собственной инфраструктуре, так и в [ClickHouse Cloud](https://clickhouse.com/). - [Учетная запись Databrain](https://app.usedatabrain.com/users/sign-up). - Рабочее пространство Databrain для подключения вашего источника данных. - - ## Шаги по подключению Databrain к ClickHouse {#steps-to-connect-databrain-to-clickhouse} ### 1. Соберите данные для подключения {#1-gather-your-connection-details} @@ -102,7 +97,6 @@ GRANT SELECT ON your_database.* TO your_databrain_user; Замените `your_databrain_user` и `your_database` на фактические имя пользователя и имя базы данных. 
- ## Использование Databrain с ClickHouse {#using-databrain-with-clickhouse} ### Исследуйте данные {#explore-your-data} @@ -152,8 +146,6 @@ Databrain предлагает несколько расширенных воз - **Встраиваемая аналитика**: встраивайте дашборды и метрики напрямую в ваши приложения - **Семантический уровень**: создавайте повторно используемые модели данных и бизнес-логику - - ## Устранение неполадок {#troubleshooting} ### Ошибка подключения {#connection-fails} @@ -175,8 +167,6 @@ Databrain предлагает несколько расширенных воз 3. **Используйте подходящие типы данных**: Убедитесь, что в схеме ClickHouse используются оптимальные типы данных 4. **Оптимизируйте индексы**: Используйте первичные ключи и пропускающие индексы ClickHouse - - ## Подробнее {#learn-more} Дополнительные сведения о возможностях Databrain и создании эффективной аналитики: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md index 82c8a314a39..7d455a53f35 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md @@ -19,7 +19,6 @@ import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # Подключение ClickHouse к Deepnote {#connect-clickhouse-to-deepnote} @@ -28,15 +27,11 @@ import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/curr В этом руководстве предполагается, что у вас уже есть аккаунт Deepnote и запущенный экземпляр ClickHouse. 
- - ## Интерактивный пример {#interactive-example} Если вы хотите изучить интерактивный пример выполнения запросов к ClickHouse из ноутбуков с данными в Deepnote, нажмите кнопку ниже, чтобы создать шаблон проекта, подключённый к [песочнице ClickHouse](../../../getting-started/playground.md). [](https://deepnote.com/launch?template=ClickHouse%20and%20Deepnote) - - ## Подключение к ClickHouse {#connect-to-clickhouse} 1. В Deepnote выберите раздел «Integrations» и нажмите на плитку ClickHouse. @@ -52,8 +47,6 @@ import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/curr 3. Готово! ClickHouse интегрирован с Deepnote. - - ## Использование интеграции с ClickHouse. {#using-clickhouse-integration} 1. Для начала подключитесь к интеграции с ClickHouse в правой части блокнота. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md index e22a6b97ea8..d95bef570b0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md @@ -12,7 +12,6 @@ import dot_01 from '@site/static/images/integrations/data-visualization/dot_01.p import dot_02 from '@site/static/images/integrations/data-visualization/dot_02.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Dot {#dot} @@ -20,16 +19,12 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; [Dot](https://www.getdot.ai/) — ваш **аналитик данных на базе ИИ**. 
Он подключается напрямую к ClickHouse, чтобы вы могли задавать вопросы о данных на естественном языке, исследовать их, проверять гипотезы и находить ответы на вопросы «почему» — прямо в Slack, Microsoft Teams, ChatGPT или во встроенном веб‑интерфейсе. - - ## Предварительные требования {#pre-requisites} - База данных ClickHouse, развернутая самостоятельно или в [ClickHouse Cloud](https://clickhouse.com/cloud) - Учетная запись в [Dot](https://www.getdot.ai/) - Учетная запись и проект в [Hashboard](https://www.hashboard.com/). - - ## Подключение Dot к ClickHouse {#connecting-dot-to-clickhouse} @@ -48,8 +43,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; Dot использует **query-pushdown**: ClickHouse обрабатывает тяжёлые вычисления в масштабе, а Dot обеспечивает корректные и достоверные ответы. - - ## Основные возможности {#highlights} Dot делает данные доступными в формате диалога: @@ -60,8 +53,6 @@ Dot делает данные доступными в формате диало - **Достоверные результаты**: Dot проверяет запросы на соответствие вашим схемам и определениям, чтобы минимизировать ошибки. - **Масштабируемый**: опирается на механизм query-pushdown, сочетая интеллект Dot со скоростью ClickHouse. 
- - ## Безопасность и управление {#security} Dot соответствует требованиям корпоративного уровня: @@ -72,8 +63,6 @@ Dot соответствует требованиям корпоративног - **Управление и валидация**: Пространство для обучения и проверки помогает снижать риск галлюцинаций - **Соответствие требованиям**: Сертифицирован по стандарту SOC 2 Type I - - ## Дополнительные ресурсы {#additional-resources} - Сайт Dot: [https://www.getdot.ai/](https://www.getdot.ai/) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md index 975e6f63c7f..8d3331e7862 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md @@ -21,20 +21,15 @@ import draxlr_06 from '@site/static/images/integrations/data-visualization/draxl import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение Draxlr к ClickHouse {#connecting-draxlr-to-clickhouse} Draxlr предоставляет интуитивно понятный интерфейс для подключения к вашей базе данных ClickHouse, позволяя вашей команде исследовать данные, визуализировать их и публиковать полученные инсайты за считанные минуты. В этом руководстве вы по шагам настроите успешное подключение. - - ## 1. Получите учетные данные для доступа к ClickHouse {#1-get-your-clickhouse-credentials} - - ## 2. Подключение Draxlr к ClickHouse {#2--connect-draxlr-to-clickhouse} 1. Нажмите кнопку **Connect a Database** на панели навигации. @@ -51,8 +46,6 @@ Draxlr предоставляет интуитивно понятный инте 6. Нажмите кнопку **Next** и дождитесь установления подключения. 
При успешном подключении откроется страница таблиц. - - ## 4. Исследуйте данные {#4-explore-your-data} 1. Нажмите на одну из таблиц в списке. @@ -67,8 +60,6 @@ Draxlr предоставляет интуитивно понятный инте - - ## 4. Использование SQL-запросов {#4-using-sql-queries} 1. Нажмите кнопку Explore на панели навигации. @@ -79,8 +70,6 @@ Draxlr предоставляет интуитивно понятный инте 3. Нажмите кнопку **Execute Query**, чтобы увидеть результаты. - - ## 4. Сохранение запроса {#4-saving-you-query} 1. После выполнения запроса нажмите кнопку **Save Query**. @@ -93,8 +82,6 @@ Draxlr предоставляет интуитивно понятный инте 4. Нажмите кнопку **Save**, чтобы сохранить запрос. - - ## 5. Создание дашбордов {#5-building-dashboards} 1. Нажмите кнопку **Dashboards** на панели навигации. @@ -107,7 +94,5 @@ Draxlr предоставляет интуитивно понятный инте 4. Выберите запрос из списка сохранённых запросов, укажите тип визуализации, затем нажмите кнопку **Add Dashboard Item**. - - ## Подробнее {#learn-more} Дополнительную информацию о Draxlr можно найти в [документации Draxlr](https://draxlr.notion.site/draxlr/Draxlr-Docs-d228b23383f64d00a70836ff9643a928). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md index 14446680f34..bec861bf5bb 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md @@ -10,7 +10,6 @@ doc_type: 'guide' import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение Embeddable к ClickHouse {#connecting-embeddable-to-clickhouse} @@ -21,13 +20,9 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; Встроенная безопасность на уровне строк гарантирует, что каждый пользователь видит только те данные, к которым у него есть доступ. А два уровня полностью настраиваемого кэширования позволяют обеспечивать быструю, масштабируемую аналитику в режиме реального времени. - - ## 1. Соберите сведения о подключении {#1-gather-your-connection-details} - - ## 2. Создайте тип подключения к ClickHouse {#2-create-a-clickhouse-connection-type} Вы добавляете подключение к базе данных с помощью API Embeddable. Это подключение используется для подключения к вашему сервису ClickHouse. 
Вы можете добавить подключение с помощью следующего вызова API: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md index b3b2450f384..370624a3cb3 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md @@ -31,15 +31,12 @@ import explo_15 from '@site/static/images/integrations/data-visualization/explo_ import explo_16 from '@site/static/images/integrations/data-visualization/explo_16.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение Explo к ClickHouse {#connecting-explo-to-clickhouse} Клиентская аналитика для любой платформы. Создана для красивой визуализации. Спроектирована с упором на простоту. - - ## Цель {#goal} В этом руководстве вы подключите данные из ClickHouse к Explo и визуализируете результаты. Диаграмма будет выглядеть так: @@ -51,13 +48,9 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; Если у вас ещё нет набора данных для работы, вы можете добавить один из примерных наборов. В этом руководстве используется набор данных [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md), поэтому вы можете выбрать его. В той же категории документации есть и несколько других примеров. ::: - - ## 1. Соберите сведения о подключении {#1-gather-your-connection-details} - - ## 2. Подключение Explo к ClickHouse {#2--connect-explo-to-clickhouse} 1. Зарегистрируйтесь в Explo. @@ -91,8 +84,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 54.211.43.19, 52.55.98.121, 3.214.169.94 и 54.156.141.148 ` - - ## 3. 
Создайте дашборд {#3-create-a-dashboard} 1. Перейдите на вкладку **Dashboard** в левой панели навигации. @@ -107,8 +98,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 4. Выполните SQL-запрос {#4-run-a-sql-query} 1. Найдите имя таблицы в правой боковой панели под заголовком схемы. Затем введите следующую команду в редактор датасета: @@ -123,8 +112,6 @@ LIMIT 100 - - ## 5. Построение графика {#5-build-a-chart} 1. С левой стороны экрана перетащите значок столбчатой диаграммы на область построения. @@ -147,8 +134,6 @@ LIMIT 100 - - ## Узнать больше {#learn-more} Найдите более подробную информацию об Explo и о том, как создавать дашборды, в документации Explo. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md index 968a7a7778c..bc856424c51 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md @@ -15,7 +15,6 @@ import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # Подключение ClickHouse к Fabi.ai {#connecting-clickhouse-to-fabiai} @@ -24,14 +23,10 @@ import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/curr - - ## Соберите данные для подключения {#gather-your-connection-details} - - ## Создайте аккаунт Fabi.ai и подключите ClickHouse {#connect-to-clickhouse} Войдите в свой аккаунт Fabi.ai или создайте новый: https://app.fabi.ai/ @@ -46,16 +41,12 @@ import ConnectionDetails from 
'@site/i18n/ru/docusaurus-plugin-content-docs/curr 3. Поздравляем! Теперь вы подключили ClickHouse к Fabi.ai. - - ## Выполнение запросов к ClickHouse. {#querying-clickhouse} После того как вы подключили Fabi.ai к ClickHouse, откройте любой [Smartbook](https://docs.fabi.ai/analysis_and_reporting/smartbooks) и создайте SQL-ячейку. Если к вашему экземпляру Fabi.ai подключен только один источник данных, SQL-ячейка автоматически выберет ClickHouse, иначе вы можете указать источник для запроса в выпадающем списке источников. - - ## Дополнительные ресурсы {#additional-resources} Документация [Fabi.ai](https://www.fabi.ai): https://docs.fabi.ai/introduction diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md index 0ed30316dd5..1a26f7fd670 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md @@ -13,7 +13,6 @@ import hashboard_01 from '@site/static/images/integrations/data-visualization/ha import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение ClickHouse к Hashboard {#connecting-clickhouse-to-hashboard} @@ -26,15 +25,11 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; В этом руководстве описаны шаги по подключению Hashboard к вашему экземпляру ClickHouse. Эта информация также доступна в документации Hashboard по интеграции с [ClickHouse](https://docs.hashboard.com/docs/database-connections/clickhouse). 
- - ## Предварительные требования {#pre-requisites} - База данных ClickHouse, развернутая в вашей собственной инфраструктуре или в [ClickHouse Cloud](https://clickhouse.com/). - [Учетная запись Hashboard](https://hashboard.com/getAccess) и проект. - - ## Шаги по подключению Hashboard к ClickHouse {#steps-to-connect-hashboard-to-clickhouse} ### 1. Соберите сведения о подключении {#1-gather-your-connection-details} @@ -53,8 +48,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; Теперь ваша база данных ClickHouse будет подключена к Hashboard, и вы можете приступить к созданию [Data Models](https://docs.hashboard.com/docs/data-modeling/add-data-model), [Explorations](https://docs.hashboard.com/docs/visualizing-data/explorations), [Metrics](https://docs.hashboard.com/docs/metrics) и [Dashboards](https://docs.hashboard.com/docs/dashboards). Подробную информацию об этих возможностях см. в соответствующей документации Hashboard. - - ## Узнать больше {#learn-more} Для получения информации о более продвинутых возможностях и устранении неполадок обратитесь к [документации Hashboard](https://docs.hashboard.com/). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md index a4a18a4daa8..72cad48e6e1 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md @@ -18,13 +18,10 @@ import luzmo_02 from '@site/static/images/integrations/data-visualization/luzmo_ import luzmo_03 from '@site/static/images/integrations/data-visualization/luzmo_03.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Интеграция Luzmo и ClickHouse {#integrating-luzmo-with-clickhouse} - - ## 1. Настройка подключения к ClickHouse {#1-setup-a-clickhouse-connection} Чтобы создать подключение к ClickHouse, перейдите на страницу **Connections**, выберите **New Connection**, затем выберите ClickHouse в диалоговом окне New Connection. @@ -42,8 +39,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; Обратитесь к примерам в нашей документации для разработчиков, чтобы узнать, как [создать подключение к ClickHouse](https://developer.luzmo.com/api/createAccount?exampleSection=AccountCreateClickhouseRequestBody) через наш API. - - ## 2. Добавьте наборы данных {#2-add-datasets} После подключения ClickHouse вы можете добавить наборы данных, как описано [здесь](https://academy.luzmo.com/article/ldx3iltg). Вы можете выбрать один или несколько наборов данных, доступных в вашем ClickHouse, и [связать](https://academy.luzmo.com/article/gkrx48x5) их в Luzmo, чтобы их можно было использовать вместе в одном дашборде. Также рекомендуем ознакомиться со статьёй [Подготовка данных к аналитике](https://academy.luzmo.com/article/u492qov0). 
@@ -54,8 +49,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## Примечания по использованию {#usage-notes} 1. Коннектор Luzmo для ClickHouse использует HTTP‑интерфейс API (как правило, доступный на порту 8123) для подключения. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md index f44676565cd..b734fd31a0a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md @@ -25,7 +25,6 @@ import mitzu_10 from '@site/static/images/integrations/data-visualization/mitzu_ import mitzu_11 from '@site/static/images/integrations/data-visualization/mitzu_11.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение Mitzu к ClickHouse {#connecting-mitzu-to-clickhouse} @@ -34,8 +33,6 @@ Mitzu — это no-code решение для продуктовой анали Однако в отличие от этих платформ Mitzu не дублирует данные об использовании продукта компании. Вместо этого оно генерирует нативные SQL-запросы непосредственно к уже существующему хранилищу или озеру данных компании. - - ## Цель {#goal} В этом руководстве мы рассмотрим следующее: @@ -50,38 +47,28 @@ Mitzu — это no-code решение для продуктовой анали Это руководство представляет собой лишь краткий обзор того, как использовать Mitzu. Более подробную информацию вы можете найти в [документации Mitzu](https://docs.mitzu.io/). - - ## 1. Соберите сведения о подключении {#1-gather-your-connection-details} - - ## 2. 
Войдите или зарегистрируйтесь в Mitzu {#2-sign-in-or-sign-up-to-mitzu} Для начала перейдите на [https://app.mitzu.io](https://app.mitzu.io) и зарегистрируйтесь. - - ## 3. Настройте свое рабочее пространство {#3-configure-your-workspace} После создания организации следуйте руководству по первичной настройке `Set up your workspace` в левой панели. Затем нажмите ссылку `Connect Mitzu with your data warehouse`. - - ## 4. Подключение Mitzu к ClickHouse {#4-connect-mitzu-to-clickhouse} Сначала выберите ClickHouse в качестве типа подключения и задайте параметры подключения. Затем нажмите кнопку `Test connection & Save`, чтобы сохранить настройки. - - ## 5. Настройка таблиц событий {#5-configure-event-tables} После сохранения подключения выберите вкладку `Event tables` и нажмите кнопку `Add table`. В модальном окне выберите базу данных и таблицы, которые вы хотите добавить в Mitzu. @@ -103,8 +90,6 @@ Mitzu — это no-code решение для продуктовой анали
После того как все таблицы будут настроены, нажмите кнопку `Save & update event catalog`, и Mitzu найдет все события и их свойства в указанных выше таблицах. Этот шаг может занять до нескольких минут в зависимости от размера набора данных. - - ## 4. Запуск сегментационных запросов {#4-run-segmentation-queries} Сегментация пользователей в Mitzu так же проста, как в Amplitude, Mixpanel или PostHog. @@ -120,8 +105,6 @@ Mitzu — это no-code решение для продуктовой анали Вы можете выбрать любое событие или пользовательское свойство для разбиения (см. ниже, как интегрировать пользовательские свойства). ::: - - ## 5. Выполнение запросов по воронке {#5-run-funnel-queries} Выберите до 9 шагов для воронки. Задайте временное окно, в течение которого пользователи могут пройти воронку. @@ -135,8 +118,6 @@ Mitzu — это no-code решение для продуктовой анали Выберите `Funnel trends`, чтобы визуализировать динамику воронки во времени. ::: - - ## 6. Run retention queries {#6-run-retention-queries} Выберите не более двух шагов для расчёта коэффициента удержания. Задайте окно удержания для повторяющегося периода. @@ -150,8 +131,6 @@ Mitzu — это no-code решение для продуктовой анали Выберите `Weekly cohort retention`, чтобы визуализировать, как ваши показатели удержания меняются со временем. ::: - - ## 7. Запуск запросов по пути пользователя {#7-run-journey-queries} Выберите до 9 шагов для воронки. Укажите временной интервал, в течение которого пользователи могут завершить путь. Диаграмма пути в Mitzu дает наглядную карту всех вариантов прохождения пользователями выбранных событий. @@ -164,15 +143,11 @@ Mitzu — это no-code решение для продуктовой анали
- - ## 8. Запуск запросов по выручке {#8-run-revenue-queries} Если параметры выручки настроены, Mitzu может вычислить общий MRR и количество подписок на основе ваших платежных событий. - - ## 9. SQL native {#9-sql-native} Mitzu работает напрямую с SQL, то есть генерирует нативный SQL‑код из выбранной вами конфигурации на странице Explore. @@ -185,16 +160,12 @@ Mitzu работает напрямую с SQL, то есть генерируе Если вы столкнулись с ограничением интерфейса Mitzu, скопируйте SQL‑код и продолжайте работу в BI‑инструменте. ::: - - ## Поддержка Mitzu {#mitzu-support} Если у вас возникли сложности, свяжитесь с нами по адресу [support@mitzu.io](email://support@mitzu.io) Или присоединяйтесь к нашему сообществу в Slack [здесь](https://join.slack.com/t/mitzu-io/shared_invite/zt-1h1ykr93a-_VtVu0XshfspFjOg6sczKg) - - ## Узнать больше {#learn-more} Дополнительную информацию о Mitzu вы можете найти на сайте [mitzu.io](https://mitzu.io) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md index 59074c54545..d3bba89f1e4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md @@ -30,7 +30,6 @@ import rocketbi_17 from '@site/static/images/integrations/data-visualization/roc import rocketbi_18 from '@site/static/images/integrations/data-visualization/rocketbi_18.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Цель: создать свой первый дашборд в Rocket.BI {#goal-build-your-first-dashboard-with-rocketbi} @@ -43,8 +42,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; Вы можете открыть [этот 
дашборд по ссылке.](https://demo.rocket.bi/dashboard/sales-dashboard-7?token=7eecf750-cbde-4c53-8fa8-8b905fec667e) - - ## Установка {#install} Запустите RocketBI с помощью наших предварительно собранных образов Docker. @@ -64,7 +61,6 @@ wget https://raw.githubusercontent.com/datainsider-co/rocket-bi/main/docker/.cli Чтобы собрать из исходников или выполнить расширенную настройку, ознакомьтесь с файлом [Rocket.BI Readme](https://github.com/datainsider-co/rocket-bi/blob/main/README.md). - ## Давайте соберём дашборд {#lets-build-the-dashboard} Во вкладке Dashboard вы найдёте свои отчёты, начните создавать визуализации, нажав **+New**. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md index 7581fa84484..3506f150e1b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md @@ -22,15 +22,12 @@ import zing_08 from '@site/static/images/integrations/data-visualization/zing_08 import zing_09 from '@site/static/images/integrations/data-visualization/zing_09.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение Zing Data к ClickHouse {#connect-zing-data-to-clickhouse} Zing Data — это платформа для исследования и визуализации данных. Zing Data подключается к ClickHouse с помощью JS-драйвера, предоставляемого ClickHouse. - - ## Как подключиться {#how-to-connect} 1. Соберите сведения для подключения. @@ -62,8 +59,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 6. 
После того как источник данных ClickHouse добавлен, он будет доступен всем пользователям в вашей организации Zing на вкладке **Data Sources** / **Sources**. - - ## Создание графиков и дашбордов в Zing Data {#creating-charts-and-dashboards-in-zing-data} 1. После добавления источника данных ClickHouse нажмите **Zing App** в веб-интерфейсе или выберите источник данных в мобильном приложении, чтобы начать создавать графики. @@ -93,8 +88,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained';
- - ## Связанные материалы {#related-content} - [Документация](https://docs.getzingdata.com/docs/) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md index 40493f79aec..92d14e897ff 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md @@ -19,7 +19,6 @@ import alias_table_config_example from '@site/static/images/integrations/data-vi import alias_table_select_example from '@site/static/images/integrations/data-visualization/grafana/alias_table_select_example.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Настройка источника данных ClickHouse в Grafana {#configuring-clickhouse-data-source-in-grafana} @@ -62,7 +61,6 @@ secureJsonData: Обратите внимание, что свойство `version` добавляется, когда конфигурация сохраняется через пользовательский интерфейс. Оно показывает версию плагина, в которой была сохранена конфигурация. - ### Протокол HTTP {#http-protocol} Если вы выберете подключение по протоколу HTTP, появятся дополнительные настройки. @@ -79,7 +77,6 @@ jsonData: path: additional/path/example ``` - #### Пользовательские HTTP-заголовки {#custom-http-headers} Вы можете добавлять пользовательские заголовки к запросам, отправляемым на ваш сервер. @@ -106,7 +103,6 @@ secureJsonData: secureHttpHeaders.X-Example-Secure-Header: значение защищенного заголовка ``` - ## Дополнительные настройки {#additional-settings} Эти дополнительные настройки не являются обязательными. @@ -125,7 +121,6 @@ jsonData: validateSql: false # при значении true выполняется валидация SQL в редакторе SQL. ``` - ### OpenTelemetry {#opentelemetry} OpenTelemetry (OTel) глубоко интегрирован в плагин. 
@@ -164,7 +159,6 @@ jsonData: messageColumn: # сообщение/содержимое лога. ``` - ### Трейсы {#traces} Чтобы ускорить [создание запросов для трейсов](./query-builder.md#traces), вы можете задать базу данных/таблицу по умолчанию, а также столбцы для запроса по трейсам. Это предварительно заполнит конструктор запросов исполняемым запросом поиска по трейсам, что делает работу на странице Explore быстрее для задач наблюдаемости. @@ -201,7 +195,6 @@ jsonData: serviceTagsColumn: # столбец тегов сервиса. Ожидается тип map. ``` - ### Псевдонимы столбцов {#column-aliases} Использование псевдонимов столбцов — удобный способ выполнять запросы к данным под другими именами и с другими типами. @@ -232,7 +225,6 @@ CREATE TABLE alias_example ( Для получения дополнительной информации см. документацию по типу столбца [ALIAS](/sql-reference/statements/create/table#alias). - #### Таблицы с псевдонимами столбцов {#column-alias-tables} По умолчанию Grafana подсказывает столбцы на основе ответа `DESC table`. @@ -275,7 +267,6 @@ INSERT INTO example_table_aliases (`alias`, `select`, `type`) VALUES Оба варианта псевдонимов можно использовать для выполнения сложных преобразований типов или извлечения полей из JSON. - ## Все параметры YAML {#all-yaml-options} Ниже перечислены все параметры конфигурации YAML, доступные в этом плагине. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md index 5561240b990..b34026db607 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md @@ -22,7 +22,6 @@ import valid_ds from '@site/static/images/integrations/data-visualization/grafan import Image from '@theme/IdealImage'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Плагин источника данных ClickHouse для Grafana {#clickhouse-data-source-plugin-for-grafana} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md index 75d4e3202c8..45cd44c3246 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md @@ -20,7 +20,6 @@ import trace_id_in_logs from '@site/static/images/integrations/data-visualizatio import demo_data_links from '@site/static/images/integrations/data-visualization/grafana/demo_data_links.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Конструктор запросов {#query-builder} @@ -37,8 +36,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - [Traces](#traces): оптимизирован для поиска и просмотра трейсов. Лучше всего работает в режиме обзора при [настроенных значениях по умолчанию](./config.md#traces). - [SQL Editor](#sql-editor): SQL Editor можно использовать, когда вам нужен полный контроль над запросом. В этом режиме можно выполнять любые SQL-запросы. 
- - ## Типы запросов {#query-types} Настройка *Query Type* изменяет компоновку конструктора запросов в соответствии с типом создаваемого запроса. @@ -110,8 +107,6 @@ OpenTelemetry также можно включить, чтобы автомат Попробуйте убрать предложение `LIMIT`, установив его в `0` (если это допустимо для вашего набора данных). ::: - - | Field | Description | |----|----| | Builder Mode | В режиме Simple из запросов исключаются агрегаты и GROUP BY, тогда как в режиме Aggregate эти опции доступны. | @@ -164,8 +159,6 @@ OpenTelemetry также может быть включён для автома Этот тип запроса визуализирует данные в виде табличного представления в режиме Trace Search и в виде панели трассировок в режиме Trace ID. - - ## SQL-редактор {#sql-editor} Если запрос слишком сложен для конструктора запросов, вы можете использовать SQL-редактор. @@ -180,8 +173,6 @@ SQL-редактор можно открыть, выбрав «SQL Editor» в - - ## Ссылки на данные {#data-links} [Ссылки на данные](https://grafana.com/docs/grafana/latest/panels-visualizations/configure-data-links) в Grafana @@ -220,8 +211,6 @@ SQL-редактор можно открыть, выбрав «SQL Editor» в - - ## Макросы {#macros} Макросы — это простой способ добавить динамический SQL в запрос. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md index e5c56035b85..75942440385 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md @@ -8,8 +8,6 @@ description: 'Узнайте, как визуализировать данные doc_type: 'guide' --- - - # Визуализация данных в ClickHouse {#visualizing-data-in-clickhouse}
@@ -49,8 +47,6 @@ doc_type: 'guide' - [Tableau](./tableau/tableau-and-clickhouse.md) - [Zing Data](./community_integrations/zingdata-and-clickhouse.md) - - ## Совместимость ClickHouse Cloud с инструментами визуализации данных {#clickhouse-cloud-compatibility-with-data-visualization-tools} | Инструмент | Поддерживается через | Протестировано | Документировано | Комментарий | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md index 4fdba482c2f..92c587476ef 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md @@ -21,7 +21,6 @@ import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Lightdash {#lightdash} @@ -32,8 +31,6 @@ Lightdash — это **AI-first BI-платформа**, созданная дл Это партнёрство объединяет **скорость ClickHouse** и **удобство Lightdash для разработчиков**, упрощая как никогда ранее исследование, визуализацию и автоматизацию получения инсайтов с помощью ИИ. - - ## Создание интерактивной панели мониторинга с Lightdash и ClickHouse {#build-an-interactive-dashboard} В этом руководстве показано, как **Lightdash** подключается к **ClickHouse** для исследования ваших dbt-моделей и создания интерактивных панелей мониторинга. @@ -128,7 +125,6 @@ Lightdash — это **AI-first BI-платформа**, созданная дл Страница **Explore** состоит из пяти основных областей: - 1. **Размерности и метрики** — все поля, доступные в выбранной таблице 2. **Фильтры** — ограничивают данные, возвращаемые вашим запросом 3. 
**Диаграмма** — визуализирует результаты вашего запроса @@ -196,7 +192,6 @@ Lightdash — это **AI-first BI-платформа**, созданная дл - ## Подробнее {#learn-more} Чтобы узнать больше о подключении проектов dbt к Lightdash, посетите раздел [Документация Lightdash → Настройка ClickHouse](https://docs.lightdash.com/get-started/setup-lightdash/connect-project#clickhouse?utm_source=clickhouse&utm_medium=partner&utm_campaign=integration_docs). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md index eeb8a57b994..94341d2a8f6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md @@ -18,20 +18,15 @@ import looker_03 from '@site/static/images/integrations/data-visualization/looke import looker_04 from '@site/static/images/integrations/data-visualization/looker_04.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Looker {#looker} Looker может подключаться к ClickHouse Cloud или локальному развертыванию ClickHouse с помощью официального источника данных ClickHouse. - - ## 1. Получите параметры подключения {#1-gather-your-connection-details} - - ## 2. Создайте источник данных ClickHouse {#2-create-a-clickhouse-data-source} Перейдите в Admin -> Database -> Connections и нажмите кнопку «Add Connection» в правом верхнем углу. @@ -56,8 +51,6 @@ Looker может подключаться к ClickHouse Cloud или локал Теперь вы сможете подключить источник данных ClickHouse к проекту Looker. - - ## 3. Известные ограничения {#3-known-limitations} 1. 
Следующие типы данных по умолчанию обрабатываются как строки: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md index 0b499f86847..fa48e486cd5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md @@ -23,25 +23,18 @@ import looker_studio_enable_mysql from '@site/static/images/integrations/data-vi import looker_studio_mysql_cloud from '@site/static/images/integrations/data-visualization/looker_studio_mysql_cloud.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Looker Studio {#looker-studio} Looker Studio может подключаться к ClickHouse через интерфейс MySQL, используя официальный источник данных Google для MySQL. - - ## Настройка ClickHouse Cloud {#clickhouse-cloud-setup} - - ## Настройка локального сервера ClickHouse {#on-premise-clickhouse-server-setup} - - ## Подключение Looker Studio к ClickHouse {#connecting-looker-studio-to-clickhouse} Сначала войдите на сайт https://lookerstudio.google.com под своей учетной записью Google и создайте новый источник данных (Data Source): @@ -77,8 +70,6 @@ Looker Studio может подключаться к ClickHouse через ин Теперь вы можете приступать к анализу своих данных или созданию нового отчета! - - ## Использование Looker Studio с ClickHouse Cloud {#using-looker-studio-with-clickhouse-cloud} При использовании ClickHouse Cloud сначала необходимо включить интерфейс MySQL. Это можно сделать в диалоговом окне подключения, на вкладке «MySQL». 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md index 5eb2fecd36a..d8106475806 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md @@ -24,15 +24,12 @@ import metabase_07 from '@site/static/images/integrations/data-visualization/met import metabase_08 from '@site/static/images/integrations/data-visualization/metabase_08.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Подключение Metabase к ClickHouse {#connecting-metabase-to-clickhouse} Metabase — это простой в использовании UI-инструмент с открытым исходным кодом для формирования запросов к вашим данным. Metabase — это Java-приложение, которое можно запустить, просто скачав JAR‑файл и выполнив его командой `java -jar metabase.jar`. Metabase подключается к ClickHouse с помощью JDBC‑драйвера, который вы скачиваете и помещаете в папку `plugins`: - - ## Цель {#goal} В этом руководстве вы зададите ряд вопросов к данным в ClickHouse с помощью Metabase и визуализируете полученные ответы. Один из результатов будет выглядеть так: @@ -44,13 +41,9 @@ Metabase — это простой в использовании UI-инстру Если у вас нет набора данных для работы, вы можете добавить один из примеров. В этом руководстве используется набор данных [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md), поэтому вы можете выбрать его. В той же категории документации есть и несколько других вариантов. ::: - - ## 1. Соберите параметры подключения {#1-gather-your-connection-details} - - ## 2. Загрузка плагина ClickHouse для Metabase {#2--download-the-clickhouse-plugin-for-metabase} 1. 
Если у вас нет папки `plugins`, создайте её как подпапку в том же каталоге, где сохранён файл `metabase.jar`. @@ -63,8 +56,6 @@ Metabase — это простой в использовании UI-инстру 5. Откройте Metabase по адресу http://hostname:3000. При первом запуске вы увидите приветственный экран и вам нужно будет ответить на ряд вопросов. Если будет предложено выбрать базу данных, выберите "**I'll add my data later**": - - ## 3. Подключение Metabase к ClickHouse {#3--connect-metabase-to-clickhouse} 1. Нажмите на значок шестерёнки в правом верхнем углу и выберите **Admin Settings**, чтобы открыть административную страницу Metabase. @@ -83,8 +74,6 @@ Metabase — это простой в использовании UI-инстру 6. Нажмите кнопку **Save**, и Metabase просканирует вашу базу данных и обнаружит таблицы. - - ## 4. Выполните SQL-запрос {#4-run-a-sql-query} 1. Выйдите из **Admin settings**, нажав кнопку **Exit admin** в правом верхнем углу. @@ -97,8 +86,6 @@ Metabase — это простой в использовании UI-инстру - - ## 5. Задайте вопрос {#5-ask-a-question} 1. Нажмите **+ New** и выберите **Question**. Обратите внимание, что вы можете создать вопрос, начав с базы данных и таблицы. Например, следующий запрос выполняется к таблице `uk_price_paid` в базе данных `default`. Вот простой запрос, который вычисляет среднюю цену по городам в графстве Большой Манчестер: @@ -113,8 +100,6 @@ Metabase — это простой в использовании UI-инстру - - ## Подробнее {#learn-more} Вы можете найти дополнительную информацию о Metabase и о том, как создавать дашборды, в документации Metabase. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md index 92ab4ca38f5..b18500322f6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md @@ -13,21 +13,16 @@ import omni_01 from '@site/static/images/integrations/data-visualization/omni_01 import omni_02 from '@site/static/images/integrations/data-visualization/omni_02.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Omni {#omni} Omni может подключаться к ClickHouse Cloud или к локальному развертыванию ClickHouse через официальный источник данных ClickHouse. - - ## 1. Соберите данные для подключения {#1-gather-your-connection-details} - - ## 2. Создайте источник данных ClickHouse {#2-create-a-clickhouse-data-source} Перейдите в Admin -> Connections и нажмите кнопку «Add Connection» в правом верхнем углу. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md index 14d8826ca45..7c3736f8066 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md @@ -29,7 +29,6 @@ import powerbi_dsn_credentials from '@site/static/images/integrations/data-visua import powerbi_16 from '@site/static/images/integrations/data-visualization/powerbi_16.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Power BI {#power-bi} @@ -50,8 +49,6 @@ Microsoft Power BI может выполнять запросы к данным * [Выполнение запросов к данным из ClickHouse для визуализации в Power BI Desktop](#query-and-visualise-data) * [Настройка локального шлюза данных для Power BI Service](#power-bi-service) - - ## Предварительные требования {#prerequisites} ### Установка Power BI {#power-bi-installation} @@ -69,8 +66,6 @@ Microsoft Power BI может выполнять запросы к данным * Password — пароль пользователя * Database — имя базы данных на экземпляре, к которому вы хотите подключиться - - ## Power BI Desktop {#power-bi-desktop} Чтобы начать выполнять запросы к данным в Power BI Desktop, выполните следующие шаги: @@ -158,16 +153,12 @@ Microsoft Power BI может выполнять запросы к данным После завершения импорта данные ClickHouse будут доступны в Power BI как обычно.
- - ## Сервис Power BI {#power-bi-service} Чтобы использовать Microsoft Power BI Service, необходимо создать [локальный шлюз данных](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-onprem). Подробную информацию по настройке пользовательских коннекторов см. в документации Microsoft о том, как [использовать пользовательские коннекторы данных с локальным шлюзом данных](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-custom-connectors). - - ## Драйвер ODBC (только импорт) {#odbc-driver-import-only} Мы рекомендуем использовать ClickHouse Connector, который использует DirectQuery. @@ -237,8 +228,6 @@ Microsoft Power BI может выполнять запросы к данным После завершения импорта данные из ClickHouse будут доступны в Power BI как обычно. - - ## Известные ограничения {#known-limitations} ### UInt64 {#uint64} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md index 0b2fb126fd6..2528b0ab816 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md @@ -21,15 +21,12 @@ import quicksight_06 from '@site/static/images/integrations/data-visualization/q import quicksight_07 from '@site/static/images/integrations/data-visualization/quicksight_07.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # QuickSight {#quicksight} QuickSight может подключаться к локальному развертыванию ClickHouse (23.11+) через интерфейс MySQL, используя официальный источник данных MySQL и режим Direct Query. 
- - ## Настройка локально развернутого сервера ClickHouse {#on-premise-clickhouse-server-setup} Обратитесь к [официальной документации](/interfaces/mysql) по настройке сервера ClickHouse с включённым интерфейсом MySQL. @@ -122,7 +119,6 @@ mysql> show databases; Read 4 rows, 603.00 B in 0.00156 sec., 2564 rows/sec., 377.48 KiB/sec. ``` - ## Подключение QuickSight к ClickHouse {#connecting-quicksight-to-clickhouse} Для начала перейдите на [https://quicksight.aws.amazon.com](https://quicksight.aws.amazon.com), откройте раздел Datasets и нажмите "New dataset": @@ -163,8 +159,6 @@ Read 4 rows, 603.00 B in 0.00156 sec., 2564 rows/sec., 377.48 KiB/sec. Теперь вы можете опубликовать набор данных и создать новую визуализацию! - - ## Известные ограничения {#known-limitations} - Импорт SPICE работает некорректно; вместо него используйте режим Direct Query. См. [#58553](https://github.com/ClickHouse/ClickHouse/issues/58553). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md index ddef16f1a98..f3ca768abe5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md @@ -21,7 +21,6 @@ import splunk_9 from '@site/static/images/integrations/splunk/splunk-9.png'; import splunk_10 from '@site/static/images/integrations/splunk/splunk-10.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Подключение Splunk к ClickHouse {#connecting-splunk-to-clickhouse} @@ -36,8 +35,6 @@ Splunk — популярная платформа для обеспечения Идеальный сценарий использования этой интеграции — когда вы применяете ClickHouse для больших объёмов данных, таких как NetFlow, двоичные данные Avro или Protobuf, DNS, журналы трафика VPC и другие журналы 
OTel, которыми можно делиться с вашей командой в Splunk для поиска и построения дашбордов. При таком подходе данные не проходят приём в индексный слой Splunk, а запрашиваются напрямую из ClickHouse, аналогично другим интеграциям для визуализации, таким как [Metabase](https://www.metabase.com/) или [Superset](https://superset.apache.org/). - - ## Цель​ {#goal} В этом руководстве мы будем использовать JDBC-драйвер ClickHouse для подключения ClickHouse к Splunk. Мы установим локальный экземпляр Splunk Enterprise, но не будем индексировать какие-либо данные. Вместо этого мы будем использовать функции поиска через движок запросов DB Connect. @@ -50,8 +47,6 @@ Splunk — популярная платформа для обеспечения В этом руководстве используется [набор данных такси города Нью-Йорк](/getting-started/example-datasets/nyc-taxi). В нашей [документации](http://localhost:3000/docs/getting-started/example-datasets) есть и многие другие наборы данных, которые вы можете использовать. ::: - - ## Предварительные требования {#prerequisites} Перед началом работы вам потребуется: @@ -61,8 +56,6 @@ Splunk — популярная платформа для обеспечения - Административный или SSH-доступ к экземпляру ОС с установленным Splunk Enterprise - Данные для подключения к ClickHouse (см. [здесь](/integrations/metabase#1-gather-your-connection-details), если вы используете ClickHouse Cloud) - - ## Установка и настройка DB Connect в Splunk Enterprise {#install-and-configure-db-connect-on-splunk-enterprise} Сначала необходимо установить Java Runtime Environment на инстанс Splunk Enterprise. Если вы используете Docker, можно выполнить команду `microdnf install java-11-openjdk`. 
@@ -81,8 +74,6 @@ Splunk — популярная платформа для обеспечения - - ## Настройка JDBC для ClickHouse {#configure-jdbc-for-clickhouse} Скачайте [драйвер ClickHouse JDBC](https://github.com/ClickHouse/clickhouse-java) в папку DB Connect Drivers, например: @@ -111,7 +102,6 @@ ui_default_catalog = $database$ - ## Подключение поиска Splunk к ClickHouse {#connect-splunk-search-to-clickhouse} Перейдите в DB Connect App Configuration -> Databases -> Identities и создайте Identity для вашего ClickHouse. @@ -132,8 +122,6 @@ ui_default_catalog = $database$ Если вы получили ошибку, убедитесь, что добавили IP-адрес вашего экземпляра Splunk в список ClickHouse Cloud IP Access List. Для получения дополнительной информации смотрите [документацию](/cloud/security/setting-ip-filters). ::: - - ## Выполнение SQL-запроса {#run-a-sql-query} Теперь мы выполним SQL-запрос, чтобы убедиться, что всё работает корректно. @@ -148,8 +136,6 @@ ui_default_catalog = $database$ Если запрос выполнен успешно, вы должны увидеть результат. - - ## Создайте дашборд {#create-a-dashboard} Давайте создадим дашборд, который использует сочетание SQL и мощного Splunk Processing Language (SPL). @@ -194,7 +180,6 @@ ORDER BY year, count(*) DESC; " connection="chc" - ## Данные временных рядов {#time-series-data} В Splunk есть сотни встроенных функций, которые дашборды могут использовать для визуализации и представления данных временных рядов. В этом примере будут объединены SQL и SPL для создания запроса, который может работать с данными временных рядов в Splunk. @@ -209,7 +194,6 @@ FROM "demo"."conn" WHERE time >= now() - interval 1 HOURS" connection="chc" | sort - duration: ``` - ## Дополнительные материалы {#learn-more} Если вы хотите получить больше информации о Splunk DB Connect и создании дашбордов, перейдите к [документации Splunk](https://docs.splunk.com/Documentation). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md index b5da49a34c8..1188855280c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md @@ -28,15 +28,12 @@ import superset_11 from '@site/static/images/integrations/data-visualization/sup import superset_12 from '@site/static/images/integrations/data-visualization/superset_12.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Подключение Superset к ClickHouse {#connect-superset-to-clickhouse} Apache Superset — это платформа с открытым исходным кодом для исследования и визуализации данных, написанная на Python. Superset подключается к ClickHouse с помощью драйвера Python, предоставленного ClickHouse. Давайте посмотрим, как это работает... - - ## Цель {#goal} В этом руководстве вы создадите дашборд в Superset на основе данных из базы данных ClickHouse. Дашборд будет выглядеть следующим образом: @@ -48,13 +45,9 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; Если у вас нет набора данных для работы, вы можете добавить один из примеров. В этом руководстве используется набор данных [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md), поэтому вы можете выбрать именно его. В той же категории документации есть и несколько других наборов данных. ::: - - ## 1. Соберите параметры подключения {#1-gather-your-connection-details} - - ## 2. Установка драйвера {#2-install-the-driver} 1. Superset использует драйвер `clickhouse-connect` для подключения к ClickHouse. 
Подробную информацию о `clickhouse-connect` можно найти по адресу https://pypi.org/project/clickhouse-connect/, а установить его можно с помощью следующей команды: @@ -65,8 +58,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 2. Запустите (или перезапустите) Superset. - - ## 3. Подключение Superset к ClickHouse {#3-connect-superset-to-clickhouse} 1. В Superset выберите **Data** в верхнем меню, затем **Databases** в раскрывающемся меню. Добавьте новую базу данных, нажав кнопку **+ Database**: @@ -89,8 +80,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 4. Нажмите кнопки **CONNECT**, а затем **FINISH**, чтобы завершить мастер настройки. После этого вы увидите свою базу данных в списке баз данных. - - ## 4. Добавьте набор данных {#4-add-a-dataset} 1. Чтобы работать с данными ClickHouse в Superset, необходимо определить **_dataset_** (набор данных). В верхнем меню Superset выберите **Data**, затем **Datasets** в раскрывающемся меню. @@ -102,8 +91,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 3. Нажмите кнопку **ADD** в нижней части диалогового окна, и ваша таблица появится в списке наборов данных. Теперь вы готовы создавать дашборды и анализировать данные в ClickHouse! - - ## 5. Создание диаграмм и дашборда в Superset {#5--creating-charts-and-a-dashboard-in-superset} Если вы уже знакомы с Superset, этот раздел покажется вам вполне привычным. Если вы новичок в Superset, то... он похож на многие другие современные инструменты визуализации данных: чтобы начать, много времени не нужно, а детали и нюансы приходят с опытом по мере работы с инструментом. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md index d1de2c6fe86..20a5f4dc4b8 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md @@ -29,7 +29,6 @@ import tableau_workbook6 from '@site/static/images/integrations/data-visualizati import tableau_workbook7 from '@site/static/images/integrations/data-visualization/tableau_workbook7.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Подключение Tableau к ClickHouse {#connecting-tableau-to-clickhouse} @@ -43,8 +42,6 @@ ClickHouse предоставляет официальный коннектор - - ## Предварительная настройка перед использованием {#setup-required-prior-usage} 1. Соберите сведения о подключении @@ -64,8 +61,6 @@ ClickHouse предоставляет официальный коннектор - Windows: `C:\Program Files\Tableau\Drivers` 5. Настройте источник данных ClickHouse в Tableau и приступайте к созданию визуализаций данных! - - ## Настройка источника данных ClickHouse в Tableau {#configure-a-clickhouse-data-source-in-tableau} Теперь, когда драйвер `clickhouse-jdbc` установлен и настроен, рассмотрим, как настроить источник @@ -127,8 +122,6 @@ ClickHouse предоставляет официальный коннектор Теперь вы готовы создавать визуализации в Tableau! - - ## Создание визуализаций в Tableau {#building-visualizations-in-tableau} Теперь, когда у нас настроен источник данных ClickHouse в Tableau, давайте визуализируем данные… @@ -187,8 +180,6 @@ ClickHouse предоставляет официальный коннектор Отличная работа! Вы успешно подключили Tableau к ClickHouse и открыли для себя целый мир возможностей для анализа и визуализации ваших данных в ClickHouse. 
- - ## Установка коннектора вручную {#install-the-connector-manually} Если вы используете устаревшую версию Tableau Desktop, которая не включает коннектор по умолчанию, вы можете установить его вручную, выполнив следующие шаги: @@ -199,19 +190,13 @@ ClickHouse предоставляет официальный коннектор * Windows: `C:\Users\[Windows User]\Documents\My Tableau Repository\Connectors` 3. Перезапустите Tableau Desktop. Если установка прошла успешно, коннектор появится в разделе `New Data Source`. - - ## Советы по подключению и анализу {#connection-and-analysis-tips} Для получения дополнительных рекомендаций по оптимизации интеграции Tableau с ClickHouse см. разделы [Советы по подключению](/integrations/tableau/connection-tips) и [Советы по анализу](/integrations/tableau/analysis-tips). - - ## Тесты {#tests} Коннектор тестируется с использованием [фреймворка TDVT](https://tableau.github.io/connector-plugin-sdk/docs/tdvt) и в настоящее время имеет уровень покрытия тестами 97%. - - ## Краткое описание {#summary} Вы можете подключить Tableau к ClickHouse, используя универсальный драйвер ODBC/JDBC для ClickHouse. Однако этот коннектор упрощает процесс настройки подключения. 
Если у вас возникнут какие-либо проблемы с коннектором, сообщите о них diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md index 0b41951d53b..f1997e48f56 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md @@ -11,13 +11,10 @@ doc_type: 'guide' import Image from '@theme/IdealImage'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Рекомендации по подключению {#connection-tips} - - ## Вкладка Initial SQL {#initial-sql-tab} Если на вкладке Advanced (по умолчанию) установлен флажок *Set Session ID*, вы можете задать [настройки](/operations/settings/settings/) на уровне сеанса с помощью @@ -26,7 +23,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; SET my_setting=значение; ``` - ## Вкладка Advanced {#advanced-tab} В 99% случаев вам не понадобится вкладка Advanced, для оставшегося 1% вы можете использовать следующие настройки: @@ -38,8 +34,6 @@ SET my_setting=значение; ``` Подробнее о сопоставлении типов данных читайте в соответствующем разделе. - - * **Параметры URL JDBC-драйвера**. В этом поле вы можете передать остальные [параметры драйвера](https://github.com/ClickHouse/clickhouse-jdbc#configuration), например `jdbcCompliance`. Учтите, что значения параметров должны передаваться в формате URL-encoded, и при передаче `custom_http_params` или `typeMappings` и в этом поле, и в предыдущих полях вкладки Advanced значения двух предшествующих полей на вкладке Advanced имеют более высокий приоритет. * Флажок **Set Session ID**. 
Нужен для задания параметров на уровне сеанса во вкладке Initial SQL, генерирует `session_id` с меткой времени и псевдослучайным числом в формате `"tableau-jdbc-connector-*{timestamp}*-*{number}*"`. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md index ad772e90a8d..a9903775a5d 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md @@ -21,23 +21,16 @@ import tableau_desktop_03 from '@site/static/images/integrations/data-visualizat import tableau_desktop_04 from '@site/static/images/integrations/data-visualization/tableau_desktop_04.png'; import tableau_desktop_05 from '@site/static/images/integrations/data-visualization/tableau_desktop_05.png'; - # Tableau Online {#tableau-online} Tableau Online может подключаться к ClickHouse Cloud или локальному развертыванию ClickHouse через интерфейс MySQL, используя официальный источник данных MySQL. - - ## Настройка ClickHouse Cloud {#clickhouse-cloud-setup} - - ## Настройка сервера ClickHouse в локальной инфраструктуре {#on-premise-clickhouse-server-setup} - - ## Подключение Tableau Online к ClickHouse (on-premise, без SSL) {#connecting-tableau-online-to-clickhouse-on-premise-without-ssl} Войдите в свою учетную запись Tableau Cloud и добавьте новый источник данных (Published Data Source). 
@@ -64,8 +57,6 @@ Tableau Online проанализирует базу данных и предо NB: если вы хотите использовать Tableau Online в сочетании с Tableau Desktop и делиться наборами данных ClickHouse между ними, убедитесь, что вы используете Tableau Desktop с тем же стандартным коннектором MySQL, следуя руководству по настройке, которое отображается [здесь](https://www.tableau.com/support/drivers), если выбрать MySQL в выпадающем списке Data Source. Если у вас Mac на процессоре M1, ознакомьтесь с [этой темой по устранению неполадок](https://community.tableau.com/s/question/0D58b0000Ar6OhvCQE/unable-to-install-mysql-driver-for-m1-mac) для обходного варианта установки драйвера. - - ## Подключение Tableau Online к ClickHouse (облачное или локальное развертывание с SSL) {#connecting-tableau-online-to-clickhouse-cloud-or-on-premise-setup-with-ssl} Так как невозможно указать SSL-сертификаты через мастер настройки подключения MySQL в Tableau Online, @@ -108,8 +99,6 @@ SSL-сертификат ClickHouse Cloud подписан центром сер Наконец, нажмите "Publish", и ваш источник данных со встроенными учетными данными будет автоматически открыт в Tableau Online. - - ## Известные ограничения (ClickHouse 23.11) {#known-limitations-clickhouse-2311} Все известные ограничения были устранены в ClickHouse `23.11`. Если вы столкнётесь с другими проблемами совместимости, пожалуйста, [свяжитесь с нами](https://clickhouse.com/company/contact) или создайте [новый issue](https://github.com/ClickHouse/ClickHouse/issues). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md index bac488c4ccc..b226aa73113 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md @@ -55,7 +55,6 @@ Install-Package ClickHouse.Driver *** - ## Быстрый старт {#quick-start} ```csharp @@ -83,7 +82,6 @@ using (var connection = new ClickHouseConnection("Host=my.clickhouse")) *** - ## Использование {#usage} ### Параметры строки подключения {#connection-string} @@ -154,7 +152,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### Вставка данных {#inserting-data} Вставляйте данные с использованием параметризованных запросов: @@ -178,7 +175,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### Массовая вставка {#bulk-insert} Для использования `ClickHouseBulkCopy` необходимы: @@ -221,7 +217,6 @@ Console.WriteLine($"Записано строк: {bulkCopy.RowsWritten}"); *** - ### Выполнение запросов SELECT {#performing-select-queries} Выполните запросы SELECT и обработайте результаты: @@ -249,7 +244,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### Необработанный стриминг {#raw-streaming} ```csharp @@ -263,7 +257,6 @@ var json = reader.ReadToEnd(); *** - ### Поддержка вложенных столбцов {#nested-columns} Вложенные типы ClickHouse (`Nested(...)`) можно читать и записывать с использованием семантики массивов. @@ -289,7 +282,6 @@ await bulkCopy.WriteToServerAsync(new[] { row1, row2 }); *** - ### Столбцы типа AggregateFunction {#aggregatefunction-columns} Столбцы типа `AggregateFunction(...)` нельзя напрямую использовать в запросах или при вставке данных. 
@@ -308,7 +300,6 @@ SELECT uniqMerge(c) FROM t; *** - ### Параметры SQL {#sql-parameters} При передаче параметров в запрос следует использовать форматирование параметров ClickHouse в следующем формате: @@ -339,7 +330,6 @@ INSERT INTO table VALUES ({val1:Int32}, {val2:Array(UInt8)}) *** - ## Поддерживаемые типы данных {#supported-data-types} `ClickHouse.Driver` поддерживает следующие типы данных ClickHouse с их соответствующими сопоставлениями с типами .NET: @@ -455,7 +445,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - #### Использование appsettings.json {#logging-appsettings-config} Вы можете настроить уровни логирования с помощью стандартной системы конфигурации .NET: @@ -486,7 +475,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - #### Использование конфигурации в оперативной памяти {#logging-inmemory-config} Вы также можете настроить детализацию логирования по категориям прямо в коде: @@ -523,7 +511,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - ### Категории и источники {#logging-categories} Драйвер использует отдельные категории, чтобы вы могли точно настраивать уровни логирования для каждого компонента: @@ -558,7 +545,6 @@ await connection.OpenAsync(); * события открытия и закрытия подключений * отслеживание идентификаторов сессий - ### Режим отладки: трассировка сети и диагностика {#logging-debugmode} Чтобы упростить диагностику сетевых проблем, библиотека драйвера предоставляет вспомогательный инструмент, позволяющий включить низкоуровневую трассировку внутренних сетевых механизмов .NET. Чтобы включить её, необходимо передать `LoggerFactory` с уровнем `Trace` и установить `EnableDebugMode` в значение `true` (или включить её вручную через класс `ClickHouse.Driver.Diagnostic.TraceHelper`). 
Предупреждение: это приведёт к генерации чрезвычайно подробных логов и повлияет на производительность. Не рекомендуется включать режим отладки в боевой (production) среде. @@ -580,7 +566,6 @@ var settings = new ClickHouseClientSettings() *** - ### Поддержка ORM и Dapper {#orm-support} `ClickHouse.Driver` поддерживает Dapper (с некоторыми ограничениями). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md index e8429e6a4c0..5028804e813 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md @@ -13,7 +13,6 @@ integration: import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md'; - # ClickHouse Go {#clickhouse-go} ## Простой пример {#a-simple-example} @@ -32,7 +31,6 @@ cd clickhouse-golang-example go mod init clickhouse-golang-example ``` - ### Скопируйте пример кода {#copy-in-some-sample-code} Скопируйте этот код в каталог `clickhouse-golang-example` под именем `main.go`. @@ -113,14 +111,12 @@ func connect() (driver.Conn, error) { } ``` - ### Выполните go mod tidy {#run-go-mod-tidy} ```bash go mod tidy ``` - ### Укажите параметры подключения {#set-your-connection-details} Ранее вы уже получили свои параметры подключения. Укажите их в `main.go` в функции `connect()`: @@ -141,7 +137,6 @@ func connect() (driver.Conn, error) { }, ``` - ### Запуск примера {#run-the-example} ```bash @@ -156,7 +151,6 @@ go run . 2023/03/06 14:18:33 name: hourly_data, uuid: a4e36bd4-1e82-45b3-be77-74a0fe65c52b ``` - ### Подробнее {#learn-more} Остальная документация в этой категории описывает подробности работы клиента Go для ClickHouse. 
@@ -248,7 +242,6 @@ go run main.go ``` - ### Управление версиями и совместимость {#versioning--compatibility} Клиент выпускается независимо от ClickHouse. Линейка 2.x представляет текущую основную мажорную версию в разработке. Все версии 2.x должны быть совместимы друг с другом. @@ -297,7 +290,6 @@ fmt.Println(v) **Во всех последующих примерах, если не указано явно, предполагается, что переменная `conn` для подключения к ClickHouse уже создана и доступна.** - #### Параметры подключения {#connection-settings} При открытии подключения можно использовать структуру `Options` для управления поведением клиента. Доступны следующие параметры: @@ -355,7 +347,6 @@ if err != nil { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/connect_settings.go) - #### Пул подключений {#connection-pooling} Клиент поддерживает пул подключений и при необходимости повторно использует их для выполнения запросов. В каждый момент времени будет использовано не более `MaxOpenConns`, а максимальный размер пула контролируется параметром `MaxIdleConns`. Для каждого выполнения запроса клиент получает подключение из пула и по завершении возвращает его обратно для повторного использования. Подключение используется на протяжении всего жизненного цикла батча и освобождается при вызове `Send()`. @@ -433,7 +424,6 @@ v, err := conn.ServerVersion() Если необходимы дополнительные параметры TLS, в коде приложения следует задать нужные поля в структуре `tls.Config`. Это может включать указание конкретных наборов шифров, принудительное использование определённой версии TLS (например, 1.2 или 1.3), добавление внутренней цепочки сертификатов CA, добавление клиентского сертификата (и закрытого ключа), если этого требует сервер ClickHouse, а также большинство других опций, применяемых в более сложных конфигурациях безопасности. - ### Аутентификация {#authentication} Укажите структуру Auth в настройках подключения, чтобы задать имя пользователя и пароль. 
@@ -456,7 +446,6 @@ v, err := conn.ServerVersion() [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/auth.go) - ### Подключение к нескольким узлам {#connecting-to-multiple-nodes} Несколько адресов можно указать с помощью структуры `Addr`. @@ -510,7 +499,6 @@ if err != nil { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/1c0d81d0b1388dbb9e09209e535667df212f4ae4/examples/clickhouse_api/multi_host.go#L50-L67) - ### Выполнение {#execution} Произвольные операторы можно выполнять с помощью метода `Exec`. Это полезно для DDL и простых операторов. Не следует использовать его для больших вставок или итераций запросов. @@ -533,7 +521,6 @@ conn.Exec(context.Background(), "INSERT INTO example VALUES (1, 'test-1')") Обратите внимание на возможность передавать `Context` при выполнении запроса. Это позволяет задавать отдельные настройки на уровне запроса — см. [Использование Context](#using-context). - ### Пакетная вставка {#batch-insert} Чтобы вставить большое количество строк, клиент поддерживает пакетную вставку (batch). Для этого необходимо подготовить батч, к которому можно добавлять строки. В конце он отправляется методом `Send()`. Батчи хранятся в памяти до вызова `Send`. @@ -626,7 +613,6 @@ return batch.Send() Для полного перечня поддерживаемых типов Go для каждого типа столбца см. раздел [Преобразование типов](#type-conversions). - ### Запрос строк {#querying-rows} Пользователи могут либо выполнить запрос одной строки с помощью метода `QueryRow`, либо получить курсор для итерации по набору результатов с помощью `Query`. В то время как первый метод принимает переменную, в которую будут десериализованы данные, второй требует вызова `Scan` для каждой строки. @@ -677,7 +663,6 @@ return rows.Err() Наконец, обратите внимание на возможность передавать `Context` методам `Query` и `QueryRow`. Это может использоваться для настроек на уровне запроса — подробности см. в разделе [Using Context](#using-context). 
- ### Асинхронная вставка {#async-insert} Асинхронные вставки поддерживаются методом Async. Это позволяет пользователю указать, должен ли клиент ждать завершения операции вставки на сервере или может ответить сразу после получения данных. Таким образом фактически управляется параметр [wait_for_async_insert](/operations/settings/settings#wait_for_async_insert). @@ -717,7 +702,6 @@ for i := 0; i < 100; i++ { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/async.go) - ### Колоночная вставка {#columnar-insert} Данные можно вставлять в колоночном формате. Это может дать выигрыш в производительности, если данные уже имеют такую структуру, поскольку нет необходимости преобразовывать их в строки. @@ -759,7 +743,6 @@ return batch.Send() [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/columnar_insert.go) - ### Использование структур {#using-structs} Для пользователей структуры Go представляют логическую модель строки данных в ClickHouse. Для этого нативный интерфейс предоставляет ряд удобных функций. @@ -786,7 +769,6 @@ for _, v := range result { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/select_struct.go) - #### Scan struct {#scan-struct} `ScanStruct` позволяет считывать одну строку результата запроса в структуру. @@ -803,7 +785,6 @@ if err := conn.QueryRow(context.Background(), "SELECT Col1, COUNT() AS count FRO [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/scan_struct.go) - #### Добавление структуры {#append-struct} `AppendStruct` позволяет добавить структуру к существующему [batch](#batch-insert) и интерпретировать её как полноценную строку таблицы. Для этого требуется, чтобы столбцы структуры совпадали по именам и типам со столбцами таблицы. 
Хотя для всех столбцов таблицы должно существовать эквивалентное поле структуры, некоторые поля структуры могут не иметь эквивалентного представления в виде столбца. Такие поля будут просто игнорироваться. @@ -831,7 +812,6 @@ for i := 0; i < 1_000; i++ { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/append_struct.go) - ### Преобразование типов {#type-conversions} Клиент стремится быть максимально гибким в отношении принимаемых типов данных как для вставки, так и для маршалинга ответов. В большинстве случаев для типа столбца ClickHouse существует эквивалентный тип Golang, например, [UInt64](/sql-reference/data-types/int-uint/) — [uint64](https://pkg.go.dev/builtin#uint64). Эти логические соответствия должны поддерживаться всегда. Пользователи могут захотеть использовать типы данных, которые могут быть вставлены в столбцы или использованы для получения ответа, при условии, что предварительно будет выполнено преобразование либо переменной, либо полученных данных. Клиент нацелен на прозрачную поддержку таких преобразований, чтобы пользователям не нужно было заранее точно приводить данные к нужным типам перед вставкой, а также чтобы обеспечить гибкий маршалинг во время выполнения запроса. Такое прозрачное преобразование не допускает потери точности. Например, `uint32` не может использоваться для получения данных из столбца `UInt64`. В свою очередь, строку можно вставить в поле `datetime64`, если она соответствует требованиям формата. @@ -900,7 +880,6 @@ rows.Close() [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/array.go) - #### Map {#map} Отображения (map) следует задавать как карты Go (map), в которых ключи и значения соответствуют правилам преобразования типов, определённым [выше](#type-conversions). 
@@ -946,7 +925,6 @@ rows.Close() [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/map.go) - #### Кортежи {#tuples} Кортежи представляют собой набор столбцов произвольной длины. Столбцы могут быть либо явно именованы, либо задаваться только типом (без имени), например: @@ -1008,7 +986,6 @@ fmt.Printf("row: col1=%v, col2=%v, col3=%v\n", col1, col2, col3) Примечание: типизированные срезы и отображения поддерживаются при условии, что все подколонки в именованном `Tuple` имеют один и тот же тип. - #### Nested {#nested} Поле типа Nested эквивалентно массиву именованных кортежей (Array of named Tuples). Использование зависит от того, установил ли пользователь параметр [flatten_nested](/operations/settings/settings#flatten_nested) в значение 1 или 0. @@ -1117,7 +1094,6 @@ rows.Close() Если для `flatten_nested` используется значение по умолчанию — 1, вложенные столбцы разворачиваются в отдельные массивы. Для вставки и выборки при этом требуются вложенные срезы. Хотя произвольная глубина вложенности может работать, это официально не поддерживается. - ```go conn, err := GetNativeConnection(nil, nil, nil) if err != nil { @@ -1187,7 +1163,6 @@ if err := batch.Send(); err != nil { Благодаря более простому интерфейсу и официальной поддержке вложенности мы рекомендуем использовать `flatten_nested=0`. - #### Гео-типы {#geo-types} Клиент поддерживает гео-типы Point, Ring, Polygon и Multi Polygon. Эти поля в Go представлены типами из пакета [github.com/paulmach/orb](https://github.com/paulmach/orb). @@ -1271,7 +1246,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&point, &ring, &polygo [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/geo.go) - #### UUID {#uuid} Тип UUID поддерживается пакетом [github.com/google/uuid](https://github.com/google/uuid). UUID также можно передавать и сериализовать как строку или как любой тип, реализующий `sql.Scanner` или `Stringify`. 
@@ -1317,7 +1291,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2); err != [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/uuid.go) - #### Decimal {#decimal} Тип Decimal поддерживается пакетом [github.com/shopspring/decimal](https://github.com/shopspring/decimal). @@ -1371,7 +1344,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v\n", col1, col2, col3, co [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/decimal.go) - #### Nullable {#nullable} Значение `Nil` в Go соответствует `NULL` в ClickHouse. Его можно использовать, если поле объявлено как `Nullable`. При вставке `Nil` может передаваться как в обычный, так и в `Nullable`-столбец. В первом случае будет сохранено значение типа по умолчанию, например пустая строка для `string`. Для `Nullable`-версии в ClickHouse будет сохранено значение `NULL`. @@ -1426,7 +1398,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2, &col3, & Клиент также поддерживает типы `sql.Null*`, например `sql.NullInt64`. Они совместимы с соответствующими типами ClickHouse. - #### Большие целые числа — Int128, Int256, UInt128, UInt256 {#big-ints---int128-int256-uint128-uint256} Числовые типы размером более 64 бит представлены с использованием встроенного в Go пакета [big](https://pkg.go.dev/math/big). @@ -1497,7 +1468,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v, col6=%v, col7=%v\n", co [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/big_int.go) - ### Сжатие {#compression} Поддержка методов сжатия зависит от используемого базового протокола. Для нативного протокола клиент поддерживает сжатие `LZ4` и `ZSTD`. Оно выполняется только на уровне блоков. Сжатие можно включить, добавив параметр конфигурации `Compression` к подключению. 
@@ -1547,7 +1517,6 @@ if err := batch.Send(); err != nil { Дополнительные способы сжатия доступны при использовании стандартного интерфейса по HTTP. Подробности см. в разделе [database/sql API - Compression](#compression). - ### Привязка параметров {#parameter-binding} Клиент поддерживает привязку параметров для методов `Exec`, `Query` и `QueryRow`. Как показано в примере ниже, это работает с использованием именованных, нумерованных и позиционных параметров. Ниже приведены примеры каждого из этих вариантов. @@ -1576,7 +1545,6 @@ fmt.Printf("Количество при именованном связыван [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/bind.go) - #### Особые случаи {#special-cases} По умолчанию срезы будут разворачиваться в список значений, разделённых запятыми, если они передаются как параметр запроса. Если нужно, чтобы набор значений был подставлен в квадратных скобках `[ ]`, следует использовать `ArraySet`. @@ -1616,7 +1584,6 @@ fmt.Printf("Количество с NamedDate: %d\n", count) [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/bind_special.go) - ### Использование контекста {#using-context} Контексты в Go предоставляют механизм передачи дедлайнов, сигналов отмены и других значений, связанных с запросом, через границы API. Все методы соединения принимают `context` в качестве первого аргумента. В то время как в предыдущих примерах использовался `context.Background()`, пользователи могут использовать эту возможность для передачи настроек, дедлайнов и для отмены запросов. @@ -1717,7 +1684,6 @@ for i := 1; i <= 6; i++ { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/context.go) - ### Информация о ходе выполнения, профиле и логах {#progressprofilelog-information} Информацию о ходе выполнения (Progress), профиле (Profile) и логах (Log) можно запрашивать при выполнении запросов. 
Информация о ходе выполнения содержит статистику по количеству строк и байт, которые были прочитаны и обработаны в ClickHouse. Напротив, информация профиля предоставляет сводку данных, возвращённых клиенту, включая суммарный объём байт (в несжатом виде), строк и блоков. Наконец, информация из логов предоставляет статистику по потокам, например, использование памяти и скорость обработки данных. @@ -1749,7 +1715,6 @@ rows.Close() [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/progress.go) - ### Динамическое сканирование {#dynamic-scanning} Пользователям может потребоваться читать таблицы, для которых им заранее неизвестна схема или тип полей, возвращаемых запросом. Это типично в случаях, когда выполняется разовая (ad‑hoc) аналитика данных или разрабатываются универсальные инструменты. Для этого информация о типах столбцов доступна в ответах на запросы. Её можно использовать совместно с механизмом рефлексии (reflection) в Go для создания во время выполнения экземпляров переменных корректных типов, которые затем можно передавать в Scan. @@ -1788,7 +1753,6 @@ for rows.Next() { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/dynamic_scan_types.go) - ### Внешние таблицы {#external-tables} [Внешние таблицы](/engines/table-engines/special/external-data/) позволяют клиенту отправлять данные в ClickHouse в рамках запроса SELECT. Эти данные помещаются во временную таблицу и могут использоваться в самом запросе для вычислений. @@ -1855,7 +1819,6 @@ fmt.Printf("external_table_1 UNION external_table_2: %d\n", count) [Полный пример кода](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/external_data.go) - ### OpenTelemetry {#open-telemetry} ClickHouse позволяет передавать [контекст трассировки](/operations/opentelemetry/) в составе нативного протокола. Клиент позволяет создать Span с помощью функции `clickhouse.withSpan` и передать его через Context для этого. 
@@ -1878,7 +1841,6 @@ fmt.Printf("count: %d\n", count) Подробное описание использования трассировки см. в разделе [поддержка OpenTelemetry](/operations/opentelemetry/). - ## Database/SQL API {#databasesql-api} Интерфейс `database/sql` или «стандартный» API позволяет использовать клиент в сценариях, когда прикладной код должен быть агностичным к используемым базам данных, опираясь на стандартный интерфейс. Это имеет свою цену — дополнительные уровни абстракции и перенаправления вызовов, а также примитивы, которые не обязательно хорошо соответствуют ClickHouse. Однако эти издержки, как правило, приемлемы в сценариях, когда инструментам необходимо подключаться к нескольким базам данных. @@ -1927,7 +1889,6 @@ func ConnectDSN() error { **Во всех последующих примерах, если явно не указано иное, предполагается, что соединение с ClickHouse в переменной `conn` уже установлено и доступно.** - #### Настройки подключения {#connection-settings-1} В строку DSN можно передать следующие параметры: @@ -1966,7 +1927,6 @@ func ConnectSettings() error { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect_settings.go) - #### Пул подключений {#connection-pooling-1} Пользователи могут влиять на использование предоставленного списка адресов узлов, как описано в разделе [Подключение к нескольким узлам](#connecting-to-multiple-nodes). Однако управление подключениями и пулом подключений по задумке делегируются `sql.DB`. @@ -2008,7 +1968,6 @@ func ConnectDSNHTTP() error { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect_http.go) - #### Подключение к нескольким узлам {#connecting-to-multiple-nodes-1} Если вы используете `OpenDB`, можно подключаться к нескольким хостам, используя тот же подход к настройке опций, что и для ClickHouse API, при необходимости указывая `ConnOpenStrategy`. 
@@ -2056,7 +2015,6 @@ func MultiStdHostDSN() error { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/multi_host.go) - ### Использование TLS {#using-tls-1} Если используется строка подключения DSN, SSL можно включить с помощью параметра `secure=true`. Метод `OpenDB` использует тот же подход, что и [нативный API TLS](#using-tls), полагаясь на указание структуры TLS, отличной от nil. Хотя строка подключения DSN поддерживает параметр `skip_verify` для пропуска проверки SSL, для более сложных конфигураций TLS необходим метод `OpenDB`, так как он позволяет передавать собственную конфигурацию. @@ -2110,7 +2068,6 @@ func ConnectDSNSSL() error { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/ssl.go) - ### Аутентификация {#authentication-1} При использовании `OpenDB` информацию для аутентификации можно передать через стандартные опции. Для подключений на основе DSN имя пользователя и пароль могут быть переданы в строке подключения — либо как параметры, либо как учетные данные, закодированные в адресе. @@ -2151,7 +2108,6 @@ func ConnectDSNAuth() error { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/auth.go) - ### Выполнение {#execution-1} После установления соединения пользователи могут выполнять SQL-операторы с помощью метода `Exec`. @@ -2174,7 +2130,6 @@ _, err = conn.Exec("INSERT INTO example VALUES (1, 'test-1')") Этот метод не поддерживает передачу контекста — по умолчанию он выполняется с фоновым контекстом. При необходимости используйте `ExecContext` — см. раздел [Использование контекста](#using-context). - ### Пакетная вставка {#batch-insert-1} Семантику пакетной вставки можно реализовать, создав `sql.Tx` с помощью метода `Begin`. После этого можно подготовить пакет, вызвав метод `Prepare` с оператором `INSERT`. Он вернёт объект `sql.Stmt`, в который можно добавлять строки методом `Exec`. 
Пакет будет накапливаться в памяти до тех пор, пока для исходного `sql.Tx` не будет выполнен `Commit`. @@ -2209,7 +2164,6 @@ return scope.Commit() [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/batch.go) - ### Запрос строк/строки {#querying-rows-1} Запрос одной строки можно выполнить с помощью метода `QueryRow`. Он возвращает `*sql.Row`, для которого можно вызвать `Scan` с указателями на переменные, в которые должны быть считаны значения столбцов. Вариант `QueryRowContext` позволяет передать контекст, отличный от фонового контекста — см. [Использование контекста](#using-context). @@ -2256,7 +2210,6 @@ for rows.Next() { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/query_rows.go) - ### Асинхронная вставка {#async-insert-1} Асинхронные вставки можно выполнять через метод `ExecContext`. Ему следует передать контекст с включённым асинхронным режимом, как показано ниже. Это позволяет пользователю указать, должен ли клиент ждать завершения вставки на сервере или вернуть ответ сразу после получения данных. Тем самым фактически управляется параметр [wait_for_async_insert](/operations/settings/settings#wait_for_async_insert). @@ -2288,7 +2241,6 @@ ctx := clickhouse.Context(context.Background(), clickhouse.WithStdAsync(false)) [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/async.go) - ### Колонночная вставка {#columnar-insert-1} Не поддерживается при использовании стандартного интерфейса. @@ -2352,7 +2304,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v", col1, col2, col3, col4 Поведение операции вставки такое же, как у API ClickHouse. - ### Сжатие {#compression-1} Стандартный API поддерживает те же алгоритмы сжатия, что и нативный [ClickHouse API](#compression), т.е. сжатие `lz4` и `zstd` на уровне блоков. Дополнительно для HTTP‑соединений поддерживаются gzip, deflate и br. 
Если любой из них включён, сжатие выполняется для блоков при вставке и в ответах на запросы. Остальные запросы, например ping или запросы на выполнение, останутся несжатыми. Это соответствует опциям `lz4` и `zstd`. @@ -2390,7 +2341,6 @@ conn, err := sql.Open("clickhouse", fmt.Sprintf("http://%s:%d?username=%s&passwo * `br` — от `0` (лучшая скорость) до `11` (лучшее сжатие) * `zstd`, `lz4` — игнорируется - ### Привязка параметров {#parameter-binding-1} Стандартный API поддерживает те же возможности привязки параметров, что и [ClickHouse API](#parameter-binding), позволяя передавать параметры в методы `Exec`, `Query` и `QueryRow` (и их эквивалентные варианты с [Context](#using-context)). Поддерживаются позиционные, именованные и нумерованные параметры. @@ -2421,7 +2371,6 @@ fmt.Printf("Количество при именованном связыван Имейте в виду, что [особые случаи](#special-cases) по-прежнему актуальны. - ### Использование контекста {#using-context-1} Стандартный API поддерживает такую же возможность передавать дедлайны, сигналы отмены и другие значения, относящиеся к запросу, через контекст, как и [ClickHouse API](#using-context). В отличие от ClickHouse API, это реализовано за счет использования вариантов методов с `Context`, то есть методы, такие как `Exec`, которые по умолчанию используют фоновый контекст, имеют вариант `ExecContext`, которому контекст может быть передан в качестве первого параметра. Это позволяет передавать контекст на любом этапе выполнения приложения. Например, пользователи могут передавать контекст при установлении соединения через `ConnContext` или при запросе строки результата запроса через `QueryRowContext`. Примеры всех доступных методов приведены ниже. 
@@ -2509,7 +2458,6 @@ for rows.Next() { [Полный пример кода](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/context.go) - ### Сессии {#sessions} Если в нативных соединениях сессия присутствует изначально, то при соединениях по HTTP пользователю нужно явно задать идентификатор сессии, передавая его в настройках контекста. Это позволяет использовать такие функции, как временные таблицы, которые привязаны к сессии. @@ -2571,7 +2519,6 @@ for rows.Next() { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/session.go) - ### Динамическое сканирование {#dynamic-scanning-1} Аналогично [ClickHouse API](#dynamic-scanning), доступна информация о типах столбцов, что позволяет пользователям создавать в рантайме экземпляры переменных с корректными типами, которые можно передавать в `Scan`. Это позволяет читать столбцы, тип которых заранее неизвестен. @@ -2611,7 +2558,6 @@ for rows.Next() { [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/dynamic_scan_types.go) - ### Внешние таблицы {#external-tables-1} [Внешние таблицы](/engines/table-engines/special/external-data/) позволяют клиенту отправлять данные в ClickHouse с помощью запроса `SELECT`. Эти данные помещаются во временную таблицу и могут использоваться в самом запросе для обработки. @@ -2678,7 +2624,6 @@ fmt.Printf("external_table_1 UNION external_table_2: %d\n", count) [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/external_data.go) - ### OpenTelemetry {#open-telemetry-1} ClickHouse позволяет передавать [контекст трассировки](/operations/opentelemetry/) как часть нативного протокола. Клиент позволяет создать Span с помощью функции `clickhouse.withSpan` и передать его через Context для этого. При использовании HTTP в качестве транспорта это не поддерживается. 
@@ -2699,7 +2644,6 @@ fmt.Printf("count: %d\n", count) [Полный пример](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/open_telemetry.go) - ## Рекомендации по производительности {#performance-tips} * По возможности используйте ClickHouse API, особенно для примитивных типов. Это позволяет избежать значительных накладных расходов на рефлексию и дополнительные уровни косвенных обращений. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx index 42187db4c2d..a3c4980715d 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx @@ -127,50 +127,50 @@ Client client = new Client.Builder() | `setKeepAliveTimeout(long timeout, ChronoUnit unit)` | * `timeout` - тайм-аут в определённой единице времени.
- `unit` - единица времени для значения `timeout` | Устанавливает тайм-аут поддержания активности HTTP-соединения (keep-alive). Этот параметр можно использовать для отключения Keep-Alive, установив тайм-аут равным нулю — `0`

По умолчанию: -
Enum: `ClientConfigProperties.HTTP_KEEP_ALIVE_TIMEOUT`
Ключ: `http_keep_alive_timeout` | | `setConnectionReuseStrategy(ConnectionReuseStrategy strategy)` | - `strategy` - константа перечисления (enum) `com.clickhouse.client.api.ConnectionReuseStrategy` | Определяет стратегию, которую должен использовать пул соединений: `LIFO` — если соединение должно переиспользоваться сразу после возврата в пул, или `FIFO` — чтобы использовать соединения в порядке их появления (возвращённые соединения не используются немедленно).

По умолчанию: `FIFO`
Enum: `ClientConfigProperties.CONNECTION_REUSE_STRATEGY`
Ключ: `connection_reuse_strategy` | | `setSocketTimeout(long timeout, ChronoUnit unit`)` | *`timeout`- таймаут в заданной единице измерения времени.
-`unit`- единица измерения времени для`timeout` | Задает тайм-аут сокета для операций чтения и записи

По умолчанию:`0`
Enum:`ClientConfigProperties.SOCKET_OPERATION_TIMEOUT`
Key:`socket_timeout` | - |`setSocketRcvbuf(long size)` | -`size`— размер в байтах | Задает размер приемного буфера TCP-сокета. Этот буфер размещается вне памяти JVM.

Default:`8196`
Enum:`ClientConfigProperties.SOCKET_RCVBUF_OPT`
Key:`socket_rcvbuf` | - |`setSocketSndbuf(long size)` | *`size`- размер в байтах | Устанавливает буфер приёма TCP-сокета. Этот буфер располагается вне памяти JVM.

По умолчанию:`8196`
Enum:`ClientConfigProperties.SOCKET_SNDBUF_OPT`
Ключ:`socket_sndbuf` | - |`setSocketKeepAlive(boolean value)` | -`value`- флаг, указывающий, нужно ли включить параметр. | Устанавливает опцию`SO_KEEPALIVE`для каждого TCP-сокета, создаваемого клиентом. TCP Keep Alive включает механизм, который проверяет активность соединения и помогает обнаруживать соединения, разорванные внезапно.

По умолчанию: -
Enum:`ClientConfigProperties.SOCKET_KEEPALIVE_OPT`
Key:`socket_keepalive` | - |`setSocketTcpNodelay(boolean value)` | *`value`— флаг, указывающий, нужно ли включать эту опцию. | Устанавливает опцию`SO_NODELAY`для каждого TCP-сокета, создаваемого клиентом. Эта TCP-опция заставляет сокет передавать данные как можно скорее.

По умолчанию: -
Enum:`ClientConfigProperties.SOCKET_TCP_NO_DELAY_OPT`
Key:`socket_tcp_nodelay` | - |`setSocketLinger(int secondsToWait)` | -`secondsToWait`- количество секунд. | Задаёт время ожидания при закрытии (linger) для каждого TCP-сокета, создаваемого клиентом.

По умолчанию: -
Enum:`ClientConfigProperties.SOCKET_LINGER_OPT`
Key:`socket_linger` | - |`compressServerResponse(boolean enabled)` | *`enabled`- флаг, определяющий, нужно ли включать эту опцию | Определяет, должен ли сервер сжимать свои ответы.

По умолчанию:`true`
Enum:`ClientConfigProperties.COMPRESS_SERVER_RESPONSE`
Ключ:`compress` | - |`compressClientRequest(boolean enabled)` | -`enabled`— флаг, указывающий, включена ли опция | Определяет, должен ли клиент сжимать свои запросы.

По умолчанию:`false`
Enum:`ClientConfigProperties.COMPRESS_CLIENT_REQUEST`
Ключ:`decompress` | - |`useHttpCompression(boolean enabled)` | *`enabled`- флаг, определяющий, должен ли быть включён параметр | Определяет, следует ли использовать HTTP-сжатие для обмена данными между клиентом и сервером, если включены соответствующие опции | - |`appCompressedData(boolean enabled)` | -`enabled`- флаг, указывающий, должна ли опция быть включена | Сообщает клиенту, что сжатие данных будет выполняться приложением.

По умолчанию:`false`
Enum:`ClientConfigProperties.APP_COMPRESSED_DATA`
Ключ:`app_compressed_data` | - |`setLZ4UncompressedBufferSize(int size)` | *`size`— размер в байтах | Устанавливает размер буфера, который будет принимать несжатую часть потока данных. Если размер буфера занижен — будет создан новый буфер, а в логах появится соответствующее предупреждение.

По умолчанию:`65536`
Enum:`ClientConfigProperties.COMPRESSION_LZ4_UNCOMPRESSED_BUF_SIZE`
Key:`compression.lz4.uncompressed_buffer_size` | - |`disableNativeCompression` | -`disable`- флаг, определяющий, нужно ли отключить опцию | Отключить нативное сжатие. Если установлено в true, нативное сжатие будет отключено.

По умолчанию:`false`
Enum:`ClientConfigProperties.DISABLE_NATIVE_COMPRESSION`
Ключ:`disable_native_compression` | - |`setDefaultDatabase(String database)` | *`database`- название базы данных | Задает базу данных по умолчанию.

По умолчанию:`default`
Enum:`ClientConfigProperties.DATABASE`
Ключ:`database` | - |`addProxy(ProxyType type, String host, int port)` | -`type`- тип прокси-сервера.
-`host`- имя хоста или IP-адрес прокси-сервера.
-`port`- порт прокси-сервера | Задает прокси-сервер, используемый для связи с сервером. Настройка прокси обязательна, если для работы через него требуется аутентификация.

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_TYPE`
Key:`proxy_type`

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_HOST`
Key:`proxy_host`

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_PORT`
Key:`proxy_port` | - |`setProxyCredentials(String user, String pass)` | *`user`- имя пользователя для прокси.
-`pass`- пароль | Задает учетные данные пользователя для аутентификации на прокси-сервере.

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_USER`
Ключ:`proxy_user`

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_PASSWORD`
Ключ:`proxy_password` | - |`setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | -`timeout`- тайм-аут, заданный в некоторой единице времени.
-`timeUnit`- единица времени для параметра`timeout` | Устанавливает максимальное время выполнения запросов

Значение по умолчанию:`0`
Enum:`ClientConfigProperties.MAX_EXECUTION_TIME`
Ключ:`max_execution_time` | - |`setHttpCookiesEnabled(boolean enabled)` |`enabled`- флаг, определяющий, должна ли быть включена опция | Определяет, нужно ли запоминать HTTP‑cookie и отправлять их обратно на сервер. | - |`setSSLTrustStore(String path)` |`path`— путь к файлу в локальной системе (на стороне клиента) | Определяет, должен ли клиент использовать SSL truststore для проверки хоста сервера.

По умолчанию: -
Enum:`ClientConfigProperties.SSL_TRUST_STORE`
Key:`trust_store` | - |`setSSLTrustStorePassword(String password)` |`password`— секрет | Устанавливает пароль, который будет использоваться для доступа к SSL truststore, указанному с помощью`setSSLTrustStore(String path)`

По умолчанию: -
Перечисление:`ClientConfigProperties.SSL_KEY_STORE_PASSWORD`
Ключ:`key_store_password` | - |`setSSLTrustStoreType(String type)` |`type`— имя типа хранилища доверенных сертификатов | Задаёт тип хранилища доверенных сертификатов, указанного в`setSSLTrustStore(String path)`.

По умолчанию: -
Enum: `ClientConfigProperties.SSL_KEYSTORE_TYPE`
Ключ:`key_store_type` | - |`setRootCertificate(String path)` |`path`— путь к файлу в локальной системе (на стороне клиента) | Определяет, должен ли клиент использовать указанный корневой (CA) сертификат для проверки подлинности узла сервера.

По умолчанию: -
Enum:`ClientConfigProperties.CA_CERTIFICATE`
Key:`sslrootcert` | - |`setClientCertificate(String path)` |`path`— путь к файлу в локальной системе (на стороне клиента) | Устанавливает путь к клиентскому сертификату, который будет использоваться при установлении SSL-соединения и для SSL-аутентификации.

По умолчанию: -
Enum:`ClientConfigProperties.SSL_CERTIFICATE`
Key:`sslcert` | - |`setClientKey(String path)` |`path`— путь к файлу в локальной системе (на стороне клиента) | Задаёт закрытый ключ клиента, используемый для шифрования SSL-соединения с сервером.

По умолчанию: -
Enum:`ClientConfigProperties.SSL_KEY`
Ключ:`ssl_key` | - |`useServerTimeZone(boolean useServerTimeZone)` |`useServerTimeZone`- флаг, определяющий, следует ли включать эту опцию | Определяет, должен ли клиент использовать временную зону сервера при декодировании значений столбцов DateTime и Date. Если параметр включён, временная зона сервера должна быть установлена с помощью`setServerTimeZone(String timeZone)`

По умолчанию:`true`
Enum:`ClientConfigProperties.USE_SERVER_TIMEZONE`
Ключ:`use_server_time_zone` | - |`useTimeZone(String timeZone)` |`timeZone`- строковое значение допустимого идентификатора часового пояса Java (см.`java.time.ZoneId`) | Определяет, следует ли использовать указанную временную зону при декодировании значений столбцов DateTime и Date. Переопределяет временную зону сервера.

По умолчанию: -
Enum: `ClientConfigProperties.USE_TIMEZONE`
Key:`use_time_zone` | - |`setServerTimeZone(String timeZone)` |`timeZone`— строковое значение допустимого идентификатора часового пояса в Java (см.`java.time.ZoneId`) | Устанавливает часовой пояс сервера. По умолчанию используется часовой пояс UTC.

По умолчанию: `UTC`
Enum:`ClientConfigProperties.SERVER_TIMEZONE`
Ключ:`server_time_zone` | - |`useAsyncRequests(boolean async)` |`async`- флаг, указывающий, нужно ли включить эту опцию. | Определяет, должен ли клиент выполнять запрос в отдельном потоке. По умолчанию параметр отключён, так как приложение обычно лучше понимает, как организовывать многопоточные задачи, а запуск задач в отдельном потоке не улучшает производительность.

По умолчанию:`false`
Enum:`ClientConfigProperties.ASYNC_OPERATIONS`
Key:`async` | - |`setSharedOperationExecutor(ExecutorService executorService)` |`executorService`— экземпляр службы исполнителей. | Задает сервис выполнения задач операции.

По умолчанию:`none`
Enum:`none`
Ключ:`none` | - |`setClientNetworkBufferSize(int size)` | *`size`— размер в байтах | Задает размер буфера в пространстве памяти приложения, который используется для копирования данных между сокетом и приложением в обоих направлениях. Увеличение этого размера уменьшает количество системных вызовов к TCP-стеку, но увеличивает объем памяти, расходуемой на каждое соединение. Этот буфер также подлежит сборке мусора, поскольку соединения кратковременные. Обратите также внимание, что выделение большого непрерывного блока памяти может быть проблемой.

По умолчанию:`300000`
Enum:`ClientConfigProperties.CLIENT_NETWORK_BUFFER_SIZE`
Key:`client_network_buffer_size`| - |`retryOnFailures(ClientFaultCause ...causes)` | -`causes`- константа перечисления`enum` `com.clickhouse.client.api.ClientFaultCause` | Задаёт типы ошибок, при которых выполняется повторная попытка.

По умолчанию:`NoHttpResponse,ConnectTimeout,ConnectionRequestTimeout`
Enum:`ClientConfigProperties.CLIENT_RETRY_ON_FAILURE`
Ключ:`client_retry_on_failures` | - |`setMaxRetries(int maxRetries)` | *`maxRetries`- число повторных попыток | Устанавливает максимальное количество повторных попыток для сбоев, определённых в`retryOnFailures(ClientFaultCause ...causes)`

По умолчанию:`3`
Enum:`ClientConfigProperties.RETRY_ON_FAILURE`
Ключ:`retry` | - |`allowBinaryReaderToReuseBuffers(boolean reuse)` | -`reuse`— флаг, указывающий, должна ли быть включена опция | Большинство наборов данных содержат числовые данные, закодированные в виде небольших последовательностей байт. По умолчанию ридер выделяет необходимый буфер, считывает в него данные, а затем преобразует их в целевой класс Number. Это может создавать значительную нагрузку на сборщик мусора (GC), поскольку создаётся и освобождается большое количество мелких объектов. Если эта опция включена, ридер будет использовать заранее выделенные буферы для преобразования чисел. Это безопасно, поскольку у каждого ридера есть собственный набор буферов, и каждый экземпляр ридера используется только одним потоком. | - |`httpHeader(String key, String value)` | *`key`- ключ заголовка HTTP.
-`value`- строковое значение этого заголовка. | Устанавливает значение для одного HTTP-заголовка. Предыдущее значение перезаписывается.

По умолчанию:`none`
Допустимые значения (enum):`none`
Ключ:`none` | - |`httpHeader(String key, Collection values)` | -`key`- ключ заголовка HTTP.
-`values`- список строковых значений. | Задает значение одного HTTP-заголовка. Предыдущее значение переопределяется.

По умолчанию:`none`
Перечисление:`none`
Ключ:`none` | - |`httpHeaders(Map headers)` | *`headers`— карта (map) HTTP-заголовков и их значений. | Задает несколько значений HTTP-заголовков одновременно.

По умолчанию:`none`
Перечисление:`none`
Ключ:`none` | - |`serverSetting(String name, String value)` | -`name`- имя настройки на уровне запроса.
-`value`- строковое значение настройки. | Задает, какие настройки передавать серверу вместе с каждым запросом. Настройки отдельных операций могут их переопределять. [Список настроек](/operations/settings/query-level)

Default:`none`
Enum:`none`
Key:`none` | - |`serverSetting(String name, Collection values)` | *`name`- имя настройки уровня запроса.
-`values`- строковые значения этой настройки. | Определяет, какие настройки передавать серверу вместе с каждым запросом. Отдельные настройки операций могут их переопределять. См. [список настроек](/operations/settings/query-level). Этот метод полезен, когда нужно задать настройки с несколькими значениями, например [roles](/interfaces/http#setting-role-with-query-parameters).

По умолчанию:`none`
Перечисление:`none`
Ключ:`none` | - |`columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)`| -`strategy`— реализация стратегии сопоставления столбцов с полями | Задает пользовательскую стратегию, используемую для сопоставления полей класса DTO и столбцов БД при регистрации DTO.

По умолчанию:`none`
Enum:`none`
Ключ:`none` | - |`useHTTPBasicAuth(boolean useBasicAuth)` | *`useBasicAuth`- флаг, указывающий, нужно ли включить эту опцию | Задаёт, следует ли использовать базовую HTTP-аутентификацию для проверки имени пользователя и пароля. По умолчанию включено. Использование этого типа аутентификации устраняет проблемы с паролями, содержащими специальные символы, которые не могут быть корректно переданы в HTTP-заголовках.

По умолчанию:`true`
Enum:`ClientConfigProperties.HTTP_USE_BASIC_AUTH`
Key:`http_use_basic_auth` | - |`setClientName(String clientName)` | -`clientName`- строка с именем приложения | Задает дополнительную информацию о клиентском приложении. Эта строка будет передана серверу как имя клиента. В случае использования протокола HTTP она будет передана как заголовок`User-Agent`.

Default: -
Enum: `ClientConfigProperties.CLIENT_NAME`
Key:`client_name` | - |`useBearerTokenAuth(String bearerToken)` | *`bearerToken`— токен типа Bearer в закодированном виде | Указывает, следует ли использовать Bearer-аутентификацию и какой токен применять. Токен будет отправлен без изменений, поэтому его следует закодировать перед передачей этому методу.

По умолчанию: -
Enum:`ClientConfigProperties.BEARERTOKEN_AUTH`
Ключ:`bearer_token` | - |`registerClientMetrics(Object registry, String name)` | -`registry`- экземпляр реестра Micrometer
-`name`- имя группы метрик | Регистрирует сенсоры в экземпляре реестра Micrometer ([https://micrometer.io/](https://micrometer.io/)). | - |`setServerVersion(String version)` | *`version`- строковое значение версии сервера | Задает версию сервера, чтобы исключить ее определение автоматически.

По умолчанию: -
Перечисление:`ClientConfigProperties.SERVER_VERSION`
Ключ:`server_version` | - |`typeHintMapping(Map typeHintMapping)` | -`typeHintMapping`- карта подсказок типов | Задает сопоставление подсказок типов для типов ClickHouse. Например, чтобы многомерные массивы представлялись в виде контейнеров Java вместо отдельных объектов Array.

По умолчанию: -
Enum:`ClientConfigProperties.TYPE_HINT_MAPPING`
Key:`type_hint_mapping` | - |`sslSocketSNI(String sni)` | *`sni`- значение имени сервера в виде строки | Устанавливает имя сервера, используемое для SNI (Server Name Indication) в SSL/TLS-соединении.

По умолчанию: -
Enum:`ClientConfigProperties.SSL_SOCKET_SNI`
Key:`ssl_socket_sni` | + |`setSocketRcvbuf(long size)` | -`size`— размер в байтах | Задает размер приемного буфера TCP-сокета. Этот буфер размещается вне памяти JVM.

Default:`8196`
Enum:`ClientConfigProperties.SOCKET_RCVBUF_OPT`
Key:`socket_rcvbuf` | + |`setSocketSndbuf(long size)` | *`size`- размер в байтах | Устанавливает размер буфера отправки TCP-сокета. Этот буфер располагается вне памяти JVM.

По умолчанию:`8196`
Enum:`ClientConfigProperties.SOCKET_SNDBUF_OPT`
Ключ:`socket_sndbuf` | + |`setSocketKeepAlive(boolean value)` | -`value`- флаг, указывающий, нужно ли включить параметр. | Устанавливает опцию`SO_KEEPALIVE`для каждого TCP-сокета, создаваемого клиентом. TCP Keep Alive включает механизм, который проверяет активность соединения и помогает обнаруживать соединения, разорванные внезапно.

По умолчанию: -
Enum:`ClientConfigProperties.SOCKET_KEEPALIVE_OPT`
Key:`socket_keepalive` | + |`setSocketTcpNodelay(boolean value)` | *`value`— флаг, указывающий, нужно ли включать эту опцию. | Устанавливает опцию`TCP_NODELAY`для каждого TCP-сокета, создаваемого клиентом. Эта TCP-опция заставляет сокет передавать данные как можно скорее.

По умолчанию: -
Enum:`ClientConfigProperties.SOCKET_TCP_NO_DELAY_OPT`
Key:`socket_tcp_nodelay` | + |`setSocketLinger(int secondsToWait)` | -`secondsToWait`- количество секунд. | Задаёт время ожидания при закрытии (linger) для каждого TCP-сокета, создаваемого клиентом.

По умолчанию: -
Enum:`ClientConfigProperties.SOCKET_LINGER_OPT`
Key:`socket_linger` | + |`compressServerResponse(boolean enabled)` | *`enabled`- флаг, определяющий, нужно ли включать эту опцию | Определяет, должен ли сервер сжимать свои ответы.

По умолчанию:`true`
Enum:`ClientConfigProperties.COMPRESS_SERVER_RESPONSE`
Ключ:`compress` | + |`compressClientRequest(boolean enabled)` | -`enabled`— флаг, указывающий, включена ли опция | Определяет, должен ли клиент сжимать свои запросы.

По умолчанию:`false`
Enum:`ClientConfigProperties.COMPRESS_CLIENT_REQUEST`
Ключ:`decompress` | + |`useHttpCompression(boolean enabled)` | *`enabled`- флаг, определяющий, должен ли быть включён параметр | Определяет, следует ли использовать HTTP-сжатие для обмена данными между клиентом и сервером, если включены соответствующие опции | + |`appCompressedData(boolean enabled)` | -`enabled`- флаг, указывающий, должна ли опция быть включена | Сообщает клиенту, что сжатие данных будет выполняться приложением.

По умолчанию:`false`
Enum:`ClientConfigProperties.APP_COMPRESSED_DATA`
Ключ:`app_compressed_data` | + |`setLZ4UncompressedBufferSize(int size)` | *`size`— размер в байтах | Устанавливает размер буфера, который будет принимать несжатую часть потока данных. Если размер буфера занижен — будет создан новый буфер, а в логах появится соответствующее предупреждение.

По умолчанию:`65536`
Enum:`ClientConfigProperties.COMPRESSION_LZ4_UNCOMPRESSED_BUF_SIZE`
Key:`compression.lz4.uncompressed_buffer_size` | + |`disableNativeCompression` | -`disable`- флаг, определяющий, нужно ли отключить опцию | Отключить нативное сжатие. Если установлено в true, нативное сжатие будет отключено.

По умолчанию:`false`
Enum:`ClientConfigProperties.DISABLE_NATIVE_COMPRESSION`
Ключ:`disable_native_compression` | + |`setDefaultDatabase(String database)` | *`database`- название базы данных | Задает базу данных по умолчанию.

По умолчанию:`default`
Enum:`ClientConfigProperties.DATABASE`
Ключ:`database` | + |`addProxy(ProxyType type, String host, int port)` | -`type`- тип прокси-сервера.
-`host`- имя хоста или IP-адрес прокси-сервера.
-`port`- порт прокси-сервера | Задает прокси-сервер, используемый для связи с сервером. Настройка прокси обязательна, если для работы через него требуется аутентификация.

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_TYPE`
Key:`proxy_type`

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_HOST`
Key:`proxy_host`

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_PORT`
Key:`proxy_port` | + |`setProxyCredentials(String user, String pass)` | *`user`- имя пользователя для прокси.
-`pass`- пароль | Задает учетные данные пользователя для аутентификации на прокси-сервере.

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_USER`
Ключ:`proxy_user`

По умолчанию: -
Enum:`ClientConfigProperties.PROXY_PASSWORD`
Ключ:`proxy_password` | + |`setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | -`timeout`- тайм-аут, заданный в некоторой единице времени.
-`timeUnit`- единица времени для параметра`timeout` | Устанавливает максимальное время выполнения запросов

Значение по умолчанию:`0`
Enum:`ClientConfigProperties.MAX_EXECUTION_TIME`
Ключ:`max_execution_time` | + |`setHttpCookiesEnabled(boolean enabled)` |`enabled`- флаг, определяющий, должна ли быть включена опция | Определяет, нужно ли запоминать HTTP‑cookie и отправлять их обратно на сервер. | + |`setSSLTrustStore(String path)` |`path`— путь к файлу в локальной системе (на стороне клиента) | Определяет, должен ли клиент использовать SSL truststore для проверки хоста сервера.

По умолчанию: -
Enum:`ClientConfigProperties.SSL_TRUST_STORE`
Key:`trust_store` | + |`setSSLTrustStorePassword(String password)` |`password`— секрет | Устанавливает пароль, который будет использоваться для доступа к SSL truststore, указанному с помощью`setSSLTrustStore(String path)`

По умолчанию: -
Перечисление:`ClientConfigProperties.SSL_KEY_STORE_PASSWORD`
Ключ:`key_store_password` | + |`setSSLTrustStoreType(String type)` |`type`— имя типа хранилища доверенных сертификатов | Задаёт тип хранилища доверенных сертификатов, указанного в`setSSLTrustStore(String path)`.

По умолчанию: -
Enum: `ClientConfigProperties.SSL_KEYSTORE_TYPE`
Ключ:`key_store_type` | + |`setRootCertificate(String path)` |`path`— путь к файлу в локальной системе (на стороне клиента) | Определяет, должен ли клиент использовать указанный корневой (CA) сертификат для проверки подлинности узла сервера.

По умолчанию: -
Enum:`ClientConfigProperties.CA_CERTIFICATE`
Key:`sslrootcert` | + |`setClientCertificate(String path)` |`path`— путь к файлу в локальной системе (на стороне клиента) | Устанавливает путь к клиентскому сертификату, который будет использоваться при установлении SSL-соединения и для SSL-аутентификации.

По умолчанию: -
Enum:`ClientConfigProperties.SSL_CERTIFICATE`
Key:`sslcert` | + |`setClientKey(String path)` |`path`— путь к файлу в локальной системе (на стороне клиента) | Задаёт закрытый ключ клиента, используемый для шифрования SSL-соединения с сервером.

По умолчанию: -
Enum:`ClientConfigProperties.SSL_KEY`
Ключ:`ssl_key` | + |`useServerTimeZone(boolean useServerTimeZone)` |`useServerTimeZone`- флаг, определяющий, следует ли включать эту опцию | Определяет, должен ли клиент использовать временную зону сервера при декодировании значений столбцов DateTime и Date. Если параметр включён, временная зона сервера должна быть установлена с помощью`setServerTimeZone(String timeZone)`

По умолчанию:`true`
Enum:`ClientConfigProperties.USE_SERVER_TIMEZONE`
Ключ:`use_server_time_zone` | + |`useTimeZone(String timeZone)` |`timeZone`- строковое значение допустимого идентификатора часового пояса Java (см.`java.time.ZoneId`) | Определяет, следует ли использовать указанную временную зону при декодировании значений столбцов DateTime и Date. Переопределяет временную зону сервера.

По умолчанию: -
Enum: `ClientConfigProperties.USE_TIMEZONE`
Key:`use_time_zone` | + |`setServerTimeZone(String timeZone)` |`timeZone`— строковое значение допустимого идентификатора часового пояса в Java (см.`java.time.ZoneId`) | Устанавливает часовой пояс сервера. По умолчанию используется часовой пояс UTC.

По умолчанию: `UTC`
Enum:`ClientConfigProperties.SERVER_TIMEZONE`
Ключ:`server_time_zone` | + |`useAsyncRequests(boolean async)` |`async`- флаг, указывающий, нужно ли включить эту опцию. | Определяет, должен ли клиент выполнять запрос в отдельном потоке. По умолчанию параметр отключён, так как приложение обычно лучше понимает, как организовывать многопоточные задачи, а запуск задач в отдельном потоке не улучшает производительность.

По умолчанию:`false`
Enum:`ClientConfigProperties.ASYNC_OPERATIONS`
Key:`async` | + |`setSharedOperationExecutor(ExecutorService executorService)` |`executorService`— экземпляр службы исполнителей. | Задает сервис выполнения задач операции.

По умолчанию:`none`
Enum:`none`
Ключ:`none` | + |`setClientNetworkBufferSize(int size)` | *`size`— размер в байтах | Задает размер буфера в пространстве памяти приложения, который используется для копирования данных между сокетом и приложением в обоих направлениях. Увеличение этого размера уменьшает количество системных вызовов к TCP-стеку, но увеличивает объем памяти, расходуемой на каждое соединение. Этот буфер также подлежит сборке мусора, поскольку соединения кратковременные. Обратите также внимание, что выделение большого непрерывного блока памяти может быть проблемой.

По умолчанию:`300000`
Enum:`ClientConfigProperties.CLIENT_NETWORK_BUFFER_SIZE`
Key:`client_network_buffer_size`| + |`retryOnFailures(ClientFaultCause ...causes)` | -`causes`- константа перечисления`enum` `com.clickhouse.client.api.ClientFaultCause` | Задаёт типы ошибок, при которых выполняется повторная попытка.

По умолчанию:`NoHttpResponse,ConnectTimeout,ConnectionRequestTimeout`
Enum:`ClientConfigProperties.CLIENT_RETRY_ON_FAILURE`
Ключ:`client_retry_on_failures` | + |`setMaxRetries(int maxRetries)` | *`maxRetries`- число повторных попыток | Устанавливает максимальное количество повторных попыток для сбоев, определённых в`retryOnFailures(ClientFaultCause ...causes)`

По умолчанию:`3`
Enum:`ClientConfigProperties.RETRY_ON_FAILURE`
Ключ:`retry` | + |`allowBinaryReaderToReuseBuffers(boolean reuse)` | -`reuse`— флаг, указывающий, должна ли быть включена опция | Большинство наборов данных содержат числовые данные, закодированные в виде небольших последовательностей байт. По умолчанию ридер выделяет необходимый буфер, считывает в него данные, а затем преобразует их в целевой класс Number. Это может создавать значительную нагрузку на сборщик мусора (GC), поскольку создаётся и освобождается большое количество мелких объектов. Если эта опция включена, ридер будет использовать заранее выделенные буферы для преобразования чисел. Это безопасно, поскольку у каждого ридера есть собственный набор буферов, и каждый экземпляр ридера используется только одним потоком. | + |`httpHeader(String key, String value)` | *`key`- ключ заголовка HTTP.
-`value`- строковое значение этого заголовка. | Устанавливает значение для одного HTTP-заголовка. Предыдущее значение перезаписывается.

По умолчанию:`none`
Допустимые значения (enum):`none`
Ключ:`none` | + |`httpHeader(String key, Collection values)` | -`key`- ключ заголовка HTTP.
-`values`- список строковых значений. | Задает значение одного HTTP-заголовка. Предыдущее значение переопределяется.

По умолчанию:`none`
Перечисление:`none`
Ключ:`none` | + |`httpHeaders(Map headers)` | *`header`— карта (map) HTTP-заголовков и их значений. | Задает несколько значений HTTP-заголовков одновременно.

По умолчанию:`none`
Перечисление:`none`
Ключ:`none` | + |`serverSetting(String name, String value)` | -`name`- имя настройки на уровне запроса.
-`value`- строковое значение настройки. | Задает, какие настройки передавать серверу вместе с каждым запросом. Настройки отдельных операций могут их переопределять. [Список настроек](/operations/settings/query-level)

Default:`none`
Enum:`none`
Key:`none` | + |`serverSetting(String name, Collection values)` | *`name`- имя настройки уровня запроса.
-`values`- строковые значения этой настройки. | Определяет, какие настройки передавать серверу вместе с каждым запросом. Отдельные настройки операций могут их переопределять. См. [список настроек](/operations/settings/query-level). Этот метод полезен, когда нужно задать настройки с несколькими значениями, например [roles](/interfaces/http#setting-role-with-query-parameters).

По умолчанию:`none`
Перечисление:`none`
Ключ:`none` | + |`columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)`| -`strategy`— реализация стратегии сопоставления столбцов с полями | Задает пользовательскую стратегию, используемую для сопоставления полей класса DTO и столбцов БД при регистрации DTO.

По умолчанию:`none`
Enum:`none`
Ключ:`none` | + |`useHTTPBasicAuth(boolean useBasicAuth)` | *`useBasicAuth`- флаг, указывающий, нужно ли включить эту опцию | Задаёт, следует ли использовать базовую HTTP-аутентификацию для проверки имени пользователя и пароля. По умолчанию включено. Использование этого типа аутентификации устраняет проблемы с паролями, содержащими специальные символы, которые не могут быть корректно переданы в HTTP-заголовках.

По умолчанию:`true`
Enum:`ClientConfigProperties.HTTP_USE_BASIC_AUTH`
Key:`http_use_basic_auth` | + |`setClientName(String clientName)` | -`clientName`- строка с именем приложения | Задает дополнительную информацию о клиентском приложении. Эта строка будет передана серверу как имя клиента. В случае использования протокола HTTP она будет передана как заголовок`User-Agent`.

Default: -
Enum: `ClientConfigProperties.CLIENT_NAME`
Key:`client_name` | + |`useBearerTokenAuth(String bearerToken)` | *`bearerToken`— токен типа Bearer в закодированном виде | Указывает, следует ли использовать Bearer-аутентификацию и какой токен применять. Токен будет отправлен без изменений, поэтому его следует закодировать перед передачей этому методу.

По умолчанию: -
Enum:`ClientConfigProperties.BEARERTOKEN_AUTH`
Ключ:`bearer_token` | + |`registerClientMetrics(Object registry, String name)` | -`registry`- экземпляр реестра Micrometer
-`name`- имя группы метрик | Регистрирует сенсоры в экземпляре реестра Micrometer ([https://micrometer.io/](https://micrometer.io/)). | + |`setServerVersion(String version)` | *`version`- строковое значение версии сервера | Задает версию сервера, чтобы исключить ее определение автоматически.

По умолчанию: -
Перечисление:`ClientConfigProperties.SERVER_VERSION`
Ключ:`server_version` | + |`typeHintMapping(Map typeHintMapping)` | -`typeHintMapping`- карта подсказок типов | Задает сопоставление подсказок типов для типов ClickHouse. Например, чтобы многомерные массивы представлялись в виде контейнеров Java вместо отдельных объектов Array.

По умолчанию: -
Enum:`ClientConfigProperties.TYPE_HINT_MAPPING`
Key:`type_hint_mapping` | + |`sslSocketSNI(String sni)` | *`sni`- значение имени сервера в виде строки | Устанавливает имя сервера, используемое для SNI (Server Name Indication) в SSL/TLS-соединении.

По умолчанию: -
Enum:`ClientConfigProperties.SSL_SOCKET_SNI`
Key:`ssl_socket_sni` | ### Настройки сервера \{#server-settings\} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md index 40115b8c7bf..ad03e695d48 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md @@ -10,7 +10,6 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import CodeBlock from '@theme/CodeBlock'; - # Обзор Java-клиентов {#java-clients-overview} - [Клиент 0.8+](./client/client.mdx) @@ -152,7 +151,6 @@ JDBC-драйвер наследует те же возможности, что ``` - #### Настройка логирования {#configuring-logging} Настройка будет зависеть от того, какой фреймворк для логирования вы используете. Например, если вы используете `Logback`, вы можете настроить логирование в файле `logback.xml`: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md index 8af1e63b08c..e53f1980156 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md @@ -12,7 +12,6 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import CodeBlock from '@theme/CodeBlock'; - # Драйвер R2DBC {#r2dbc-driver} ## Драйвер R2DBC {#r2dbc-driver} @@ -42,7 +41,6 @@ import CodeBlock from '@theme/CodeBlock'; ``` - ### Подключение к ClickHouse {#connect-to-clickhouse} ```java showLineNumbers @@ -53,7 +51,6 @@ ConnectionFactory connectionFactory = ConnectionFactories .flatMapMany(connection -> connection ``` - ### Запрос {#query} ```java showLineNumbers @@ -71,7 +68,6 @@ connection .subscribe(); ``` - ### 
Вставка {#insert} ```java showLineNumbers diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md index b94d24600dd..e6dbe6d56f5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md @@ -15,7 +15,6 @@ integration: import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # ClickHouse JS {#clickhouse-js} Официальный JS‑клиент для подключения к ClickHouse. @@ -66,7 +65,6 @@ npm i @clickhouse/client npm i @clickhouse/client-web ``` - ## Совместимость с ClickHouse {#compatibility-with-clickhouse} | Версия клиента | ClickHouse | @@ -111,7 +109,6 @@ const client = createClient({ Экземпляр клиента можно [заранее настроить](./js.md#configuration) при создании. - #### Конфигурация {#configuration} При создании экземпляра клиента можно настроить следующие параметры подключения: @@ -190,7 +187,6 @@ createClient({ }) ``` - ### Подключение {#connecting} #### Соберите сведения о подключении {#gather-your-connection-details} @@ -218,7 +214,6 @@ const client = createClient({ Репозиторий клиента содержит множество примеров, которые используют переменные окружения, например [создание таблицы в ClickHouse Cloud](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/create_table_cloud.ts), [использование асинхронных вставок](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/async_insert.ts) и многие другие. - #### Пул соединений (только Node.js) {#connection-pool-nodejs-only} Чтобы избежать накладных расходов на установку соединения при каждом запросе, клиент создает пул соединений с ClickHouse для их повторного использования, используя механизм Keep-Alive. 
По умолчанию Keep-Alive включен, а размер пула соединений равен `10`, но вы можете изменить его с помощью параметра конфигурации `max_open_connections` [параметра конфигурации](./js.md#configuration). @@ -259,7 +254,6 @@ interface BaseQueryParams { } ``` - ### Метод query {#query-method} Используется для большинства запросов, которые могут вернуть ответ, таких как `SELECT`, а также для отправки DDL, таких как `CREATE TABLE`, и должен вызываться с `await`. Ожидается, что возвращённый результирующий набор данных будет использоваться в приложении. @@ -287,7 +281,6 @@ interface ClickHouseClient { Не указывайте клаузу FORMAT в `query`, вместо этого используйте параметр `format`. ::: - #### Абстракции набора результатов и строк {#result-set-and-row-abstractions} `ResultSet` предоставляет несколько вспомогательных методов для обработки данных в вашем приложении. @@ -372,7 +365,6 @@ await new Promise((resolve, reject) => { **Пример:** (только Node.js) Потоковая выборка результата запроса в формате `CSV` с использованием классического подхода `on('data')`. Это эквивалентно использованию синтаксиса `for await const`. [Исходный код](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_streaming_text_line_by_line.ts) - ```ts const resultSet = await client.query({ query: 'SELECT number FROM system.numbers_mt LIMIT 5', @@ -431,7 +423,6 @@ while (true) { } ``` - ### Метод INSERT {#insert-method} Это основной метод вставки данных. @@ -453,7 +444,6 @@ interface ClickHouseClient { Если оператор INSERT был отправлен на сервер, флаг `executed` будет иметь значение `true`. - #### Метод insert и потоковая передача данных в Node.js {#insert-method-and-streaming-in-nodejs} Он может работать как с `Stream.Readable`, так и с обычным `Array`, в зависимости от [формата данных](./js.md#supported-data-formats), указанного для метода `insert`. См. также раздел о [потоковой передаче файлов](./js.md#streaming-files-nodejs-only). @@ -556,7 +546,6 @@ await client.insert({ См. 
[исходный код](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_exclude_columns.ts) для получения дополнительных сведений. - **Пример**: Вставка в другую базу данных, а не ту, что указана в экземпляре клиента. [Исходный код](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_into_different_db.ts). ```ts @@ -567,7 +556,6 @@ await client.insert({ }) ``` - #### Ограничения веб-версии {#web-version-limitations} В настоящий момент операции вставки в `@clickhouse/client-web` работают только с форматами `Array` и `JSON*`. @@ -595,7 +583,6 @@ interface InsertParams extends BaseQueryParams { В будущем это может измениться. См. также: [Базовые параметры для всех клиентских методов](./js.md#base-parameters-for-all-client-methods). - ### Метод command {#command-method} Он может использоваться для операторов, которые не возвращают результат, когда предложение `FORMAT` неприменимо, или когда вам вообще не нужен ответ. Примером такого оператора может быть `CREATE TABLE` или `ALTER TABLE`. @@ -666,7 +653,6 @@ await client.command({ Отмена запроса с помощью `abort_signal` не гарантирует, что соответствующий оператор не был выполнен сервером. ::: - ### Метод exec {#exec-method} Если у вас есть произвольный запрос, который не вписывается в `query`/`insert`, @@ -707,7 +693,6 @@ export interface QueryResult { } ``` - ### Ping {#ping} Метод `ping`, предназначенный для проверки состояния подключения, возвращает `true`, если сервер доступен. @@ -765,7 +750,6 @@ const result = await client.ping({ select: true, /* query_id, abort_signal, http Метод ping может принимать большинство стандартных параметров метода `query` — см. определение типа `PingParamsWithSelectQuery`. - ### Close (только Node.js) {#close-nodejs-only} Закрывает все открытые соединения и освобождает ресурсы. Ничего не делает в веб-версии. 
@@ -774,7 +758,6 @@ const result = await client.ping({ select: true, /* query_id, abort_signal, http await client.close() ``` - ## Потоковая передача файлов (только Node.js) {#streaming-files-nodejs-only} В клиентском репозитории есть несколько примеров потоковой передачи файлов с популярными форматами данных (NDJSON, CSV, Parquet). @@ -902,7 +885,6 @@ await client.insert({ Однако, если вы используете столбцы с типом `DateTime` или `DateTime64`, вы можете использовать как строки, так и объекты JS Date. Объекты JS Date можно передавать в `insert` как есть, при значении параметра `date_time_input_format`, установленном в `best_effort`. Подробнее см. в этом [примере](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_js_dates.ts). - ### Особенности типов Decimal* {#decimal-types-caveats} Можно вставлять значения Decimal с помощью форматов семейства `JSON*`. Предположим, у нас есть таблица, определённая как: @@ -953,7 +935,6 @@ await client.query({ См. [этот пример](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_decimals.ts) для получения дополнительных сведений. - ### Целочисленные типы: Int64, Int128, Int256, UInt64, UInt128, UInt256 {#integral-types-int64-int128-int256-uint64-uint128-uint256} Хотя сервер может принимать это значение как число, в выходных форматах семейства `JSON*` оно возвращается как строка, чтобы избежать @@ -982,7 +963,6 @@ const resultSet = await client.query({ expect(await resultSet.json()).toEqual([ { number: 0 } ]) ``` - ## Настройки ClickHouse {#clickhouse-settings} Клиент может настраивать поведение ClickHouse с помощью механизма [настроек](/operations/settings/settings/). @@ -1010,7 +990,6 @@ client.query({ Убедитесь, что пользователь, от имени которого выполняются запросы, имеет достаточные права для изменения настроек. ::: - ## Продвинутые темы {#advanced-topics} ### Запросы с параметрами {#queries-with-parameters} @@ -1045,7 +1024,6 @@ await client.query({ Дополнительные сведения см. 
на странице [https://clickhouse.com/docs/interfaces/cli#cli-queries-with-parameters-syntax](https://clickhouse.com/docs/interfaces/cli#cli-queries-with-parameters-syntax). - ### Сжатие {#compression} Примечание: сжатие запросов в настоящее время недоступно в веб-версии. Сжатие ответов работает как обычно. Версия для Node.js поддерживает оба варианта. @@ -1066,7 +1044,6 @@ createClient({ * `response: true` указывает серверу ClickHouse отправлять сжатое тело ответа. Значение по умолчанию: `response: false` * `request: true` включает сжатие тела запроса, отправляемого клиентом. Значение по умолчанию: `request: false` - ### Логирование (только Node.js) {#logging-nodejs-only} :::important @@ -1124,7 +1101,6 @@ const client = createClient({ Реализацию Logger по умолчанию можно найти [здесь](https://github.com/ClickHouse/clickhouse-js/blob/main/packages/client-common/src/logger.ts). - ### Сертификаты TLS (только для Node.js) {#tls-certificates-nodejs-only} Клиент Node.js опционально поддерживает как односторонний (только центр сертификации, Certificate Authority), @@ -1160,7 +1136,6 @@ const client = createClient({ Полные примеры конфигурации TLS для режимов [basic](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/basic_tls.ts) и [mutual](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/mutual_tls.ts) см. в репозитории. - ### Конфигурация Keep-Alive (только для Node.js) {#keep-alive-configuration-nodejs-only} Клиент по умолчанию включает Keep-Alive во внутреннем HTTP-агенте. Это означает, что установленные сокеты будут повторно использоваться для последующих запросов, а заголовок `Connection: keep-alive` будет отправляться автоматически. Сокеты, простаивающие без активности, по умолчанию остаются в пуле соединений 2500 миллисекунд (см. [заметки по настройке этого параметра](./js.md#adjusting-idle_socket_ttl)). 
@@ -1192,7 +1167,6 @@ curl -v --data-binary "SELECT 1" В данном случае `keep_alive_timeout` равен 10 секундам, и вы можете попробовать увеличить `keep_alive.idle_socket_ttl` до 9000 или даже 9500 миллисекунд, чтобы неактивные сокеты оставались открытыми немного дольше, чем по умолчанию. Следите за возможными ошибками «Socket hang-up», которые будут указывать на то, что сервер закрывает соединения раньше клиента, и снижайте значение до тех пор, пока ошибки не исчезнут. - #### Поиск и устранение неисправностей {#troubleshooting} Если вы сталкиваетесь с ошибками `socket hang up`, даже используя последнюю версию клиента, есть следующие варианты решения этой проблемы: @@ -1253,7 +1227,6 @@ const client = createClient({ См. [пример](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/read_only_user.ts), где подробнее показаны ограничения пользователя с readonly=1. - ### Прокси с путем (pathname) {#proxy-with-a-pathname} Если ваш экземпляр ClickHouse находится за прокси и в его URL-адресе есть путь (pathname), как, например, [http://proxy:8123/clickhouse_server](http://proxy:8123/clickhouse_server), укажите `clickhouse_server` в качестве параметра конфигурации `pathname` (с начальным слешем или без него); иначе, если этот путь указан напрямую в `url`, он будет интерпретирован как параметр `database`. Поддерживается несколько сегментов, например `/my_proxy/db`. 
@@ -1265,7 +1238,6 @@ const client = createClient({ }) ``` - ### Реверс‑прокси с аутентификацией {#reverse-proxy-with-authentication} Если перед вашим развертыванием ClickHouse стоит реверс‑прокси с аутентификацией, вы можете использовать параметр `http_headers`, чтобы передавать необходимые заголовки: @@ -1278,7 +1250,6 @@ const client = createClient({ }) ``` - ### Пользовательский HTTP/HTTPS-агент (экспериментальная функция, только Node.js) {#custom-httphttps-agent-experimental-nodejs-only} :::warning @@ -1360,7 +1331,6 @@ const client = createClient({ При использовании сертификатов *и* пользовательского *HTTPS*-агента, скорее всего, потребуется отключить заголовок авторизации по умолчанию с помощью настройки `set_basic_auth_header` (добавлена в 1.2.0), так как он конфликтует с заголовками TLS. Все заголовки TLS должны задаваться вручную. - ## Известные ограничения (Node.js/web) {#known-limitations-nodejsweb} - Для результирующих наборов не предусмотрены мапперы данных, поэтому используются только примитивы языка. Планируется добавление некоторых мапперов типов данных с [поддержкой формата RowBinary](https://github.com/ClickHouse/clickhouse-js/issues/216). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md index bbb4b42fbf6..96303d38502 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md @@ -10,7 +10,6 @@ doc_type: 'guide' import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Разработка на ClickHouse с Moose OLAP {#developing-on-clickhouse-with-moose-olap} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md index 98abd6b7020..b0f6600a489 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md @@ -45,7 +45,6 @@ common.get_setting('invalid_setting_action') | http_buffer_size | 10MB | | Размер (в байтах) "внутрипамятного" буфера, используемого для потоковых HTTP-запросов. | | preserve_pandas_datetime_resolution | False | True, False | Если True и используется pandas 2.x, сохраняет разрешение типов datetime64/timedelta64 (например, 's', 'ms', 'us', 'ns'). Если False (или при pandas <2.x), приводит к наносекундному разрешению ('ns') для совместимости. | - ## Сжатие {#compression} ClickHouse Connect поддерживает сжатие lz4, zstd, brotli и gzip как для результатов запросов, так и для вставок. Всегда учитывайте, что использование сжатия обычно связано с компромиссом между сетевой пропускной способностью/скоростью передачи и нагрузкой на CPU (как на клиенте, так и на сервере). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md index 681ed33f098..58e4cf31c93 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md @@ -31,7 +31,6 @@ assert qr[0][0] == 4 `InsertContext` содержит изменяемое состояние, которое обновляется в процессе вставки, поэтому не является потокобезопасным. - ### Форматы записи {#write-formats} Форматы записи в настоящее время реализованы только для ограниченного числа типов. В большинстве случаев ClickHouse Connect попытается автоматически определить корректный формат записи для столбца, проверяя тип первого значения данных, не равного `NULL`. Например, при вставке в столбец типа `DateTime`, если первое вставляемое значение столбца — целое число Python, ClickHouse Connect вставит это целочисленное значение напрямую, предполагая, что оно фактически представляет собой число секунд с начала эпохи Unix. @@ -97,7 +96,6 @@ df = pd.DataFrame({ client.insert_df("users", df) ``` - #### Вставка из таблицы PyArrow {#pyarrow-table-insert} ```python @@ -115,7 +113,6 @@ arrow_table = pa.table({ client.insert_arrow("users", arrow_table) ``` - #### Вставка DataFrame на основе Arrow (pandas 2.x) {#arrow-backed-dataframe-insert-pandas-2} ```python @@ -134,7 +131,6 @@ df = pd.DataFrame({ client.insert_df_arrow("users", df) ``` - ### Часовые пояса {#time-zones} При вставке объектов Python `datetime.datetime` в столбцы ClickHouse `DateTime` или `DateTime64` ClickHouse Connect автоматически обрабатывает информацию о часовом поясе. 
Поскольку ClickHouse хранит все значения `DateTime` как не зависящие от часового пояса Unix-метки времени (секунды или доли секунды с начала эпохи Unix), преобразование часовых поясов автоматически выполняется на стороне клиента при вставке. @@ -176,7 +172,6 @@ print(*results.result_rows, sep="\n") При использовании pytz необходимо вызывать метод `localize()`, чтобы добавить информацию о часовом поясе к наивному объекту datetime. Передача `tzinfo=` напрямую в конструктор datetime приведёт к использованию некорректных исторических смещений. Для UTC вариант `tzinfo=pytz.UTC` работает корректно. См. [документацию pytz](https://pythonhosted.org/pytz/#localized-times-and-date-arithmetic) для получения дополнительной информации. ::: - #### Объекты datetime без часового пояса {#timezone-naive-datetime-objects} Если вы вставляете Python-объект `datetime.datetime` без часового пояса (без `tzinfo`), метод `.timestamp()` будет интерпретировать его как время в локальном часовом поясе системы. Чтобы избежать неоднозначности, рекомендуется: @@ -202,7 +197,6 @@ epoch_timestamp = int(naive_time.replace(tzinfo=pytz.UTC).timestamp()) client.insert('events', [[epoch_timestamp]], column_names=['event_time']) ``` - #### Столбцы DateTime с метаданными часового пояса {#datetime-columns-with-timezone-metadata} Столбцы ClickHouse могут быть объявлены с метаданными часового пояса (например, `DateTime('America/Denver')` или `DateTime64(3, 'Asia/Tokyo')`). Эти метаданные не влияют на то, как данные хранятся (по‑прежнему как метки времени в формате UTC), но определяют часовой пояс, используемый при запросе данных из ClickHouse. 
@@ -232,7 +226,6 @@ print(*results.result_rows, sep="\n") # (datetime.datetime(2023, 6, 15, 7, 30, tzinfo=),) {#datetimedatetime2023-6-15-7-30-tzinfodsttzinfo-americalos_angeles-pdt-1-day-170000-dst} ``` - ## Вставка из файла {#file-inserts} Пакет `clickhouse_connect.driver.tools` включает метод `insert_file`, который позволяет вставлять данные напрямую из файловой системы в существующую таблицу ClickHouse. Разбор данных выполняется на стороне сервера ClickHouse. `insert_file` принимает следующие параметры: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md index a56a7d1df62..62df8f99e47 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md @@ -31,7 +31,6 @@ assert result.result_set[1][0] == 'first_value2' Обратите внимание, что экземпляры `QueryContext` не являются потокобезопасными, однако в многопоточной среде можно получить их копию, вызвав метод `QueryContext.updated_copy`. - ## Потоковые запросы {#streaming-queries} Клиент ClickHouse Connect предоставляет несколько методов для получения данных в виде потока (реализовано как генератор Python): @@ -76,7 +75,6 @@ with client.query_row_block_stream('SELECT pickup, dropoff, pickup_longitude, pi Вы можете использовать свойство `source` у `StreamContext` для доступа к родительскому объекту `QueryResult`, содержащему имена и типы столбцов. - ### Типы потоков {#stream-types} Метод `query_column_block_stream` возвращает блок в виде последовательности данных столбцов, сохранённых в нативных типах данных Python. 
Используя приведённые выше запросы к `taxi_trips`, возвращаемые данные будут списком, каждый элемент которого — это другой список (или кортеж), содержащий все данные для соответствующего столбца. Таким образом, `block[0]` будет кортежем, содержащим только строки. Форматы, ориентированные на столбцы, чаще всего используются для выполнения агрегатных операций по всем значениям в столбце, например для суммирования общей стоимости поездок. @@ -101,7 +99,6 @@ with df_stream: Наконец, метод `query_arrow_stream` возвращает результат в формате ClickHouse `ArrowStream` как объект `pyarrow.ipc.RecordBatchStreamReader`, обёрнутый в `StreamContext`. Каждая итерация потока возвращает PyArrow RecordBlock. - ### Примеры потоковой обработки {#streaming-examples} #### Потоковая передача строк {#stream-rows} @@ -122,7 +119,6 @@ with client.query_rows_stream("SELECT number, number * 2 as doubled FROM system. # .... ``` - #### Потоковая передача блоков строк {#stream-row-blocks} ```python @@ -139,7 +135,6 @@ with client.query_row_block_stream("SELECT number, number * 2 FROM system.number # Получен блок из 34591 строк ``` - #### Потоковая передача DataFrame из Pandas {#stream-pandas-dataframes} ```python @@ -166,7 +161,6 @@ with client.query_df_stream("SELECT number, toString(number) AS str FROM system. # 2 65411 65411 ``` - #### Потоковая обработка пакетов Arrow {#stream-arrow-batches} ```python @@ -184,7 +178,6 @@ with client.query_arrow_stream("SELECT * FROM large_table") as stream: # Получен пакет Arrow с 34591 строк ``` - ## Запросы с использованием NumPy, Pandas и Arrow {#numpy-pandas-and-arrow-queries} ClickHouse Connect предоставляет специализированные методы выполнения запросов для работы со структурами данных NumPy, Pandas и Arrow. Эти методы позволяют получать результаты запросов непосредственно в этих популярных форматах данных без ручного преобразования. 
@@ -214,7 +207,6 @@ print(np_array) # [4 8]] {#4-8} ``` - ### Запросы Pandas {#pandas-queries} Метод `query_df` возвращает результаты запроса в виде объекта DataFrame библиотеки Pandas вместо `QueryResult` из ClickHouse Connect. @@ -239,7 +231,6 @@ print(df) # 4 4 8 {#4-4-8} ``` - ### Запросы PyArrow {#pyarrow-queries} Метод `query_arrow` возвращает результаты запроса в виде таблицы PyArrow. Он напрямую использует формат ClickHouse `Arrow`, поэтому принимает только три аргумента, общих с основным методом `query`: `query`, `parameters` и `settings`. Кроме того, предусмотрен дополнительный аргумент `use_strings`, который определяет, будут ли типы ClickHouse String в таблице Arrow представляться как строки (если True) или как байты (если False). @@ -266,7 +257,6 @@ print(arrow_table) # str: [["0","1","2"]] {#str-012} ``` - ### DataFrame на базе Arrow {#arrow-backed-dataframes} ClickHouse Connect поддерживает быстрое и экономное по памяти создание DataFrame из результатов Arrow с помощью методов `query_df_arrow` и `query_df_arrow_stream`. Эти методы являются тонкими обёртками вокруг методов выполнения запросов с использованием Arrow и выполняют преобразование в DataFrame без копирования данных, где это возможно: @@ -316,7 +306,6 @@ with client.query_df_arrow_stream( # Получен пакет с 34591 строками и типами данных: [UInt64, String] ``` - #### Примечания и особенности {#notes-and-caveats} - Отображение типов Arrow: при возврате данных в формате Arrow ClickHouse сопоставляет свои типы с ближайшими поддерживаемыми типами Arrow. Некоторые типы ClickHouse не имеют нативного аналога в Arrow и возвращаются как «сырые» байты в полях Arrow (обычно `BINARY` или `FIXED_SIZE_BINARY`). @@ -366,7 +355,6 @@ print([int.from_bytes(n, byteorder="little") for n in df["int_128_col"].to_list( Основной вывод: прикладной код должен выполнять эти преобразования в зависимости от возможностей выбранной библиотеки DataFrame и приемлемых компромиссов по производительности. 
Когда нативные для DataFrame преобразования недоступны, вариант с использованием чистого Python по‑прежнему остается рабочим. - ## Форматы чтения {#read-formats} Форматы чтения управляют типами данных значений, возвращаемых методами клиента `query`, `query_np` и `query_df`. (`raw_query` и `query_arrow` не изменяют данные, получаемые из ClickHouse, поэтому управление форматом к ним не применяется.) Например, если формат чтения для UUID изменён с формата по умолчанию `native` на альтернативный формат `string`, запрос ClickHouse к столбцу `UUID` будет возвращать строковые значения (в стандартном формате RFC 1422 8-4-4-4-12), а не объекты Python UUID. @@ -401,7 +389,6 @@ client.query('SELECT user_id, user_uuid, device_uuid from users', query_formats= client.query('SELECT device_id, dev_address, gw_address from devices', column_formats={'dev_address':'string'}) ``` - ### Параметры формата чтения (типы Python) {#read-format-options-python-types} | Тип ClickHouse | Базовый тип Python | Форматы чтения | Комментарии | @@ -462,7 +449,6 @@ result = client.query('SELECT name, avg(rating) FROM directors INNER JOIN movies К исходному объекту `ExternalData` можно добавить дополнительные внешние файлы данных с помощью метода `add_file`, который принимает те же параметры, что и конструктор. При использовании HTTP все внешние данные передаются как часть загрузки файла в формате `multipart/form-data`. - ## Часовые пояса {#time-zones} Существует несколько механизмов применения часового пояса к значениям ClickHouse `DateTime` и `DateTime64`. Внутри сервера ClickHouse любой объект `DateTime` или `DateTime64` всегда хранится как «наивное» числовое значение (без учёта часового пояса), представляющее количество секунд, прошедших с начала эпохи — 1970-01-01 00:00:00 по времени UTC. Для значений `DateTime64` представление может быть в миллисекундах, микросекундах или наносекундах, прошедших с начала эпохи, в зависимости от точности. 
В результате любое применение информации о часовом поясе всегда выполняется на стороне клиента. Обратите внимание, что это требует дополнительных вычислений, поэтому в приложениях, критичных к производительности, рекомендуется обрабатывать типы DateTime как метки времени эпохи (epoch timestamps), за исключением пользовательского отображения и конвертации (например, объекты Pandas Timestamps всегда являются 64-битным целым числом, представляющим количество наносекунд с начала эпохи, для повышения производительности). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md index cf8c4f062f4..bf1b258153a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md @@ -77,7 +77,6 @@ if __name__ == '__main__': Аналогичным образом вы можете сохранять данные в формате [TabSeparated](/interfaces/formats/TabSeparated), а также в других форматах. Обзор всех доступных вариантов приведён в разделе [Форматы входных и выходных данных](/interfaces/formats). - ## Многопоточные, многопроцессные и асинхронные/с управлением через цикл событий варианты использования {#multithreaded-multiprocess-and-asyncevent-driven-use-cases} ClickHouse Connect хорошо работает в многопоточных, многопроцессных и асинхронных приложениях, управляемых циклом событий. Вся обработка запросов и вставок выполняется в одном потоке, поэтому операции в целом потокобезопасны. (Параллельная обработка некоторых операций на низком уровне рассматривается как потенциальное будущее улучшение для преодоления накладных расходов, связанных с использованием одного потока, но даже в этом случае потокобезопасность будет сохранена.) @@ -116,7 +115,6 @@ asyncio.run(main()) См. 
также: [пример run_async](https://github.com/ClickHouse/clickhouse-connect/blob/main/examples/run_async.py). - ## Управление идентификаторами сессий ClickHouse {#managing-clickhouse-session-ids} Каждый запрос ClickHouse выполняется в контексте «сессии» ClickHouse. В настоящее время сессии используются для двух целей: @@ -142,7 +140,6 @@ client = clickhouse_connect.get_client(host='somehost.com', user='dbuser', passw В этом случае ClickHouse Connect не отправляет `session_id`, и сервер не будет считать отдельные запросы принадлежащими одному сеансу. Временные таблицы и параметры сеанса не будут сохраняться между запросами. - ## Настройка пула HTTP‑подключений {#customizing-the-http-connection-pool} ClickHouse Connect использует пулы подключений `urllib3` для работы с базовым HTTP‑подключением к серверу. По умолчанию все экземпляры клиента используют один и тот же пул подключений, чего достаточно для большинства сценариев использования. Этот пул по умолчанию поддерживает до 8 HTTP Keep Alive‑подключений к каждому серверу ClickHouse, задействованному приложением. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md index 794b172dd58..3d6798f23b3 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md @@ -118,7 +118,6 @@ print(client.database) # Результат: 'github' {#output-github} ``` - ## Жизненный цикл клиента и рекомендации по использованию {#client-lifecycle-and-best-practices} Создание клиента ClickHouse Connect — ресурсоёмкая операция, которая включает установление соединения, получение метаданных сервера и инициализацию настроек. 
Следуйте этим рекомендациям для обеспечения оптимальной производительности: @@ -158,7 +157,6 @@ for i in range(1000): client.close() ``` - ### Многопоточные приложения {#multi-threaded-applications} :::warning @@ -216,7 +214,6 @@ def worker(thread_id): client.close() ``` - ### Корректная очистка {#proper-cleanup} Всегда закрывайте клиентов при завершении работы. Обратите внимание, что `client.close()` освобождает ресурсы клиента и закрывает HTTP‑соединения из пула только в том случае, если клиент владеет собственным пулом (например, когда он создан с пользовательскими параметрами TLS/прокси). Для стандартного общего пула используйте `client.close_connections()` для явной очистки сокетов; в противном случае соединения автоматически освобождаются по истечении времени простоя и при завершении процесса. @@ -236,7 +233,6 @@ with clickhouse_connect.get_client(host='my-host', username='default', password= result = client.query('SELECT 1') ``` - ### Когда использовать несколько клиентов {#when-to-use-multiple-clients} Несколько клиентов уместны в следующих случаях: @@ -283,7 +279,6 @@ WHERE date >= '2022-10-01 15:20:05' Привязка на стороне сервера поддерживается (сервером ClickHouse) только для запросов `SELECT`. Она не работает для `ALTER`, `DELETE`, `INSERT` или других типов запросов. В будущем это может измениться; см. [https://github.com/ClickHouse/ClickHouse/issues/42092](https://github.com/ClickHouse/ClickHouse/issues/42092). ::: - #### Привязка на стороне клиента {#client-side-binding} ClickHouse Connect также поддерживает привязку параметров на стороне клиента, что даёт большую гибкость при генерации шаблонных SQL‑запросов. Для привязки на стороне клиента аргумент `parameters` должен быть словарём или последовательностью. Привязка на стороне клиента использует [форматирование строк в стиле «printf»](https://docs.python.org/3/library/stdtypes.html#old-string-formatting) в Python для подстановки параметров. 
@@ -348,7 +343,6 @@ WHERE metric >= 35200.44 ::: - ### Аргумент settings {#settings-argument-1} Все ключевые методы ClickHouse Connect Client "insert" и "select" принимают необязательный именованный аргумент `settings` для передачи [пользовательских настроек](/operations/settings/settings.md) ClickHouse для соответствующего SQL‑выражения. Аргумент `settings` должен быть словарём. Каждый элемент должен представлять собой имя настройки ClickHouse и её соответствующее значение. Учтите, что значения будут преобразованы в строки при отправке на сервер в виде параметров запроса. @@ -364,7 +358,6 @@ settings = {'merge_tree_min_rows_for_concurrent_read': 65535, client.query("SELECT event_type, sum(timeout) FROM event_errors WHERE event_time > '2022-08-01'", settings=settings) ``` - ## Метод клиента `command` {#client-command-method} Используйте метод `Client.command` для отправки SQL‑запросов на сервер ClickHouse, которые обычно не возвращают данных либо возвращают одно примитивное значение или массив значений вместо полного набора данных. Этот метод принимает следующие параметры: @@ -408,7 +401,6 @@ print(result) client.command("DROP TABLE test_command") ``` - #### Простые запросы, возвращающие одно значение {#simple-queries-returning-single-values} ```python @@ -427,7 +419,6 @@ print(version) # Вывод: "25.8.2.29" {#output-258229} ``` - #### Команды с параметрами {#commands-with-parameters} ```python @@ -449,7 +440,6 @@ result = client.command( ) ``` - #### Команды с настройками {#commands-with-settings} ```python @@ -464,7 +454,6 @@ result = client.command( ) ``` - ## Метод клиента `query` {#client-query-method} Метод `Client.query` — основной способ получить один «пакетный» набор данных с сервера ClickHouse. Он использует нативный формат ClickHouse поверх HTTP для эффективной передачи больших наборов данных (до примерно одного миллиона строк). 
Этот метод принимает следующие параметры: @@ -512,7 +501,6 @@ print([col_type.name for col_type in result.column_types]) # Вывод: ['String', 'String'] {#output-string-string} ``` - #### Доступ к результатам запроса {#accessing-query-results} ```python @@ -547,7 +535,6 @@ print(result.first_row) # Вывод: (0, "0") {#output-0-0} ``` - #### Запрос с клиентскими параметрами {#query-with-client-side-parameters} ```python @@ -566,7 +553,6 @@ parameters = ("system", 5) result = client.query(query, parameters=parameters) ``` - #### Запрос с серверными параметрами {#query-with-server-side-parameters} ```python @@ -581,7 +567,6 @@ parameters = {"db": "system", "tbl": "query_log"} result = client.query(query, parameters=parameters) ``` - #### Запрос с параметрами {#query-with-settings} ```python @@ -599,7 +584,6 @@ result = client.query( ) ``` - ### Объект `QueryResult` {#the-queryresult-object} Базовый метод `query` возвращает объект `QueryResult` со следующими публичными свойствами: @@ -675,7 +659,6 @@ data = [ client.insert("users", data, column_names=["id", "name", "age"]) ``` - #### Вставка по столбцам {#column-oriented-insert} ```python @@ -693,7 +676,6 @@ data = [ client.insert("users", data, column_names=["id", "name", "age"], column_oriented=True) ``` - #### Вставка с явным указанием типов столбцов {#insert-with-explicit-column-types} ```python @@ -716,7 +698,6 @@ client.insert( ) ``` - #### Вставка в конкретную базу данных {#insert-into-specific-database} ```python @@ -738,7 +719,6 @@ client.insert( ) ``` - ## Вставка из файлов {#file-inserts} Для вставки данных напрямую из файлов в таблицы ClickHouse см. раздел [Расширенные методы вставки (вставка из файлов)](advanced-inserting.md#file-inserts). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md index 59f19ffd644..91f064af751 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md @@ -16,7 +16,6 @@ import CodeBlock from '@theme/CodeBlock'; import ConnectionDetails from '@site/i18n/ru/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # Введение {#introduction} ClickHouse Connect — это основной драйвер базы данных, обеспечивающий взаимодействие с широким спектром приложений на Python. @@ -88,7 +87,6 @@ import clickhouse_connect client = clickhouse_connect.get_client(host='localhost', username='default', password='password') ``` - #### Используйте экземпляр клиента ClickHouse Connect для подключения к сервису ClickHouse Cloud: {#use-a-clickhouse-connect-client-instance-to-connect-to-a-clickhouse-cloud-service} :::tip @@ -101,7 +99,6 @@ import clickhouse_connect client = clickhouse_connect.get_client(host='HOSTNAME.clickhouse.cloud', port=8443, username='default', password='ваш_пароль') ``` - ### Взаимодействие с базой данных {#interact-with-your-database} Чтобы выполнить SQL-команду ClickHouse, используйте метод клиента `command`: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md index cc0bb72b0d4..c407c7630e3 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md @@ -34,7 +34,6 @@ with engine.begin() as conn: Полный список поддерживаемых параметров см. 
в разделе [Connection arguments and Settings](driver-api.md#connection-arguments) ниже. Их также можно передавать через DSN SQLAlchemy. - ## Основные запросы {#sqlalchemy-core-queries} Диалект поддерживает `SELECT`-запросы SQLAlchemy Core с объединениями, фильтрацией, сортировкой, ограничениями и смещениями (LIMIT/OFFSET), а также `DISTINCT`. @@ -68,7 +67,6 @@ with engine.begin() as conn: conn.execute(delete(users).where(users.c.name.like("%temp%"))) ``` - ## DDL и рефлексия {#sqlalchemy-ddl-reflection} Вы можете создавать базы данных и таблицы, используя предоставленные DDL‑помощники и конструкторы типов/движков. Поддерживается рефлексия таблиц (включая типы столбцов и движок). @@ -103,7 +101,6 @@ with engine.begin() as conn: Отражённые столбцы включают атрибуты, специфичные для диалекта, такие как `clickhousedb_default_type`, `clickhousedb_codec_expression` и `clickhousedb_ttl_expression`, если они заданы на сервере. - ## Операции вставки (Core и базовый ORM) {#sqlalchemy-inserts} Операции вставки можно выполнять через SQLAlchemy Core, а также с помощью простых ORM-моделей для удобства. @@ -132,7 +129,6 @@ with Session(engine) as session: session.commit() ``` - ## Область применения и ограничения {#scope-and-limitations} - Основное назначение: Поддержка возможностей SQLAlchemy Core, таких как `SELECT` с `JOIN` (`INNER`, `LEFT OUTER`, `FULL OUTER`, `CROSS`), `WHERE`, `ORDER BY`, `LIMIT`/`OFFSET` и `DISTINCT`. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md index 26c246b6061..4eb21fdc1ae 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md @@ -37,7 +37,6 @@ clickhouse = { version = "0.12.2", features = ["test-util"] } См. также [страницу crates.io](https://crates.io/crates/clickhouse). 
- ## Возможности Cargo {#cargo-features} * `lz4` (включена по умолчанию) — включает варианты `Compression::Lz4` и `Compression::Lz4Hc(_)`. Если она включена, `Compression::Lz4` используется по умолчанию для всех запросов, кроме `WATCH`. @@ -90,7 +89,6 @@ let client = Client::default() .with_database("test"); ``` - ### Подключение по HTTPS или к ClickHouse Cloud {#https-or-clickhouse-cloud-connection} HTTPS работает как с функциями (features) Cargo `rustls-tls`, так и с `native-tls`. @@ -116,7 +114,6 @@ let client = Client::default() * [Пример HTTPS с ClickHouse Cloud](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/clickhouse_cloud.rs) в репозитории клиента. Его также можно использовать для HTTPS-подключений к on-premise‑инстансам. - ### Выбор строк {#selecting-rows} ```rust @@ -152,7 +149,6 @@ NB: так как весь ответ передаётся в потоке, ку Используйте `wait_end_of_query` с осторожностью при выборке строк, так как это может привести к более высокому потреблению памяти на стороне сервера и, вероятно, снизит общую производительность. ::: - ### Добавление строк {#inserting-rows} ```rust @@ -175,7 +171,6 @@ insert.end().await?; * Строки отправляются постепенно в виде потока, чтобы распределить нагрузку на сеть. * ClickHouse вставляет пакеты строк атомарно, только если все строки попадают в один и тот же раздел и их количество меньше [`max_insert_block_size`](https://clickhouse.tech/docs/operations/settings/settings/#settings-max_insert_block_size). - ### Асинхронная вставка (пакетирование на стороне сервера) {#async-insert-server-side-batching} Вы можете использовать [асинхронные вставки ClickHouse](/optimize/asynchronous-inserts), чтобы избежать пакетирования входящих данных на стороне клиента. Это можно сделать, просто указав параметр `async_insert` в методе `insert` (или даже в экземпляре `Client`, чтобы он влиял на все вызовы `insert`). 
@@ -191,7 +186,6 @@ let client = Client::default() * [Пример асинхронной вставки](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/async_insert.rs) в репозитории клиента. - ### Возможность inserter (клиентская пакетная запись) {#inserter-feature-client-side-batching} Требуется фича Cargo `inserter`. @@ -233,7 +227,6 @@ inserter.end().await?; ::: - ### Выполнение операторов DDL {#executing-ddls} В случае одноузлового развертывания достаточно выполнить операторы DDL следующим образом: @@ -252,7 +245,6 @@ client .await?; ``` - ### Настройки ClickHouse {#clickhouse-settings} Вы можете применять различные [настройки ClickHouse](/operations/settings/settings), используя метод `with_option`. Например: @@ -269,7 +261,6 @@ let numbers = client Помимо `query`, аналогичным образом работают методы `insert` и `inserter`; кроме того, тот же метод можно вызвать у экземпляра `Client`, чтобы задать глобальные настройки для всех запросов. - ### Идентификатор запроса {#query-id} С помощью `.with_option` вы можете задать опцию `query_id`, чтобы идентифицировать запросы в журнале запросов ClickHouse. @@ -290,7 +281,6 @@ let numbers = client См. также: [пример query_id](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/query_id.rs) в репозитории клиента. - ### Идентификатор сессии {#session-id} Аналогично `query_id`, вы можете задать `session_id`, чтобы выполнять запросы в одной и той же сессии. `session_id` можно задать либо глобально на уровне клиента, либо для каждого отдельного вызова `query`, `insert` или `inserter`. @@ -307,7 +297,6 @@ let client = Client::default() См. также: пример [session_id](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/session_id.rs) в репозитории клиента. - ### Пользовательские HTTP‑заголовки {#custom-http-headers} Если вы используете аутентификацию через прокси или вам нужно передавать пользовательские заголовки, вы можете сделать это следующим образом: @@ -320,7 +309,6 @@ let client = Client::default() См. 
также: [пример использования пользовательских HTTP-заголовков](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/custom_http_headers.rs) в репозитории клиента. - ### Пользовательский HTTP‑клиент {#custom-http-client} Это может быть полезно для тонкой настройки параметров лежащего в основе пула HTTP‑соединений. @@ -349,7 +337,6 @@ let client = Client::with_http_client(hyper_client).with_url("http://localhost:8 См. также: [пример с пользовательским HTTP‑клиентом](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/custom_http_client.rs) в репозитории клиента. - ## Типы данных {#data-types} :::info @@ -456,7 +443,6 @@ struct MyRow { } ``` - * `DateTime` сопоставляется с `u32` или newtype-обёрткой вокруг него и представляет количество секунд, прошедших с эпохи UNIX. Также поддерживается [`time::OffsetDateTime`](https://docs.rs/time/latest/time/struct.OffsetDateTime.html) при использовании `serde::time::datetime`, для чего требуется фича `time`. ```rust @@ -535,7 +521,6 @@ struct MyRow { * Типы данных `Variant`, `Dynamic` и новый тип данных `JSON` пока не поддерживаются. - ## Мокирование {#mocking} Крейт предоставляет утилиты для мокирования сервера CH и тестирования DDL, а также запросов `SELECT`, `INSERT` и `WATCH`. Функциональность может быть включена с помощью feature `test-util`. Используйте её **только** как dev-зависимость. @@ -580,7 +565,6 @@ struct EventLog { } ``` - ## Известные ограничения {#known-limitations} * Типы данных `Variant`, `Dynamic` и (новый) `JSON` пока не поддерживаются. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md index 6da40e32b66..8a0ab1c73f9 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md @@ -19,24 +19,17 @@ import datagrip_6 from '@site/static/images/integrations/sql-clients/datagrip-6. import datagrip_7 from '@site/static/images/integrations/sql-clients/datagrip-7.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Подключение DataGrip к ClickHouse {#connecting-datagrip-to-clickhouse} - - ## Запустите или загрузите DataGrip {#start-or-download-datagrip} DataGrip доступен на сайте https://www.jetbrains.com/datagrip/ - - ## 1. Соберите сведения о подключении {#1-gather-your-connection-details} - - ## 2. Загрузите драйвер ClickHouse {#2-load-the-clickhouse-driver} 1. Запустите DataGrip и на вкладке **Data Sources** в диалоговом окне **Data Sources and Drivers** нажмите значок **+** @@ -58,8 +51,6 @@ DataGrip доступен на сайте https://www.jetbrains.com/datagrip/ - - ## 3. Подключение к ClickHouse {#3-connect-to-clickhouse} - Укажите параметры подключения к базе данных и нажмите **Test Connection**. @@ -79,8 +70,6 @@ ClickHouse Cloud требует шифрования SSL для всех под - - ## Подробнее {#learn-more} Дополнительную информацию о DataGrip см. в его документации. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md index c77fc62d19d..62a00f0bc7a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md @@ -21,7 +21,6 @@ import dbeaver_sql_editor from '@site/static/images/integrations/sql-clients/dbe import dbeaver_query_log_select from '@site/static/images/integrations/sql-clients/dbeaver-query-log-select.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Подключение DBeaver к ClickHouse {#connect-dbeaver-to-clickhouse} @@ -32,8 +31,6 @@ DBeaver доступен в нескольких редакциях. В этом Пожалуйста, используйте DBeaver версии 23.1.0 или новее для улучшенной поддержки столбцов `Nullable` в ClickHouse. ::: - - ## 1. Соберите информацию о вашем ClickHouse {#1-gather-your-clickhouse-details} DBeaver использует JDBC поверх HTTP(S) для подключения к ClickHouse; для этого вам потребуются: @@ -43,14 +40,10 @@ DBeaver использует JDBC поверх HTTP(S) для подключен - имя пользователя - пароль - - ## 2. Скачайте DBeaver {#2-download-dbeaver} DBeaver можно скачать по адресу https://dbeaver.io/download/ - - ## 3. Добавление базы данных {#3-add-a-database} - Используйте меню **Database > New Database Connection** или значок **New Database Connection** в **Database Navigator**, чтобы открыть диалоговое окно **Connect to a database**: @@ -79,8 +72,6 @@ DBeaver можно скачать по адресу https://dbeaver.io/download/ - - ## 4. Запрос к ClickHouse {#4-query-clickhouse} Откройте редактор SQL-запросов и выполните запрос. @@ -93,8 +84,6 @@ DBeaver можно скачать по адресу https://dbeaver.io/download/ - - ## Дальнейшие шаги {#next-steps} Подробную информацию о возможностях DBeaver см. 
в его [wiki](https://github.com/dbeaver/dbeaver/wiki), а о возможностях ClickHouse — в [документации ClickHouse](https://clickhouse.com/docs). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md index d9c36449ad2..57853f09968 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md @@ -18,7 +18,6 @@ import dropdown_cell_chart from '@site/static/images/integrations/sql-clients/ma import run_app_view from '@site/static/images/integrations/sql-clients/marimo/run-app-view.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Использование marimo с ClickHouse {#using-marimo-with-clickhouse} @@ -27,8 +26,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 1. Установите Marimo с поддержкой SQL {#install-marimo-sql} ```shell @@ -38,7 +35,6 @@ marimo edit clickhouse_demo.py Должен открыться веб‑браузер с адресом localhost. - ## 2. Подключение к ClickHouse. {#connect-to-clickhouse} Перейдите на панель источников данных слева в редакторе marimo и нажмите «Add database». @@ -53,8 +49,6 @@ marimo edit clickhouse_demo.py - - ## 3. Выполнение SQL-запросов {#run-sql} После настройки подключения вы можете создать новую SQL-ячейку и выбрать движок ClickHouse. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md index 0bd8241c4c6..2cd5cff90ca 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md @@ -47,7 +47,6 @@ import adjust_axis_scale from '@site/static/images/cloud/sqlconsole/adjust-axis- import give_a_query_a_name from '@site/static/images/cloud/sqlconsole/give-a-query-a-name.png' import save_the_query from '@site/static/images/cloud/sqlconsole/save-the-query.png' - # SQL-консоль {#sql-console} SQL-консоль — самый быстрый и простой способ изучать ваши базы данных и выполнять по ним запросы в ClickHouse Cloud. Вы можете использовать SQL-консоль, чтобы: @@ -57,8 +56,6 @@ SQL-консоль — самый быстрый и простой способ - Выполнять запросы и визуализировать результаты всего за несколько кликов - Делиться запросами с коллегами по команде и более эффективно сотрудничать. - - ## Изучение таблиц {#exploring-tables} ### Просмотр списка таблиц и сведений о схеме {#viewing-table-list-and-schema-info} @@ -83,8 +80,6 @@ SQL-консоль — самый быстрый и простой способ - - ## Фильтрация и сортировка таблиц {#filtering-and-sorting-tables} ### Сортировка таблицы {#sorting-a-table} @@ -125,8 +120,6 @@ SQL-консоль может преобразовать ваши сортиро Вы можете узнать больше о выполнении запросов в SQL-консоли, прочитав (link) документацию по запросам. 
- - ## Создание и выполнение запроса {#creating-and-running-a-query} ### Создание запроса {#creating-a-query} @@ -182,8 +175,6 @@ SQL-консоль может преобразовать ваши сортиро - - ## Использование GenAI для управления запросами {#using-genai-to-manage-queries} Эта функция позволяет пользователям писать запросы в виде вопросов на естественном языке, а консоль запросов будет создавать SQL‑запросы на основе контекста доступных таблиц. GenAI также может помогать пользователям отлаживать их запросы. @@ -294,8 +285,6 @@ SQL-консоль может преобразовать ваши сортиро 1. Создайте новый запрос, нажав на значок _+_, и вставьте следующий код: - - ```sql -- Показать общую стоимость и общее количество всех транзакций uk_price_paid по годам. SELECT year(date), sum(pricee) as total_price, Count(*) as total_transactions @@ -310,7 +299,6 @@ SQL-консоль может преобразовать ваши сортиро Помните, что GenAI — экспериментальная функция. Будьте осторожны при выполнении запросов, сгенерированных GenAI, для любых наборов данных. - ## Расширенные возможности выполнения запросов {#advanced-querying-features} ### Поиск по результатам запроса {#searching-query-results} @@ -339,8 +327,6 @@ SQL-консоль может преобразовать ваши сортиро - - ## Визуализация данных запроса {#visualizing-query-data} Некоторые данные проще интерпретировать в виде диаграмм. Вы можете быстро создавать визуализации на основе результатов запросов прямо из SQL-консоли всего за несколько кликов. В качестве примера мы используем запрос, который вычисляет еженедельную статистику поездок на такси в Нью-Йорке: @@ -401,7 +387,6 @@ SQL-консоль поддерживает десять типов график - ## Совместное использование запросов {#sharing-queries} Консоль SQL позволяет делиться запросами с вашей командой. Когда запрос становится общим, все участники команды могут просматривать и изменять его. Общие запросы — отличный способ совместной работы с вашей командой. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md index 2f7aad14477..ec4904387e7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md @@ -17,29 +17,22 @@ import tablum_ch_2 from '@site/static/images/integrations/sql-clients/tablum-ch- import tablum_ch_3 from '@site/static/images/integrations/sql-clients/tablum-ch-3.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Интеграция TABLUM.IO с ClickHouse {#connecting-tablumio-to-clickhouse} - - ## Откройте стартовую страницу TABLUM.IO {#open-the-tablumio-startup-page} :::note Вы можете установить самостоятельно размещаемую (self-hosted) версию TABLUM.IO на свой Linux-сервер с помощью Docker. ::: - - ## 1. Зарегистрируйтесь или войдите в сервис {#1-sign-up-or-sign-in-to-the-service} Сначала зарегистрируйтесь в TABLUM.IO, используя свою электронную почту, или выполните быстрый вход через учетную запись Google или Facebook. - - ## 2. Добавление коннектора ClickHouse {#2-add-a-clickhouse-connector} Подготовьте параметры подключения к ClickHouse, перейдите на вкладку **Connector** и заполните поля: URL хоста, порт, имя пользователя, пароль, имя базы данных и имя коннектора. После этого нажмите кнопку **Test connection**, чтобы проверить параметры подключения, а затем нажмите **Save connector for me**, чтобы сохранить коннектор. @@ -54,16 +47,12 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 3. Выберите коннектор {#3-select-the-connector} Перейдите на вкладку **Dataset**. Выберите недавно созданный коннектор ClickHouse в выпадающем списке. В правой панели вы увидите список доступных таблиц и схем. - - ## 4. 
Введите SQL‑запрос и выполните его {#4-input-a-sql-query-and-run-it} Введите запрос в SQL Console и нажмите **Run Query**. Результаты отобразятся в виде таблицы. @@ -81,8 +70,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; * делиться результатами в виде новой базы данных ClickHouse. ::: - - ## Узнать больше {#learn-more} Дополнительную информацию о TABLUM.IO можно найти на сайте https://tablum.io. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md index 3cc250a154e..245619bcbb7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md @@ -9,7 +9,6 @@ doc_type: 'guide' import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Развертывание ClickHouse на Easypanel {#deploying-clickhouse-on-easypanel} @@ -18,8 +17,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; [![Deploy to Easypanel](https://easypanel.io/img/deploy-on-easypanel-40.svg)](https://easypanel.io/docs/templates/clickhouse) - - ## Инструкции {#instructions} 1. Создайте виртуальную машину (VM) с Ubuntu у вашего облачного провайдера. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md index 1ddbdd997aa..41ffb0e8d85 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md @@ -19,18 +19,13 @@ import retool_04 from '@site/static/images/integrations/tools/data-integration/r import retool_05 from '@site/static/images/integrations/tools/data-integration/retool/retool_05.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Подключение Retool к ClickHouse {#connecting-retool-to-clickhouse} - - ## 1. Соберите сведения о подключении {#1-gather-your-connection-details} - - ## 2. Создайте ресурс ClickHouse {#2-create-a-clickhouse-resource} Войдите в свой аккаунт Retool и перейдите на вкладку _Resources_. 
Выберите «Create New» → «Resource»: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md index 3baaf4866e6..d7721ed0e6a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md @@ -22,7 +22,6 @@ import splunk_011 from '@site/static/images/integrations/tools/data-integration/ import splunk_012 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_012.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Сохранение журналов аудита ClickHouse Cloud в Splunk {#storing-clickhouse-cloud-audit-logs-into-splunk} @@ -33,12 +32,8 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; Это дополнение содержит только модульный ввод (modular input); никаких дополнительных пользовательских интерфейсов оно не предоставляет. - - # Установка {#installation} - - ## Для Splunk Enterprise {#for-splunk-enterprise} Загрузите ClickHouse Cloud Audit Add-on for Splunk с [Splunkbase](https://splunkbase.splunk.com/app/7709). @@ -55,8 +50,6 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; Если всё прошло успешно, вы должны увидеть установленное приложение ClickHouse Audit logs. В противном случае проверьте логи Splunkd на наличие ошибок. - - # Модульная конфигурация входных данных {#modular-input-configuration} Чтобы настроить модульный ввод, вам сначала понадобится информация из вашего развертывания ClickHouse Cloud: @@ -64,8 +57,6 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; - Идентификатор организации - Административный [API Key](/cloud/manage/openapi) - - ## Получение информации из ClickHouse Cloud {#getting-information-from-clickhouse-cloud} Войдите в [консоль ClickHouse Cloud](https://console.clickhouse.cloud/). 
@@ -86,8 +77,6 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; - - ## Настройка источника данных в Splunk {#configure-data-input-in-splunk} Вернувшись в Splunk, перейдите в Settings -> Data inputs. @@ -108,8 +97,6 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; Источник данных настроен, можно приступать к просмотру журналов аудита. - - # Использование {#usage} Модульный ввод данных сохраняет данные в Splunk. Для их просмотра используйте стандартный интерфейс поиска Splunk. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md index c70df9afa37..1207528493b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md @@ -7,16 +7,12 @@ title: 'Интерфейс Arrow Flight' doc_type: 'reference' --- - - # Интерфейс Apache Arrow Flight {#apache-arrow-flight-interface} ClickHouse поддерживает интеграцию с протоколом [Apache Arrow Flight](https://arrow.apache.org/docs/format/Flight.html) — высокопроизводительным RPC‑фреймворком, предназначенным для эффективной передачи колоночных данных с использованием формата Arrow IPC поверх gRPC. Этот интерфейс позволяет клиентам Flight SQL выполнять запросы к ClickHouse и получать результаты в формате Arrow, обеспечивая высокую пропускную способность и низкую задержку для аналитических нагрузок. - - ## Возможности {#features} * Выполнять SQL‑запросы по протоколу Arrow Flight SQL @@ -24,8 +20,6 @@ ClickHouse поддерживает интеграцию с протоколом * Интегрироваться с BI‑инструментами и прикладными решениями для работы с данными, поддерживающими Arrow Flight * Обеспечивать легковесный и высокопроизводительный обмен данными по gRPC - - ## Ограничения {#limitations} Интерфейс Arrow Flight в данный момент является экспериментальным и активно дорабатывается. 
Известные ограничения: @@ -36,8 +30,6 @@ ClickHouse поддерживает интеграцию с протоколом Если вы столкнетесь с проблемами совместимости или хотите внести вклад, пожалуйста, [создайте issue](https://github.com/ClickHouse/ClickHouse/issues) в репозитории ClickHouse. - - ## Запуск сервера Arrow Flight {#running-server} Чтобы включить сервер Arrow Flight в самоуправляемом экземпляре ClickHouse, добавьте следующую конфигурацию в конфигурационный файл сервера: @@ -54,7 +46,6 @@ ClickHouse поддерживает интеграцию с протоколом {} Application: Протокол совместимости Arrow Flight: 0.0.0.0:9005 ``` - ## Подключение к ClickHouse через Arrow Flight SQL {#connecting-to-clickhouse} Вы можете использовать любой клиент, который поддерживает Arrow Flight SQL. Например, с помощью `pyarrow`: @@ -70,7 +61,6 @@ for batch in reader: print(batch.to_pandas()) ``` - ## Совместимость {#compatibility} Интерфейс Arrow Flight совместим с инструментами, которые поддерживают Arrow Flight SQL, включая собственные приложения, реализованные на: @@ -81,8 +71,6 @@ for batch in reader: Если для вашего инструмента доступен нативный коннектор ClickHouse (например, JDBC, ODBC), предпочтительнее использовать именно его, если только Arrow Flight не требуется специально для достижения нужной производительности или совместимости форматов. - - ## Отмена запросов {#query-cancellation} Долго выполняющиеся запросы можно отменить, закрыв gRPC-соединение на стороне клиента. Поддержка более продвинутых возможностей отмены запланирована. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/cli.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/cli.md index 9c5db6bfe33..b854ba9b564 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/cli.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/cli.md @@ -20,7 +20,6 @@ ClickHouse предоставляет штатный клиент командн Клиент предоставляет информацию о выполнении запросов в реальном времени с индикатором прогресса, количеством прочитанных строк, объёмом обработанных данных (в байтах) и временем выполнения запроса. Он поддерживает как [параметры командной строки](#command-line-options), так и [файлы конфигурации](#configuration_files). - ## Установка {#install} Чтобы загрузить ClickHouse, выполните команду: @@ -39,7 +38,6 @@ sudo ./clickhouse install Различные версии клиента и сервера совместимы между собой, но некоторые функции могут быть недоступны в более старых клиентах. Рекомендуется использовать одну и ту же версию для клиента и сервера. - ## Запуск {#run} :::note @@ -71,7 +69,6 @@ ClickHouse client version 24.12.2.29 (official build). Полный список параметров командной строки см. в разделе [Command Line Options](#command-line-options). - ### Подключение к ClickHouse Cloud {#connecting-cloud} Информация о вашем сервисе ClickHouse Cloud доступна в консоли ClickHouse Cloud. Выберите сервис, к которому нужно подключиться, и нажмите **Connect**: @@ -123,7 +120,6 @@ ClickHouse client version 24.12.2.29 (official build). Чтобы сосредоточиться на синтаксисе запросов, в последующих примерах опущены параметры подключения (`--host`, `--port` и т. д.). Не забудьте добавить их, когда будете использовать команды. 
::: - ## Интерактивный режим {#interactive-mode} ### Использование интерактивного режима {#using-interactive-mode} @@ -168,7 +164,6 @@ ClickHouse Client основан на `replxx` (аналог `readline`), поэ * `q`, `Q` или `:q` * `logout` или `logout;` - ### Информация об обработке запроса {#processing-info} Во время обработки запроса клиент показывает: @@ -247,7 +242,6 @@ $ echo "Hello\nGoodbye" | clickhouse-client --query "INSERT INTO messages FORMAT Когда указана опция `--query`, любой ввод данных добавляется к запросу после символа перевода строки. - ### Загрузка CSV-файла в удалённый сервис ClickHouse {#cloud-example} В этом примере демонстрируется загрузка примерного набора данных из CSV-файла `cell_towers.csv` в существующую таблицу `cell_towers` в базе данных `default`: @@ -261,7 +255,6 @@ clickhouse-client --host HOSTNAME.clickhouse.cloud \ < cell_towers.csv ``` - ### Примеры вставки данных из командной строки {#more-examples} Существует несколько способов вставить данные из командной строки. @@ -290,7 +283,6 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA В пакетном режиме формат данных по умолчанию — `TabSeparated` (см. [форматы](formats.md)). Вы можете указать формат в предложении `FORMAT` запроса, как показано в примере выше. - ## Запросы с параметрами {#cli-queries-with-parameters} Вы можете указать параметры в запросе и передать им значения с помощью опций командной строки. @@ -333,7 +325,6 @@ Query id: 0358a729-7bbe-4191-bb48-29b063c548a7 1 строка в наборе. Прошло: 0.006 сек. ``` - ### Синтаксис запроса {#cli-queries-with-parameters-syntax} В запросе указывайте значения, которые хотите подставлять с помощью параметров командной строки, заключая их в фигурные скобки в следующем формате: @@ -347,7 +338,6 @@ Query id: 0358a729-7bbe-4191-bb48-29b063c548a7 | `name` | Идентификатор подстановочного параметра. Соответствующая опция командной строки — `--param_ = value`. 
| | `data type` | [Тип данных](../sql-reference/data-types/index.md) параметра.

Например, структура данных вида `(integer, ('string', integer))` может иметь тип данных `Tuple(UInt8, Tuple(String, UInt8))` (можно также использовать другие [целочисленные](../sql-reference/data-types/int-uint.md) типы).

Также можно передавать в качестве параметров имя таблицы, имя базы данных и имена столбцов; в этом случае следует использовать тип данных `Identifier`. | - ### Примеры {#cli-queries-with-parameters-examples} ```bash @@ -358,7 +348,6 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe --query "SELECT {col:Identifier} as {alias:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10" ``` - ## Генерация SQL с помощью ИИ {#ai-sql-generation} ClickHouse Client включает встроенную поддержку ИИ для генерации SQL-запросов по описаниям на естественном языке. Эта функция помогает пользователям составлять сложные запросы без глубоких знаний SQL. @@ -379,7 +368,6 @@ ClickHouse Client включает встроенную поддержку ИИ 2. Генерировать соответствующий SQL‑запрос на основе обнаруженных таблиц и столбцов 3. Сразу выполнять сгенерированный запрос - ### Пример {#ai-sql-generation-example} ```bash @@ -413,7 +401,6 @@ GROUP BY c.name ORDER BY order_count DESC ``` - ### Конфигурация {#ai-sql-generation-configuration} Для генерации SQL-запросов с помощью ИИ необходимо настроить поставщика ИИ в конфигурационном файле клиента ClickHouse. Вы можете использовать OpenAI, Anthropic или любой совместимый с OpenAI API-сервис. @@ -438,7 +425,6 @@ export ANTHROPIC_API_KEY=your-anthropic-key clickhouse-client ``` - #### Файл конфигурации {#ai-sql-generation-configuration-file} Для более тонкого управления настройками ИИ задайте их в файле конфигурации ClickHouse Client, который находится по одному из следующих путей: @@ -543,7 +529,6 @@ ai: model: gpt-3.5-turbo ``` - ### Параметры {#ai-sql-generation-parameters}
@@ -650,7 +635,6 @@ clickhouse:[//[user[:password]@][hosts_and_ports]][/database][?query_parameters] | `database` | Имя базы данных. | `default` | | `query_parameters` | Список пар «ключ–значение» `param1=value1[,¶m2=value2], ...`. Для некоторых параметров значение не требуется. Имена параметров и значений чувствительны к регистру. | - | - ### Примечания {#connection-string-notes} Если имя пользователя, пароль или база данных указаны в строке подключения, их нельзя указывать с помощью `--user`, `--password` или `--database` (и наоборот). @@ -685,7 +669,6 @@ ClickHouse Client будет пытаться подключиться к эти * `database` * `query parameters` - ### Примеры {#connection_string_examples} Подключитесь к `localhost` через порт 9000 и выполните запрос `SELECT 1`. @@ -766,7 +749,6 @@ clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000 clickhouse-client clickhouse://192.168.1.15,192.168.1.25 ``` - ## Формат ID запроса {#query-id-format} В интерактивном режиме ClickHouse Client показывает ID для каждого запроса. По умолчанию ID имеет следующий формат: @@ -794,7 +776,6 @@ ID запроса: 927f137d-00f1-4175-8914-0dd066365e96 speedscope:http://speedscope-host/#profileURL=qp%3Fid%3Dc8ecc783-e753-4b38-97f1-42cddfb98b7d ``` - ## Файлы конфигурации {#configuration_files} ClickHouse Client использует первый найденный файл из следующего списка: @@ -895,7 +876,6 @@ $ clickhouse-client --max_threads 1 Список настроек см. в разделе [Settings](../operations/settings/settings.md). 
- ### Параметры форматирования {#command-line-options-formatting} | Параметр | Описание | По умолчанию | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md index 5336729e38a..03e1aafd2b2 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md @@ -72,7 +72,6 @@ doc_type: 'reference' $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow" ``` - ### Выбор данных {#selecting-data} Вы можете выбрать данные из таблицы ClickHouse и сохранить их в файл формата Arrow с помощью следующей команды: @@ -81,7 +80,6 @@ $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filename.arrow} ``` - ## Настройки формата {#format-settings} | Параметр | Описание | Значение по умолчанию | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md index 41b3eb8273e..abdae4274ae 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md @@ -15,7 +15,6 @@ import DataTypeMapping from './_snippets/data-types-matching.md' | ---- | ----- | --------- | | ✔ | ✔ | | - ## Описание {#description} [Apache Avro](https://avro.apache.org/) — это строчно-ориентированный формат сериализации данных, который использует двоичное кодирование для эффективной обработки. Формат `Avro` поддерживает чтение и запись [файлов данных Avro](https://avro.apache.org/docs/++version++/specification/#object-container-files). Этот формат рассчитан на самоописательные сообщения со встроенной схемой. 
Если вы используете Avro с реестром схем, обратитесь к формату [`AvroConfluent`](./AvroConfluent.md). @@ -54,7 +53,6 @@ $ cat file.avro | clickhouse-client --query="INSERT INTO {some_table} FORMAT Avr При импорте данных, если поле не найдено в схеме и включена настройка [`input_format_avro_allow_missing_fields`](/operations/settings/settings-formats.md/#input_format_avro_allow_missing_fields), вместо генерации ошибки будет использовано значение по умолчанию. - ### Запись данных в формате Avro {#writing-avro-data} Чтобы записать данные из таблицы ClickHouse в файл формата Avro: @@ -70,7 +68,6 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro Сжатие выходных данных и интервал синхронизации для файлов Avro можно настроить с помощью параметров [`output_format_avro_codec`](/operations/settings/settings-formats.md/#output_format_avro_codec) и [`output_format_avro_sync_interval`](/operations/settings/settings-formats.md/#output_format_avro_sync_interval) соответственно. - ### Определение схемы Avro {#inferring-the-avro-schema} С помощью функции ClickHouse [`DESCRIBE`](/sql-reference/statements/describe-table) вы можете быстро просмотреть выводимую (выведенную) схему файла Avro, как показано в следующем примере. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md index 206f5dd5ba7..a5854792e1e 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md @@ -15,7 +15,6 @@ import DataTypesMatching from './_snippets/data-types-matching.md' | ---- | ----- | --------- | | ✔ | ✗ | | - ## Описание {#description} [Apache Avro](https://avro.apache.org/) — это строчно-ориентированный формат сериализации, который использует двоичное кодирование для эффективной обработки данных. 
Формат `AvroConfluent` поддерживает декодирование отдельных объектов — сообщений Kafka, закодированных в Avro и сериализованных с использованием [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) (или API-совместимых сервисов). @@ -61,7 +60,6 @@ format_avro_schema_registry_url = 'http://schema-registry-url'; SELECT * FROM topic1_stream; ``` - #### Использование базовой аутентификации {#using-basic-authentication} Если для вашего реестра схем требуется базовая аутентификация (например, при использовании Confluent Cloud), вы можете указать URL-кодированные учетные данные в настройке `format_avro_schema_registry_url`. @@ -81,7 +79,6 @@ kafka_format = 'AvroConfluent', format_avro_schema_registry_url = 'https://:@schema-registry-url'; ``` - ## Диагностика неполадок {#troubleshooting} Чтобы отслеживать ход ингестии и отлаживать ошибки потребителя Kafka, вы можете выполнить запрос к [системной таблице `system.kafka_consumers`](../../../operations/system-tables/kafka_consumers.md). Если в вашем развертывании несколько реплик (например, ClickHouse Cloud), необходимо использовать табличную функцию [`clusterAllReplicas`](../../../sql-reference/table-functions/cluster.md). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md index 812f0e1b7a9..98f95003d08 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md @@ -114,7 +114,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.bson' FORMAT BSONEachRow; ``` - ### Чтение данных {#reading-data} Считывайте данные в формате `BSONEachRow`: @@ -129,7 +128,6 @@ FORMAT BSONEachRow BSON — это двоичный формат, который не отображается в человекочитаемом виде в терминале. Используйте `INTO OUTFILE` для вывода файлов BSON. 
::: - ## Настройки формата {#format-settings} | Параметр | Описание | Значение по умолчанию | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md index 897bc2efb2b..52609504edf 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md @@ -46,7 +46,6 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR Если это не удаётся и входное значение является числом, выполняется попытка сопоставить это число с идентификатором ENUM. Если входные данные содержат только идентификаторы ENUM, рекомендуется включить настройку [input_format_csv_enum_as_number](/operations/settings/settings-formats.md/#input_format_csv_enum_as_number) для оптимизации разбора `ENUM`. - ## Пример использования {#example-usage} ## Настройки формата {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md index 62b81aca022..a49487c6ed3 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md @@ -70,7 +70,6 @@ ORDER BY (date, home_team); INSERT INTO football FROM INFILE 'football.csv' FORMAT CSVWithNames; ``` - ### Чтение данных {#reading-data} Прочитайте данные в формате `CSVWithNames`: @@ -104,7 +103,6 @@ FORMAT CSVWithNames "2022-05-07",2021,"Walsall","Swindon Town",0,3 ``` - ## Настройки формата {#format-settings} :::note diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md index c4d757c869e..92b6729dd8f 100644 --- 
a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md @@ -71,7 +71,6 @@ ORDER BY (date, home_team); INSERT INTO football FROM INFILE 'football_types.csv' FORMAT CSVWithNamesAndTypes; ``` - ### Чтение данных {#reading-data} Прочитайте данные в формате `CSVWithNamesAndTypes`: @@ -106,7 +105,6 @@ FORMAT CSVWithNamesAndTypes "2022-05-07",2021,"Walsall","Swindon Town",0,3 ``` - ## Настройки формата {#format-settings} :::note diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md index 8ca0f16af83..116e81d59e2 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md @@ -17,7 +17,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; | ---- | ----- | --------- | | ✔ | ✔ | | - ## Описание {#description} Формат `CapnProto` — это двоичный формат сообщений, похожий на формат [`Protocol Buffers`](https://developers.google.com/protocol-buffers/) и [Thrift](https://en.wikipedia.org/wiki/Apache_Thrift), но, в отличие от [JSON](./JSON/JSON.md) или [MessagePack](https://msgpack.org/), не имеет с ними общего. @@ -81,7 +80,6 @@ struct Message { $ clickhouse-client --query = "SELECT * FROM test.hits FORMAT CapnProto SETTINGS format_schema = 'schema:Message'" ``` - ### Использование автоматически сгенерированной схемы {#using-autogenerated-capn-proto-schema} Если у вас нет внешней схемы `CapnProto` для ваших данных, вы все равно можете считывать и выводить данные в формате `CapnProto`, используя автоматически сгенерированную схему. 
@@ -102,7 +100,6 @@ SETTINGS format_capn_proto_use_autogenerated_schema=1 $ cat hits.bin | clickhouse-client --query "INSERT INTO test.hits SETTINGS format_capn_proto_use_autogenerated_schema=1 FORMAT CapnProto" ``` - ## Настройки формата {#format-settings} Настройка [`format_capn_proto_use_autogenerated_schema`](../../operations/settings/settings-formats.md/#format_capn_proto_use_autogenerated_schema) включена по умолчанию и применяется, если параметр [`format_schema`](/interfaces/formats#formatschema) не задан. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md index b430b1db54e..c85a9e0e593 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md @@ -56,7 +56,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparated; ``` - ### Чтение данных {#reading-data} Настройте параметры пользовательского разделителя: @@ -83,7 +82,6 @@ FORMAT CustomSeparated row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere 
Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## Настройки формата {#format-settings} Дополнительные настройки: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md index daa10513193..0f3e74262d6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md @@ -38,5 +38,4 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpaces; ``` - ## Настройки формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md index f45cc616c3f..b3a501ac231 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md @@ -38,5 +38,4 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpacesWithNames; ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git 
a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md index 61a12524168..20bb6300b03 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md @@ -38,5 +38,4 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpacesWithNamesAndTypes; ``` - ## Параметры формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md index bf7f6be7097..b7cc522072b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md @@ -43,7 +43,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedWithNames; ``` - ### Чтение данных {#reading-data} Настройте параметры пользовательского разделителя: @@ -70,7 +69,6 @@ FORMAT CustomSeparatedWithNames row('date';'season';'home_team';'away_team';'home_team_goals';'away_team_goals'),row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield 
Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## Настройки формата {#format-settings} :::note diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md index cd1215b63fe..f06aaf476c0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md @@ -43,7 +43,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedWithNamesAndTypes; ``` - ### Чтение данных {#reading-data} Настройте параметры пользовательского разделителя: @@ -70,7 +69,6 @@ FORMAT CustomSeparatedWithNamesAndTypes row('date';'season';'home_team';'away_team';'home_team_goals';'away_team_goals'),row('Date';'Int16';'LowCardinality(String)';'LowCardinality(String)';'Int8';'Int8'),row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port 
Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## Настройки формата {#format-settings} :::note diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md index 9869037fcbb..a1c80f2e6a8 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md @@ -83,5 +83,4 @@ LIMIT 3 Пиковое использование памяти: 271.92 МиБ. 
``` - ## Параметры формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md index 9e6cd1a2c89..ccb5f4b6969 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md @@ -40,5 +40,4 @@ rt.start: navigation rt.bmr: 390,11,10 ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md index 1faf9e37364..453de3eb25c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md @@ -62,5 +62,4 @@ df2ec2f0669b000edff6adee264e7d68 Получена 1 строка. Время выполнения: 0,154 сек. 
``` - ## Настройки формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md index 8cba95a1c4f..3085c4ee4b0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md @@ -99,7 +99,6 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA } ``` - ## Настройки формата {#format-settings} Для формата ввода JSON, если настройка [`input_format_json_validate_types_from_metadata`](/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) установлена в значение `1`, diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md index 7ccbca236ea..8b62bb5996a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md @@ -31,7 +31,6 @@ SELECT * FROM json_as_object FORMAT JSONEachRow; {"json":{"any json stucture":"1"}} ``` - ### Массив объектов JSON {#an-array-of-json-objects} ```sql title="Query" @@ -45,7 +44,6 @@ SELECT * FROM json_square_brackets FORMAT JSONEachRow; {"field":{"id":"2","name":"name2"}} ``` - ### Столбцы со значениями по умолчанию {#columns-with-default-values} ```sql title="Query" @@ -62,5 +60,4 @@ SELECT time, json FROM json_as_object FORMAT JSONEachRow {"time":"2024-09-16 12:18:08","json":{"foo":{"bar":{"x":"y"},"baz":"1"}}} ``` - ## Параметры формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md 
b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md index 8f39e7095d4..1f4b53e24a5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md @@ -46,7 +46,6 @@ SELECT * FROM json_as_string; └───────────────────────────────────┘ ``` - ### Массив объектов JSON {#an-array-of-json-objects} ```sql title="Query" @@ -63,5 +62,4 @@ SELECT * FROM json_square_brackets; └────────────────────────────┘ ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md index d6bde8bef51..e3b5f1a97b4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONColumns; ``` - ### Чтение данных {#reading-data} Считывайте данные в формате `JSONColumns`: @@ -73,7 +72,6 @@ FORMAT JSONColumns } ``` - ## Настройки формата {#format-settings} При импорте столбцы с неизвестными именами будут пропущены, если настройка [`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) установлена в значение `1`. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md index 09c5411938e..68769c20556 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md @@ -67,5 +67,4 @@ doc_type: 'reference' Для формата ввода `JSONColumnsWithMetadata`, если параметр [`input_format_json_validate_types_from_metadata`](/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) имеет значение `1`, типы, указанные в метаданных во входных данных, будут сравниваться с типами соответствующих столбцов таблицы. - ## Параметры формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md index 90f482f4327..df235bc9668 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md @@ -81,7 +81,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompact; ``` - ### Чтение данных {#reading-data} Считайте данные в формате `JSONCompact`: @@ -156,5 +155,4 @@ FORMAT JSONCompact } ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md index 0b3107181e7..1d16080576c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md +++ 
b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md @@ -44,7 +44,6 @@ JSON-файл со следующими данными, сохранённый INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactColumns; ``` - ### Чтение данных {#reading-data} Прочитайте данные, используя формат `JSONCompactColumns`: @@ -70,5 +69,4 @@ FORMAT JSONCompactColumns Столбцы, которые отсутствуют в блоке, будут заполнены значениями по умолчанию (здесь можно использовать настройку [`input_format_defaults_for_omitted_fields`](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields)) - ## Настройки формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md index cd011244a94..b15b565df4e 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactEachRow; ``` - ### Чтение данных {#reading-data} Считайте данные в формате `JSONCompactEachRow`: @@ -82,5 +81,4 @@ FORMAT JSONCompactEachRow ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## Параметры формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md index e21d272af56..25b0445e4c6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md @@ -50,7 +50,6 @@ 
doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactEachRowWithNames; ``` - ### Чтение данных {#reading-data} Считывайте данные в формате `JSONCompactEachRowWithNames`: @@ -84,7 +83,6 @@ FORMAT JSONCompactEachRowWithNames ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## Настройки формата {#format-settings} :::note diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md index acc1add1b2b..75cc861593c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md @@ -51,7 +51,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactEachRowWithNamesAndTypes; ``` - ### Чтение данных {#reading-data} Считайте данные в формате `JSONCompactEachRowWithNamesAndTypes`: @@ -86,7 +85,6 @@ FORMAT JSONCompactEachRowWithNamesAndTypes ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## Настройки формата {#format-settings} :::note diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md index 4a815e7c3b9..d67f5c7f1f6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md @@ -48,5 +48,4 @@ FORMAT JSONCompactEachRowWithProgress {"rows_before_limit_at_least":5} ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git 
a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md index 1ebe4a2a3d2..77738584113 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md @@ -93,5 +93,4 @@ FORMAT JSONCompactStrings } ``` - ## Настройки форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md index 2cbd83b83b4..0e3f17e1956 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactStringsEachRow; ``` - ### Чтение данных {#reading-data} Считывайте данные в формате `JSONCompactStringsEachRow`: @@ -82,5 +81,4 @@ FORMAT JSONCompactStringsEachRow ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## Настройки формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md index f9b1b3bc632..a48ea2e9ec1 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md @@ -50,7 +50,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 
'football.json' FORMAT JSONCompactStringsEachRowWithNames; ``` - ### Чтение данных {#reading-data} Считайте данные в формате `JSONCompactStringsEachRowWithNames`: @@ -84,7 +83,6 @@ FORMAT JSONCompactStringsEachRowWithNames ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## Настройки формата {#format-settings} :::note diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md index ff5b2e532b0..7977eed7cab 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md @@ -48,7 +48,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactStringsEachRowWithNamesAndTypes; ``` - ### Чтение данных {#reading-data} Считывайте данные в формате `JSONCompactStringsEachRowWithNamesAndTypes`: @@ -83,7 +82,6 @@ FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## Настройки формата {#format-settings} :::note diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md index 5c86576ae9f..e0b6f2d6e73 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md @@ -47,5 +47,4 @@ FORMAT JSONCompactStringsEachRowWithProgress {"rows_before_limit_at_least":5} ``` - ## Параметры формата {#format-settings} \ No newline at end of file diff --git 
a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md index 0c5b815fb9f..87ebd4d26a0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md @@ -47,7 +47,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONEachRow; ``` - ### Чтение данных {#reading-data} Прочитайте данные в формате `JSONEachRow`: @@ -82,5 +81,4 @@ FORMAT JSONEachRow Импорт столбцов с неизвестными именами будет пропускаться, если параметр [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) установлен в 1. - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md index 61e46f04654..04563fc72ab 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md @@ -26,5 +26,4 @@ doc_type: 'reference' {"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md index ceecb72e5e5..63537c3883f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md @@ 
-49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONLines; ``` - ### Чтение данных {#reading-data} Прочитайте данные в формате `JSONLines`: @@ -84,5 +83,4 @@ FORMAT JSONLines Столбцы данных с неизвестными именами будут пропущены при импорте, если настройка [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) установлена в 1. - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md index 5de1ea8c011..f9df57be819 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## Описание {#description} В этом формате все данные представлены одним JSON-объектом, где каждая строка является отдельным полем этого объекта, аналогично формату [`JSONEachRow`](./JSONEachRow.md). 
- - ## Пример использования {#example-usage} ### Базовый пример {#basic-example} @@ -133,7 +129,6 @@ CREATE TABLE IF NOT EXISTS example_table Рассмотрим в качестве примера таблицу `UserActivity`: - ```response ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ │ 4324182021466249494 │ 5 │ 146 │ -1 │ @@ -213,11 +208,8 @@ SELECT * FROM json_each_row_nested └───────────────┴────────┘ ``` - ## Параметры форматирования {#format-settings} - - | Настройка | Описание | По умолчанию | Примечания | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [`input_format_import_nested_json`](/operations/settings/settings-formats.md/#input_format_import_nested_json) | сопоставлять вложенные данные JSON вложенным таблицам (работает для формата JSONEachRow). 
| `false` | | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md index 70989a5888e..6f94301e1c7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md @@ -200,7 +200,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONStrings; ``` - ### Чтение данных {#reading-data} Считайте данные в формате `JSONStrings`: @@ -213,7 +212,6 @@ FORMAT JSONStrings Результат будет в формате JSON: - ```json { "meta": diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md index 1ca778f6f3a..fa0b9340ecb 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## Описание {#description} Отличается от [`JSONEachRow`](./JSONEachRow.md) только тем, что поля данных выводятся как строки, а не как типизированные JSON-значения. 
- - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -65,7 +61,6 @@ FORMAT JSONStringsEachRow Вывод будет в формате JSON: - ```json {"date":"2022-04-30","season":"2021","home_team":"Sutton United","away_team":"Bradford City","home_team_goals":"1","away_team_goals":"4"} {"date":"2022-04-30","season":"2021","home_team":"Swindon Town","away_team":"Barrow","home_team_goals":"2","away_team_goals":"1"} @@ -86,5 +81,4 @@ FORMAT JSONStringsEachRow {"date":"2022-05-07","season":"2021","home_team":"Walsall","away_team":"Swindon Town","home_team_goals":"0","away_team_goals":"3"} ``` - ## Параметры форматирования {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md index 577cad940a9..a5d9b416888 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md @@ -6,14 +6,10 @@ title: 'JSONStringsEachRowWithProgress' doc_type: 'reference' --- - - ## Описание {#description} Отличается от `JSONEachRow`/`JSONStringsEachRow` тем, что ClickHouse также возвращает сведения о прогрессе в формате JSON. 
- - ## Пример использования {#example-usage} ```json @@ -23,5 +19,4 @@ doc_type: 'reference' {"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} ``` - ## Параметры форматирования {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md index 5fda8d0b4ba..e4fe97ce4d0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md @@ -13,21 +13,15 @@ doc_type: 'guide' |------|-------|-----------------------------------| | ✗ | ✔ | `PrettyJSONLines`, `PrettyNDJSON` | - - ## Описание {#description} Отличается от [JSONEachRow](./JSONEachRow.md) только тем, что JSON форматируется в читабельном виде с разделением по строкам и отступом в 4 пробела. 
- - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} Используем JSON-файл `football.json` со следующими данными: - - ```json { "date": "2022-04-30", @@ -185,7 +179,6 @@ FORMAT PrettyJSONEachRow Результат будет в формате JSON: - ```json { "date": "2022-04-30", @@ -327,6 +320,4 @@ FORMAT PrettyJSONEachRow - - ## Параметры форматирования {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md index 950e402183c..b591bb73b3a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md @@ -13,16 +13,12 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## Описание {#description} Формат `LineAsString` интерпретирует каждую строку входных данных как одно строковое значение. Этот формат может быть использован только для таблицы с одним полем типа [String](/sql-reference/data-types/string.md). Остальные столбцы должны иметь типы [`DEFAULT`](/sql-reference/statements/create/table.md/#default), [`MATERIALIZED`](/sql-reference/statements/create/view#materialized-view) или быть опущены. 
- - ## Пример использования {#example-usage} ```sql title="Query" @@ -38,5 +34,4 @@ SELECT * FROM line_as_string; └───────────────────────────────────────────────────┘ ``` - ## Настройки формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md index 25691de4423..8ad98487b30 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md @@ -13,14 +13,10 @@ doc_type: 'reference' |------|-------|-----------| | ✗ | ✔ | | - - ## Описание {#description} Формат `LineAsStringWithNames` похож на формат [`LineAsString`](./LineAsString.md), но выводит строку заголовков с именами столбцов. - - ## Пример использования {#example-usage} ```sql title="Query" @@ -42,5 +38,4 @@ Jane 25 Peter 35 ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md index c33967054a4..40a9f0b504f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md @@ -13,15 +13,11 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## Описание {#description} Формат `LineAsStringWithNames` похож на формат [`LineAsString`](./LineAsString.md), но выводит две строки заголовков: одну с именами столбцов, другую — с их типами. 
- - ## Пример использования {#example-usage} ```sql @@ -44,5 +40,4 @@ Jane 25 Peter 35 ``` - ## Параметры формата {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md index 6530214650d..c40adcc912c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md @@ -11,16 +11,12 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | `MD` | - - ## Описание {#description} Вы можете экспортировать результаты в формате [Markdown](https://en.wikipedia.org/wiki/Markdown), чтобы получить данные, готовые для вставки в ваши файлы `.md`: Таблица в формате Markdown будет сгенерирована автоматически и может использоваться на платформах с поддержкой Markdown, таких как GitHub. Этот формат используется только для представления результатов. - - ## Пример использования {#example-usage} ```sql @@ -41,5 +37,4 @@ FORMAT Markdown | 4 | 8 | ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md index d0596278fe0..2c2fc70dc71 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## Описание {#description} ClickHouse поддерживает чтение и запись файлов данных в формате [MessagePack](https://msgpack.org/). 
- - ## Соответствие типов данных {#data-types-matching} | Тип данных MessagePack (`INSERT`) | Тип данных ClickHouse | Тип данных MessagePack (`SELECT`) | @@ -46,8 +42,6 @@ ClickHouse поддерживает чтение и запись файлов д | `int 64` | [`Decimal64`](/sql-reference/data-types/decimal.md) | `int 64` | | `bin 8` | [`Decimal128`/`Decimal256`](/sql-reference/data-types/decimal.md) | `bin 8 ` | - - ## Пример использования {#example-usage} Запись в файл «.msgpk»: @@ -58,7 +52,6 @@ $ clickhouse-client --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 2 $ clickhouse-client --query="SELECT * FROM msgpack FORMAT MsgPack" > tmp_msgpack.msgpk; ``` - ## Настройки формата {#format-settings} | Настройка | Описание | По умолчанию | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md index a9e4657b9d8..9978b7bfed3 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|-------|-----------| | ✔ | ✗ | | - - ## Описание {#description} ClickHouse поддерживает чтение [дампов](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html) MySQL. @@ -26,8 +24,6 @@ ClickHouse поддерживает чтение [дампов](https://dev.mysq Этот формат поддерживает автоматическое определение схемы: если дамп содержит запрос `CREATE` для указанной таблицы, структура определяется по нему, в противном случае схема определяется по данным запросов `INSERT`. 
::: - - ## Пример использования {#example-usage} Предположим, у нас есть следующий файл дампа SQL: @@ -84,7 +80,6 @@ SETTINGS input_format_mysql_dump_table_name = 'test2' └───┘ ``` - ## Настройки формата {#format-settings} Вы можете указать имя таблицы, из которой нужно читать данные, с помощью настройки [`input_format_mysql_dump_table_name`](/operations/settings/settings-formats.md/#input_format_mysql_dump_table_name). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md index baa8687e572..65b08dca3f7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## Описание {#description} Формат `Npy` предназначен для загрузки массива NumPy из файла `.npy` в ClickHouse. @@ -23,8 +21,6 @@ doc_type: 'reference' В таблице ниже приведены поддерживаемые типы данных Npy и соответствующие им типы в ClickHouse: - - ## Соответствие типов данных {#data_types-matching} | Тип данных Npy (`INSERT`) | Тип данных ClickHouse | Тип данных Npy (`SELECT`) | @@ -42,8 +38,6 @@ doc_type: 'reference' | `S`, `U` | [String](/sql-reference/data-types/string.md) | `S` | | | [FixedString](/sql-reference/data-types/fixedstring.md) | `S` | - - ## Пример использования {#example-usage} ### Сохранение массива в формате .npy на Python {#saving-an-array-in-npy-format-using-python} @@ -76,5 +70,4 @@ FROM file('example_array.npy', Npy) $ clickhouse-client --query="SELECT {column} FROM {some_table} FORMAT Npy" > {filename.npy} ``` - ## Настройки формата {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md index 496aa71e7a4..0cdd213c78a 100644 --- 
a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md @@ -13,8 +13,6 @@ doc_type: 'reference' |----------------|-----------------|-----------| | ✗ | ✔ | | - - ## Описание {#description} В формате `Null` ничего не выводится. @@ -25,8 +23,6 @@ doc_type: 'reference' Формат `Null` может быть полезен для тестирования производительности. ::: - - ## Пример использования {#example-usage} ### Чтение данных {#reading-data} @@ -69,5 +65,4 @@ FORMAT Null Получено 0 строк. Время выполнения: 0.154 сек. ``` - ## Настройки формата {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md index c92fa766eba..f1dd5cc47a0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md @@ -13,14 +13,10 @@ doc_type: 'reference' |--------|----------|-----------| | ✔ | ✔ | | - - ## Описание {#description} [Apache ORC](https://orc.apache.org/) — это колоночный формат хранения, широко используемый в экосистеме [Hadoop](https://hadoop.apache.org/). - - ## Соответствие типов данных {#data-types-matching-orc} В таблице ниже приведено сравнение поддерживаемых типов данных ORC и соответствующих типов данных [ClickHouse](/sql-reference/data-types/index.md) в запросах `INSERT` и `SELECT`. @@ -50,8 +46,6 @@ doc_type: 'reference' - Массивы могут быть вложенными и иметь в качестве аргумента значение типа `Nullable`. Типы `Tuple` и `Map` также могут быть вложенными. - Типы данных столбцов таблицы ClickHouse не обязаны совпадать с соответствующими полями ORC. При вставке данных ClickHouse интерпретирует типы данных согласно таблице выше, а затем [приводит](/sql-reference/functions/type-conversion-functions#cast) данные к типу, заданному для столбца таблицы ClickHouse. 
- - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -101,7 +95,6 @@ FORMAT ORC ORC — это бинарный формат, который не отображается в человекочитаемом виде в терминале. Используйте оператор `INTO OUTFILE` для вывода данных в файлы ORC. ::: - ## Настройки формата {#format-settings} | Setting | Description | Default | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/One.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/One.md index ea208815fe9..025e28c93a1 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/One.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/One.md @@ -13,15 +13,11 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## Описание {#description} Формат `One` — это специальный входной формат, который не читает данные из файла и возвращает только одну строку со столбцом типа [`UInt8`](../../sql-reference/data-types/int-uint.md) с именем `dummy` и значением `0` (как таблица `system.one`). Может использоваться с виртуальными столбцами `_file/_path` для получения списка всех файлов без чтения реальных данных. 
- - ## Пример использования {#example-usage} Пример: @@ -45,5 +41,4 @@ SELECT _file FROM file('path/to/files/data*', One); └──────────────┘ ``` - ## Параметры формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md index 3929b0aee9d..4fd2fb5aa12 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md @@ -9,17 +9,13 @@ title: 'Parquet' doc_type: 'reference' --- -| Входной формат | Выходной формат | Синоним | -|----------------|-----------------|---------| -| ✔ | ✔ | | - - +| Входной формат | Выходной формат | Псевдоним | +|----------------|-----------------|-----------| +| ✔ | ✔ | | ## Описание {#description} -[Apache Parquet](https://parquet.apache.org/) — это колоночный формат хранения данных, широко распространённый в экосистеме Hadoop. ClickHouse поддерживает чтение и запись данных в этом формате. - - +[Apache Parquet](https://parquet.apache.org/) — это столбцовый формат хранения данных, широко распространённый в экосистеме Hadoop. ClickHouse поддерживает чтение и запись данных в этом формате. ## Соответствие типов данных {#data-types-matching-parquet} @@ -66,15 +62,12 @@ doc_type: 'reference' Типы данных столбцов таблицы ClickHouse могут отличаться от соответствующих полей вставляемых данных Parquet. При вставке данных ClickHouse интерпретирует типы данных согласно приведённой выше таблице, а затем [приводит](/sql-reference/functions/type-conversion-functions#cast) данные к тому типу данных, который установлен для столбца таблицы ClickHouse. Например, столбец Parquet `UINT_32` может быть прочитан в столбец ClickHouse [IPv4](/sql-reference/data-types/ipv4.md). +Для некоторых типов Parquet нет близкого по соответствию типа ClickHouse. 
Они читаются следующим образом: - -Для некоторых типов Parquet не существует близкого по смыслу типа ClickHouse. Мы читаем их следующим образом: * `TIME` (время суток) читается как метка времени. Например, `10:23:13.000` становится `1970-01-01 10:23:13.000`. * `TIMESTAMP`/`TIME` с `isAdjustedToUTC=false` — это локальное время по настенным часам (поля год, месяц, день, час, минута, секунда и доля секунды в локальном часовом поясе, независимо от того, какой конкретный часовой пояс считается локальным), аналогично SQL `TIMESTAMP WITHOUT TIME ZONE`. ClickHouse читает его так, как если бы это была метка времени в UTC. Например, `2025-09-29 18:42:13.000` (представляющее показания локальных настенных часов) становится `2025-09-29 18:42:13.000` (`DateTime64(3, 'UTC')`, представляя точку во времени). При преобразовании к String выводятся корректные год, месяц, день, час, минута, секунда и доля секунды, которые затем могут интерпретироваться как относящиеся к какому-либо локальному часовому поясу, а не к UTC. Противоинтуитивно, смена типа с `DateTime64(3, 'UTC')` на `DateTime64(3)` не поможет, так как оба типа представляют точку во времени, а не показание часов, но `DateTime64(3)` будет некорректно форматироваться с использованием локального часового пояса. * `INTERVAL` в настоящий момент читается как `FixedString(12)` с сырым двоичным представлением временного интервала в том виде, в котором он закодирован в файле Parquet. 
- - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -103,12 +96,13 @@ doc_type: 'reference' └────────────┴────────┴───────────────────────┴─────────────────────┴─────────────────┴─────────────────┘ ``` -Введите данные: +Вставьте данные: ```sql INSERT INTO football FROM INFILE 'football.parquet' FORMAT Parquet; ``` + ### Чтение данных {#reading-data} Прочитайте данные в формате `Parquet`: @@ -129,33 +123,30 @@ Parquet — это двоичный формат, который не отобр ## Параметры форматирования {#format-settings} - - -| Настройка | Описание | По умолчанию | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input_format_parquet_case_insensitive_column_matching` | Не учитывать регистр при сопоставлении столбцов Parquet со столбцами CH. | `0` | -| `input_format_parquet_preserve_order` | Избегайте изменения порядка строк при чтении из файлов Parquet. Обычно это значительно замедляет чтение. | `0` | -| `input_format_parquet_filter_push_down` | При чтении файлов Parquet пропускать целые группы строк на основе выражений WHERE/PREWHERE и статистики min/max в метаданных Parquet. 
| `1` | -| `input_format_parquet_bloom_filter_push_down` | При чтении файлов Parquet пропускать целые группы строк на основе выражений WHERE и фильтра Блума в метаданных файлов Parquet. | `0` | -| `input_format_parquet_use_native_reader` | При чтении файлов в формате Parquet использовать нативный считыватель вместо считывателя Arrow. | `0` | -| `input_format_parquet_allow_missing_columns` | Допускать отсутствующие столбцы при чтении входных форматов Parquet | `1` | -| `input_format_parquet_local_file_min_bytes_for_seek` | Минимальное количество байт при локальном чтении файла, начиная с которого используется seek вместо чтения с пропуском (ignore) во входном формате Parquet | `8192` | -| `input_format_parquet_enable_row_group_prefetch` | Включить предварительную выборку групп строк при разборе файлов Parquet. В настоящее время предварительную выборку может выполнять только однопоточный парсер. | `1` | -| `input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference` | Пропускать столбцы с неподдерживаемыми типами при определении схемы для формата Parquet | `0` | -| `input_format_parquet_max_block_size` | Максимальный размер блока для читателя Parquet. | `65409` | -| `input_format_parquet_prefer_block_bytes` | Средний размер блока в байтах, выдаваемого читателем Parquet | `16744704` | -| `input_format_parquet_enable_json_parsing` | При чтении файлов Parquet разбирайте столбцы JSON как столбцы ClickHouse JSON Column. | `1` | -| `output_format_parquet_row_group_size` | Целевое количество строк в группе. | `1000000` | -| `output_format_parquet_row_group_size_bytes` | Целевой размер группы строк в байтах до сжатия. | `536870912` | -| `output_format_parquet_string_as_string` | Используйте тип Parquet String вместо Binary для столбцов типа String. | `1` | -| `output_format_parquet_fixed_string_as_fixed_byte_array` | Используйте тип Parquet FIXED_LEN_BYTE_ARRAY вместо Binary для столбцов FixedString. 
| `1` | -| `output_format_parquet_version` | Версия формата Parquet для формата вывода. Поддерживаемые версии: 1.0, 2.4, 2.6 и 2.latest (по умолчанию) | `2.latest` | -| `output_format_parquet_compression_method` | Метод сжатия для формата вывода Parquet. Поддерживаемые кодеки: snappy, lz4, brotli, zstd, gzip, none (без сжатия) | `zstd` | -| `output_format_parquet_compliant_nested_types` | В схеме файла Parquet используйте имя 'element' вместо 'item' для элементов списка. Это исторический артефакт реализации библиотеки Arrow. Как правило, это повышает совместимость, за исключением, возможно, некоторых старых версий Arrow. | `1` | -| `output_format_parquet_use_custom_encoder` | Используйте более быструю реализацию кодировщика Parquet. | `1` | -| `output_format_parquet_parallel_encoding` | Выполнять кодирование в Parquet в нескольких потоках. Требует включённой настройки output_format_parquet_use_custom_encoder. | `1` | -| `output_format_parquet_data_page_size` | Целевой размер страницы в байтах перед сжатием. | `1048576` | -| `output_format_parquet_batch_size` | Проверять размер страницы каждые указанное количество строк. Рассмотрите возможность уменьшения значения, если в столбцах средний размер значений превышает несколько КБ. | `1024` | -| `output_format_parquet_write_page_index` | Добавить возможность записывать страничный индекс в файлы Parquet. | `1` | -| `input_format_parquet_import_nested` | Устаревший параметр, ни на что не влияет. | `0` | -| `input_format_parquet_local_time_as_utc` | true | Определяет тип данных, используемый при выводе схемы (schema inference) для временных меток Parquet с isAdjustedToUTC=false. Если true: DateTime64(..., 'UTC'), если false: DateTime64(...). Ни один из вариантов не является полностью корректным, так как в ClickHouse нет типа данных для локального времени по настенным часам (wall-clock time). 
Как ни парадоксально, значение 'true', вероятно, является менее некорректным вариантом, поскольку форматирование временной метки 'UTC' как String даст представление правильного локального времени. | +| Настройка | Описание | По умолчанию | +| ------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `input_format_parquet_case_insensitive_column_matching` | Не учитывать регистр при сопоставлении столбцов Parquet со столбцами CH. | `0` | +| `input_format_parquet_preserve_order` | Избегайте изменения порядка строк при чтении из файлов Parquet. Обычно это сильно замедляет чтение. | `0` | +| `input_format_parquet_filter_push_down` | При чтении файлов Parquet пропускать целые группы строк на основе выражений WHERE/PREWHERE и статистики min/max в метаданных файлов Parquet. | `1` | +| `input_format_parquet_bloom_filter_push_down` | При чтении файлов Parquet пропускать целые группы строк на основе выражений WHERE и фильтра Блума в метаданных Parquet. 
| `0` | +| `input_format_parquet_allow_missing_columns` | Разрешать отсутствующие столбцы при чтении входных данных в формате Parquet | `1` | +| `input_format_parquet_local_file_min_bytes_for_seek` | Минимальный объём данных в байтах при локальном чтении файла, начиная с которого используется seek вместо чтения с игнорированием (ignore) во входном формате Parquet | `8192` | +| `input_format_parquet_enable_row_group_prefetch` | Включить предварительную выборку групп строк при разборе Parquet. В настоящее время предварительная выборка поддерживается только для однопоточного разбора. | `1` | +| `input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference` | Пропускать столбцы с неподдерживаемыми типами при определении схемы для формата Parquet | `0` | +| `input_format_parquet_max_block_size` | Максимальный размер блока для считывателя Parquet. | `65409` | +| `input_format_parquet_prefer_block_bytes` | Средний размер блока в байтах, выдаваемый читателем Parquet | `16744704` | +| `input_format_parquet_enable_json_parsing` | При чтении файлов Parquet интерпретировать столбцы JSON как столбцы типа ClickHouse JSON Column. | `1` | +| `output_format_parquet_row_group_size` | Целевое количество строк в группе. | `1000000` | +| `output_format_parquet_row_group_size_bytes` | Целевой размер группы строк в байтах перед сжатием. | `536870912` | +| `output_format_parquet_string_as_string` | Используйте тип Parquet String вместо Binary для столбцов типа String. | `1` | +| `output_format_parquet_fixed_string_as_fixed_byte_array` | Используйте тип Parquet FIXED_LEN_BYTE_ARRAY вместо Binary для столбцов типа FixedString. | `1` | +| `output_format_parquet_version` | Версия формата Parquet для выходных данных. Поддерживаемые версии: 1.0, 2.4, 2.6 и 2.latest (по умолчанию) | `2.latest` | +| `output_format_parquet_compression_method` | Метод сжатия для формата вывода Parquet. 
Поддерживаемые кодеки: snappy, lz4, brotli, zstd, gzip, none (без сжатия) | `zstd` | +| `output_format_parquet_compliant_nested_types` | В схеме файла Parquet используйте имя 'element' вместо 'item' для элементов списка. Это исторический артефакт реализации библиотеки Arrow. Как правило, повышает совместимость, за исключением, возможно, некоторых старых версий Arrow. | `1` | +| `output_format_parquet_use_custom_encoder` | Используйте более быстрый кодировщик Parquet. | `1` | +| `output_format_parquet_parallel_encoding` | Выполнять кодирование Parquet в нескольких потоках. Требует включения параметра output_format_parquet_use_custom_encoder. | `1` | +| `output_format_parquet_data_page_size` | Целевой размер страницы в байтах до сжатия. | `1048576` | +| `output_format_parquet_batch_size` | Проверять размер страницы через каждые указанное количество строк. Рассмотрите возможность уменьшения значения, если средний размер значений в столбцах превышает несколько КБ. | `1024` | +| `output_format_parquet_write_page_index` | Добавить возможность записи индекса страниц в файлы Parquet. | `1` | +| `input_format_parquet_import_nested` | Устаревший параметр, ничего не делает. | `0` | +| `input_format_parquet_local_time_as_utc` | true | Определяет тип данных, используемый при определении схемы (schema inference) для временных меток Parquet с isAdjustedToUTC=false. Если true: DateTime64(..., 'UTC'), если false: DateTime64(...). Ни один из вариантов не является полностью корректным, так как в ClickHouse нет типа данных для локального времени по настенным часам (wall-clock time). Как ни парадоксально, значение 'true', вероятно, является менее некорректным вариантом, поскольку форматирование временной метки 'UTC' как String будет соответствовать правильному локальному времени. 
| \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md index abcd72138f0..2f71acebeb5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md @@ -6,8 +6,6 @@ title: 'ParquetMetadata' doc_type: 'reference' --- - - ## Описание {#description} Специальный формат для чтения метаданных файлов Parquet (https://parquet.apache.org/docs/file-format/metadata/). Всегда выводит одну строку со следующей структурой/содержимым: @@ -47,8 +45,6 @@ doc_type: 'reference' - `min` - минимальное значение в чанке столбца - `max` - максимальное значение в чанке столбца - - ## Пример использования {#example-usage} Пример: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md index 78a8c4448ef..157e14eb35a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md @@ -15,7 +15,6 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; | ---- | ----- | --------- | | ✗ | ✔ | | - ## Описание {#description} Формат `Pretty` выводит данные в виде таблиц с использованием символов Unicode, @@ -26,8 +25,6 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; [NULL](/sql-reference/syntax.md) выводится как `ᴺᵁᴸᴸ`. 
- - ## Пример использования {#example-usage} Пример (для формата [`PrettyCompact`](./PrettyCompact.md)): @@ -97,7 +94,6 @@ FORMAT PrettyCompact └────────────┴─────────┘ ``` - ## Параметры форматирования {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md index 4463d69f94f..ae1aac4e843 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md @@ -15,14 +15,11 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; | ---- | ----- | --------- | | ✗ | ✔ | | - ## Описание {#description} Отличается от [Pretty](/interfaces/formats/Pretty) тем, что не используются [последовательности управляющих кодов ANSI](http://en.wikipedia.org/wiki/ANSI_escape_code). Это необходимо для отображения этого формата в браузере, а также для использования с утилитой командной строки `watch`. - - ## Пример использования {#example-usage} Пример: @@ -35,7 +32,6 @@ $ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events F [HTTP-интерфейс](../../../interfaces/http.md) можно использовать для отображения данного формата в браузере. 
::: - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md index e81cb90cd69..a48c4aa07ad 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md @@ -94,7 +94,6 @@ Enum (так же, как Enum8 или Enum16) должен содержать ClickHouse считывает и выводит сообщения protobuf в формате `length-delimited`. Это означает, что перед каждым сообщением его длина должна быть записана как [целое число переменной длины (varint)](https://developers.google.com/protocol-buffers/docs/encoding#varints). - ## Пример использования {#example-usage} ### Чтение и запись данных {#basic-examples} @@ -119,7 +118,6 @@ message MessageType { }; ``` -
Генерация бинарного файла @@ -249,7 +247,6 @@ ENGINE = MergeTree() ORDER BY tuple() ``` - Вставьте данные в таблицу из командной строки: ```bash @@ -264,8 +261,7 @@ SELECT * FROM test.protobuf_messages INTO OUTFILE 'protobuf_message_from_clickho Имея Protobuf-схему, вы теперь можете десериализовать данные, которые ClickHouse записал в файл `protobuf_message_from_clickhouse.bin`. - -### Чтение и запись данных с использованием ClickHouse Cloud +### Чтение и запись данных с использованием ClickHouse Cloud {#basic-examples-cloud} В ClickHouse Cloud нельзя загрузить файл схемы Protobuf. Однако вы можете использовать параметр `format_protobuf_schema`, чтобы указать схему прямо в запросе. В этом примере показано, как читать сериализованные данные с вашей локальной @@ -293,8 +289,7 @@ ORDER BY tuple() * 'string': `format_schema` содержит буквальное содержимое схемы. * 'query': `format_schema` представляет собой запрос для получения схемы. - -### `format_schema_source='string'` +### `format_schema_source='string'` {#format-schema-source-string} Чтобы вставить данные в ClickHouse Cloud, указав схему в виде строки, выполните команду: @@ -314,8 +309,7 @@ Javier Rodriguez 20001015 ['(555) 891-2046','(555) 738-5129'] Mei Ling 19980616 ['(555) 956-1834','(555) 403-7682'] ``` - -### `format_schema_source='query'` +### `format_schema_source='query'` {#format-schema-source-query} Вы также можете хранить Protobuf-схему в таблице. @@ -351,8 +345,7 @@ Javier Rodriguez 20001015 ['(555) 891-2046','(555) 738-5129'] Mei Ling 19980616 ['(555) 956-1834','(555) 403-7682'] ``` - -### Использование автоматически сгенерированной схемы +### Использование автоматически сгенерированной схемы {#using-autogenerated-protobuf-schema} Если у вас нет внешней Protobuf-схемы для ваших данных, вы всё равно можете выводить и считывать данные в формате Protobuf, используя автоматически сгенерированную схему. Для этого используйте настройку `format_protobuf_use_autogenerated_schema`. 
@@ -381,7 +374,6 @@ SELECT * FROM test.hits format Protobuf SETTINGS format_protobuf_use_autogenerat В этом случае автоматически сгенерированная схема Protobuf будет сохранена в файле `path/to/schema/schema.capnp`. - ### Сброс кэша Protobuf {#basic-examples-cloud} Чтобы перезагрузить схему Protobuf, загруженную из [`format_schema_path`](/operations/server-configuration-parameters/settings.md/#format_schema_path), используйте оператор [`SYSTEM DROP ... FORMAT CACHE`](/sql-reference/statements/system.md/#system-drop-schema-format). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md index 466f1fb3c80..c1933204314 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md @@ -17,13 +17,10 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; | ---- | ----- | ----- | | ✔ | ✔ | | - ## Описание {#description} Формат `ProtobufList` похож на формат [`Protobuf`](./Protobuf.md), но строки представлены в виде последовательности подсообщений, содержащихся в сообщении с фиксированным именем «Envelope». 
- - ## Пример использования {#example-usage} Например: @@ -51,5 +48,4 @@ message Envelope { }; ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md index 10d1de65b9f..27d9d1ee8d5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md @@ -6,8 +6,6 @@ title: 'RawBLOB' doc_type: 'reference' --- - - ## Описание {#description} Формат `RawBLOB` считывает все входные данные в одно значение. Можно разобрать только таблицу с одним полем типа [`String`](/sql-reference/data-types/string.md) или аналогичного типа. @@ -45,7 +43,6 @@ doc_type: 'reference' Код: 108. DB::Exception: Отсутствуют данные для вставки ``` - ## Пример использования {#example-usage} ```bash title="Query" @@ -58,5 +55,4 @@ $ clickhouse-client --query "SELECT * FROM {some_table} FORMAT RawBLOB" | md5sum f9725a22f9191e064120d718e26862a9 - ``` - ## Параметры форматирования {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md index 41cbed34be1..843539c1ed9 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## Описание {#description} Формат `Regex` разбирает каждую строку импортируемых данных в соответствии с заданным регулярным выражением. 
@@ -29,8 +27,6 @@ doc_type: 'reference' Если регулярное выражение не соответствует строке и параметр [format_regexp_skip_unmatched](/operations/settings/settings-formats.md/#format_regexp_escaping_rule) равен 1, строка просто пропускается без ошибки. В противном случае генерируется исключение. - - ## Пример использования {#example-usage} Рассмотрим файл `data.tsv`: @@ -67,7 +63,6 @@ SELECT * FROM imp_regex_table; └────┴─────────┴────────┴────────────┘ ``` - ## Настройки формата {#format-settings} При работе с форматом `Regexp` можно использовать следующие настройки: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md index 9be534dffd9..ead7634c6c8 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md @@ -15,13 +15,10 @@ import RowBinaryFormatSettings from './_snippets/common-row-binary-format-settin | ---- | ----- | --------- | | ✔ | ✗ | | - ## Описание {#description} Аналогичен формату [`RowBinary`](./RowBinary.md), но с дополнительным байтом перед каждым столбцом, который указывает, следует ли использовать значение по умолчанию. - - ## Примеры использования {#example-usage} Примеры: @@ -39,7 +36,6 @@ SELECT * FROM FORMAT('RowBinaryWithDefaults', 'x UInt32 default 42, y UInt32', x * Для столбца `x` есть только один байт `01`, который указывает, что должно быть использовано значение по умолчанию, и после этого байта не передаётся никаких других данных. * Для столбца `y` данные начинаются с байта `00`, который указывает, что у столбца есть реальное значение, которое нужно прочитать из следующих данных `01000000`. 
- ## Настройки формата {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md index a9a66917156..69a11334276 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md @@ -13,14 +13,10 @@ doc_type: 'reference' |----------------|-----------------|-----------| | ✗ | ✔ | | - - ## Описание {#description} Выводит данные в виде последовательности операторов вида `INSERT INTO table (columns...) VALUES (...), (...) ...;`. - - ## Пример использования {#example-usage} Пример: @@ -39,7 +35,6 @@ INSERT INTO table (x, y, z) VALUES (8, 9, 'Привет'), (9, 10, 'Привет Для чтения данных, выводимых этим форматом, можно использовать входной формат [MySQLDump](../formats/MySQLDump.md). - ## Настройки формата {#format-settings} | Setting | Description | Default | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md index cbb4332f4b9..ebb15732260 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md @@ -13,8 +13,6 @@ doc_type: 'reference' |----------------|-----------------|-----------| | ✔ | ✔ | | - - ## Описание {#description} Аналогичен формату [`TabSeparated`](./TabSeparated.md), но выводит значение в формате `name=value`. @@ -58,7 +56,6 @@ x=1 y=\N [NULL](/sql-reference/syntax.md) форматируется как `\N`. 
- ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -103,7 +100,6 @@ FORMAT TSKV Результат будет в табличном формате с разделителем табуляцией и двумя строками заголовков для названий столбцов и их типов: - ```tsv date=2022-04-30 season=2021 home_team=Sutton United away_team=Bradford City home_team_goals=1 away_team_goals=4 date=2022-04-30 season=2021 home_team=Swindon Town away_team=Barrow home_team_goals=2 away_team_goals=1 @@ -124,5 +120,4 @@ date=2022-05-07 season=2021 home_team=Stevenage Borough away_team=Salfor date=2022-05-07 season=2021 home_team=Walsall away_team=Swindon Town home_team_goals=0 away_team_goals=3 ``` - ## Настройки форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md index 51c9261eb3b..6cdfb7004b9 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|--------| | ✔ | ✔ | `TSV` | - - ## Описание {#description} В формате TabSeparated данные записываются построчно. Каждая строка содержит значения, разделённые символами табуляции. После каждого значения следует символ табуляции, за исключением последнего значения в строке, за которым следует символ перевода строки. Везде предполагается использование перевода строки в формате Unix. Последняя строка также должна заканчиваться переводом строки. Значения записываются в текстовом формате, без заключения в кавычки и с экранированием специальных символов. 
@@ -42,7 +40,6 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD 2014-03-23 1406958 ``` - ## Форматирование данных {#tabseparated-data-formatting} Целые числа записываются в десятичной форме. Числа могут содержать дополнительный символ "+" в начале (он игнорируется при разборе и не записывается при форматировании). Неотрицательные числа не могут содержать знак минус. При чтении допускается интерпретировать пустую строку как ноль или (для знаковых типов) строку, состоящую только из знака минус, как ноль. Числа, которые не помещаются в соответствующий тип данных, могут быть разобраны как другое число, без сообщения об ошибке. @@ -108,7 +105,6 @@ SELECT * FROM nestedt FORMAT TSV 1 [1] ['a'] ``` - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -173,7 +169,6 @@ FORMAT TabSeparated 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## Настройки формата {#format-settings} | Setting | Description | Default | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md index c921daf384d..b6c220e136a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md @@ -13,8 +13,6 @@ doc_type: 'reference' |----------------|-----------------|-----------------| | ✔ | ✔ | `TSVRaw`, `Raw` | - - ## Описание {#description} Отличается от формата [`TabSeparated`](/interfaces/formats/TabSeparated) тем, что строки записываются без экранирования. @@ -25,8 +23,6 @@ doc_type: 'reference' Сравнение форматов `TabSeparatedRaw` и `RawBlob` см. в разделе [Сравнение форматов Raw](../RawBLOB.md/#raw-formats-comparison). 
- - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -91,5 +87,4 @@ FORMAT TabSeparatedRaw 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## Настройки формата {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md index b8d3a48fa05..11b30a0d0b8 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md @@ -13,8 +13,6 @@ doc_type: 'reference' |------|-------|-----------------------------------| | ✔ | ✔ | `TSVRawWithNames`, `RawWithNames` | - - ## Описание {#description} Отличается от формата [`TabSeparatedWithNames`](./TabSeparatedWithNames.md) тем, @@ -24,8 +22,6 @@ doc_type: 'reference' При разборе данных в этом формате символы табуляции или перевода строки внутри отдельных полей не допускаются. 
::: - - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -92,5 +88,4 @@ date season home_team away_team home_team_goals away_team_goals 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## Параметры формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md index 0b66aa7b6da..bd37159849e 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md @@ -13,8 +13,6 @@ doc_type: 'reference' |------|-------|---------------------------------------------------| | ✔ | ✔ | `TSVRawWithNamesAndNames`, `RawWithNamesAndNames` | - - ## Описание {#description} Отличается от формата [`TabSeparatedWithNamesAndTypes`](./TabSeparatedWithNamesAndTypes.md) @@ -24,8 +22,6 @@ doc_type: 'reference' При разборе этого формата табуляция и перевод строки внутри каждого поля не допускаются. 
::: - - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -72,7 +68,6 @@ FORMAT TabSeparatedRawWithNamesAndTypes Вывод будет в формате с разделителем табуляции и двумя строками заголовка: первая содержит имена столбцов, вторая — их типы: - ```tsv date season home_team away_team home_team_goals away_team_goals Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 @@ -95,5 +90,4 @@ Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## Параметры форматирования {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md index 17ad9b2e978..a70bc8aac1f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|--------------------------------| | ✔ | ✔ | `TSVWithNames`, `RawWithNames` | - - ## Описание {#description} Отличается от формата [`TabSeparated`](./TabSeparated.md) тем, что имена столбцов записаны в первой строке. @@ -27,8 +25,6 @@ doc_type: 'reference' В противном случае первая строка будет пропущена. 
::: - - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -95,5 +91,4 @@ date season home_team away_team home_team_goals away_team_goals 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## Настройки формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md index 31bb1ad9a45..d42242bf616 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md @@ -10,8 +10,6 @@ doc_type: 'reference' |----------------|-----------------|-----------------------------------------------| | ✔ | ✔ | `TSVWithNamesAndTypes`, `RawWithNamesAndTypes` | - - ## Описание {#description} Отличается от формата [`TabSeparated`](./TabSeparated.md) тем, что имена столбцов записываются в первую строку, а типы столбцов — во вторую. @@ -24,8 +22,6 @@ doc_type: 'reference' типы из входных данных будут сравниваться с типами соответствующих столбцов таблицы. В противном случае вторая строка будет пропущена. 
::: - - ## Пример использования {#example-usage} ### Вставка данных {#inserting-data} @@ -72,7 +68,6 @@ FORMAT TabSeparatedWithNamesAndTypes Вывод будет в формате с разделителями табуляции и двумя строками заголовков для имен столбцов и их типов: - ```tsv date season home_team away_team home_team_goals away_team_goals Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 @@ -95,5 +90,4 @@ Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## Параметры формата {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md index 912a34bfe14..db3ef812686 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md @@ -13,8 +13,6 @@ doc_type: 'guide' |-------|--------|-------| | ✔ | ✔ | | - - ## Описание {#description} В случаях, когда вам требуется больше возможностей для настройки, чем предоставляют другие стандартные форматы, @@ -32,8 +30,6 @@ doc_type: 'guide' | `format_template_resultset_format` | Указывает строку формата для набора результатов [во встроенной спецификации](#inline_specification). | | Некоторые настройки других форматов (например, `output_format_json_quote_64bit_integers` при использовании экранирования `JSON` | | - - ## Настройки и правила экранирования {#settings-and-escaping-rules} ### format_template_row {#format_template_row} @@ -115,7 +111,6 @@ doc_type: 'guide' Если параметр `format_template_resultset` является пустой строкой, по умолчанию используется `${data}`. ::: - Для запросов INSERT формат позволяет пропускать некоторые столбцы или поля, если задан префикс или суффикс (см. пример). 
### Встроенная спецификация {#inline_specification} @@ -133,8 +128,6 @@ doc_type: 'guide' - [`format_template_resultset`](#format_template_resultset) при использовании `format_template_resultset_format`. ::: - - ## Пример использования {#example-usage} Рассмотрим два примера того, как можно использовать формат `Template`: сначала для выборки данных, а затем для вставки данных. @@ -218,7 +211,6 @@ FORMAT Template Устали вручную форматировать таблицы Markdown? В этом примере мы рассмотрим, как можно использовать формат `Template` и настройки встроенной спецификации, чтобы решить простую задачу — выполнить `SELECT` по именам некоторых форматов ClickHouse из таблицы `system.formats` и отформатировать их как таблицу в формате Markdown. Это можно легко сделать, используя формат `Template` и настройки `format_template_row_format` и `format_template_resultset_format`. - В предыдущих примерах мы указывали строки шаблонов для результирующего набора и строк в отдельных файлах, а пути к этим файлам задавали с помощью настроек `format_template_resultset` и `format_template_row` соответственно. Здесь мы сделаем это прямо в запросе, потому что наш шаблон тривиален и состоит лишь из нескольких символов `|` и `-` для создания таблицы в формате Markdown. Шаблонную строку для результирующего набора мы зададим с помощью настройки `format_template_resultset_format`. Чтобы сделать заголовок таблицы, мы добавили `|ClickHouse Formats|\n|---|\n` перед `${data}`. Настройку `format_template_row_format` мы используем, чтобы задать шаблонную строку ``|`{0:XML}`|`` для наших строк. Формат `Template` вставит наши строки с заданным форматом в плейсхолдер `${data}`. В этом примере у нас только один столбец, но при необходимости вы можете добавить больше, добавив `{1:XML}`, `{2:XML}` и т. д. в шаблон строки, выбирая правило экранирования по необходимости. В этом примере мы используем правило экранирования `XML`. 
```sql title="Query" diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md index 0193294a52d..cc0934adf48 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## Описание {#description} Аналогично формату [`Template`], но пропускает пробельные символы между разделителями и значениями во входном потоке. @@ -27,8 +25,6 @@ doc_type: 'reference' Этот формат предназначен только для ввода. ::: - - ## Пример использования {#example-usage} Следующий запрос можно использовать для вставки данных из приведённого выше примера вывода в формате [JSON](/interfaces/formats/JSON): @@ -50,5 +46,4 @@ FORMAT TemplateIgnoreSpaces {${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}} ``` - ## Параметры форматирования {#format-settings} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md index 9af798e26ee..d48adc43d34 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md @@ -13,16 +13,12 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## Описание {#description} Выводит каждое значение на отдельной строке с указанием имени столбца. Этот формат удобен для вывода одной или нескольких строк, если каждая строка содержит большое количество столбцов. 
Обратите внимание, что [`NULL`](/sql-reference/syntax.md) выводится как `ᴺᵁᴸᴸ`, чтобы было проще отличать строковое значение `NULL` от отсутствия значения. Столбцы JSON выводятся в удобочитаемом формате, а `NULL` выводится как `null`, поскольку это корректное значение JSON и его легко отличить от `"null"`. - - ## Пример использования {#example-usage} Пример: @@ -53,5 +49,4 @@ test: строка с «кавычками» и с особыми Этот формат подходит только для вывода результата запроса, но не для разбора (извлечения данных для вставки в таблицу). - ## Параметры форматирования {#format-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md index 5e104b99bf4..4817f4acb6c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## Описание {#description} Формат `XML` предназначен только для вывода и не подходит для парсинга. @@ -26,8 +24,6 @@ doc_type: 'reference' Массивы выводятся как `HelloWorld...`, а кортежи — как `HelloWorld...`. - - ## Пример использования {#example-usage} Пример: @@ -94,9 +90,6 @@ doc_type: 'reference' ``` - ## Параметры форматирования {#format-settings} - - ## XML {#xml} \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/grpc.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/grpc.md index 3ab5c3dd57c..af1745710d6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/grpc.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/grpc.md @@ -7,12 +7,8 @@ title: 'Интерфейс gRPC' doc_type: 'reference' --- - - # Интерфейс gRPC {#grpc-interface} - - ## Введение {#grpc-interface-introduction} ClickHouse поддерживает интерфейс [gRPC](https://grpc.io/). 
Это система удалённого вызова процедур с открытым исходным кодом, использующая HTTP/2 и [Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers). Реализация gRPC в ClickHouse поддерживает: @@ -28,8 +24,6 @@ ClickHouse поддерживает интерфейс [gRPC](https://grpc.io/). Спецификация интерфейса приведена в файле [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto). - - ## Настройка gRPC {#grpc-interface-configuration} Чтобы использовать интерфейс gRPC, задайте `grpc_port` в основном [конфигурационном файле сервера](../operations/configuration-files.md). Дополнительные параметры конфигурации приведены в следующем примере: @@ -66,7 +60,6 @@ ClickHouse поддерживает интерфейс [gRPC](https://grpc.io/). ``` - ## Встроенный клиент {#grpc-client} Вы можете написать клиент на любом из языков программирования, поддерживаемых gRPC, используя предоставленную [спецификацию](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/http.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/http.md index 33fe0476d49..f29bb869432 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/http.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/http.md @@ -10,19 +10,14 @@ doc_type: 'reference' import PlayUI from '@site/static/images/play.png'; import Image from '@theme/IdealImage'; - # HTTP-интерфейс {#http-interface} - - ## Предварительные требования {#prerequisites} Для примеров в этой статье вам понадобится: - запущенный сервер ClickHouse - установленный `curl`. В Ubuntu или Debian выполните `sudo apt install curl` или обратитесь к этой [документации](https://curl.se/download.html) за инструкциями по установке. - - ## Обзор {#overview} HTTP-интерфейс позволяет использовать ClickHouse на любой платформе и с любого языка программирования в виде REST API. 
HTTP-интерфейс более ограничен, чем нативный интерфейс, но обладает лучшей поддержкой языков. @@ -43,7 +38,6 @@ Ok. См. также: [Особенности кодов ответа HTTP](#http_response_codes_caveats). - ## Веб-интерфейс пользователя {#web-ui} ClickHouse включает веб-интерфейс пользователя, доступ к которому можно получить по следующему адресу: @@ -70,7 +64,6 @@ $ curl 'http://localhost:8123/replicas_status' Ok. ``` - ## Выполнение запросов по HTTP/HTTPS {#querying} Для выполнения запросов по HTTP/HTTPS есть три варианта: @@ -164,7 +157,6 @@ ECT 1 wget -nv -O- 'http://localhost:8123/?query=SELECT 1, 2, 3 FORMAT JSON' ``` - ```response title="Response" { "meta": @@ -222,7 +214,6 @@ $ curl -X POST -F 'query=select {p1:UInt8} + {p2:UInt8}' -F "param_p1=3" -F "par 7 ``` - ## Запросы INSERT по HTTP/HTTPS {#insert-queries} Метод передачи данных `POST` используется для запросов `INSERT`. В этом случае вы можете указать начало запроса в параметре URL и использовать POST для передачи данных, которые нужно вставить. Вставляемыми данными может быть, например, дамп из MySQL в формате с разделителями табуляции. Таким образом, запрос `INSERT` заменяет `LOAD DATA LOCAL INFILE` из MySQL. @@ -289,7 +280,6 @@ $ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- Для успешных запросов, не возвращающих таблицу данных, возвращается пустое тело ответа. - ## Сжатие {#compression} Сжатие можно использовать для уменьшения объема сетевого трафика при передаче больших объемов данных или для создания дампов, которые сразу сохраняются в сжатом виде. @@ -321,8 +311,6 @@ $ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- Некоторые HTTP-клиенты могут по умолчанию распаковывать данные от сервера (для `gzip` и `deflate`), и вы можете получить уже распакованные данные, даже если правильно используете настройки сжатия. 
::: - - ## Примеры {#examples-compression} Чтобы отправить сжатые данные на сервер: @@ -354,7 +342,6 @@ curl -sS "http://localhost:8123/?enable_http_compression=1" \ 2 ``` - ## База данных по умолчанию {#default-database} Вы можете использовать параметр `database` в URL или заголовок `X-ClickHouse-Database`, чтобы указать базу данных по умолчанию. @@ -375,7 +362,6 @@ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?databa По умолчанию в качестве базы данных по умолчанию используется та, которая указана в настройках сервера. Изначально это база данных с именем `default`. При необходимости вы всегда можете указать базу данных, добавив её имя и точку перед именем таблицы. - ## Аутентификация {#authentication} Имя пользователя и пароль можно указать одним из трёх способов: @@ -436,7 +422,6 @@ $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:812 * [Settings](/operations/settings/settings) * [SET](/sql-reference/statements/set) - ## Использование сессий ClickHouse в протоколе HTTP {#using-clickhouse-sessions-in-the-http-protocol} Вы также можете использовать сессии ClickHouse в протоколе HTTP. Для этого необходимо добавить к запросу `GET`-параметр `session_id`. В качестве идентификатора сессии можно использовать любую строку. @@ -478,7 +463,6 @@ X-ClickHouse-Progress: {"read_rows":"1000000","read_bytes":"8000000","total_rows HTTP‑интерфейс позволяет передавать внешние данные (внешние временные таблицы) для выполнения запросов. Дополнительные сведения см. в разделе [«External data for query processing»](/engines/table-engines/special/external-data). - ## Буферизация ответа {#response-buffering} Буферизацию ответа можно включить на стороне сервера. 
Для этого предусмотрены следующие параметры URL: @@ -505,7 +489,6 @@ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wa Используйте буферизацию, чтобы избежать ситуаций, когда ошибка обработки запроса возникает после того, как код ответа и HTTP-заголовки уже были отправлены клиенту. В таком случае сообщение об ошибке записывается в конце тела ответа, и на стороне клиента ошибка может быть обнаружена только при разборе ответа. ::: - ## Установка роли с помощью параметров запроса {#setting-role-with-query-parameters} Эта возможность была добавлена в ClickHouse 24.4. @@ -538,7 +521,6 @@ curl -sS "http://localhost:8123?role=my_role&role=my_other_role" --data-binary " В этом случае `?role=my_role&role=my_other_role` работает аналогично выполнению `SET ROLE my_role, my_other_role` перед выполнением запроса. - ## Особенности кодов ответа HTTP {#http_response_codes_caveats} Из-за ограничений протокола HTTP код ответа 200 не гарантирует, что запрос был успешно выполнен. @@ -624,7 +606,6 @@ $ curl -v -Ss "http://localhost:8123/?max_block_size=1&query=select+sleepEachRow 0,0 ``` - **исключение** rumfyutuqkncbgau Код: 395. DB::Exception: Значение, переданное в функцию 'throwIf', является ненулевым: при выполнении выражения 'FUNCTION throwIf(equals(__table1.number, 2_UInt8) :: 1) -> throwIf(equals(__table1.number, 2_UInt8)) UInt8 : 0'. (FUNCTION_THROW_IF_VALUE_IS_NON_ZERO) (версия 25.11.1.1) @@ -634,7 +615,6 @@ rumfyutuqkncbgau ``` ``` - ## Запросы с параметрами {#cli-queries-with-parameters} Вы можете создать запрос с параметрами и передавать им значения из соответствующих параметров HTTP-запроса. Для получения дополнительной информации см. раздел [Запросы с параметрами для CLI](../interfaces/cli.md#cli-queries-with-parameters). 
@@ -674,7 +654,6 @@ curl -sS "http://localhost:8123?param_arg1=abc%5C%09123" -d "SELECT splitByChar( ['abc','123'] ``` - ## Предопределённый HTTP-интерфейс {#predefined_http_interface} ClickHouse поддерживает выполнение специальных запросов через HTTP-интерфейс. Например, вы можете записать данные в таблицу следующим образом: @@ -706,7 +685,6 @@ ClickHouse также поддерживает предопределённый Теперь вы можете получать данные в формате Prometheus, обращаясь непосредственно по URL: - ```bash $ curl -v 'http://localhost:8123/predefined_query' * Trying ::1... @@ -733,25 +711,18 @@ $ curl -v 'http://localhost:8123/predefined_query' "Query" 1 ``` - # HELP "Merge" "Количество выполняемых фоновых слияний" {#help-merge-number-of-executing-background-merges} # TYPE "Merge" counter {#type-merge-counter} "Merge" 0 - - # HELP "PartMutation" "Количество мутаций (ALTER DELETE/UPDATE)" {#help-partmutation-number-of-mutations-alter-deleteupdate} # TYPE "PartMutation" counter {#type-partmutation-counter} "PartMutation" 0 - - # HELP "ReplicatedFetch" "Количество частей данных, получаемых из реплики" {#help-replicatedfetch-number-of-data-parts-being-fetched-from-replica} # TYPE "ReplicatedFetch" counter {#type-replicatedfetch-counter} "ReplicatedFetch" 0 - - # HELP "ReplicatedSend" "Количество частей данных, отправляемых на реплики" {#help-replicatedsend-number-of-data-parts-being-sent-to-replicas} # TYPE "ReplicatedSend" counter {#type-replicatedsend-counter} @@ -828,7 +799,6 @@ $ curl -v 'http://localhost:8123/predefined_query' Например: ``` - ```yaml @@ -920,7 +890,6 @@ max_final_threads 2 `http_response_headers` можно использовать для указания типа контента вместо `content_type`. 
- ```yaml @@ -1008,7 +977,6 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler' Чтобы найти содержимое файла, отправленного клиенту: - ```yaml @@ -1105,7 +1073,6 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler' ``` - ## HTTP заголовки ответа {#http-response-headers} ClickHouse позволяет настраивать пользовательские HTTP-заголовки ответа, которые могут применяться к любому настраиваемому обработчику. Эти заголовки можно задать с помощью настройки `http_response_headers`, которая принимает пары ключ-значение, представляющие имена заголовков и их значения. Эта возможность особенно полезна для реализации пользовательских заголовков безопасности, политик CORS или любых других требований к HTTP-заголовкам для всего HTTP-интерфейса ClickHouse. @@ -1142,7 +1109,6 @@ ClickHouse позволяет настраивать пользовательс ``` - ## Корректный JSON/XML-ответ при исключении во время HTTP‑стриминга {#valid-output-on-exception-http-streaming} Во время выполнения запроса по HTTP может произойти исключение, когда часть данных уже была отправлена. Обычно исключение отправляется клиенту в виде обычного текста. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/mysql.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/mysql.md index 32a5be2c707..c599c95edd8 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/mysql.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/mysql.md @@ -14,7 +14,6 @@ import mysql1 from '@site/static/images/interfaces/mysql1.png'; import mysql2 from '@site/static/images/interfaces/mysql2.png'; import mysql3 from '@site/static/images/interfaces/mysql3.png'; - # Интерфейс MySQL {#mysql-interface} ClickHouse поддерживает сетевой протокол MySQL (MySQL wire protocol). Это позволяет отдельным клиентам, у которых нет нативных коннекторов для ClickHouse, использовать вместо них протокол MySQL. 
Работа была проверена со следующими BI-инструментами: @@ -37,8 +36,6 @@ ClickHouse поддерживает сетевой протокол MySQL (MySQL Эту настройку нельзя отключить, и в редких пограничных случаях она может приводить к отличиям в поведении между запросами, отправленными в обычный интерфейс запросов ClickHouse и интерфейс запросов MySQL. :::: - - ## Включение интерфейса MySQL в ClickHouse Cloud {#enabling-the-mysql-interface-on-clickhouse-cloud} 1. После создания сервиса ClickHouse Cloud нажмите кнопку `Connect`. @@ -63,8 +60,6 @@ ClickHouse поддерживает сетевой протокол MySQL (MySQL - - ## Создание нескольких пользователей MySQL в ClickHouse Cloud {#creating-multiple-mysql-users-in-clickhouse-cloud} По умолчанию существует встроенный пользователь `mysql4`, который использует тот же пароль, что и пользователь `default`. Часть `` — это первый сегмент имени хоста вашего ClickHouse Cloud. Такой формат необходим для работы с инструментами, которые реализуют безопасное подключение, но не передают [SNI-информацию в своем TLS-рукопожатии](https://www.cloudflare.com/learning/ssl/what-is-sni), из-за чего невозможно выполнить внутреннюю маршрутизацию без дополнительной подсказки в имени пользователя (консольный клиент MySQL является одним из таких инструментов). @@ -117,7 +112,6 @@ ERROR 2013 (HY000): Потеряно соединение с сервером My В этом случае убедитесь, что имя пользователя имеет формат `mysql4_`, как описано ([выше](#creating-multiple-mysql-users-in-clickhouse-cloud)). - ## Включение интерфейса MySQL в самостоятельно управляемом ClickHouse {#enabling-the-mysql-interface-on-self-managed-clickhouse} Добавьте параметр [mysql_port](../operations/server-configuration-parameters/settings.md#mysql_port) в файл конфигурации сервера. 
Например, вы можете указать порт в новом XML-файле в папке `config.d/` [folder](../operations/configuration-files): @@ -134,7 +128,6 @@ ERROR 2013 (HY000): Потеряно соединение с сервером My {} Application: Прослушивается протокол совместимости MySQL: 127.0.0.1:9004 ``` - ## Подключение MySQL к ClickHouse {#connect-mysql-to-clickhouse} Следующая команда демонстрирует, как подключить клиент MySQL `mysql` к ClickHouse: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md index 4a059e5217e..8f1305920a6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md @@ -10,14 +10,10 @@ ClickHouse может автоматически определять струк В этом документе описано, когда используется автоматическое определение схемы, как оно работает с различными входными форматами и какие настройки его контролируют. - - ## Использование {#usage} Автоматическое определение схемы используется, когда ClickHouse должен прочитать данные в определённом формате, но их структура неизвестна. - - ## Табличные функции [file](../sql-reference/table-functions/file.md), [s3](../sql-reference/table-functions/s3.md), [url](../sql-reference/table-functions/url.md), [hdfs](../sql-reference/table-functions/hdfs.md), [azureBlobStorage](../sql-reference/table-functions/azureBlobStorage.md). {#table-functions-file-s3-url-hdfs-azureblobstorage} Эти табличные функции имеют необязательный аргумент `structure`, задающий структуру входных данных. Если этот аргумент не указан или имеет значение `auto`, структура будет выведена из данных. 
@@ -65,7 +61,6 @@ DESCRIBE file('hobbies.jsonl') └─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## Движки таблиц [File](../engines/table-engines/special/file.md), [S3](../engines/table-engines/integrations/s3.md), [URL](../engines/table-engines/special/url.md), [HDFS](../engines/table-engines/integrations/hdfs.md), [azureBlobStorage](../engines/table-engines/integrations/azureBlobStorage.md) {#table-engines-file-s3-url-hdfs-azureblobstorage} Если в запросе `CREATE TABLE` не указан список столбцов, структура таблицы будет автоматически определена по данным. @@ -108,7 +103,6 @@ DESCRIBE TABLE hobbies └─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## clickhouse-local {#clickhouse-local} У `clickhouse-local` есть необязательный параметр `-S/--structure`, задающий структуру входных данных. Если этот параметр не указан или имеет значение `auto`, структура будет определена по данным. @@ -139,7 +133,6 @@ clickhouse-local --file='hobbies.jsonl' --table='hobbies' --query='SELECT * FROM 4 47 Brayan ['movies','skydiving'] ``` - ## Использование структуры из таблицы-вставки {#using-structure-from-insertion-table} Когда табличные функции `file/s3/url/hdfs` используются для вставки данных в таблицу, @@ -248,7 +241,6 @@ INSERT INTO hobbies4 SELECT id, empty(hobbies) ? NULL : hobbies[1] FROM file(hob В этом случае в запросе `SELECT` выполняются некоторые операции со столбцом `hobbies` перед его вставкой в таблицу, поэтому ClickHouse не может использовать структуру целевой таблицы и будет использовано автоматическое определение схемы. - ## Кэш автоопределения схемы {#schema-inference-cache} Для большинства форматов ввода автоопределение схемы читает часть данных, чтобы определить их структуру, и этот процесс может занять некоторое время. @@ -271,8 +263,6 @@ INSERT INTO hobbies4 SELECT id, empty(hobbies) ? 
NULL : hobbies[1] FROM file(hob Попробуем определить структуру примерного набора данных из S3 `github-2022.ndjson.gz` и посмотрим, как работает кэш автоопределения схемы: - - ```sql DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/github/github-2022.ndjson.gz') ``` @@ -416,7 +406,6 @@ SELECT count() FROM system.schema_inference_cache WHERE storage='S3' └─────────┘ ``` - ## Текстовые форматы {#text-formats} Для текстовых форматов ClickHouse читает данные построчно, извлекает значения столбцов в соответствии с форматом, @@ -488,7 +477,6 @@ DESC format(JSONEachRow, '{"arr" : [null, 42, null]}') └──────┴────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - Если массив содержит значения разных типов и параметр `input_format_json_infer_array_of_dynamic_from_array_of_different_types` включён (по умолчанию он включён), то его тип будет `Array(Dynamic)`: ```sql @@ -555,7 +543,6 @@ Map: В JSON можно читать объекты, значения которых имеют один и тот же тип, как значения типа Map. Примечание: это будет работать только в том случае, если настройки `input_format_json_read_objects_as_strings` и `input_format_json_try_infer_named_tuples_from_objects` отключены. 
- ```sql SET input_format_json_read_objects_as_strings = 0, input_format_json_try_infer_named_tuples_from_objects = 0; DESC format(JSONEachRow, '{"map" : {"key1" : 42, "key2" : 24, "key3" : 4}}') @@ -642,7 +629,6 @@ DESC format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello"}}, {"obj" : {"a" : 4 Результат: - ```response ┌─name─┬─type───────────────────────────────────────────────────────────────────────────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ │ obj │ Tuple(a Nullable(Int64), b Nullable(String), c Array(Nullable(Int64)), d Tuple(e Nullable(Int64))) │ │ │ │ │ │ @@ -717,7 +703,6 @@ SELECT * FROM format(JSONEachRow, '{"obj" : {"a" : 42}}, {"obj" : {"a" : {"b" : Примечание: включение этой настройки будет иметь эффект только в том случае, если настройка `input_format_json_try_infer_named_tuples_from_objects` отключена. - ```sql SET input_format_json_read_objects_as_strings = 1, input_format_json_try_infer_named_tuples_from_objects = 0; DESC format(JSONEachRow, $$ @@ -819,7 +804,6 @@ SELECT arr, toTypeName(arr), JSONExtractArrayRaw(arr)[3] from format(JSONEachRow ##### input_format_json_infer_incomplete_types_as_strings {#input_format_json_infer_incomplete_types_as_strings} - Включение этого параметра позволяет использовать тип данных String для JSON-ключей, которые в выборке данных при определении схемы содержат только `Null`/`{}`/`[]`. В JSON-форматах любое значение может быть считано как String, если включены все соответствующие настройки (по умолчанию они включены), и мы можем избежать ошибок вида `Cannot determine type for column 'column_name' by first 25000 rows of data, most likely this column contains only Nulls or empty Arrays/Maps` при определении схемы, используя тип String для ключей с неизвестными типами. 
@@ -886,7 +870,6 @@ DESC format(CSV, 'Hello world!,World hello!') Date и DateTime: - ```sql DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00","2022-01-01 00:00:00.000"') ``` @@ -959,7 +942,6 @@ DESC format(CSV, $$"[{'key1' : [[42, 42], []], 'key2' : [[null], [42]]}]"$$) └──────┴───────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - Если ClickHouse не может определить тип значения в кавычках, потому что данные содержат только значения NULL, он будет интерпретировать его как String: ```sql @@ -1065,7 +1047,6 @@ DESC format(CSV, '42,42.42'); └──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ### TSV/TSKV {#tsv-tskv} В форматах TSV/TSKV ClickHouse извлекает значение столбца из строки в соответствии с табуляцией как разделителем, а затем разбирает извлечённое значение с помощью @@ -1120,7 +1101,6 @@ DESC format(TSV, '2020-01-01 2020-01-01 00:00:00 2022-01-01 00:00:00.000') └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - Массивы: ```sql @@ -1193,7 +1173,6 @@ DESC format(TSV, $$[{'key1' : [(42, 'Hello'), (24, NULL)], 'key2' : [(NULL, ',') └──────┴─────────────────────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - Если ClickHouse не может определить тип данных, потому что данные состоят только из значений NULL, он будет интерпретировать их как String: ```sql @@ -1283,7 +1262,6 @@ $$) **Примеры:** - Целые числа, вещественные числа, логические значения, строки: ```sql @@ -1362,7 +1340,6 @@ DESC format(Values, $$({'key1' : 42, 'key2' : 24})$$) └──────┴──────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - Вложенные массивы, кортежи и словари: ```sql @@ -1438,7 +1415,6 @@ $$) Пример автоопределения 
заголовка (когда включён `input_format_custom_detect_header`): - ```sql SET format_custom_row_before_delimiter = '', format_custom_row_after_delimiter = '\n', @@ -1520,7 +1496,6 @@ SET format_regexp = '^Line: value_1=(.+?), value_2=(.+?), value_3=(.+?)', format_regexp_escaping_rule = 'CSV' ``` - DESC format(Regexp, $$Line: value_1=42, value_2="Some string 1", value_3="[1, NULL, 3]" Line: value_1=2, value_2="Some string 2", value_3="[4, 5, NULL]"$$) @@ -1587,7 +1562,6 @@ DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : nul #### schema_inference_make_columns_nullable $ {#schema-inference-make-columns-nullable} - Управляет приведением выводимых типов к `Nullable` при выводе схемы для форматов без информации о nullability. Возможные значения: * 0 — выводимый тип никогда не будет `Nullable`, @@ -1654,7 +1628,6 @@ DESC format(JSONEachRow, $$ └─────────┴───────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - #### input_format_try_infer_integers {#input-format-try-infer-integers} :::note @@ -1733,7 +1706,6 @@ DESC format(JSONEachRow, $$ **Примеры** - ```sql SET input_format_try_infer_datetimes = 0; DESC format(JSONEachRow, $$ @@ -1804,7 +1776,6 @@ DESC format(JSONEachRow, $$ Примечание: При разборе значений типов DateTime при выводе схемы учитывается настройка [date_time_input_format](/operations/settings/settings-formats.md#date_time_input_format) - #### input_format_try_infer_dates {#input-format-try-infer-dates} Если параметр включён, ClickHouse будет пытаться определить тип `Date` из строковых полей при автоматическом определении схемы для текстовых форматов. 
@@ -1879,7 +1850,6 @@ $$) └──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## Самоописывающиеся форматы {#self-describing-formats} Самоописывающиеся форматы содержат информацию о структуре данных в самих данных: @@ -1969,7 +1939,6 @@ $$) В формате Avro ClickHouse считывает схему из данных и преобразует её в схему ClickHouse, используя следующие соответствия типов: - | Тип данных Avro | Тип данных ClickHouse | |-------------------------------------|---------------------------------------------------------------------------------| | `boolean` | [Bool](../sql-reference/data-types/boolean.md) | @@ -2023,8 +1992,6 @@ $$) В формате Arrow ClickHouse считывает схему из данных и преобразует её в схему ClickHouse, используя следующие соответствия типов: - - | Тип данных Arrow | Тип данных ClickHouse | |----------------------------------|----------------------------------------------------------| | `BOOL` | [Bool](../sql-reference/data-types/boolean.md) | @@ -2077,8 +2044,6 @@ $$) Формат Native используется внутри ClickHouse и содержит схему непосредственно в данных. При определении схемы ClickHouse считывает её из данных без каких-либо преобразований. - - ## Форматы с внешней схемой {#formats-with-external-schema} Такие форматы требуют наличия схемы, описывающей данные, в отдельном файле на определённом языке описания схем. @@ -2125,8 +2090,6 @@ $$) | `struct` | [Tuple](../sql-reference/data-types/tuple.md) | | `union(T, Void)`, `union(Void, T)` | [Nullable(T)](../sql-reference/data-types/nullable.md) | - - ## Строго типизированные бинарные форматы {#strong-typed-binary-formats} В таких форматах каждое сериализованное значение содержит информацию о своём типе (и, возможно, о своём имени), но нет информации о всей таблице. @@ -2173,8 +2136,6 @@ $$) По умолчанию все выведенные типы заключаются в `Nullable`, но это можно изменить с помощью настройки `schema_inference_make_columns_nullable`. 
- - ## Форматы с фиксированной схемой {#formats-with-constant-schema} Данные в таких форматах всегда имеют одну и ту же схему. @@ -2227,7 +2188,6 @@ DESC format(JSONAsObject, '{"x" : 42, "y" : "Hello, World!"}'); └──────┴──────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## Режимы определения схемы {#schema-inference-modes} Определение схемы по набору файлов данных может работать в двух разных режимах: `default` и `union`. @@ -2338,7 +2298,6 @@ DESC format(JSONAsObject, '{"x" : 42, "y" : "Hello, World!"}'); * Если ClickHouse не может определить схему по одному из файлов, будет сгенерировано исключение. * Если у вас много файлов, чтение схемы из всех них может занять много времени. - ## Автоматическое определение формата {#automatic-format-detection} Если формат данных не указан и его нельзя определить по расширению файла, ClickHouse попытается определить формат файла по его содержимому. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md index 206e892c193..59adbe14bad 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md @@ -7,12 +7,8 @@ title: 'Графические интерфейсы сторонних разр doc_type: 'reference' --- - - # Визуальные интерфейсы сторонних разработчиков {#visual-interfaces-from-third-party-developers} - - ## Open-source {#open-source} ### agx {#agx} @@ -117,8 +113,6 @@ doc_type: 'reference' ### LightHouse {#lighthouse} - - [LightHouse](https://github.com/VKCOM/lighthouse) — это легковесный веб-интерфейс для ClickHouse. 
Возможности: @@ -201,8 +195,6 @@ doc_type: 'reference' ### MindsDB Studio {#mindsdb} - - [MindsDB](https://mindsdb.com/) — это открытый AI-слой для баз данных, включая ClickHouse, который позволяет без лишних усилий разрабатывать, обучать и развертывать передовые модели машинного обучения. MindsDB Studio (GUI) позволяет обучать новые модели на данных из базы, интерпретировать предсказания модели, выявлять потенциальные смещения в данных, а также оценивать и визуализировать точность модели с помощью функции Explainable AI, чтобы быстрее адаптировать и настраивать ваши модели машинного обучения. ### DBM {#dbm} @@ -303,8 +295,6 @@ doc_type: 'reference' ### CKibana {#ckibana} - - [CKibana](https://github.com/TongchengOpenSource/ckibana) — это легковесный сервис, который позволяет легко искать, исследовать и визуализировать данные ClickHouse с использованием нативного интерфейса Kibana. Возможности: @@ -329,8 +319,6 @@ doc_type: 'reference' [Исходный код Telescope](https://github.com/iamtelescope/telescope) · [Демо-версия](https://demo.iamtelescope.net) - - ## Коммерческие решения {#commercial} ### DataGrip {#datagrip} @@ -412,8 +400,6 @@ SeekTable [бесплатен](https://www.seektable.com/help/cloud-pricing) д [TABLUM.IO](https://tablum.io/) — онлайн-инструмент для запросов и аналитики, предназначенный для ETL и визуализации. Он позволяет подключаться к ClickHouse, выполнять запросы к данным через гибкую SQL-консоль, а также загружать данные из статических файлов и сторонних сервисов. TABLUM.IO может визуализировать результаты в виде графиков и таблиц. - - Возможности: - ETL: загрузка данных из популярных баз данных, локальных и удалённых файлов, вызовы API. - Универсальная SQL-консоль с подсветкой синтаксиса и визуальным конструктором запросов. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md index 5bff9f19768..7168761b6df 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md @@ -7,12 +7,8 @@ title: 'Прокси-серверы от сторонних разработчи doc_type: 'reference' --- - - # Прокси-серверы сторонних разработчиков {#proxy-servers-from-third-party-developers} - - ## chproxy {#chproxy} [chproxy](https://github.com/Vertamedia/chproxy) — это HTTP‑прокси и балансировщик нагрузки для базы данных ClickHouse. @@ -25,8 +21,6 @@ doc_type: 'reference' Реализован на Go. - - ## KittenHouse {#kittenhouse} [KittenHouse](https://github.com/VKCOM/kittenhouse) предназначен для использования в качестве локального прокси между ClickHouse и сервером приложения в тех случаях, когда буферизация данных INSERT на стороне приложения невозможна или неудобна. @@ -39,8 +33,6 @@ doc_type: 'reference' Реализован на Go. - - ## ClickHouse-Bulk {#clickhouse-bulk} [ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) — это простой коллектор для вставки данных в ClickHouse. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/intro.md b/i18n/ru/docusaurus-plugin-content-docs/current/intro.md index 69e90ec44e0..041dd5d5a30 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/intro.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/intro.md @@ -14,7 +14,6 @@ import Image from '@theme/IdealImage'; ClickHouse® — высокопроизводительная колоночная система управления базами данных (СУБД) SQL для онлайн-аналитической обработки (OLAP). Она доступна как в виде [программного обеспечения с открытым исходным кодом](https://github.com/ClickHouse/ClickHouse), так и как [облачный сервис](https://clickhouse.com/cloud). - ## Что такое аналитика? 
{#what-are-analytics} Аналитика, также известная как OLAP (Online Analytical Processing), — это SQL‑запросы со сложными вычислениями (например, агрегациями, обработкой строк, арифметикой) по очень большим наборам данных. @@ -23,8 +22,6 @@ ClickHouse® — высокопроизводительная колоночна Во многих вариантах использования [аналитические запросы должны выполняться в режиме реального времени](https://clickhouse.com/engineering-resources/what-is-real-time-analytics), то есть возвращать результат за время менее одной секунды. - - ## Построчное и колоночное хранение данных {#row-oriented-vs-column-oriented-storage} Такой уровень производительности достигается только при правильной «ориентации» данных. @@ -65,51 +62,36 @@ LIMIT 8; **Колонко-ориентированная СУБД** - Поскольку значения каждого столбца хранятся на диске последовательно друг за другом, при выполнении приведённого выше запроса не загружаются лишние данные. Поскольку блочное хранение и передача данных с диска в память соответствуют характеру доступа к данным в аналитических запросах, с диска читаются только те столбцы, которые требуются для запроса, что позволяет избежать лишних операций ввода-вывода для неиспользуемых данных. Это [намного быстрее](https://benchmark.clickhouse.com/) по сравнению со строчно-ориентированным хранением, при котором считываются целые строки (включая столбцы, не относящиеся к запросу): - - ## Репликация данных и их целостность {#data-replication-and-integrity} ClickHouse использует асинхронную мультимастерную схему репликации, чтобы обеспечивать избыточное хранение данных на нескольких узлах. После записи на любую доступную реплику все остальные реплики в фоновом режиме получают свою копию. Система поддерживает одинаковое состояние данных на разных репликах. Восстановление после большинства сбоев выполняется автоматически или полуавтоматически в более сложных случаях. 
- - ## Ролевое управление доступом {#role-based-access-control} ClickHouse реализует управление учетными записями пользователей посредством SQL‑запросов и позволяет настраивать ролевое управление доступом, аналогичное описанному в стандарте ANSI SQL и реализованному в популярных системах управления реляционными базами данных. - - ## Поддержка SQL {#sql-support} ClickHouse поддерживает [декларативный язык запросов, основанный на SQL](/sql-reference), который во многих случаях соответствует стандарту ANSI SQL. Поддерживаемые конструкции запросов включают [GROUP BY](/sql-reference/statements/select/group-by), [ORDER BY](/sql-reference/statements/select/order-by), подзапросы в секции [FROM](/sql-reference/statements/select/from), конструкцию [JOIN](/sql-reference/statements/select/join), оператор [IN](/sql-reference/operators/in), [оконные функции](/sql-reference/window-functions) и скалярные подзапросы. - - ## Приблизительный расчет {#approximate-calculation} ClickHouse предоставляет возможности пожертвовать точностью ради производительности. Например, некоторые его агрегатные функции вычисляют приблизительное количество различных значений, медиану и квантили. Кроме того, запросы можно выполнять по выборке данных, чтобы быстро получить приблизительный результат. Наконец, агрегацию можно выполнять с ограниченным числом ключей вместо всех ключей. В зависимости от того, насколько смещено распределение ключей, это может дать достаточно точный результат при существенно меньших затратах ресурсов по сравнению с точным расчетом. - - ## Адаптивные алгоритмы соединения {#adaptive-join-algorithms} ClickHouse адаптивно выбирает алгоритм соединения: он начинает с быстрых хеш‑соединений и переходит к merge‑соединениям, если в запросе участвует более одной крупной таблицы. - - ## Высочайшая производительность запросов {#superior-query-performance} ClickHouse широко известен своей исключительно высокой скоростью выполнения запросов. Чтобы узнать, почему ClickHouse такой быстрый, см. 
руководство [Why is ClickHouse fast?](/concepts/why-clickhouse-is-so-fast.mdx). - - - ## Связанные ресурсы {#related-resources} - [Видео о финансовых функциях в ClickHouse](https://www.youtube.com/watch?v=BePLPVa0w_o) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md index c6961a4bbf3..9511b8ff371 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md @@ -6,16 +6,12 @@ title: 'Функции для работы с Geohash' doc_type: 'reference' --- - - ## Geohash {#geohash} [Geohash](https://en.wikipedia.org/wiki/Geohash) — это система геокодирования, которая разбивает поверхность Земли на ячейки сетки и кодирует каждую ячейку в короткую строку из букв и цифр. Это иерархическая структура данных, поэтому чем длиннее строка geohhash, тем точнее будет указано географическое местоположение. Если вам нужно вручную преобразовать географические координаты в строки geohash, вы можете использовать [geohash.org](http://geohash.co/). - - ## geohashEncode {#geohashencode} Кодирует широту и долготу в строку формата [geohash](#geohash). @@ -58,7 +54,6 @@ SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res; └──────────────┘ ``` - ## geohashDecode {#geohashdecode} Декодирует любую строку, закодированную с помощью [geohash](#geohash), в значения долготы и широты. @@ -89,7 +84,6 @@ SELECT geohashDecode('ezs42') AS res; └─────────────────────────────────┘ ``` - ## geohashesInBox {#geohashesinbox} Возвращает массив строк, закодированных в [geohash](#geohash) с заданной точностью, которые находятся внутри заданного прямоугольника или пересекают его границы, по сути представляя собой 2D‑сетку, развёрнутую в одномерный массив. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md index 4bd3eb380c3..ac92107c82a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md @@ -6,8 +6,6 @@ title: 'Функции для работы с индексами H3' doc_type: 'reference' --- - - ## Индекс H3 {#h3-index} [H3](https://h3geo.org/) — это система географического индексирования, в которой поверхность Земли разбита на сетку равновеликих шестиугольных ячеек. Эта система иерархическая, т. е. каждый шестиугольник на верхнем уровне («родитель») может быть разделён на семь таких же, но меньших («потомки»), и так далее. @@ -20,8 +18,6 @@ doc_type: 'reference' Полное описание системы H3 доступно на [сайте Uber Engineering](https://www.uber.com/blog/h3/). - - ## h3IsValid {#h3isvalid} Проверяет, является ли число допустимым индексом [H3](#h3-index). @@ -57,7 +53,6 @@ SELECT h3IsValid(630814730351855103) AS h3IsValid; └───────────┘ ``` - ## h3GetResolution {#h3getresolution} Определяет разрешение заданного индекса [H3](#h3-index). @@ -93,7 +88,6 @@ SELECT h3GetResolution(639821929606596015) AS resolution; └────────────┘ ``` - ## h3EdgeAngle {#h3edgeangle} Вычисляет среднюю длину ребра шестиугольной ячейки [H3](#h3-index) в градусах. @@ -128,7 +122,6 @@ SELECT h3EdgeAngle(10) AS edgeAngle; └───────────────────────┘ ``` - ## h3EdgeLengthM {#h3edgelengthm} Вычисляет среднюю длину ребра шестиугольной ячейки [H3](#h3-index) в метрах. @@ -163,7 +156,6 @@ SELECT h3EdgeLengthM(15) AS edgeLengthM; └─────────────┘ ``` - ## h3EdgeLengthKm {#h3edgelengthkm} Вычисляет среднюю длину ребра шестиугольника [H3](#h3-index) в километрах. 
@@ -198,7 +190,6 @@ SELECT h3EdgeLengthKm(15) AS edgeLengthKm; └──────────────┘ ``` - ## geoToH3 {#geotoh3} Возвращает индекс точки [H3](#h3-index) для координат `(lat, lon)` с указанным разрешением. @@ -238,7 +229,6 @@ SELECT geoToH3(55.71290588, 37.79506683, 15) AS h3Index; └────────────────────┘ ``` - ## h3ToGeo {#h3togeo} Возвращает широту и долготу центроида, соответствующие указанному индексу [H3](#h3-index). @@ -275,7 +265,6 @@ SELECT h3ToGeo(644325524701193974) AS coordinates; └───────────────────────────────────────┘ ``` - ## h3ToGeoBoundary {#h3togeoboundary} Возвращает массив пар `(lat, lon)`, соответствующих границе указанного индекса H3. @@ -310,7 +299,6 @@ SELECT h3ToGeoBoundary(644325524701193974) AS coordinates; └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3kRing {#h3kring} Перечисляет все шестиугольники [H3](#h3-index) в радиусе `k` от указанного шестиугольника в случайном порядке. @@ -352,7 +340,6 @@ SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index; └────────────────────┘ ``` - ## h3PolygonToCells {#h3polygontocells} Возвращает шестиугольники (при указанном разрешении), которые находятся внутри заданной геометрии — кольца или (мульти)полигона. @@ -397,7 +384,6 @@ SELECT h3PolygonToCells([(-122.4089866999972145,37.813318999983238),(-122.354473 └────────────────────┘ ``` - ## h3GetBaseCell {#h3getbasecell} Возвращает номер базовой ячейки индекса [H3](#h3-index). @@ -432,7 +418,6 @@ SELECT h3GetBaseCell(612916788725809151) AS basecell; └──────────┘ ``` - ## h3HexAreaM2 {#h3hexaream2} Возвращает среднюю площадь шестиугольника в квадратных метрах для указанного разрешения. 
@@ -467,7 +452,6 @@ SELECT h3HexAreaM2(13) AS area; └──────┘ ``` - ## h3HexAreaKm2 {#h3hexareakm2} Возвращает среднюю площадь шестигранной ячейки в квадратных километрах для заданного разрешения. @@ -502,7 +486,6 @@ SELECT h3HexAreaKm2(13) AS area; └───────────┘ ``` - ## h3IndexesAreNeighbors {#h3indexesareneighbors} Возвращает, являются ли указанные индексы [H3](#h3-index) соседними. @@ -539,7 +522,6 @@ SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n; └───┘ ``` - ## h3ToChildren {#h3tochildren} Возвращает массив дочерних индексов для заданного индекса [H3](#h3-index). @@ -575,7 +557,6 @@ SELECT h3ToChildren(599405990164561919, 6) AS children; └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3ToParent {#h3toparent} Возвращает родительский (более крупного уровня) индекс, содержащий заданный индекс [H3](#h3-index). @@ -611,7 +592,6 @@ SELECT h3ToParent(599405990164561919, 3) AS parent; └────────────────────┘ ``` - ## h3ToString {#h3tostring} Преобразует представление индекса типа `H3Index` в строковое представление. @@ -644,7 +624,6 @@ SELECT h3ToString(617420388352917503) AS h3_string; └─────────────────┘ ``` - ## stringToH3 {#stringtoh3} Преобразует строковое представление в представление типа `H3Index` (UInt64). @@ -679,7 +658,6 @@ SELECT stringToH3('89184926cc3ffff') AS index; └────────────────────┘ ``` - ## h3GetResolution {#h3getresolution-1} Возвращает уровень разрешения индекса [H3](#h3-index). @@ -714,7 +692,6 @@ SELECT h3GetResolution(617420388352917503) AS res; └─────┘ ``` - ## h3IsResClassIII {#h3isresclassiii} Возвращает, имеет ли индекс [H3](#h3-index) разрешение с ориентацией класса III. @@ -750,7 +727,6 @@ SELECT h3IsResClassIII(617420388352917503) AS res; └─────┘ ``` - ## h3IsPentagon {#h3ispentagon} Возвращает, представляет ли этот индекс [H3](#h3-index) пятиугольную ячейку. 
@@ -786,7 +762,6 @@ SELECT h3IsPentagon(644721767722457330) AS pentagon; └──────────┘ ``` - ## h3GetFaces {#h3getfaces} Возвращает грани икосаэдра, пересекаемые заданным индексом [H3](#h3-index). @@ -821,7 +796,6 @@ SELECT h3GetFaces(599686042433355775) AS faces; └───────┘ ``` - ## h3CellAreaM2 {#h3cellaream2} Возвращает точную площадь указанной ячейки в квадратных метрах, соответствующей заданному входному H3-индексу. @@ -856,7 +830,6 @@ SELECT h3CellAreaM2(579205133326352383) AS area; └────────────────────┘ ``` - ## h3CellAreaRads2 {#h3cellarearads2} Возвращает точную площадь указанной ячейки в квадратных радианах, соответствующей заданному входному H3-индексу. @@ -891,7 +864,6 @@ SELECT h3CellAreaRads2(579205133326352383) AS area; └─────────────────────┘ ``` - ## h3ToCenterChild {#h3tocenterchild} Возвращает центральный дочерний (более детализированный) индекс [H3](#h3-index), содержащийся в заданном индексе [H3](#h3-index) на указанном уровне разрешения. @@ -927,7 +899,6 @@ SELECT h3ToCenterChild(577023702256844799,1) AS centerToChild; └────────────────────┘ ``` - ## h3ExactEdgeLengthM {#h3exactedgelengthm} Возвращает точную длину однонаправленного ребра, представленного заданным индексом h3, в метрах. @@ -962,7 +933,6 @@ SELECT h3ExactEdgeLengthM(1310277011704381439) AS exactEdgeLengthM;; └────────────────────┘ ``` - ## h3ExactEdgeLengthKm {#h3exactedgelengthkm} Возвращает точную длину однонаправленного ребра, представленного переданным h3‑индексом, в километрах. @@ -997,7 +967,6 @@ SELECT h3ExactEdgeLengthKm(1310277011704381439) AS exactEdgeLengthKm;; └────────────────────┘ ``` - ## h3ExactEdgeLengthRads {#h3exactedgelengthrads} Возвращает точную длину однонаправленного ребра, заданного входным h3-индексом, в радианах. @@ -1032,7 +1001,6 @@ SELECT h3ExactEdgeLengthRads(1310277011704381439) AS exactEdgeLengthRads;; └──────────────────────┘ ``` - ## h3NumHexagons {#h3numhexagons} Возвращает количество уникальных индексов H3 при заданном разрешении. 
@@ -1067,7 +1035,6 @@ SELECT h3NumHexagons(3) AS numHexagons; └─────────────┘ ``` - ## h3PointDistM {#h3pointdistm} Возвращает расстояние по дуге большого круга («great circle») или по формуле гаверсинуса («haversine») между парами точек GeoCoord (широта/долгота) в метрах. @@ -1103,7 +1070,6 @@ SELECT h3PointDistM(-10.0 ,0.0, 10.0, 0.0) AS h3PointDistM; └───────────────────┘ ``` - ## h3PointDistKm {#h3pointdistkm} Возвращает расстояние по дуге большого круга (формула гаверсинусов, haversine) между парами точек GeoCoord (широта/долгота), в километрах. @@ -1139,7 +1105,6 @@ SELECT h3PointDistKm(-10.0 ,0.0, 10.0, 0.0) AS h3PointDistKm; └───────────────────┘ ``` - ## h3PointDistRads {#h3pointdistrads} Возвращает расстояние по «большой окружности» (great-circle distance) или по формуле «haversine» между парами точек GeoCoord (широта/долгота) в радианах. @@ -1175,7 +1140,6 @@ SELECT h3PointDistRads(-10.0 ,0.0, 10.0, 0.0) AS h3PointDistRads; └────────────────────┘ ``` - ## h3GetRes0Indexes {#h3getres0indexes} Возвращает массив всех индексов H3 разрешения 0. @@ -1206,7 +1170,6 @@ SELECT h3GetRes0Indexes AS indexes ; └─────────────────────────────────────────────┘ ``` - ## h3GetPentagonIndexes {#h3getpentagonindexes} Возвращает все пятиугольные индексы H3 на указанном разрешении. @@ -1241,7 +1204,6 @@ SELECT h3GetPentagonIndexes(3) AS indexes; └────────────────────────────────────────────────────────────────┘ ``` - ## h3Line {#h3line} Возвращает последовательность индексов между двумя заданными индексами. @@ -1277,7 +1239,6 @@ h3Line(start,end) └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3Distance {#h3distance} Возвращает расстояние в ячейках сетки между двумя указанными индексами. @@ -1315,7 +1276,6 @@ h3Distance(start,end) └──────────┘ ``` - ## h3HexRing {#h3hexring} Возвращает индексы шестиугольного кольца с центром в заданном исходном индексе h3Index и длиной k. 
@@ -1353,7 +1313,6 @@ h3HexRing(index, k) └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdge {#h3getunidirectionaledge} Возвращает однонаправленный индекс ребра H3 на основе указанных исходной и конечной ячеек и возвращает 0 при ошибке. @@ -1389,7 +1348,6 @@ h3GetUnidirectionalEdge(originIndex, destinationIndex) └─────────────────────┘ ``` - ## h3UnidirectionalEdgeIsValid {#h3unidirectionaledgeisvalid} Определяет, является ли заданный H3Index допустимым индексом однонаправленного ребра. Возвращает 1, если это индекс однонаправленного ребра, и 0 — в противном случае. @@ -1425,7 +1383,6 @@ h3UnidirectionalEdgeisValid(index) └────────────┘ ``` - ## h3GetOriginIndexFromUnidirectionalEdge {#h3getoriginindexfromunidirectionaledge} Возвращает индекс исходного шестиугольника по однонаправленному ребру H3Index. @@ -1460,7 +1417,6 @@ h3GetOriginIndexFromUnidirectionalEdge(edge) └────────────────────┘ ``` - ## h3GetDestinationIndexFromUnidirectionalEdge {#h3getdestinationindexfromunidirectionaledge} Возвращает индекс конечного шестиугольника на основании однонаправленного ребра `H3Index`. @@ -1495,7 +1451,6 @@ h3GetDestinationIndexFromUnidirectionalEdge(edge) └────────────────────┘ ``` - ## h3GetIndexesFromUnidirectionalEdge {#h3getindexesfromunidirectionaledge} Возвращает индексы исходной и конечной шестиугольных ячеек для заданного однонаправленного ребра H3Index. @@ -1535,7 +1490,6 @@ h3GetIndexesFromUnidirectionalEdge(edge) └─────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdgesFromHexagon {#h3getunidirectionaledgesfromhexagon} Возвращает все однонаправленные рёбра для указанного `H3Index`. 
@@ -1570,7 +1524,6 @@ h3GetUnidirectionalEdgesFromHexagon(index) └───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdgeBoundary {#h3getunidirectionaledgeboundary} Возвращает координаты, определяющие ориентированное ребро. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md index 8254dbb5d9f..e80002670e4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md @@ -6,8 +6,6 @@ title: 'Функции для работы с многоугольниками' doc_type: 'reference' --- - - ## WKT {#wkt} Возвращает геометрический объект WKT (Well Known Text) из различных [геометрических типов данных](../../data-types/geo.md). Поддерживаемые WKT-объекты: @@ -75,7 +73,6 @@ SELECT wkt([[[(0., 0.), (10., 0.), (10., 10.), (0., 10.)], [(4., 4.), (5., 4.), MULTIPOLYGON(((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10))) ``` - ## readWKTMultiPolygon {#readwktmultipolygon} Преобразует WKT (Well Known Text) MultiPolygon в тип MultiPolygon. @@ -101,7 +98,6 @@ SELECT MultiPolygon - ## readWKTPolygon {#readwktpolygon} Преобразует WKT (Well Known Text) MultiPolygon в тип Polygon. @@ -127,7 +123,6 @@ FORMAT Markdown Тип `Polygon`. - ## readWKTPoint {#readwktpoint} Функция `readWKTPoint` в ClickHouse разбирает представление геометрического объекта типа Point в формате Well-Known Text (WKT) и возвращает точку во внутреннем формате ClickHouse. @@ -156,7 +151,6 @@ SELECT readWKTPoint('POINT (1.2 3.4)'); (1.2,3.4) ``` - ## readWKTLineString {#readwktlinestring} Интерпретирует представление геометрии типа LineString в формате Well-Known Text (WKT) и возвращает его во внутреннем формате ClickHouse. 
@@ -185,7 +179,6 @@ SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'); [(1,1),(2,2),(3,3),(1,1)] ``` - ## readWKTMultiLineString {#readwktmultilinestring} Анализирует представление геометрии MultiLineString в формате Well-Known Text (WKT) и возвращает его во внутреннем формате ClickHouse. @@ -214,7 +207,6 @@ SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6) [[(1,1),(2,2),(3,3)],[(4,4),(5,5),(6,6)]] ``` - ## readWKTRing {#readwktring} Разбирает представление геометрии многоугольника в формате Well-Known Text (WKT) и возвращает кольцо (замкнутую ломаную линию, LineString) во внутреннем формате ClickHouse. @@ -243,7 +235,6 @@ SELECT readWKTRing('POLYGON ((1 1, 2 2, 3 3, 1 1))'); [(1,1),(2,2),(3,3),(1,1)] ``` - ## polygonsWithinSpherical {#polygonswithinspherical} Возвращает true или false в зависимости от того, расположен ли один многоугольник полностью внутри другого. См. документацию: [https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/within/within_2.html](https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/within/within_2.html) @@ -258,7 +249,6 @@ SELECT polygonsWithinSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535879 0 ``` - ## readWKBMultiPolygon {#readwkbmultipolygon} Преобразует мультиполигон в формате WKB (Well-Known Binary) в значение типа MultiPolygon. @@ -284,7 +274,6 @@ SELECT MultiPolygon - ## readWKBPolygon {#readwkbpolygon} Преобразует объект типа MultiPolygon в формате WKB (Well Known Binary) в объект типа Polygon. @@ -310,7 +299,6 @@ FORMAT Markdown Полигон - ## readWKBPoint {#readwkbpoint} Функция `readWKBPoint` в ClickHouse разбирает представление геометрии Point в формате Well-Known Binary (WKB) и возвращает точку во внутреннем формате ClickHouse. 
@@ -339,7 +327,6 @@ SELECT readWKBPoint(unhex('0101000000333333333333f33f3333333333330b40')); (1.2,3.4) ``` - ## readWKBLineString {#readwkblinestring} Парсит бинарное представление геометрии LineString в формате Well-Known Binary (WKB) и возвращает его во внутреннем формате ClickHouse. @@ -368,7 +355,6 @@ SELECT readWKBLineString(unhex('010200000004000000000000000000f03f000000000000f0 [(1,1),(2,2),(3,3),(1,1)] ``` - ## readWKBMultiLineString {#readwkbmultilinestring} Выполняет разбор представления геометрии MultiLineString в формате Well-Known Binary (WKB) и возвращает его во внутреннем формате ClickHouse. @@ -403,7 +389,6 @@ SELECT readWKBMultiLineString(unhex('0105000000020000000102000000030000000000000 UInt8, 0 — ложь, 1 — истина - ## polygonsDistanceSpherical {#polygonsdistancespherical} Вычисляет минимальное расстояние между двумя точками, где одна точка принадлежит первому полигону, а вторая — другому полигону. Spherical в названии функции означает, что координаты интерпретируются как координаты на идеальной сфере, что не соответствует форме Земли. Использование такой системы координат ускоряет выполнение, но, разумеется, снижает точность. 
@@ -426,7 +411,6 @@ SELECT polygonsDistanceSpherical([[[(0, 0), (0, 0.1), (0.1, 0.1), (0.1, 0)]]], [ Float64 - ## polygonsDistanceCartesian {#polygonsdistancecartesian} Вычисляет расстояние между двумя многоугольниками @@ -449,7 +433,6 @@ SELECT polygonsDistanceCartesian([[[(0, 0), (0, 0.1), (0.1, 0.1), (0.1, 0)]]], [ Float64 - ## polygonsEqualsCartesian {#polygonsequalscartesian} Возвращает значение `true`, если два многоугольника равны @@ -472,7 +455,6 @@ SELECT polygonsEqualsCartesian([[[(1., 1.), (1., 4.), (4., 4.), (4., 1.)]]], [[[ UInt8, 0 — ложь, 1 — истина - ## polygonsSymDifferenceSpherical {#polygonssymdifferencespherical} Вычисляет теоретико-множественную симметрическую разность (XOR) двух полигонов в пространстве @@ -495,7 +477,6 @@ Polygons MultiPolygon - ## polygonsSymDifferenceCartesian {#polygonssymdifferencecartesian} То же, что и `polygonsSymDifferenceSpherical`, но используются координаты в декартовой системе координат, которая ближе к модели реальной Земли. @@ -518,7 +499,6 @@ Polygons MultiPolygon - ## polygonsIntersectionSpherical {#polygonsintersectionspherical} Вычисляет пересечение (логическая операция AND) между полигонами, при этом координаты интерпретируются как сферические. @@ -541,7 +521,6 @@ Polygons MultiPolygon - ## polygonsWithinCartesian {#polygonswithincartesian} Возвращает true, если второй многоугольник находится внутри первого многоугольника. @@ -564,7 +543,6 @@ SELECT polygonsWithinCartesian([[[(2., 2.), (2., 3.), (3., 3.), (3., 2.)]]], [[[ UInt8, 0 — ложь, 1 — истина - ## polygonsIntersectCartesian {#polygonsintersectcartesian} Возвращает true, если два многоугольника пересекаются (имеют общую область или границу). @@ -587,7 +565,6 @@ SELECT polygonsIntersectCartesian([[[(2., 2.), (2., 3.), (3., 3.), (3., 2.)]]], UInt8: 0 — ложь, 1 — истина - ## polygonsIntersectSpherical {#polygonsintersectspherical} Возвращает true, если два многоугольника пересекаются (имеют общую площадь или общую границу). См. 
документацию: [https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/intersects.html](https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/intersects.html) @@ -610,7 +587,6 @@ SELECT polygonsIntersectSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535 UInt8: 0 — ложь, 1 — истина - ## polygonConvexHullCartesian {#polygonconvexhullcartesian} Вычисляет выпуклую оболочку. [Справка](https://www.boost.org/doc/libs/1_61_0/libs/geometry/doc/html/geometry/reference/algorithms/convex_hull.html) @@ -635,7 +611,6 @@ MultiPolygon Polygon - ## polygonAreaSpherical {#polygonareaspherical} Вычисляет площадь поверхности многоугольника. @@ -658,7 +633,6 @@ Polygon Float - ## polygonsUnionSpherical {#polygonsunionspherical} Вычисляет операцию объединения (OR). @@ -681,7 +655,6 @@ Polygons MultiPolygon - ## polygonPerimeterSpherical {#polygonperimeterspherical} Вычисляет периметр многоугольника. @@ -692,18 +665,12 @@ MultiPolygon Это многоугольник, описывающий территорию Зимбабве: - - ```text POLYGON((30.0107 -15.6462,30.0502 -15.6401,30.09 -15.6294,30.1301 -15.6237,30.1699 -15.6322,30.1956 -15.6491,30.2072 -15.6532,30.2231 -15.6497,30.231 -15.6447,30.2461 -15.6321,30.2549 -15.6289,30.2801 -15.6323,30.2962 -15.639,30.3281 -15.6524,30.3567 -15.6515,30.3963 -15.636,30.3977 -15.7168,30.3993 -15.812,30.4013 -15.9317,30.4026 -16.0012,30.5148 -16.0004,30.5866 -16,30.7497 -15.9989,30.8574 -15.9981,30.9019 -16.0071,30.9422 -16.0345,30.9583 -16.0511,30.9731 -16.062,30.9898 -16.0643,31.012 -16.0549,31.0237 -16.0452,31.0422 -16.0249,31.0569 -16.0176,31.0654 -16.0196,31.0733 -16.0255,31.0809 -16.0259,31.089 -16.0119,31.1141 -15.9969,31.1585 -16.0002,31.26 -16.0235,31.2789 -16.0303,31.2953 -16.0417,31.3096 -16.059,31.3284 -16.0928,31.3409 -16.1067,31.3603 -16.1169,31.3703 -16.1237,31.3746 -16.1329,31.3778 -16.1422,31.384 -16.1488,31.3877 -16.1496,31.3956 -16.1477,31.3996 -16.1473,31.4043 -16.1499,31.4041 
-16.1545,31.4027 -16.1594,31.4046 -16.1623,31.4241 -16.1647,31.4457 -16.165,31.4657 -16.1677,31.4806 -16.178,31.5192 -16.1965,31.6861 -16.2072,31.7107 -16.2179,31.7382 -16.2398,31.7988 -16.3037,31.8181 -16.3196,31.8601 -16.3408,31.8719 -16.3504,31.8807 -16.368,31.8856 -16.4063,31.8944 -16.4215,31.9103 -16.4289,32.0141 -16.4449,32.2118 -16.4402,32.2905 -16.4518,32.3937 -16.4918,32.5521 -16.5534,32.6718 -16.5998,32.6831 -16.6099,32.6879 -16.6243,32.6886 -16.6473,32.6987 -16.6868,32.7252 -16.7064,32.7309 -16.7087,32.7313 -16.7088,32.7399 -16.7032,32.7538 -16.6979,32.7693 -16.6955,32.8007 -16.6973,32.862 -16.7105,32.8934 -16.7124,32.9096 -16.7081,32.9396 -16.6898,32.9562 -16.6831,32.9685 -16.6816,32.9616 -16.7103,32.9334 -16.8158,32.9162 -16.8479,32.9005 -16.8678,32.8288 -16.9351,32.8301 -16.9415,32.8868 -17.0382,32.9285 -17.1095,32.9541 -17.1672,32.9678 -17.2289,32.9691 -17.2661,32.9694 -17.2761,32.9732 -17.2979,32.9836 -17.3178,32.9924 -17.3247,33.0147 -17.3367,33.0216 -17.3456,33.0225 -17.3615,33.0163 -17.3772,33.0117 -17.384,32.9974 -17.405,32.9582 -17.4785,32.9517 -17.4862,32.943 -17.4916,32.9366 -17.4983,32.9367 -17.5094,32.9472 -17.5432,32.9517 -17.5514,32.9691 -17.5646,33.0066 -17.581,33.0204 -17.5986,33.0245 -17.6192,33.0206 -17.6385,33.0041 -17.6756,33.0002 -17.7139,33.0032 -17.7577,32.9991 -17.7943,32.9736 -17.8106,32.957 -17.818,32.9461 -17.8347,32.9397 -17.8555,32.9369 -17.875,32.9384 -17.8946,32.9503 -17.9226,32.9521 -17.9402,32.9481 -17.9533,32.9404 -17.96,32.9324 -17.9649,32.9274 -17.9729,32.929 -17.9823,32.9412 -17.9963,32.9403 -18.0048,32.9349 -18.0246,32.9371 -18.0471,32.9723 -18.1503,32.9755 -18.1833,32.9749 -18.1908,32.9659 -18.2122,32.9582 -18.2254,32.9523 -18.233,32.9505 -18.2413,32.955 -18.2563,32.9702 -18.2775,33.0169 -18.3137,33.035 -18.3329,33.0428 -18.352,33.0381 -18.3631,33.0092 -18.3839,32.9882 -18.4132,32.9854 -18.4125,32.9868 -18.4223,32.9995 -18.4367,33.003 -18.4469,32.9964 -18.4671,32.9786 -18.4801,32.9566 -18.4899,32.9371 
-18.501,32.9193 -18.51,32.9003 -18.5153,32.8831 -18.5221,32.8707 -18.5358,32.8683 -18.5526,32.8717 -18.5732,32.8845 -18.609,32.9146 -18.6659,32.9223 -18.6932,32.9202 -18.7262,32.9133 -18.753,32.9025 -18.7745,32.8852 -18.7878,32.8589 -18.79,32.8179 -18.787,32.7876 -18.7913,32.6914 -18.8343,32.6899 -18.8432,32.6968 -18.8972,32.7032 -18.9119,32.7158 -18.9198,32.7051 -18.9275,32.6922 -18.9343,32.6825 -18.9427,32.6811 -18.955,32.6886 -18.9773,32.6903 -18.9882,32.6886 -19.001,32.6911 -19.0143,32.699 -19.0222,32.7103 -19.026,32.7239 -19.0266,32.786 -19.0177,32.8034 -19.0196,32.8142 -19.0238,32.82 -19.0283,32.823 -19.0352,32.8253 -19.0468,32.8302 -19.0591,32.8381 -19.0669,32.8475 -19.0739,32.8559 -19.0837,32.8623 -19.1181,32.8332 -19.242,32.8322 -19.2667,32.8287 -19.2846,32.8207 -19.3013,32.8061 -19.3234,32.7688 -19.3636,32.7665 -19.3734,32.7685 -19.4028,32.7622 -19.4434,32.7634 -19.464,32.7739 -19.4759,32.7931 -19.4767,32.8113 -19.4745,32.8254 -19.4792,32.8322 -19.5009,32.8325 -19.5193,32.8254 -19.5916,32.8257 -19.6008,32.8282 -19.6106,32.8296 -19.6237,32.8254 -19.6333,32.8195 -19.642,32.8163 -19.6521,32.8196 -19.6743,32.831 -19.6852,32.8491 -19.6891,32.8722 -19.6902,32.8947 -19.6843,32.9246 -19.6553,32.9432 -19.6493,32.961 -19.6588,32.9624 -19.6791,32.9541 -19.7178,32.9624 -19.7354,32.9791 -19.7514,33.0006 -19.7643,33.0228 -19.7731,33.0328 -19.7842,33.0296 -19.8034,33.0229 -19.8269,33.0213 -19.8681,33.002 -19.927,32.9984 -20.0009,33.0044 -20.0243,33.0073 -20.032,32.9537 -20.0302,32.9401 -20.0415,32.9343 -20.0721,32.9265 -20.0865,32.9107 -20.0911,32.8944 -20.094,32.8853 -20.103,32.8779 -20.1517,32.8729 -20.1672,32.8593 -20.1909,32.8571 -20.2006,32.8583 -20.2075,32.8651 -20.2209,32.8656 -20.2289,32.8584 -20.2595,32.853 -20.2739,32.8452 -20.2867,32.8008 -20.3386,32.7359 -20.4142,32.7044 -20.4718,32.6718 -20.5318,32.6465 -20.558,32.6037 -20.5648,32.5565 -20.5593,32.5131 -20.5646,32.4816 -20.603,32.4711 -20.6455,32.4691 -20.6868,32.4835 -20.7942,32.4972 -20.8981,32.491 
-20.9363,32.4677 -20.9802,32.4171 -21.0409,32.3398 -21.1341,32.3453 -21.1428,32.3599 -21.1514,32.3689 -21.163,32.3734 -21.1636,32.3777 -21.1634,32.3806 -21.1655,32.3805 -21.1722,32.3769 -21.1785,32.373 -21.184,32.3717 -21.1879,32.4446 -21.3047,32.4458 -21.309,32.4472 -21.3137,32.4085 -21.2903,32.373 -21.3279,32.3245 -21.3782,32.2722 -21.4325,32.2197 -21.4869,32.1673 -21.5413,32.1148 -21.5956,32.0624 -21.65,32.01 -21.7045,31.9576 -21.7588,31.9052 -21.8132,31.8527 -21.8676,31.8003 -21.922,31.7478 -21.9764,31.6955 -22.0307,31.6431 -22.0852,31.5907 -22.1396,31.5382 -22.1939,31.4858 -22.2483,31.4338 -22.302,31.3687 -22.345,31.2889 -22.3973,31.2656 -22.3655,31.2556 -22.358,31.2457 -22.3575,31.2296 -22.364,31.2215 -22.3649,31.2135 -22.3619,31.1979 -22.3526,31.1907 -22.3506,31.1837 -22.3456,31.1633 -22.3226,31.1526 -22.3164,31.1377 -22.3185,31.1045 -22.3334,31.097 -22.3349,31.0876 -22.3369,31.0703 -22.3337,31.0361 -22.3196,30.9272 -22.2957,30.8671 -22.2896,30.8379 -22.2823,30.8053 -22.2945,30.6939 -22.3028,30.6743 -22.3086,30.6474 -22.3264,30.6324 -22.3307,30.6256 -22.3286,30.6103 -22.3187,30.6011 -22.3164,30.5722 -22.3166,30.5074 -22.3096,30.4885 -22.3102,30.4692 -22.3151,30.4317 -22.3312,30.4127 -22.3369,30.3721 -22.3435,30.335 -22.3447,30.3008 -22.337,30.2693 -22.3164,30.2553 -22.3047,30.2404 -22.2962,30.2217 -22.2909,30.197 -22.2891,30.1527 -22.2948,30.1351 -22.2936,30.1111 -22.2823,30.0826 -22.2629,30.0679 -22.2571,30.0381 -22.2538,30.0359 -22.2506,30.0345 -22.2461,30.0155 -22.227,30.0053 -22.2223,29.9838 -22.2177,29.974 -22.214,29.9467 -22.1983,29.9321 -22.1944,29.896 -22.1914,29.8715 -22.1793,29.8373 -22.1724,29.7792 -22.1364,29.7589 -22.1309,29.6914 -22.1341,29.6796 -22.1383,29.6614 -22.1265,29.6411 -22.1292,29.604 -22.1451,29.5702 -22.142,29.551 -22.146,29.5425 -22.1625,29.5318 -22.1724,29.5069 -22.1701,29.4569 -22.1588,29.4361 -22.1631,29.3995 -22.1822,29.378 -22.1929,29.3633 -22.1923,29.3569 -22.1909,29.3501 -22.1867,29.2736 -22.1251,29.2673 -22.1158,29.2596 
-22.0961,29.2541 -22.0871,29.2444 -22.0757,29.2393 -22.0726,29.1449 -22.0753,29.108 -22.0692,29.0708 -22.051,29.0405 -22.0209,29.0216 -21.9828,29.0138 -21.9404,29.0179 -21.8981,29.0289 -21.8766,29.0454 -21.8526,29.0576 -21.8292,29.0553 -21.81,29.0387 -21.7979,28.9987 -21.786,28.9808 -21.7748,28.9519 -21.7683,28.891 -21.7649,28.8609 -21.7574,28.7142 -21.6935,28.6684 -21.68,28.6297 -21.6513,28.6157 -21.6471,28.5859 -21.6444,28.554 -21.6366,28.5429 -21.6383,28.5325 -21.6431,28.4973 -21.6515,28.4814 -21.6574,28.4646 -21.6603,28.4431 -21.6558,28.3618 -21.6163,28.3219 -21.6035,28.2849 -21.5969,28.1657 -21.5952,28.0908 -21.5813,28.0329 -21.5779,28.0166 -21.5729,28.0026 -21.5642,27.9904 -21.5519,27.9847 -21.5429,27.9757 -21.5226,27.9706 -21.5144,27.9637 -21.5105,27.9581 -21.5115,27.9532 -21.5105,27.9493 -21.5008,27.9544 -21.4878,27.9504 -21.482,27.9433 -21.4799,27.9399 -21.478,27.9419 -21.4685,27.9496 -21.4565,27.953 -21.4487,27.9502 -21.4383,27.9205 -21.3812,27.9042 -21.3647,27.8978 -21.3554,27.8962 -21.3479,27.8967 -21.3324,27.8944 -21.3243,27.885 -21.3102,27.8491 -21.2697,27.8236 -21.2317,27.7938 -21.1974,27.7244 -21.1497,27.7092 -21.1345,27.6748 -21.0901,27.6666 -21.0712,27.6668 -21.0538,27.679 -21.0007,27.6804 -20.9796,27.6727 -20.9235,27.6726 -20.9137,27.6751 -20.8913,27.6748 -20.8799,27.676 -20.8667,27.6818 -20.8576,27.689 -20.849,27.6944 -20.8377,27.7096 -20.7567,27.7073 -20.7167,27.6825 -20.6373,27.6904 -20.6015,27.7026 -20.5661,27.7056 -20.5267,27.6981 -20.5091,27.6838 -20.4961,27.666 -20.4891,27.6258 -20.4886,27.5909 -20.4733,27.5341 -20.483,27.4539 -20.4733,27.3407 -20.473,27.306 -20.4774,27.2684 -20.4958,27.284 -20.3515,27.266 -20.2342,27.2149 -20.1105,27.2018 -20.093,27.1837 -20.0823,27.1629 -20.0766,27.1419 -20.0733,27.1297 -20.0729,27.1198 -20.0739,27.1096 -20.0732,27.0973 -20.0689,27.0865 -20.0605,27.0692 -20.0374,27.0601 -20.0276,27.0267 -20.0101,26.9943 -20.0068,26.9611 -20.0072,26.9251 -20.0009,26.8119 -19.9464,26.7745 -19.9398,26.7508 -19.9396,26.731 
-19.9359,26.7139 -19.9274,26.6986 -19.9125,26.6848 -19.8945,26.6772 -19.8868,26.6738 -19.8834,26.6594 -19.8757,26.6141 -19.8634,26.5956 -19.8556,26.5819 -19.8421,26.5748 -19.8195,26.5663 -19.8008,26.5493 -19.7841,26.5089 -19.7593,26.4897 -19.7519,26.4503 -19.7433,26.4319 -19.7365,26.4128 -19.7196,26.3852 -19.6791,26.3627 -19.6676,26.3323 -19.6624,26.3244 -19.6591,26.3122 -19.6514,26.3125 -19.6496,26.3191 -19.6463,26.3263 -19.6339,26.3335 -19.613,26.331 -19.605,26.3211 -19.592,26.3132 -19.5842,26.3035 -19.5773,26.2926 -19.5725,26.2391 -19.5715,26.1945 -19.5602,26.1555 -19.5372,26.1303 -19.5011,26.0344 -19.2437,26.0114 -19.1998,25.9811 -19.1618,25.9565 -19.1221,25.9486 -19.1033,25.9449 -19.0792,25.9481 -19.0587,25.9644 -19.0216,25.9678 -19.001,25.9674 -18.9999,25.9407 -18.9213,25.8153 -18.814,25.7795 -18.7388,25.7734 -18.6656,25.7619 -18.6303,25.7369 -18.6087,25.6983 -18.5902,25.6695 -18.566,25.6221 -18.5011,25.6084 -18.4877,25.5744 -18.4657,25.5085 -18.3991,25.4956 -18.3789,25.4905 -18.3655,25.4812 -18.3234,25.4732 -18.3034,25.4409 -18.2532,25.4088 -18.176,25.3875 -18.139,25.3574 -18.1158,25.3234 -18.0966,25.2964 -18.0686,25.255 -18.0011,25.2261 -17.9319,25.2194 -17.908,25.2194 -17.8798,25.2598 -17.7941,25.2667 -17.8009,25.2854 -17.8093,25.3159 -17.8321,25.3355 -17.8412,25.3453 -17.8426,25.3765 -17.8412,25.4095 -17.853,25.4203 -17.8549,25.4956 -17.8549,25.5007 -17.856,25.5102 -17.8612,25.5165 -17.8623,25.5221 -17.8601,25.5309 -17.851,25.5368 -17.8487,25.604 -17.8362,25.657 -17.8139,25.6814 -17.8115,25.6942 -17.8194,25.7064 -17.8299,25.7438 -17.8394,25.766 -17.8498,25.786 -17.8622,25.7947 -17.8727,25.8044 -17.8882,25.8497 -17.9067,25.8636 -17.9238,25.8475 -17.9294,25.8462 -17.9437,25.8535 -17.96,25.8636 -17.9716,25.9245 -17.999,25.967 -18.0005,25.9785 -17.999,26.0337 -17.9716,26.0406 -17.9785,26.0466 -17.9663,26.0625 -17.9629,26.0812 -17.9624,26.0952 -17.9585,26.0962 -17.9546,26.0942 -17.9419,26.0952 -17.9381,26.1012 -17.9358,26.1186 -17.9316,26.1354 -17.9226,26.1586 
-17.9183,26.1675 -17.9136,26.203 -17.8872,26.2119 -17.8828,26.2211 -17.8863,26.2282 -17.8947,26.2339 -17.904,26.2392 -17.9102,26.2483 -17.9134,26.2943 -17.9185,26.3038 -17.9228,26.312 -17.9284,26.3183 -17.9344,26.3255 -17.936,26.3627 -17.9306,26.4086 -17.939,26.4855 -17.9793,26.5271 -17.992,26.5536 -17.9965,26.5702 -18.0029,26.5834 -18.0132,26.5989 -18.03,26.6127 -18.0412,26.6288 -18.0492,26.6857 -18.0668,26.7 -18.0692,26.7119 -18.0658,26.7406 -18.0405,26.7536 -18.033,26.7697 -18.029,26.794 -18.0262,26.8883 -17.9846,26.912 -17.992,26.9487 -17.9689,26.9592 -17.9647,27.0063 -17.9627,27.0213 -17.9585,27.0485 -17.9443,27.0782 -17.917,27.1154 -17.8822,27.149 -17.8425,27.1465 -17.8189,27.1453 -17.7941,27.147 -17.7839,27.1571 -17.7693,27.4221 -17.5048,27.5243 -17.4151,27.5773 -17.3631,27.6045 -17.3128,27.6249 -17.2333,27.6412 -17.1985,27.7773 -17.0012,27.8169 -16.9596,27.8686 -16.9297,28.023 -16.8654,28.1139 -16.8276,28.2125 -16.7486,28.2801 -16.7065,28.6433 -16.5688,28.6907 -16.5603,28.7188 -16.5603,28.7328 -16.5581,28.7414 -16.5507,28.7611 -16.5323,28.7693 -16.5152,28.8089 -16.4863,28.8225 -16.4708,28.8291 -16.4346,28.8331 -16.4264,28.8572 -16.3882,28.857 -16.3655,28.8405 -16.3236,28.8368 -16.3063,28.8403 -16.2847,28.8642 -16.2312,28.8471 -16.2027,28.8525 -16.1628,28.8654 -16.1212,28.871 -16.0872,28.8685 -16.0822,28.8638 -16.0766,28.8593 -16.0696,28.8572 -16.0605,28.8603 -16.0494,28.8741 -16.0289,28.8772 -16.022,28.8989 -15.9955,28.9324 -15.9637,28.9469 -15.9572,28.9513 -15.9553,28.9728 -15.9514,29.0181 -15.9506,29.0423 -15.9463,29.0551 -15.9344,29.0763 -15.8954,29.0862 -15.8846,29.1022 -15.8709,29.1217 -15.8593,29.1419 -15.8545,29.151 -15.8488,29.1863 -15.8128,29.407 -15.7142,29.4221 -15.711,29.5085 -15.7036,29.5262 -15.6928,29.5634 -15.6621,29.5872 -15.6557,29.6086 -15.6584,29.628 -15.6636,29.6485 -15.6666,29.6728 -15.6633,29.73 -15.6447,29.7733 -15.6381,29.8143 -15.6197,29.8373 -15.6148,29.8818 -15.6188,29.9675 -15.6415,30.0107 -15.6462)) ``` - - #### Использование 
функции polygonPerimeterSpherical {#usage-of-polygon-perimeter-spherical} - - ```sql SELECT round(polygonPerimeterSpherical([(30.010654, -15.646227), (30.050238, -15.640129), (30.090029, -15.629381), (30.130129, -15.623696), (30.16992, -15.632171), (30.195552, -15.649121), (30.207231, -15.653152), (30.223147, -15.649741), (30.231002, -15.644677), (30.246091, -15.632068), (30.254876, -15.628864), (30.280094, -15.632275), (30.296196, -15.639042), (30.32805, -15.652428), (30.356679, -15.651498), (30.396263, -15.635995), (30.39771, -15.716817), (30.39926, -15.812005), (30.401327, -15.931688), (30.402568, -16.001244), (30.514809, -16.000418), (30.586587, -16.000004), (30.74973, -15.998867), (30.857424, -15.998144), (30.901865, -16.007136), (30.942173, -16.034524), (30.958296, -16.05106), (30.973075, -16.062016), (30.989767, -16.06429), (31.012039, -16.054885), (31.023718, -16.045169), (31.042218, -16.024912), (31.056895, -16.017574), (31.065421, -16.019641), (31.073328, -16.025532), (31.080872, -16.025946), (31.089037, -16.01189), (31.1141, -15.996904), (31.15849, -16.000211), (31.259983, -16.023465), (31.278897, -16.030287), (31.29533, -16.041655), (31.309592, -16.059019), (31.328351, -16.092815), (31.340908, -16.106664), (31.360339, -16.116896), (31.37026, -16.123718), (31.374601, -16.132916), (31.377754, -16.142218), (31.384006, -16.148832), (31.387727, -16.149556), (31.395582, -16.147695), (31.399613, -16.147282), (31.404315, -16.149866), (31.404057, -16.154517), (31.402713, -16.159374), (31.404574, -16.162268), (31.424107, -16.164749), (31.445708, -16.164955), (31.465655, -16.167746), (31.480641, -16.177978), (31.519192, -16.196478), (31.686107, -16.207227), (31.710705, -16.217872), (31.738197, -16.239783), (31.798761, -16.303655), (31.818088, -16.319571), (31.86005, -16.340759), (31.871935, -16.35037), (31.88072, -16.368044), (31.88563, -16.406284), (31.894363, -16.421477), (31.910279, -16.428919), (32.014149, -16.444938), (32.211759, -16.440184), (32.290463, 
-16.45176), (32.393661, -16.491757), (32.5521, -16.553355), (32.671783, -16.599761), (32.6831, -16.609889), (32.687906, -16.624255), (32.68863, -16.647303), (32.698655, -16.686784), (32.725217, -16.706421), (32.73095, -16.708656), (32.731314, -16.708798), (32.739893, -16.703217), (32.753845, -16.697946), (32.769348, -16.695466), (32.800664, -16.697326), (32.862004, -16.710452), (32.893372, -16.712415), (32.909598, -16.708075), (32.93957, -16.689781), (32.95621, -16.683063), (32.968509, -16.681615999999998), (32.961585, -16.710348), (32.933369, -16.815768), (32.916213, -16.847911), (32.900503, -16.867755), (32.828776, -16.935141), (32.83012, -16.941549), (32.886757, -17.038184), (32.928512, -17.109497), (32.954143, -17.167168), (32.967786, -17.22887), (32.96909, -17.266115), (32.969439, -17.276102), (32.973212, -17.297909), (32.983599, -17.317753), (32.992384, -17.324678), (33.014656, -17.336667), (33.021633, -17.345555), (33.022459, -17.361471), (33.016258, -17.377181), (33.011651, -17.383991), (32.997448, -17.404983), (32.958174, -17.478467), (32.951663, -17.486218), (32.942981, -17.491593), (32.936573, -17.498311), (32.936676, -17.509369), (32.947218, -17.543166), (32.951663, -17.551434), (32.969129, -17.56456), (33.006646, -17.580993), (33.020392, -17.598563), (33.024526, -17.619233), (33.020599, -17.638457), (33.004063, -17.675561), (33.000238, -17.713905), (33.003184, -17.757726), (32.999102, -17.794313), (32.973573, -17.810643), (32.957037, -17.817981), (32.946082, -17.834724), (32.939674, -17.855498), (32.936883, -17.875032), (32.938433, -17.894566), (32.950267, -17.922574), (32.952128, -17.940247), (32.948149, -17.95327), (32.940397, -17.959988), (32.932439, -17.964949), (32.927375, -17.972907), (32.928977, -17.982312), (32.941224, -17.996265), (32.940294, -18.004843), (32.934919, -18.024583), (32.93709, -18.047114), (32.972282, -18.150261), (32.975537, -18.183333), (32.974865, -18.190775), (32.965925, -18.212169), (32.958174, -18.225398), (32.952283, 
-18.233046), (32.950525999999996, -18.241314), (32.95497, -18.256301), (32.970163, -18.277488), (33.016878, -18.313661), (33.034965, -18.332885), (33.042768, -18.352005), (33.038066, -18.363064), (33.00923, -18.383941), (32.988198, -18.41319), (32.985356, -18.412467), (32.986803, -18.422285), (32.999515, -18.436651), (33.003029, -18.446883), (32.996414, -18.46714), (32.978586, -18.48006), (32.956624, -18.489878), (32.937142, -18.50104), (32.919313, -18.510032), (32.900296, -18.515303), (32.88314, -18.522124), (32.870737, -18.535767), (32.868257, -18.552613), (32.871668, -18.57318), (32.884483, -18.609044), (32.914559, -18.665888), (32.92231, -18.693173), (32.920243, -18.726246), (32.913267, -18.753014), (32.902518, -18.774512), (32.885207, -18.787844), (32.858852, -18.790015), (32.817924, -18.787018), (32.787642, -18.791255), (32.69142, -18.83425), (32.68987, -18.843241), (32.696794, -18.897192), (32.703202, -18.911868), (32.71576, -18.919826), (32.705063, -18.927474), (32.692247, -18.934295), (32.682532, -18.942667), (32.681085, -18.954966), (32.68863, -18.97729), (32.690283, -18.988246), (32.68863, -19.000958), (32.691058, -19.01429), (32.698965, -19.022249), (32.710282, -19.025969), (32.723873, -19.026589), (32.785988, -19.017701), (32.803351, -19.019561), (32.814203, -19.023799), (32.819991, -19.028346), (32.822988, -19.035168), (32.825262, -19.046847), (32.830223, -19.059146), (32.83813, -19.066897), (32.847483, -19.073925), (32.855906, -19.083744), (32.862262, -19.118057), (32.83322, -19.241977), (32.832187, -19.266678), (32.828673, -19.284558), (32.820715, -19.301301), (32.806142, -19.323419), (32.768831, -19.363623), (32.766454, -19.373442), (32.768521, -19.402794), (32.762217, -19.443412), (32.763354, -19.463979), (32.773947, -19.475864), (32.793119, -19.476691), (32.811309, -19.474521), (32.825365, -19.479172), (32.832187, -19.500876), (32.832497000000004, -19.519273), (32.825365, -19.59162), (32.825675, -19.600818), (32.828156, -19.610636), (32.829603, 
-19.623659), (32.825365, -19.633271), (32.819474, -19.641952), (32.81627, -19.652081), (32.819629, -19.674302), (32.83105, -19.685154), (32.849137, -19.689081), (32.872184, -19.690218), (32.894715, -19.684327), (32.924584, -19.655285), (32.943188, -19.64929), (32.960964, -19.658799), (32.962411, -19.679056), (32.954143, -19.717813), (32.962411, -19.735383), (32.979051, -19.751403), (33.0006, -19.764322), (33.022769, -19.773107), (33.032795, -19.784166), (33.029642, -19.80339), (33.022873, -19.826851), (33.021322, -19.868088), (33.001995, -19.927), (32.998378, -20.000897), (33.004373, -20.024255), (33.007266, -20.032006), (32.95373, -20.030249), (32.940087, -20.041515), (32.934299, -20.072107), (32.926548, -20.086473), (32.910683, -20.091124), (32.894405, -20.094018), (32.88531, -20.10301), (32.877869, -20.151689), (32.872908, -20.167192), (32.859265, -20.190859), (32.857095, -20.200575), (32.858335, -20.207499), (32.865053, -20.220935), (32.86557, -20.228893), (32.858438, -20.259486), (32.852961, -20.273852), (32.845209, -20.286668), (32.800767, -20.338551), (32.735862, -20.414205), (32.704443, -20.471773), (32.671783, -20.531821), (32.646462, -20.557969), (32.603674, -20.56479), (32.556545, -20.559312), (32.513136, -20.564583), (32.481614, -20.603031), (32.471072, -20.645509), (32.469108, -20.68685), (32.483474, -20.794233), (32.49722, -20.898103), (32.491019, -20.936344), (32.467661, -20.980165), (32.417122, -21.040937), (32.339814, -21.134058), (32.345343, -21.142843), (32.359864, -21.151421), (32.368856, -21.162997), (32.373352, -21.163617), (32.377744, -21.16341), (32.380638, -21.165477), (32.380535, -21.172195), (32.376866, -21.178499), (32.37299, -21.183977), (32.37175, -21.187905), (32.444613, -21.304693), (32.445849, -21.308994), (32.447197, -21.313685), (32.408543, -21.290327), (32.37299, -21.327948), (32.324517, -21.378177), (32.272221, -21.432541), (32.219718, -21.486904), (32.167318, -21.541268), (32.114814, -21.595632), (32.062415, -21.649995), 
(32.010015, -21.704462), (31.957615, -21.758826), (31.905215, -21.813189), (31.852712, -21.867553), (31.800312, -21.92202), (31.747808, -21.976384), (31.695512, -22.030747), (31.643112, -22.085214), (31.590712, -22.139578), (31.538209, -22.193941), (31.485809, -22.248305), (31.433822, -22.302048), (31.36871, -22.345043), (31.288922, -22.39734), (31.265616, -22.365507), (31.255642, -22.357962), (31.24572, -22.357549), (31.229597, -22.363957), (31.221536, -22.364887), (31.213474, -22.36189), (31.197868, -22.352588), (31.190685, -22.350624), (31.183657, -22.34556), (31.163348, -22.322616), (31.152599, -22.316414), (31.137717, -22.318482), (31.10454, -22.333364), (31.097048, -22.334922), (31.087642, -22.336878), (31.07033, -22.333674), (31.036121, -22.319618), (30.927187, -22.295744), (30.867087, -22.289646), (30.83789, -22.282308), (30.805282, -22.294504), (30.693919, -22.302772), (30.674282, -22.30856), (30.647410999999998, -22.32644), (30.632424, -22.330677), (30.625551, -22.32861), (30.610307, -22.318688), (30.601108, -22.316414), (30.57217, -22.316621), (30.507367, -22.309593), (30.488454, -22.310213), (30.46923, -22.315071), (30.431713, -22.331194), (30.412696, -22.336878), (30.372078, -22.343493), (30.334975, -22.344733), (30.300765, -22.336982), (30.269346, -22.316414), (30.25529, -22.304736), (30.240407, -22.296157), (30.2217, -22.290886), (30.196999, -22.289129), (30.15266, -22.294814), (30.13509, -22.293574), (30.111113, -22.282308), (30.082587, -22.262878), (30.067911, -22.25709), (30.038145, -22.253783), (30.035872, -22.250579), (30.034528, -22.246135), (30.015511, -22.227014), (30.005279, -22.22226), (29.983782, -22.217713), (29.973963, -22.213992), (29.946678, -22.198282), (29.932105, -22.194355), (29.896035, -22.191358), (29.871489, -22.179265), (29.837331, -22.172444), (29.779246, -22.136374), (29.758886, -22.130896), (29.691448, -22.1341), (29.679614, -22.138338), (29.661424, -22.126452), (29.641064, -22.129242), (29.60396, -22.145055), (29.570164, 
-22.141955), (29.551043, -22.145986), (29.542517, -22.162522), (29.53182, -22.172444), (29.506912, -22.170067), (29.456889, -22.158801), (29.436115, -22.163142), (29.399528, -22.182159), (29.378031, -22.192908), (29.363250999999998, -22.192288), (29.356947, -22.190944000000002), (29.350074, -22.186707), (29.273644, -22.125108), (29.26734, -22.115807), (29.259588, -22.096066), (29.254111, -22.087074), (29.244395, -22.075706), (29.239331, -22.072605), (29.144867, -22.075292), (29.10797, -22.069194), (29.070763, -22.051004), (29.040532, -22.020929), (29.021567, -21.982791), (29.013815, -21.940417), (29.017949, -21.898145), (29.028905, -21.876648), (29.045441, -21.852567), (29.057637, -21.829209), (29.05526, -21.809985), (29.038723, -21.797893), (28.998726, -21.786008), (28.980846, -21.774845), (28.951907, -21.768334), (28.891032, -21.764924), (28.860853, -21.757379), (28.714195, -21.693507), (28.66841, -21.679968), (28.629704, -21.651339), (28.6157, -21.647101), (28.585934, -21.644414), (28.553998, -21.636559), (28.542939, -21.638316), (28.532501, -21.643071), (28.497309, -21.651546), (28.481393, -21.657437), (28.464598, -21.660331), (28.443101, -21.655783), (28.361762, -21.616302), (28.321919, -21.603486), (28.284867, -21.596872), (28.165702, -21.595218), (28.090771, -21.581266), (28.032893, -21.577855), (28.016563, -21.572894), (28.002559, -21.564212), (27.990415, -21.551913), (27.984731, -21.542922), (27.975739, -21.522561), (27.970571, -21.514396), (27.963698, -21.510469), (27.958066, -21.511502), (27.953208, -21.510469), (27.949281, -21.500754), (27.954448, -21.487835), (27.950418, -21.482047), (27.943338, -21.479876), (27.939876, -21.478016), (27.941943, -21.468508), (27.949642, -21.456519), (27.953001, -21.448664), (27.950211, -21.438329), (27.920549, -21.381174), (27.904219, -21.364741), (27.897811, -21.35544), (27.896157, -21.347895), (27.896674, -21.332392), (27.8944, -21.32433), (27.884995, -21.310171), (27.849132, -21.269657), (27.823604, -21.231726), 
(27.793838, -21.197413), (27.724385, -21.149664), (27.709192, -21.134471), (27.674775, -21.090133), (27.666611, -21.071219), (27.666817, -21.053753), (27.678961, -21.000733), (27.680356, -20.979649), (27.672657, -20.923528), (27.672605, -20.913709), (27.675085, -20.891282), (27.674775, -20.879913), (27.676016, -20.866684), (27.681803, -20.857589), (27.689038, -20.849011), (27.694412, -20.837744999999998), (27.709605, -20.756716), (27.707332, -20.716719), (27.682475, -20.637344), (27.690382, -20.60148), (27.702629, -20.566134), (27.705575, -20.526653), (27.698133, -20.509083), (27.683767, -20.49606), (27.66599, -20.489136), (27.625786, -20.488619), (27.590853, -20.473323), (27.534112, -20.483038), (27.45391, -20.473323), (27.340739, -20.473013), (27.306012, -20.477354), (27.268392, -20.49575), (27.283998, -20.35147), (27.266015, -20.234164), (27.214907, -20.110451), (27.201781, -20.092984), (27.183746, -20.082339), (27.16292, -20.076551), (27.141888, -20.073347), (27.129692, -20.072934), (27.119771, -20.073864), (27.109642, -20.073244), (27.097343, -20.068903), (27.086491, -20.060532), (27.069231, -20.03738), (27.060136, -20.027562), (27.02665, -20.010095), (26.9943, -20.006788), (26.961072, -20.007201), (26.925054, -20.000897), (26.811882, -19.94643), (26.774469, -19.939815), (26.750801, -19.939609), (26.730957, -19.935888), (26.713904, -19.927413), (26.698608, -19.91253), (26.684758, -19.894547), (26.67717, -19.886815), (26.673803, -19.883385), (26.659437, -19.875737), (26.614065, -19.863438), (26.595565, -19.855583), (26.581922, -19.842147), (26.574791, -19.819513), (26.566316, -19.800806), (26.549263, -19.784063), (26.508852, -19.759258), (26.489731, -19.75192), (26.450251, -19.743342), (26.431854, -19.73652), (26.412837, -19.71957), (26.385242, -19.679056), (26.362711, -19.667584), (26.332325, -19.662416), (26.324367, -19.659109), (26.312171, -19.651358), (26.312481, -19.649601), (26.319096, -19.646293), (26.326331, -19.633891), (26.333462, -19.613014), 
(26.330981, -19.604952), (26.32106, -19.592033), (26.313205, -19.584178), (26.30349, -19.577254), (26.292638, -19.572499), (26.239101, -19.571466), (26.194452, -19.560200000000002), (26.155488, -19.537153), (26.13027, -19.501082), (26.034359, -19.243734), (26.011414, -19.199809), (25.981132, -19.161775), (25.956534, -19.122088), (25.948576, -19.103277), (25.944855, -19.079196), (25.948059, -19.058732), (25.964389, -19.021629), (25.9678, -19.000958), (25.967449, -18.999925), (25.940721, -18.921273), (25.815251, -18.813993), (25.779491, -18.738752), (25.773393, -18.665578), (25.761921, -18.630335), (25.736909, -18.608734), (25.698255, -18.590234), (25.669523, -18.566049), (25.622084, -18.501143), (25.608442, -18.487708), (25.574439, -18.465693), (25.508499, -18.399134), (25.49558, -18.378877), (25.490516, -18.365545), (25.481163, -18.323377), (25.473204, -18.303429), (25.440855, -18.2532), (25.408816, -18.175995), (25.387525, -18.138995), (25.357449, -18.115844), (25.323446, -18.09662), (25.296368, -18.068612), (25.255026, -18.001122), (25.226088, -17.931876), (25.21937, -17.908001), (25.21937, -17.879786), (25.259781, -17.794107), (25.266705, -17.800928), (25.285412, -17.809299), (25.315901, -17.83214), (25.335538, -17.841235), (25.345254, -17.842579), (25.376466, -17.841235), (25.409539, -17.853018), (25.420288, -17.854878), (25.49558, -17.854878), (25.500748, -17.856015), (25.510153, -17.861183), (25.516458, -17.862319), (25.522142, -17.860149), (25.530927, -17.850951), (25.536818, -17.848677), (25.603997, -17.836171), (25.657017, -17.81395), (25.681409, -17.81147), (25.694224, -17.819428), (25.70642, -17.829867), (25.743834, -17.839375), (25.765951, -17.849814), (25.786002, -17.862216), (25.794683, -17.872655), (25.804399, -17.888158), (25.849667, -17.906658), (25.86362, -17.923814), (25.847497, -17.929395), (25.846153, -17.943658), (25.853490999999998, -17.959988), (25.86362, -17.971563), (25.924495, -17.998952), (25.966973, -18.000502), (25.978548, -17.998952), 
(26.033739, -17.971563), (26.04056, -17.978488), (26.046554, -17.966292), (26.062471, -17.962882), (26.081178, -17.962365), (26.095234, -17.958541), (26.096164, -17.954614), (26.0942, -17.941901), (26.095234, -17.938077), (26.101228, -17.935803), (26.118591, -17.931566), (26.135438, -17.922574), (26.158589, -17.918337), (26.167477, -17.913582), (26.203031, -17.887227), (26.211919, -17.882783), (26.221117, -17.886297), (26.228249, -17.894669), (26.233933, -17.903971), (26.239204, -17.910172), (26.248299, -17.913376), (26.294291, -17.918543), (26.3038, -17.922781), (26.311965, -17.928362), (26.318269, -17.934356), (26.325504, -17.93601), (26.362711, -17.930636), (26.408599, -17.939007), (26.485494, -17.979315), (26.527145, -17.992027), (26.553604, -17.996471), (26.570243, -18.002879), (26.583369, -18.013215), (26.598872, -18.029958), (26.612721, -18.041223), (26.628844, -18.049181), (26.685689, -18.066751), (26.700003, -18.069232), (26.71194, -18.065821), (26.740569, -18.0405), (26.753591, -18.032955), (26.769714, -18.029028), (26.794002, -18.026237), (26.88826, -17.984586), (26.912031, -17.992027), (26.94867, -17.968876), (26.95916, -17.964742), (27.006289, -17.962675), (27.021275, -17.958541), (27.048457, -17.944278), (27.078171, -17.916993), (27.11543, -17.882163), (27.149019, -17.842476), (27.146539, -17.818911), (27.145299, -17.794107), (27.146952, -17.783875), (27.157081, -17.769302), (27.422078, -17.504822), (27.524294, -17.415112), (27.577314, -17.363125), (27.604495, -17.312792), (27.624856, -17.233314), (27.641186, -17.198484), (27.777301, -17.001183), (27.816886, -16.959636), (27.868562, -16.929663), (28.022993, -16.865393), (28.113922, -16.827551), (28.21252, -16.748589), (28.280113, -16.706524), (28.643295, -16.568755), (28.690734, -16.56028), (28.718794, -16.56028), (28.73285, -16.55811), (28.741377, -16.550668), (28.761117, -16.532271), (28.769282, -16.515218), (28.808866, -16.486279), (28.822509, -16.470776), (28.829124, -16.434603), (28.833051, 
-16.426438), (28.857236, -16.388198), (28.857029, -16.36546), (28.840492, -16.323602), (28.836772, -16.306342), (28.840286, -16.284741), (28.86416, -16.231205), (28.847107, -16.202679), (28.852481, -16.162785), (28.8654, -16.121237), (28.870981, -16.087234), (28.868501, -16.08217), (28.86385, -16.076589), (28.859303, -16.069561), (28.857236, -16.060466), (28.860336, -16.049407), (28.874082, -16.028943), (28.877183, -16.022018), (28.898887, -15.995457), (28.932373, -15.963727), (28.946862, -15.957235), (28.951287, -15.955252), (28.972784, -15.951428), (29.018053, -15.950602), (29.042341, -15.946261), (29.055053, -15.934375), (29.076344, -15.895411), (29.086162, -15.884559), (29.102182, -15.870916), (29.121716, -15.859341), (29.141869, -15.854483), (29.150964, -15.848799), (29.186311, -15.812832), (29.406969, -15.714233), (29.422059, -15.711030000000001), (29.508462, -15.703588), (29.526239, -15.692839), (29.563446, -15.662144), (29.587217, -15.655736), (29.608559, -15.658422999999999), (29.62799, -15.663591), (29.648505, -15.666588), (29.672793, -15.663281), (29.73005, -15.644677), (29.773252, -15.638062), (29.814283, -15.619666), (29.837331, -15.614808), (29.881773, -15.618839), (29.967504, -15.641473), (30.010654, -15.646227)]), 6) ``` @@ -712,14 +679,10 @@ SELECT round(polygonPerimeterSpherical([(30.010654, -15.646227), (30.050238, -15 0.45539 ``` - - ### Входные параметры {#input-parameters-15} ### Возвращаемое значение {#returned-value-22} - - ## polygonsIntersectionCartesian {#polygonsintersectioncartesian} Вычисляет пересечение многоугольников. @@ -742,7 +705,6 @@ Polygons MultiPolygon - ## polygonAreaCartesian {#polygonareacartesian} Вычисляет площадь многоугольника @@ -765,7 +727,6 @@ Polygon Float64 - ## polygonPerimeterCartesian {#polygonperimetercartesian} Вычисляет периметр многоугольника. @@ -788,7 +749,6 @@ Polygon Float64 - ## polygonsUnionCartesian {#polygonsunioncartesian} Вычисляет объединение многоугольников. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md index e01a4704b49..57d6dee482c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md @@ -6,20 +6,14 @@ description: 'Документация по функциям для работы doc_type: 'reference' --- - - # Функции для работы с индексом S2 {#functions-for-working-with-s2-index} - - ## S2Index {#s2index} [S2](https://s2geometry.io/) — это система географического индексирования, в которой все географические данные представляются на поверхности сферы (аналогично глобусу). В библиотеке S2 точки представлены в виде индекса S2 — конкретного числа, которое кодирует точку на поверхности единичной сферы, в отличие от традиционных пар (широта, долгота). Чтобы получить индекс точки S2 для заданной точки в формате (широта, долгота), используйте функцию [geoToS2](#geotos2). Также можно использовать функцию [s2ToGeo](#s2togeo) для получения географических координат, соответствующих указанному индексу точки S2. - - ## geoToS2 {#geotos2} Возвращает индекс точки [S2](#s2index), соответствующий переданным координатам `(longitude, latitude)`. @@ -55,7 +49,6 @@ SELECT geoToS2(37.79506683, 55.71290588) AS s2Index; └─────────────────────┘ ``` - ## s2ToGeo {#s2togeo} Возвращает географические координаты `(longitude, latitude)`, соответствующие заданному индексу точки [S2](#s2index). @@ -92,7 +85,6 @@ SELECT s2ToGeo(4704772434919038107) AS s2Coodrinates; └──────────────────────────────────────┘ ``` - ## s2GetNeighbors {#s2getneighbors} Возвращает индексы соседних ячеек S2, соответствующие указанному [S2](#s2index). Каждая ячейка в системе S2 — это четырёхугольник, ограниченный четырьмя геодезическими линиями. Поэтому у каждой ячейки 4 соседа. 
@@ -127,7 +119,6 @@ SELECT s2GetNeighbors(5074766849661468672) AS s2Neighbors; └───────────────────────────────────────────────────────────────────────────────────┘ ``` - ## s2CellsIntersect {#s2cellsintersect} Определяет, пересекаются ли две указанные ячейки [S2](#s2index). @@ -163,7 +154,6 @@ SELECT s2CellsIntersect(9926595209846587392, 9926594385212866560) AS intersect; └───────────┘ ``` - ## s2CapContains {#s2capcontains} Определяет, содержит ли сферический сегмент (cap) точку S2. Сферический сегмент представляет собой часть сферы, отсечённую плоскостью. Он задаётся точкой на сфере и радиусом в градусах. @@ -201,7 +191,6 @@ SELECT s2CapContains(1157339245694594829, 1.0, 1157347770437378819) AS capContai └─────────────┘ ``` - ## s2CapUnion {#s2capunion} Определяет наименьшую сферическую шапку, которая содержит две заданные шапки. Сферическая шапка представляет собой часть сферы, отсечённую плоскостью. Она задаётся точкой на сфере и радиусом в градусах. @@ -238,7 +227,6 @@ SELECT s2CapUnion(3814912406305146967, 1.0, 1157347770437378819, 1.0) AS capUnio └────────────────────────────────────────┘ ``` - ## s2RectAdd {#s2rectadd} Увеличивает размер ограничивающего прямоугольника так, чтобы он включал заданную точку S2. В системе S2 прямоугольник представлен типом `S2Region`, называемым `S2LatLngRect`, который описывает прямоугольник в пространстве широты и долготы. @@ -276,7 +264,6 @@ SELECT s2RectAdd(5178914411069187297, 5177056748191934217, 5179056748191934217) └───────────────────────────────────────────┘ ``` - ## s2RectContains {#s2rectcontains} Определяет, содержит ли заданный прямоугольник точку S2. В системе S2 прямоугольник представлен типом `S2Region` под названием `S2LatLngRect`, который задаёт прямоугольник в широтно-долготном пространстве. 
@@ -314,7 +301,6 @@ SELECT s2RectContains(5179062030687166815, 5177056748191934217, 5177914411069187 └──────────────┘ ``` - ## s2RectUnion {#s2rectunion} Возвращает наименьший прямоугольник, содержащий объединение этого прямоугольника и указанного прямоугольника. В системе S2 прямоугольник представлен типом `S2Region` с названием `S2LatLngRect`, который описывает прямоугольник в пространстве широта–долгота. @@ -351,7 +337,6 @@ SELECT s2RectUnion(5178914411069187297, 5177056748191934217, 5179062030687166815 └───────────────────────────────────────────┘ ``` - ## s2RectIntersection {#s2rectintersection} Возвращает наименьший прямоугольник, содержащий пересечение этого прямоугольника с заданным прямоугольником. В системе S2 прямоугольник представляется типом `S2Region` — `S2LatLngRect`, который описывает прямоугольник в пространстве географических координат (широта/долгота). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md index a7887149e99..da6794842a1 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md @@ -6,12 +6,8 @@ title: 'Функции, реализующие оператор IN' doc_type: 'reference' --- - - # Функции для реализации оператора IN {#functions-for-implementing-the-in-operator} - - ## in, notIn, globalIn, globalNotIn {#in-notin-globalin-globalnotin} См. раздел [Операторы IN](/sql-reference/operators/in). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md index 9e0fcd99838..c6f81e08af8 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md @@ -6,30 +6,20 @@ title: 'Функции машинного обучения' doc_type: 'reference' --- - - # Функции машинного обучения {#machine-learning-functions} - - ## evalMLMethod {#evalmlmethod} Для предсказаний с использованием обученных регрессионных моделей используется функция `evalMLMethod`. См. ссылку в разделе `linearRegression`. - - ## stochasticLinearRegression {#stochasticlinearregression} Агрегатная функция [stochasticLinearRegression](/sql-reference/aggregate-functions/reference/stochasticlinearregression) реализует метод стохастического градиентного спуска с использованием линейной модели и функции потерь MSE. Использует `evalMLMethod` для предсказаний на новых данных. - - ## stochasticLogisticRegression {#stochasticlogisticregression} Агрегатная функция [stochasticLogisticRegression](/sql-reference/aggregate-functions/reference/stochasticlogisticregression) реализует метод стохастического градиентного спуска для задачи бинарной классификации. Использует `evalMLMethod` для предсказания по новым данным. - - ## naiveBayesClassifier {#naivebayesclassifier} Классифицирует входной текст с использованием модели наивного Байеса с n-граммами и сглаживанием Лапласа. Перед использованием модель должна быть настроена в ClickHouse. 
@@ -130,7 +120,6 @@ SELECT naiveBayesClassifier('language', 'Как дела?'); **Руководство по обучению модели** - **Формат файла** В человекочитаемом формате, при `n=1` и режиме `token`, модель может выглядеть следующим образом: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md index a1b6f973e00..dedb38968fa 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md @@ -6,14 +6,10 @@ title: 'Функции NumericIndexedVector' doc_type: 'reference' --- - - # NumericIndexedVector {#numericindexedvector} NumericIndexedVector — это абстрактная структура данных, которая инкапсулирует вектор и реализует агрегирующие и покомпонентные операции над векторами. В качестве формата хранения в ней используется Bit-Sliced Index. Теоретические основы и сценарии использования описаны в статье [Large-Scale Metric Computation in Online Controlled Experiment Platform](https://arxiv.org/pdf/2405.08411). - - ## BSI {#bit-sliced-index} В методе хранения BSI (Bit-Sliced Index) данные сохраняются в формате [Bit-Sliced Index](https://dl.acm.org/doi/abs/10.1145/253260.253268), а затем сжимаются с помощью [Roaring Bitmap](https://github.com/RoaringBitmap/RoaringBitmap). Операции агрегации и покомпонентные операции выполняются непосредственно над сжатыми данными, что может значительно повысить эффективность хранения и выполнения запросов. @@ -27,8 +23,6 @@ NumericIndexedVector — это абстрактная структура дан - Механизм Bit-Sliced Index преобразует значение в двоичное представление. Для типов с плавающей запятой используется фиксированно-точечное представление, что может привести к потере точности. 
Точность можно настраивать, задавая количество бит для дробной части; по умолчанию используется 24 бита, чего достаточно для большинства сценариев. Вы можете задать количество бит для целой и дробной части при создании NumericIndexedVector с помощью агрегатной функции groupNumericIndexedVector с суффиксом `-State`. - Для индексов возможны три состояния: ненулевое значение, нулевое значение и отсутствие. В NumericIndexedVector хранятся только ненулевые и нулевые значения. Кроме того, при покомпонентных операциях между двумя NumericIndexedVector значение отсутствующего индекса трактуется как 0. В сценарии деления результат также равен нулю, если делитель равен нулю. - - ## Создание объекта numericIndexedVector {#create-numeric-indexed-vector-object} Существует два способа создать эту структуру: первый — использовать агрегатную функцию `groupNumericIndexedVector` с суффиксом `-State`. @@ -37,8 +31,6 @@ NumericIndexedVector — это абстрактная структура дан Второй способ — построить её из значения типа Map с помощью `numericIndexedVectorBuild`. Функция `groupNumericIndexedVectorState` позволяет настроить количество целых и дробных битов через параметры, в то время как `numericIndexedVectorBuild` такой возможности не предоставляет. - - ## groupNumericIndexedVector {#group-numeric-indexed-vector} Создает `NumericIndexedVector` из двух столбцов данных и возвращает сумму всех значений как значение типа `Float64`. Если добавить суффикс `State`, возвращает объект `NumericIndexedVector`. @@ -107,7 +99,6 @@ SELECT groupNumericIndexedVectorStateIf('BSI', 32, 0)(UserID, PlayTime, day = '2 Подробнее см. 
https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } - {/*AUTOGENERATED_START*/ } ## numericIndexedVectorAllValueSum {#numericIndexedVectorAllValueSum} @@ -144,7 +135,6 @@ SELECT numericIndexedVectorAllValueSum(numericIndexedVectorBuild(mapFromArrays([ └─────┘ ``` - ## numericIndexedVectorBuild {#numericIndexedVectorBuild} Появилось в: v25.7 @@ -179,7 +169,6 @@ SELECT numericIndexedVectorBuild(mapFromArrays([1, 2, 3], [10, 20, 30])) AS res, └─────┴────────────────────────────────────────────────────────────┘ ``` - ## numericIndexedVectorCardinality {#numericIndexedVectorCardinality} Впервые представлена в: v25.7 @@ -214,7 +203,6 @@ SELECT numericIndexedVectorCardinality(numericIndexedVectorBuild(mapFromArrays([ └─────┘ ``` - ## numericIndexedVectorGetValue {#numericIndexedVectorGetValue} Появилась в версии: v25.7 @@ -250,7 +238,6 @@ SELECT numericIndexedVectorGetValue(numericIndexedVectorBuild(mapFromArrays([1, └─────┘ ``` - ## numericIndexedVectorPointwiseAdd {#numericIndexedVectorPointwiseAdd} Представлен в версии: v25.7 @@ -291,7 +278,6 @@ SELECT └───────────────────────┴──────────────────┘ ``` - ## numericIndexedVectorPointwiseDivide {#numericIndexedVectorPointwiseDivide} Введён в версии: v25.7 @@ -332,7 +318,6 @@ SELECT └─────────────┴─────────────────┘ ``` - ## numericIndexedVectorPointwiseEqual {#numericIndexedVectorPointwiseEqual} Введено в: v25.7 @@ -374,7 +359,6 @@ SELECT └───────┴───────┘ ``` - ## numericIndexedVectorPointwiseGreater {#numericIndexedVectorPointwiseGreater} Появилась в версии: v25.7 @@ -416,7 +400,6 @@ SELECT └───────────┴───────┘ ``` - ## numericIndexedVectorPointwiseGreaterEqual {#numericIndexedVectorPointwiseGreaterEqual} Введено в: v25.7 @@ -458,7 +441,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseLess {#numericIndexedVectorPointwiseLess} Впервые представлено в: v25.7 @@ -500,7 +482,6 @@ SELECT └───────────┴───────┘ ``` - ## 
numericIndexedVectorPointwiseLessEqual {#numericIndexedVectorPointwiseLessEqual} Добавлена в: v25.7 @@ -542,7 +523,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseMultiply {#numericIndexedVectorPointwiseMultiply} Введено в: v25.7 @@ -583,7 +563,6 @@ SELECT └───────────────┴──────────────────┘ ``` - ## numericIndexedVectorPointwiseNotEqual {#numericIndexedVectorPointwiseNotEqual} Впервые появился в: v25.7 @@ -625,7 +604,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseSubtract {#numericIndexedVectorPointwiseSubtract} Представлено в: v25.7 @@ -666,7 +644,6 @@ SELECT └────────────────────────┴─────────────────┘ ``` - ## numericIndexedVectorShortDebugString {#numericIndexedVectorShortDebugString} Появилась в версии: v25.7 @@ -702,7 +679,6 @@ SELECT numericIndexedVectorShortDebugString(numericIndexedVectorBuild(mapFromArr res: {"vector_type":"BSI","index_type":"char8_t","value_type":"char8_t","integer_bit_num":8,"fraction_bit_num":0,"zero_indexes_info":{"cardinality":"0"},"non_zero_indexes_info":{"total_cardinality":"3","all_value_sum":60,"number_of_bitmaps":"8","bitmap_info":{"cardinality":{"0":"0","1":"2","2":"2","3":"2","4":"2","5":"0","6":"0","7":"0"}}}} ``` - ## numericIndexedVectorToMap {#numericIndexedVectorToMap} Впервые появилась в версии v25.7 diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md index 2f631d712db..391fcbcf39b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md @@ -7,8 +7,6 @@ title: 'Обычные функции' doc_type: 'reference' --- - - # Обычные функции {#regular-functions} Существует по меньшей мере\* два типа функций — обычные функции (их просто называют «функциями») и агрегатные функции. Это совершенно разные понятия. 
Обычные функции работают так, как если бы они применялись к каждой строке отдельно (для каждой строки результат функции не зависит от остальных строк). Агрегатные функции накапливают набор значений из различных строк (то есть зависят от всего набора строк). @@ -19,26 +17,18 @@ doc_type: 'reference' Существует третий тип функций, к которому относится функция ['arrayJoin'](../functions/array-join.md). Отдельно также можно упомянуть [табличные функции](../table-functions/index.md). ::: - - ## Строгая типизация {#strong-typing} В отличие от стандартного SQL, ClickHouse использует строгую типизацию. Другими словами, он не выполняет неявные преобразования типов. Каждая функция работает с определённым набором типов. Это означает, что иногда необходимо использовать функции приведения типов. - - ## Устранение общих подвыражений {#common-subexpression-elimination} Все выражения в запросе, которые имеют одинаковое AST (одинаковое представление или один и тот же результат синтаксического разбора), считаются эквивалентными по значению. Такие выражения объединяются и выполняются один раз. Идентичные подзапросы также устраняются таким образом. - - ## Типы результатов {#types-of-results} Все функции возвращают одно значение как результат (не несколько значений и не отсутствие значения). Тип возвращаемого значения обычно определяется только типами аргументов, а не их значениями. Исключениями являются функция tupleElement (оператор a.N) и функция toFixedString. - - ## Константы {#constants} Для упрощения реализации некоторые функции могут работать только с константами для части своих аргументов. Например, правый аргумент оператора LIKE должен быть константой. @@ -48,8 +38,6 @@ doc_type: 'reference' Функции могут быть реализованы по-разному для константных и неконстантных аргументов (выполняется разный код). При этом результаты для константы и для обычного столбца, содержащего одно и то же значение во всех строках, должны совпадать. 
- - ## Обработка NULL {#null-processing} Функции ведут себя следующим образом: @@ -57,14 +45,10 @@ doc_type: 'reference' - Если хотя бы один из аргументов функции имеет значение `NULL`, результат функции также будет `NULL`. - Особое поведение, описанное отдельно в описании каждой функции. В исходном коде ClickHouse эти функции имеют `UseDefaultImplementationForNulls=false`. - - ## Константность {#constancy} Функции не могут изменять значения своих аргументов — любые изменения отражаются только в возвращаемом результате. Поэтому результат вычисления отдельных функций не зависит от порядка, в котором функции указаны в запросе. - - ## Функции высшего порядка {#higher-order-functions} ### Оператор `->` и функции lambda(params, expr) {#arrow-operator-and-lambda} @@ -82,7 +66,6 @@ str -> str != Referer Для некоторых функций первый аргумент (лямбда-функцию) можно опустить. В этом случае подразумевается тождественное отображение. - ## Пользовательские функции (UDFs) {#user-defined-functions-udfs} ClickHouse поддерживает пользовательские функции. См. [UDFs](../functions/udf.md). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md index 04a984529e4..44947a01799 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md @@ -10,7 +10,6 @@ keywords: ['временное окно'] import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # Функции временных окон {#time-window-functions} @@ -26,7 +25,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; См.: https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } - {/*AUTOGENERATED_START*/ } ## hop {#hop} @@ -66,7 +64,6 @@ SELECT hop(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) ('2024-07-03 00:00:00','2024-07-05 00:00:00') ``` - ## hopEnd {#hopEnd} Добавлена в версии: v22.1 @@ -104,7 +101,6 @@ SELECT hopEnd(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) 2024-07-05 00:00:00 ``` - ## hopStart {#hopStart} Впервые появилась в: v22.1 @@ -142,7 +138,6 @@ SELECT hopStart(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) 2024-07-03 00:00:00 ``` - ## tumble {#tumble} Добавлено в: v21.12 @@ -177,7 +172,6 @@ SELECT tumble(now(), toIntervalDay('1')) ('2024-07-04 00:00:00','2024-07-05 00:00:00') ``` - ## tumbleEnd {#tumbleEnd} Добавлена в версии: v22.1 @@ -212,7 +206,6 @@ SELECT tumbleEnd(now(), toIntervalDay('1')) 2024-07-05 00:00:00 ``` - ## tumbleStart {#tumbleStart} Появилась в версии v22.1 @@ -249,7 +242,6 @@ SELECT tumbleStart(now(), toIntervalDay('1')) {/*AUTOGENERATED_END*/ } - ## Связанные материалы {#related-content} - [Руководства по сценариям использования временных рядов](/use-cases/time-series) diff --git 
a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md index 8071f9df8d7..907f63e3fe5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md @@ -16,7 +16,6 @@ doc_type: 'reference' См.: https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } - {/*AUTOGENERATED_START*/ } ## flattenTuple {#flattenTuple} @@ -57,7 +56,6 @@ SELECT flattenTuple(t) FROM tab; └────────────────┘ ``` - ## tuple {#tuple} Впервые представлена в версии: v @@ -92,7 +90,6 @@ SELECT tuple(1, 2) (1,2) ``` - ## tupleConcat {#tupleConcat} Впервые представлена в: v23.8 @@ -125,7 +122,6 @@ SELECT tupleConcat((1, 2), ('a',), (true, false)) (1, 2, 'a', true, false) ``` - ## tupleDivide {#tupleDivide} Появилась в: v21.11 @@ -163,7 +159,6 @@ SELECT tupleDivide((1, 2), (2, 3)) (0.5, 0.6666666666666666) ``` - ## tupleDivideByNumber {#tupleDivideByNumber} Добавлена в версии: v21.11 @@ -201,7 +196,6 @@ SELECT tupleDivideByNumber((1, 2), 0.5) (2, 4) ``` - ## tupleElement {#tupleElement} Введена в версии: v1.1 @@ -277,7 +271,6 @@ SELECT (1, 'hello').2 Здравствуйте ``` - ## tupleHammingDistance {#tupleHammingDistance} Впервые представлена в: v21.1 @@ -341,7 +334,6 @@ SELECT tupleHammingDistance(wordShingleMinHash(string), wordShingleMinHashCaseIn 2 ``` - ## tupleIntDiv {#tupleIntDiv} Введено в: v23.8 @@ -387,7 +379,6 @@ SELECT tupleIntDiv((15, 10, 5), (5.5, 5.5, 5.5)) (2, 1, 0) ``` - ## tupleIntDivByNumber {#tupleIntDivByNumber} Впервые появилась в версии v23.8 @@ -433,7 +424,6 @@ SELECT tupleIntDivByNumber((15.2, 10.7, 5.5), 5.8) (2, 1, 0) ``` - ## tupleIntDivOrZero {#tupleIntDivOrZero} Добавлена в версии: v23.8 @@ -469,7 +459,6 @@ SELECT tupleIntDivOrZero((5, 10, 15), (0, 0, 0)) (0, 0, 
0) ``` - ## tupleIntDivOrZeroByNumber {#tupleIntDivOrZeroByNumber} Введена в версии: v23.8 @@ -515,7 +504,6 @@ SELECT tupleIntDivOrZeroByNumber((15, 10, 5), 0) (0, 0, 0) ``` - ## tupleMinus {#tupleMinus} Добавлена в версии v21.11 @@ -551,7 +539,6 @@ SELECT tupleMinus((1, 2), (2, 3)) (-1, -1) ``` - ## tupleModulo {#tupleModulo} Появилась в версии: v23.8 @@ -585,7 +572,6 @@ SELECT tupleModulo((15, 10, 5), (5, 3, 2)) (0, 1, 1) ``` - ## tupleModuloByNumber {#tupleModuloByNumber} Добавлена в: v23.8 @@ -619,7 +605,6 @@ SELECT tupleModuloByNumber((15, 10, 5), 2) (1, 0, 1) ``` - ## tupleMultiply {#tupleMultiply} Появилась в версии: v21.11 @@ -653,7 +638,6 @@ SELECT tupleMultiply((1, 2), (2, 3)) (2, 6) ``` - ## tupleMultiplyByNumber {#tupleMultiplyByNumber} Введена в версии: v21.11 @@ -687,7 +671,6 @@ SELECT tupleMultiplyByNumber((1, 2), -2.1) (-2.1, -4.2) ``` - ## tupleNames {#tupleNames} Впервые появилась в версии: v @@ -717,7 +700,6 @@ SELECT tupleNames(tuple(1 as a, 2 as b)) ['a','b'] ``` - ## tupleNegate {#tupleNegate} Добавлена в версии: v21.11 @@ -750,7 +732,6 @@ SELECT tupleNegate((1, 2)) (-1, -2) ``` - ## tuplePlus {#tuplePlus} Появился в версии: v21.11 @@ -786,7 +767,6 @@ SELECT tuplePlus((1, 2), (2, 3)) (3, 5) ``` - ## tupleToNameValuePairs {#tupleToNameValuePairs} Впервые появилась в версии: v21.9 @@ -833,7 +813,6 @@ SELECT tupleToNameValuePairs(tuple(3, 2, 1)) {/*АВТОСГЕНЕРИРОВАНО_КОНЕЦ*/ } - ## untuple {#untuple} Выполняет синтаксическую подстановку элементов [tuple](/sql-reference/data-types/tuple) на месте вызова. @@ -910,7 +889,6 @@ SELECT untuple((* EXCEPT (v2, v3),)) FROM kv; └─────┴────┴────┴────┴───────────┘ ``` - ## Функции расстояния {#distance-functions} Все поддерживаемые функции описаны в [документации по функциям расстояния](../../sql-reference/functions/distance-functions.md). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md index e6f3a5d4717..b8f74b7fb65 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md @@ -1,5 +1,5 @@ --- -description: 'Документация по функциям кортежей и Map' +description: 'Документация по функциям Map и Tuple' sidebar_label: 'Map' slug: /sql-reference/functions/tuple-map-functions title: 'Функции Map' @@ -8,7 +8,7 @@ doc_type: 'reference' ## map {#map} -Создаёт значение типа [Map(key, value)](../data-types/map.md) из пар «ключ–значение». +Создаёт значение типа [Map(key, value)](../data-types/map.md) из пар ключ–значение. **Синтаксис** @@ -18,12 +18,12 @@ map(key1, value1[, key2, value2, ...]) **Аргументы** -* `key_n` — Ключи элементов Map. Любой тип, поддерживаемый как тип ключа для [Map](../data-types/map.md). -* `value_n` — Значения элементов Map. Любой тип, поддерживаемый как тип значения для [Map](../data-types/map.md). +* `key_n` — ключи элементов `Map`. Любой тип, поддерживаемый как тип ключа для [Map](../data-types/map.md). +* `value_n` — значения элементов `Map`. Любой тип, поддерживаемый как тип значения для [Map](../data-types/map.md). **Возвращаемое значение** -* Map, содержащий пары `key:value`. [Map(key, value)](../data-types/map.md). +* Тип `Map`, содержащий пары `key:value`. [Map(key, value)](../data-types/map.md). **Примеры** @@ -45,7 +45,7 @@ SELECT map('key1', number, 'key2', number * 2) FROM numbers(3); ## mapFromArrays {#mapfromarrays} -Создает значение типа `Map` из массива (или `Map`) ключей и массива (или `Map`) значений. +Создает map из массива (или map) ключей и массива (или map) значений. Функция является удобной альтернативой синтаксису `CAST([...], 'Map(key_type, value_type)')`. 
Например, вместо того чтобы писать @@ -61,16 +61,16 @@ SELECT map('key1', number, 'key2', number * 2) FROM numbers(3); mapFromArrays(keys, values) ``` -Псевдоним: `MAP_FROM_ARRAYS(keys, values)` +Псевдоним: `MAP_FROM_ARRAYS(keys, values)` **Аргументы** -* `keys` — Массив или отображение ключей типа [Array](../data-types/array.md) или [Map](../data-types/map.md), из которых создаётся значение типа Map. Если `keys` — массив, допускаются типы `Array(Nullable(T))` или `Array(LowCardinality(Nullable(T)))` при условии, что он не содержит значения NULL. -* `values` — Массив или отображение значений типа [Array](../data-types/array.md) или [Map](../data-types/map.md), из которых создаётся значение типа Map. +* `keys` — массив или map ключей ([Array](../data-types/array.md) или [Map](../data-types/map.md)), из которых формируется результирующий map. Если `keys` — массив, допускаются типы `Array(Nullable(T))` или `Array(LowCardinality(Nullable(T)))` при условии, что он не содержит значения NULL. +* `values` — массив или map значений ([Array](../data-types/array.md) или [Map](../data-types/map.md)), из которых формируется результирующий map. **Возвращаемое значение** -* Отображение (Map) с ключами и значениями, сформированными из массива ключей и массива/отображения значений. +* Map, в котором ключи и значения сформированы из массива ключей и массива/map значений. **Пример** @@ -88,7 +88,7 @@ SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]) └───────────────────────────────────────────┘ ``` -`mapFromArrays` также принимает аргументы типа [Map](../data-types/map.md). Во время выполнения они преобразуются в массив кортежей. +`mapFromArrays` также принимает аргументы типа [Map](../data-types/map.md). Во время выполнения они приводятся к массиву из кортежей. 
```sql SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3)) @@ -117,7 +117,7 @@ SELECT mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3]) ## extractKeyValuePairs {#extractkeyvaluepairs} Преобразует строку с парами ключ-значение в [Map(String, String)](../data-types/map.md). -Разбор строки устойчив к «шуму» (например, в журналах/логах). +Парсинг устойчив к «шуму» (например, в файлах логов). Пары ключ-значение во входной строке состоят из ключа, за которым следует разделитель ключ-значение и значение. Пары ключ-значение разделяются разделителем пар. Ключи и значения могут быть заключены в кавычки. @@ -135,15 +135,15 @@ extractKeyValuePairs(data[, key_value_delimiter[, pair_delimiter[, quoting_chara **Аргументы** -* `data` — Строка, из которой извлекаются пары ключ–значение. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). -* `key_value_delimiter` — Одиночный символ, разделяющий ключи и значения. По умолчанию `:`. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). -* `pair_delimiters` — Набор символов, разделяющих пары. По умолчанию ` `, `,` и `;`. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). -* `quoting_character` — Одиночный символ, используемый в качестве символа-кавычки. По умолчанию `"`. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). -* `unexpected_quoting_character_strategy` — Стратегия обработки символов-кавычек в неожиданных местах во время фаз `read_key` и `read_value`. Возможные значения: "invalid", "accept" и "promote". `invalid` отбросит ключ/значение и вернёт состояние `WAITING_KEY`. `accept` будет трактовать символ как обычный. `promote` переведёт состояние в `READ_QUOTED_{KEY/VALUE}` и продолжит обработку со следующего символа. +* `data` — строка, из которой извлекаются пары ключ-значение. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). 
+* `key_value_delimiter` — одиночный символ, разделяющий ключи и значения. По умолчанию `:`. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). +* `pair_delimiters` — набор символов, разделяющих пары. По умолчанию ` `, `,` и `;`. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). +* `quoting_character` — одиночный символ, используемый в качестве кавычки. По умолчанию `"`. [String](../data-types/string.md) или [FixedString](../data-types/fixedstring.md). +* `unexpected_quoting_character_strategy` — стратегия обработки кавычек в неожиданных местах на этапах `read_key` и `read_value`. Возможные значения: `invalid`, `accept` и `promote`. `invalid` отбросит ключ/значение и вернётся в состояние `WAITING_KEY`. `accept` будет обрабатывать символ как обычный. `promote` перейдёт в состояние `READ_QUOTED_{KEY/VALUE}` и начнёт обработку со следующего символа. **Возвращаемые значения** -* Набор пар ключ–значение. Тип: [Map(String, String)](../data-types/map.md) +* Набор пар ключ-значение. 
Тип: [Map(String, String)](../data-types/map.md) **Примеры** @@ -161,7 +161,7 @@ SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') A └─────────────────────────────────────────────────────────────────────────┘ ``` -С одинарной кавычкой `'` в качестве символа кавычания: +С одинарной кавычкой `'` в качестве символа цитирования: ```sql SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') AS kv @@ -175,7 +175,7 @@ SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:bra └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -Примеры параметра unexpected_quoting_character_strategy: +Примеры unexpected_quoting_character_strategy: unexpected_quoting_character_strategy=invalid @@ -243,7 +243,7 @@ SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'PROMOTE') AS kv; └──────────────┘ ``` -Последовательности экранирования при отключённой поддержке экранирования: +Escape-последовательности при отсутствии поддержки: ```sql SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv @@ -257,7 +257,7 @@ SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv └────────────────────────┘ ``` -Чтобы восстановить строковые пары ключ–значение Map, сериализованные с помощью `toString`: +Чтобы восстановить пары ключ–значение строковой карты, сериализованные с помощью `toString`: ```sql SELECT @@ -270,7 +270,7 @@ FORMAT Vertical; Результат: ```response -Row 1: +Row 1: ────── m: {'John':'33','Paula':'31'} map_serialized: {'John':'33','Paula':'31'} @@ -281,21 +281,21 @@ map_restored: {'John':'33','Paula':'31'} То же, что и `extractKeyValuePairs`, но с поддержкой экранирования. -Поддерживаемые последовательности экранирования: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v` и `\0`. 
-Нестандартные последовательности экранирования возвращаются без изменений (включая обратную косую черту), за исключением следующих: -`\\`, `'`, `"`, «backtick» (обратная кавычка), `/`, `=` или управляющие символы ASCII (c <= 31). +Поддерживаемые escape-последовательности: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v` и `\0`. +Нестандартные escape-последовательности возвращаются как есть (включая обратный слэш), за исключением следующих: +`\\`, `'`, `"`, `backtick` (обратная кавычка), `/`, `=` или управляющие символы ASCII (c <= 31). -Эта функция подходит для случаев, когда предварительное и последующее экранирование не подходят. Например, рассмотрим следующую -входную строку: `a: "aaaa\"bbb"`. Ожидаемый результат: `a: aaaa\"bbbb`. +Эта функция подходит для случаев, когда предварительное и последующее экранирование неприменимы. Например, рассмотрим следующую +входную строку: `a: "aaaa\"bbb"`. Ожидаемый вывод: `a: aaaa\"bbbb`. -* Предварительное экранирование: при предварительном экранировании получится: `a: "aaaa"bbb"`, а затем `extractKeyValuePairs` вернёт: `a: aaaa` -* Последующее экранирование: `extractKeyValuePairs` вернёт `a: aaaa\`, и последующее экранирование сохранит это без изменений. +* Предварительное экранирование: при предварительном экранировании будет получен вывод: `a: "aaaa"bbb"`, а `extractKeyValuePairs` затем вернёт: `a: aaaa` +* Последующее экранирование: `extractKeyValuePairs` вернёт `a: aaaa\`, и последующее экранирование оставит строку без изменений. -Начальные последовательности экранирования будут пропускаться в ключах и будут считаться некорректными для значений. +Начальные escape-последовательности будут пропущены в ключах и будут считаться недопустимыми для значений. 
**Примеры** -Последовательности экранирования при включённой поддержке экранирования: +Escape-последовательности при включённой поддержке экранирования: ```sql SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv @@ -311,7 +311,7 @@ SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv ## mapAdd {#mapadd} -Собирает все ключи и суммирует соответствующие им значения. +Собирает все ключи и суммирует соответствующие значения. **Синтаксис** @@ -321,11 +321,11 @@ mapAdd(arg1, arg2 [, ...]) **Аргументы** -Аргументы — это [map](../data-types/map.md) или [tuple](/sql-reference/data-types/tuple) из двух [arrays](/sql-reference/data-types/array), где элементы в первом массиве представляют ключи, а второй массив содержит значения для каждого ключа. Все массивы ключей должны иметь один и тот же тип, а все массивы значений должны содержать элементы, которые приводятся к одному типу ([Int64](/sql-reference/data-types/int-uint#integer-ranges), [UInt64](/sql-reference/data-types/int-uint#integer-ranges) или [Float64](/sql-reference/data-types/float)). Общий приведённый тип используется как тип результирующего массива. +Аргументы представляют собой [map](../data-types/map.md) или [tuple](/sql-reference/data-types/tuple) из двух [arrays](/sql-reference/data-types/array), где элементы в первом массиве являются ключами, а второй массив содержит значения для каждого ключа. Все массивы ключей должны иметь одинаковый тип, а все массивы значений должны содержать элементы, которые могут быть приведены к одному типу ([Int64](/sql-reference/data-types/int-uint#integer-ranges), [UInt64](/sql-reference/data-types/int-uint#integer-ranges) или [Float64](/sql-reference/data-types/float)). Общий приведённый тип используется как тип для результирующего массива. 
**Возвращаемое значение** -* В зависимости от аргументов функция возвращает один [map](../data-types/map.md) или [tuple](/sql-reference/data-types/tuple), где первый массив содержит отсортированные ключи, а второй массив содержит значения. +* В зависимости от аргументов возвращается один [map](../data-types/map.md) или [tuple](/sql-reference/data-types/tuple), где первый массив содержит отсортированные ключи, а второй массив — значения. **Пример** @@ -343,7 +343,7 @@ SELECT mapAdd(map(1,1), map(1,1)); └──────────────────────────────┘ ``` -Запрос с использованием кортежа: +Запрос с кортежем: ```sql SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) AS res, toTypeName(res) AS type; @@ -364,16 +364,16 @@ SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) AS res, toTy **Синтаксис** ```sql -mapSubtract(Кортеж(Массив, Массив), Кортеж(Массив, Массив) [, ...]) +mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...]) ``` **Аргументы** -Аргументы — это [map](../data-types/map.md) или [tuple](/sql-reference/data-types/tuple) из двух [array](/sql-reference/data-types/array), где элементы первого массива являются ключами, а второй массив содержит значения для каждого ключа. Все массивы ключей должны иметь одинаковый тип, а все массивы значений должны содержать элементы, которые могут быть приведены к одному типу ([Int64](/sql-reference/data-types/int-uint#integer-ranges), [UInt64](/sql-reference/data-types/int-uint#integer-ranges) или [Float64](/sql-reference/data-types/float)). Общий приведённый тип используется как тип результирующего массива. +Аргументы — это [map](../data-types/map.md) или [tuple](/sql-reference/data-types/tuple) из двух [array](/sql-reference/data-types/array), где элементы первого массива представляют ключи, а второй массив содержит значения для каждого ключа. 
Все массивы ключей должны иметь один и тот же тип, а все массивы значений должны содержать элементы, которые могут быть приведены к одному типу ([Int64](/sql-reference/data-types/int-uint#integer-ranges), [UInt64](/sql-reference/data-types/int-uint#integer-ranges) или [Float64](/sql-reference/data-types/float)). Общий приведённый тип используется как тип для результирующего массива. **Возвращаемое значение** -* В зависимости от аргументов функция возвращает один [map](../data-types/map.md) или [tuple](/sql-reference/data-types/tuple), где первый массив содержит отсортированные ключи, а второй массив содержит значения. +* В зависимости от аргументов функция возвращает [map](../data-types/map.md) или [tuple](/sql-reference/data-types/tuple), где первый массив содержит отсортированные ключи, а второй массив содержит значения. **Пример** @@ -391,7 +391,7 @@ SELECT mapSubtract(map(1,1), map(1,1)); └───────────────────────────────────┘ ``` -Запрос с картой кортежей: +Запрос с отображением кортежей: ```sql SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) AS res, toTypeName(res) AS type; @@ -407,11 +407,11 @@ SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt3 ## mapPopulateSeries {#mappopulateseries} -Заполняет отсутствующие пары ключ-значение в `map` с целочисленными ключами. -Чтобы можно было расширять диапазон ключей за пределы наибольшего значения, можно задать максимальный ключ. -Более точно, функция возвращает `map`, в которой ключи образуют последовательность от наименьшего до наибольшего ключа (или до аргумента `max`, если он указан) с шагом 1 и соответствующими значениями. -Если значение для ключа не задано, в качестве результата используется значение по умолчанию. -Если ключи повторяются, с ключом связывается только первое значение (в порядке появления). +Заполняет отсутствующие пары ключ–значение в отображении (map) с целочисленными ключами. 
+Чтобы можно было расширять множество ключей за пределы наибольшего значения, можно задать максимальный ключ. +Более конкретно, функция возвращает отображение, в котором ключи образуют последовательность от наименьшего до наибольшего ключа (или аргумента `max`, если он указан) с шагом 1 и соответствующими значениями. +Если для ключа не задано значение, используется значение по умолчанию. +В случае повторяющихся ключей с каждым ключом связывается только первое значение (в порядке появления). **Синтаксис** @@ -420,13 +420,13 @@ mapPopulateSeries(map[, max]) mapPopulateSeries(keys, values[, max]) ``` -Для аргументов-массивов количество элементов в `keys` и `values` должно быть одинаковым для каждой строки. +Для аргументов-массивов количество элементов в `keys` и `values` должно совпадать для каждой строки. **Аргументы** -В качестве аргументов используется либо [Map](../data-types/map.md), либо две [Array](/sql-reference/data-types/array), где первый массив содержит ключи, а второй — значения для каждого ключа. +Аргументы — это [Map](../data-types/map.md) или два массива [Array](/sql-reference/data-types/array), где первый массив содержит ключи, а второй — значения для каждого ключа. -Массивы для отображения: +Отображаемые массивы: * `map` — Map с целочисленными ключами. [Map](../data-types/map.md). @@ -434,11 +434,11 @@ mapPopulateSeries(keys, values[, max]) * `keys` — Массив ключей. [Array](/sql-reference/data-types/array)([Int](/sql-reference/data-types/int-uint#integer-ranges)). * `values` — Массив значений. [Array](/sql-reference/data-types/array)([Int](/sql-reference/data-types/int-uint#integer-ranges)). -* `max` — Максимальное значение ключа. Необязательный аргумент. [Int8, Int16, Int32, Int64, Int128, Int256](/sql-reference/data-types/int-uint#integer-ranges). +* `max` — Максимальное значение ключа. Необязательный параметр. [Int8, Int16, Int32, Int64, Int128, Int256](/sql-reference/data-types/int-uint#integer-ranges). 
**Возвращаемое значение** -* В зависимости от аргументов возвращается [Map](../data-types/map.md) или [Tuple](/sql-reference/data-types/tuple) из двух [Array](/sql-reference/data-types/array): ключи в отсортированном порядке и значения, соответствующие этим ключам. +* В зависимости от аргументов — [Map](../data-types/map.md) или [Tuple](/sql-reference/data-types/tuple) из двух [Array](/sql-reference/data-types/array): ключи в отсортированном порядке и значения, соответствующие этим ключам. **Пример** @@ -456,7 +456,7 @@ SELECT mapPopulateSeries(map(1, 10, 5, 20), 6); └─────────────────────────────────────────┘ ``` -Запрос с сопоставлением массивов: +Запрос с сопоставленными массивами: ```sql SELECT mapPopulateSeries([1,2,4], [11,22,44], 5) AS res, toTypeName(res) AS type; @@ -472,10 +472,10 @@ SELECT mapPopulateSeries([1,2,4], [11,22,44], 5) AS res, toTypeName(res) AS type ## mapKeys {#mapkeys} -Возвращает ключи заданного `Map`. +Возвращает ключи заданной `Map`. -Эта функция может быть оптимизирована с помощью настройки [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns). -При включённой настройке функция читает только подколонку [keys](/sql-reference/data-types/map#reading-subcolumns-of-map) вместо всей `Map`. +Эту функцию можно оптимизировать с помощью настройки [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns). +При включённой настройке функция читает только подстолбец [keys](/sql-reference/data-types/map#reading-subcolumns-of-map) вместо всей `Map`. Запрос `SELECT mapKeys(m) FROM table` преобразуется в `SELECT m.keys FROM table`. **Синтаксис** @@ -486,11 +486,11 @@ mapKeys(map) **Аргументы** -* `map` — значение типа [Map](../data-types/map.md). +* `map` — отображение. [Map](../data-types/map.md). **Возвращаемое значение** -* Массив, содержащий все ключи из `map`. [Array](../data-types/array.md). +* Массив, содержащий все ключи из отображения `map`. 
[Array](../data-types/array.md). **Пример** @@ -515,7 +515,7 @@ SELECT mapKeys(a) FROM tab; ## mapContains {#mapcontains} -Возвращает, содержится ли заданный ключ в заданном отображении. +Возвращает, содержится ли заданный ключ в указанном отображении. **Синтаксис** @@ -527,7 +527,7 @@ mapContains(map, key) **Аргументы** -* `map` — отображение. [Map](../data-types/map.md). +* `map` — Map. [Map](../data-types/map.md). * `key` — ключ. Тип должен совпадать с типом ключа в `map`. **Возвращаемое значение** @@ -566,12 +566,12 @@ mapContainsKeyLike(map, pattern) **Аргументы** -* `map` — карта. [Map](../data-types/map.md). -* `pattern` — строковый шаблон для сопоставления. +* `map` — Map. [Map](../data-types/map.md). +* `pattern` - Строковый шаблон для сопоставления. **Возвращаемое значение** -* `1`, если `map` содержит ключ, соответствующий заданному шаблону, `0` — в противном случае. +* `1`, если `map` содержит `key`, соответствующий заданному шаблону, `0` — если не содержит. **Пример** @@ -596,7 +596,7 @@ SELECT mapContainsKeyLike(a, 'a%') FROM tab; ## mapExtractKeyLike {#mapextractkeylike} -Для `map` со строковыми ключами и шаблоном `LIKE` функция возвращает `map`, содержащую элементы, ключи которых соответствуют этому шаблону. +Для map со строковыми ключами и шаблоном `LIKE` эта функция возвращает map с элементами, ключи которых соответствуют заданному шаблону. **Синтаксис** @@ -606,12 +606,12 @@ mapExtractKeyLike(map, pattern) **Аргументы** -* `map` — значение типа [Map](../data-types/map.md). -* `pattern` - строковый шаблон для сопоставления. +* `map` — Map. [Map](../data-types/map.md). +* `pattern` - Строковый шаблон для сопоставления. **Возвращаемое значение** -* Map, содержащий элементы, ключ которых соответствует указанному шаблону. Если ни один элемент не соответствует шаблону, возвращается пустой Map. +* Map, содержащая элементы, ключи которых соответствуют указанному шаблону. Если ни один элемент не соответствует шаблону, возвращается пустая Map. 
**Пример**

@@ -636,7 +636,7 @@ SELECT mapExtractKeyLike(a, 'a%') FROM tab;

## mapValues {#mapvalues}

-Возвращает значения заданной карты.
+Возвращает значения указанной карты (Map).

Эта функция может быть оптимизирована с помощью настройки [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns). При включённой настройке функция считывает только подстолбец [values](/sql-reference/data-types/map#reading-subcolumns-of-map) вместо всей карты.

@@ -650,11 +650,11 @@ mapValues(map)

**Аргументы**

-* `map` — отображение. [Map](../data-types/map.md).
+* `map` — `Map`. [Map](../data-types/map.md).

**Возвращаемое значение**

-* Массив, содержащий все значения отображения `map`. [Array](../data-types/array.md).
+* Массив, содержащий все значения из `map`. [Array](../data-types/array.md).

**Пример**

@@ -679,7 +679,7 @@ SELECT mapValues(a) FROM tab;

## mapContainsValue {#mapcontainsvalue}

-Возвращает, содержится ли заданное значение в указанном отображении (map).
+Возвращает, содержится ли заданное значение в указанной карте.

**Синтаксис**

@@ -691,8 +691,8 @@ mapContainsValue(map, value)

**Аргументы**

-* `map` — Значение типа Map. [Map](../data-types/map.md).
-* `value` — Значение. Тип должен совпадать с типом значений в `map`.
+* `map` — Map. [Map](../data-types/map.md).
+* `value` — Значение. Тип должен совпадать с типом значения `map`.

**Возвращаемое значение**

@@ -725,17 +725,17 @@ SELECT mapContainsValue(a, '11') FROM tab;

**Синтаксис**

```sql
-mapСодержитЗначениеПоШаблону(map, pattern)
+mapContainsValueLike(map, pattern)
```

**Аргументы**

-* `map` — карта. См. [Map](../data-types/map.md).
-* `pattern` - строковый шаблон для сопоставления.
+* `map` — Map. [Map](../data-types/map.md).
+* `pattern` - Строковый шаблон для сопоставления.

**Возвращаемое значение**

-* `1`, если `map` содержит `value`, удовлетворяющее указанному шаблону, иначе `0`.
+* `1`, если `map` содержит `value`, соответствующее указанному шаблону, `0` в противном случае.
**Пример** @@ -760,7 +760,7 @@ SELECT mapContainsValueLike(a, 'a%') FROM tab; ## mapExtractValueLike {#mapextractvaluelike} -Для карты со строковыми значениями и шаблоном LIKE эта функция возвращает карту с элементами, значения которых соответствуют шаблону. +Получив map со строковыми значениями и шаблоном LIKE, функция возвращает map с элементами, чьи значения соответствуют этому шаблону. **Синтаксис** @@ -770,12 +770,12 @@ mapExtractValueLike(map, pattern) **Аргументы** -* `map` — значение типа Map. См. [Map](../data-types/map.md). -* `pattern` - строковый шаблон для сопоставления. +* `map` — Map. [Map](../data-types/map.md). +* `pattern` - Строковый шаблон для сопоставления. **Возвращаемое значение** -* Map, содержащий элементы, значения которых соответствуют указанному шаблону. Если ни один элемент не соответствует шаблону, возвращается пустой Map. +* Map, содержащая элементы, значения которых соответствуют указанному шаблону. Если ни один элемент не соответствует шаблону, возвращается пустая Map. **Пример** @@ -800,7 +800,7 @@ SELECT mapExtractValueLike(a, 'a%') FROM tab; ## mapApply {#mapapply} -Применяет функцию к каждому элементу карты (Map). +Применяет функцию к каждому элементу карты. **Синтаксис** @@ -810,12 +810,12 @@ mapApply(func, map) **Аргументы** -* `func` — [лямбда-функция](/sql-reference/functions/overview#higher-order-functions). +* `func` — [lambda-функция](/sql-reference/functions/overview#higher-order-functions). * `map` — [Map](../data-types/map.md). **Возвращаемое значение** -* Возвращает отображение, полученное из исходного отображения путём применения `func(map1[i], ..., mapN[i])` к каждому элементу. +* Возвращает объект Map, полученный из исходного объекта Map путём применения `func(map1[i], ..., mapN[i])` к каждому элементу. **Пример** @@ -842,7 +842,7 @@ FROM ## mapFilter {#mapfilter} -Фильтрует отображение (map), применяя функцию к каждому его элементу. +Фильтрует map, применяя функцию к каждому элементу карты. 
**Синтаксис** @@ -852,12 +852,12 @@ mapFilter(func, map) **Аргументы** -* `func` — [лямбда-функция](/sql-reference/functions/overview#higher-order-functions). +* `func` - [лямбда-функция](/sql-reference/functions/overview#higher-order-functions). * `map` — [Map](../data-types/map.md). **Возвращаемое значение** -* Возвращает объект типа Map, который содержит только те элементы `map`, для которых `func(map1[i], ..., mapN[i])` возвращает значение, отличное от 0. +* Возвращает map, содержащий только те элементы из `map`, для которых `func(map1[i], ..., mapN[i])` возвращает значение, не равное 0. **Пример** @@ -917,8 +917,8 @@ SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) AS map; ## mapConcat {#mapconcat} -Объединяет несколько отображений (map) на основе равенства их ключей. -Если элементы с одинаковым ключом присутствуют более чем в одном входном отображении, все элементы добавляются в результирующее отображение, но только первый из них доступен через оператор `[]`. +Объединяет несколько map на основе совпадения их ключей. +Если элементы с одинаковым ключом присутствуют более чем в одной входной map, все элементы добавляются в результирующую map, но только первый элемент доступен через оператор `[]`. **Синтаксис** @@ -932,7 +932,7 @@ mapConcat(maps) **Возвращаемое значение** -* Возвращает значение типа Map, полученное конкатенацией значений Map, переданных в качестве аргументов. +* Возвращает значение типа Map, полученное объединением карт, переданных в качестве аргументов. **Примеры** @@ -966,11 +966,11 @@ SELECT mapConcat(map('key1', 1, 'key2', 2), map('key1', 3)) AS map, map['key1']; ## mapExists([func,], map) {#mapexistsfunc-map} -Возвращает 1, если в `map` есть хотя бы одна пара ключ–значение, для которой `func(key, value)` возвращает значение, отличное от 0. В противном случае возвращает 0. +Возвращает 1, если в `map` есть хотя бы одна пара ключ-значение, для которой `func(key, value)` возвращает что-либо, отличное от 0. 
В противном случае возвращает 0. :::note -`mapExists` — это [функция высшего порядка](/sql-reference/functions/overview#higher-order-functions). -Ей можно передать лямбда-функцию в качестве первого аргумента. +`mapExists` — [функция высшего порядка](/sql-reference/functions/overview#higher-order-functions). +Вы можете передать ей лямбда-функцию в качестве первого аргумента. ::: **Пример** @@ -991,11 +991,11 @@ SELECT mapExists((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res ## mapAll([func,] map) {#mapallfunc-map} -Возвращает 1, если `func(key, value)` возвращает значение, отличное от 0, для всех пар ключ–значение в `map`. В противном случае возвращает 0. +Возвращает 1, если `func(key, value)` возвращает значение, отличное от 0, для всех пар «ключ–значение» в `map`. В противном случае возвращает 0. :::note -Учтите, что `mapAll` — это [функция высшего порядка](/sql-reference/functions/overview#higher-order-functions). -Ей можно передать лямбда-функцию в качестве первого аргумента. +Обратите внимание, что `mapAll` — это [функция высшего порядка](/sql-reference/functions/overview#higher-order-functions). +В качестве первого аргумента ей можно передать лямбда-функцию. ::: **Пример** @@ -1016,8 +1016,8 @@ SELECT mapAll((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res ## mapSort([func,], map) {#mapsortfunc-map} -Сортирует элементы `map` по возрастанию. -Если указана функция `func`, порядок сортировки определяется результатом применения этой функции к ключам и значениям `map`. +Сортирует элементы карты по возрастанию. +Если указана функция `func`, порядок сортировки определяется результатом применения `func` к ключам и значениям карты. **Примеры** @@ -1041,12 +1041,12 @@ SELECT mapSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map; └──────────────────────────────┘ ``` -Для получения дополнительных сведений см. [справочник](/sql-reference/functions/array-functions#arraySort) по функции `arraySort`. +Подробнее см. 
[справочник](/sql-reference/functions/array-functions#arraySort) по функции `arraySort`. ## mapPartialSort {#mappartialsort} -Сортирует элементы `map` по возрастанию с дополнительным аргументом `limit`, позволяющим выполнить частичную сортировку. -Если указана функция `func`, порядок сортировки определяется результатом применения `func` к ключам и значениям `map`. +Сортирует элементы карты в порядке возрастания с дополнительным аргументом `limit`, который позволяет выполнять частичную сортировку. +Если указана функция `func`, порядок сортировки определяется результатом применения функции `func` к ключам и значениям карты. **Синтаксис** @@ -1056,13 +1056,13 @@ mapPartialSort([func,] limit, map) **Аргументы** -* `func` – необязательная функция, применяемая к ключам и значениям `map`. [Lambda function](/sql-reference/functions/overview#higher-order-functions). -* `limit` – элементы в диапазоне [1..limit] сортируются. [(U)Int](../data-types/int-uint.md). -* `map` – отображение (map) для сортировки. [Map](../data-types/map.md). +* `func` – необязательная функция, применяемая к ключам и значениям отображения. [Lambda function](/sql-reference/functions/overview#higher-order-functions). +* `limit` – количество элементов, которые будут отсортированы (элементы с позициями в диапазоне [1..limit]). [(U)Int](../data-types/int-uint.md). +* `map` – отображение для сортировки. [Map](../data-types/map.md). **Возвращаемое значение** -* Частично отсортированное отображение (map). [Map](../data-types/map.md). +* Частично отсортированное отображение. [Map](../data-types/map.md). **Пример** @@ -1079,7 +1079,7 @@ SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)); ## mapReverseSort([func,], map) {#mapreversesortfunc-map} Сортирует элементы map в порядке убывания. -Если указана функция `func`, порядок сортировки определяется результатом функции `func`, применяемой к ключам и значениям map. 
+Если указана функция `func`, порядок сортировки определяется результатом её применения к ключам и значениям map. **Примеры** @@ -1103,12 +1103,12 @@ SELECT mapReverseSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map; └──────────────────────────────┘ ``` -Подробнее см. описание функции [arrayReverseSort](/sql-reference/functions/array-functions#arrayReverseSort). +Подробнее см. функцию [arrayReverseSort](/sql-reference/functions/array-functions#arrayReverseSort). ## mapPartialReverseSort {#mappartialreversesort} -Сортирует элементы map в порядке убывания, при этом дополнительный аргумент `limit` позволяет выполнить частичную сортировку. -Если указана функция `func`, порядок сортировки определяется результатом применения функции `func` к ключам и значениям map. +Сортирует элементы map в порядке убывания с дополнительным аргументом `limit`, позволяющим выполнять частичную сортировку. +Если указана функция `func`, порядок сортировки определяется результатом функции `func`, применённой к ключам и значениям map. **Синтаксис** @@ -1118,13 +1118,13 @@ mapPartialReverseSort([func,] limit, map) **Аргументы** -* `func` – Необязательная функция, применяемая к ключам и значениям карты. [Lambda function](/sql-reference/functions/overview#higher-order-functions). -* `limit` – Сортируются элементы с индексами в диапазоне [1..limit]. [(U)Int](../data-types/int-uint.md). -* `map` – Карта для сортировки. [Map](../data-types/map.md). +* `func` – необязательная функция, применяемая к ключам и значениям map. [Lambda function](/sql-reference/functions/overview#higher-order-functions). +* `limit` – сортируются элементы с индексами в диапазоне [1..limit]. [(U)Int](../data-types/int-uint.md). +* `map` – объект типа Map для сортировки. [Map](../data-types/map.md). **Возвращаемое значение** -* Частично отсортированная карта. [Map](../data-types/map.md). +* Частично отсортированный объект типа Map. [Map](../data-types/map.md). 
**Пример** @@ -1139,11 +1139,1073 @@ SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)); ``` {/* - Внутреннее содержимое тегов ниже заменяется на этапе сборки фреймворка документации - материалами, сгенерированными из system.functions. Пожалуйста, не изменяйте и не удаляйте эти теги. + Содержимое тегов ниже заменяется во время сборки фреймворка документации + документацией, сгенерированной на основе system.functions. Пожалуйста, не изменяйте и не удаляйте эти теги. См.: https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } {/*AUTOGENERATED_START*/ } +## extractKeyValuePairs {#extractKeyValuePairs} + +Введено в: v + +Извлекает пары ключ-значение из произвольной строки. Строка не обязана строго соответствовать формату пар ключ-значение; + +она может содержать «шум» (например, файлы журналов / логи). Формат пар ключ-значение, который нужно интерпретировать, задаётся через аргументы функции. + +Пара ключ-значение состоит из ключа, за которым следует `key_value_delimiter`, и значения. Также поддерживаются ключи и значения, заключённые в кавычки. Пары ключ-значение должны быть разделены разделителями пар ключ-значение. + +**Синтаксис** + +```sql + extractKeyValuePairs(data, [key_value_delimiter], [pair_delimiter], [quoting_character]) +``` + +**Аргументы** + +* `data` - Строка, из которой извлекаются пары ключ-значение. [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md). + * `key_value_delimiter` - Символ, используемый в качестве разделителя между ключом и значением. По умолчанию `:`. [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md). + * `pair_delimiters` - Набор символов, используемых в качестве разделителей между парами. По умолчанию `\space`, `,` и `;`. 
[String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md). + * `quoting_character` - Символ, используемый в качестве символа кавычек. По умолчанию `"`. [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md). + * `unexpected_quoting_character_strategy` - Стратегия обработки символов кавычек в неожиданных местах во время фаз `read_key` и `read_value`. Возможные значения: `invalid`, `accept` и `promote`. `invalid` отбросит ключ/значение и вернёт состояние `WAITING_KEY`. `accept` будет трактовать его как обычный символ. `promote` переведёт в состояние `READ_QUOTED_{KEY/VALUE}` и начнёт со следующего символа. Значение по умолчанию — `INVALID`. + +**Возвращаемые значения** + +* Извлечённые пары ключ-значение в Map(String, String). + +**Примеры** + +Запрос: + +**Простой пример** + +```sql + arthur :) select extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv + + SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv + + Query id: f9e0ca6f-3178-4ee2-aa2c-a5517abb9cee + + ┌─kv──────────────────────────────────────────────────────────────────────┐ + │ {'name':'neymar','age':'31','team':'psg','nationality':'brazil'} │ + └─────────────────────────────────────────────────────────────────────────┘ +``` + +**Одинарная кавычка как символ обрамления** + +```sql + arthur :) select extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv + + SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv + + Идентификатор запроса: 0e22bf6b-9844-414a-99dc-32bf647abd5e + + ┌─kv───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ + │ {'name':'neymar','age':'31','team':'psg','nationality':'brazil','last_key':'last_value'} 
│ + └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +Примеры unexpected_quoting_character_strategy: + +unexpected_quoting_character_strategy=invalid + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'INVALID') as kv; +``` + +```text + ┌─kv────────────────┐ + │ {'abc':'5'} │ + └───────────────────┘ +``` + +```sql + SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'INVALID') as kv; +``` + +```text + ┌─kv──┐ + │ {} │ + └─────┘ +``` + +unexpected_quoting_character_strategy=accept + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'ACCEPT') as kv; +``` + +```text + ┌─kv────────────────┐ + │ {'name"abc':'5'} │ + └───────────────────┘ +``` + +```sql + SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'ACCEPT') as kv; +``` + +```text + ┌─kv─────────────────┐ + │ {'name"abc"':'5'} │ + └────────────────────┘ +``` + +unexpected_quoting_character_strategy=promote + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'PROMOTE') as kv; +``` + +```text + ┌─kv──┐ + │ {} │ + └─────┘ +``` + +```sql + SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'PROMOTE') as kv; +``` + +```text + ┌─kv───────────┐ + │ {'abc':'5'} │ + └──────────────┘ +``` + +**Escape-последовательности при отключённой поддержке экранирования** + +```sql + arthur :) select extractKeyValuePairs('age:a\\x0A\\n\\0') as kv + + SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv + + Query id: e9fd26ee-b41f-4a11-b17f-25af6fd5d356 + + ┌─kv────────────────────┐ + │ {'age':'a\\x0A\\n\\0'} │ + └───────────────────────┘ +``` + +**Синтаксис** + +```sql +``` + +**Псевдонимы**: `str_to_map`, `mapFromString` + +**Аргументы** + +* Отсутствуют. 
+ +**Возвращаемое значение** + +**Примеры** + +## extractKeyValuePairsWithEscaping {#extractKeyValuePairsWithEscaping} + +Введена в: v + +Та же функция, что и `extractKeyValuePairs`, но с поддержкой экранирования. + +Поддерживаемые последовательности экранирования: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v` и `\0`. +Нестандартные последовательности экранирования возвращаются без изменений (включая обратный слеш), за исключением следующих: +`\\`, `'`, `"`, `backtick`, `/`, `=` или управляющие символы ASCII (`c <= 31`). + +Эта функция подходит для случаев, когда предварительное и последующее экранирование неприменимы. Например, рассмотрим следующую +входную строку: `a: "aaaa\"bbb"`. Ожидаемый результат: `a: aaaa\"bbbb`. + +* Предварительное экранирование: при предварительном экранировании результат будет: `a: "aaaa"bbb"`, а затем `extractKeyValuePairs` вернёт: `a: aaaa` + * Последующее экранирование: `extractKeyValuePairs` вернёт `a: aaaa\`, и последующее экранирование сохранит это без изменений. + +Начальные последовательности экранирования в ключах будут пропущены и будут считаться недопустимыми для значений. + +**Последовательности экранирования при включённой поддержке экранирования** + +```sql + arthur :) select extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') as kv + + SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv + + Query id: 44c114f0-5658-4c75-ab87-4574de3a1645 + + ┌─kv───────────────┐ + │ {'age':'a\n\n\0'} │ + └──────────────────┘ +``` + +**Синтаксис** + +```sql +``` + +**Аргументы** + +* Нет. + +**Возвращаемое значение** + +**Примеры** + +## map {#map} + +Добавлена в: v21.1 + +Создаёт значение типа `Map(key, value)` из пар ключ–значение. + +**Синтаксис** + +```sql +map(key1, value1[, key2, value2, ...]) +``` + +**Аргументы** + +* `key_n` — ключи элементов map. [`Any`](/sql-reference/data-types) +* `value_n` — значения элементов map. 
[`Any`](/sql-reference/data-types) + +**Возвращаемое значение** + +Возвращает map с парами ключ:значение. [`Map(Any, Any)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT map('key1', number, 'key2', number * 2) FROM numbers(3) +``` + +```response title=Response +{'key1':0,'key2':0} +{'key1':1,'key2':2} +{'key1':2,'key2':4} +``` + +## mapAdd {#mapAdd} + +Добавлена в версии: v20.7 + +Собирает все ключи и суммирует соответствующие значения. + +**Синтаксис** + +```sql +mapAdd(arg1[, arg2, ...]) +``` + +**Аргументы** + +* `arg1[, arg2, ...]` — значения типов `Map` или `Tuple` из двух массивов, в которых элементы первого массива представляют ключи, а второй массив содержит значения для каждого ключа. [`Map(K, V)`](/sql-reference/data-types/map) или [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**Возвращаемое значение** + +Возвращает значение типа `Map` или `Tuple`, где первый массив содержит отсортированные ключи, а второй массив — соответствующие им значения. [`Map(K, V)`](/sql-reference/data-types/map) или [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**Примеры** + +**Для типа Map** + +```sql title=Query +SELECT mapAdd(map(1, 1), map(1, 1)) +``` + +```response title=Response +{1:2} +``` + +**С использованием кортежа** + +```sql title=Query +SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) +``` + +```response title=Response +([1, 2], [2, 2]) +``` + +## mapAll {#mapAll} + +Введена в: v23.4 + +Проверяет, выполняется ли условие для всех пар ключ–значение в map. +`mapAll` — это функция высшего порядка. +В качестве первого аргумента ей можно передать лямбда-функцию. + +**Синтаксис** + +```sql +mapAll([func,] map) +``` + +**Аргументы** + +* `func` — лямбда-функция. [`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — проверяемая структура Map. 
[`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает `1`, если все пары ключ-значение удовлетворяют условию, иначе `0`. [`UInt8`](/sql-reference/data-types/int-uint) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapAll((k, v) -> v = 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +0 +``` + +## mapApply {#mapApply} + +Впервые представлена в: v22.3 + +Применяет функцию к каждому элементу map. + +**Синтаксис** + +```sql +mapApply(func, map) +``` + +**Аргументы** + +* `func` — лямбда-функция. [`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — map, к которому применяется функция. [`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает новый map, полученный из исходного map посредством применения `func` к каждому элементу. [`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapApply((k, v) -> (k, v * 2), map('k1', 1, 'k2', 2)) +``` + +```response title=Response +{'k1':2,'k2':4} +``` + +## mapConcat {#mapConcat} + +Появилась в версии: v23.4 + +Объединяет несколько значений типа `Map` по совпадающим ключам. +Если элементы с одинаковым ключом присутствуют более чем в одном входном значении `Map`, все элементы добавляются в результирующее значение `Map`, но через оператор [] доступен только первый. + +**Синтаксис** + +```sql +mapConcat(maps) +``` + +**Аргументы** + +* `maps` — произвольное количество отображений типа [`Map`](/sql-reference/data-types/map). + +**Возвращаемое значение** + +Возвращает `Map`, полученный объединением карт, переданных в качестве аргументов. 
[`Map`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapConcat(map('k1', 'v1'), map('k2', 'v2')) +``` + +```response title=Response +{'k1':'v1','k2':'v2'} +``` + +## mapContainsKey {#mapContainsKey} + +Введена в версии: v21.2 + +Определяет, содержится ли ключ в `map`. + +**Синтаксис** + +```sql +mapContains(map, key) +``` + +**Псевдонимы**: `mapContains` + +**Аргументы** + +* `map` — отображение, в котором выполняется поиск. [`Map(K, V)`](/sql-reference/data-types/map) +* `key` — ключ для поиска. Тип должен совпадать с типом ключа отображения. [`Any`](/sql-reference/data-types) + +**Возвращаемое значение** + +Возвращает 1, если отображение содержит ключ, и 0, если не содержит. [`UInt8`](/sql-reference/data-types/int-uint) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapContainsKey(map('k1', 'v1', 'k2', 'v2'), 'k1') +``` + +```response title=Response +1 +``` + +## mapContainsKeyLike {#mapContainsKeyLike} + +Добавлено в версии: v23.4 + +Проверяет, содержит ли `map` ключ, соответствующий заданному шаблону `LIKE`. + +**Синтаксис** + +```sql +mapContainsKeyLike(map, pattern) +``` + +**Аргументы** + +* `map` — Карта, в которой выполняется поиск. [`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — Шаблон для сопоставления с ключами. [`const String`](/sql-reference/data-types/string) + +**Возвращаемое значение** + +Возвращает `1`, если `map` содержит ключ, соответствующий `pattern`, иначе `0`. 
[`UInt8`](/sql-reference/data-types/int-uint) + +**Примеры** + +**Пример использования** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapContainsKeyLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapContainsKeyLike(a, 'a%')─┐ +│ 1 │ +│ 0 │ +└─────────────────────────────┘ +``` + +## mapContainsValue {#mapContainsValue} + +Впервые представлена в: v25.6 + +Определяет, содержится ли значение в отображении (map). + +**Синтаксис** + +```sql +mapContainsValue(map, value) +``` + +**Аргументы** + +* `map` — отображение, в котором выполняется поиск. [`Map(K, V)`](/sql-reference/data-types/map) +* `value` — значение, которое требуется найти. Тип должен совпадать с типом значений отображения. [`Any`](/sql-reference/data-types) + +**Возвращаемое значение** + +Возвращает `1`, если отображение содержит это значение, и `0` в противном случае. [`UInt8`](/sql-reference/data-types/int-uint) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapContainsValue(map('k1', 'v1', 'k2', 'v2'), 'v1') +``` + +```response title=Response +1 +``` + +## mapContainsValueLike {#mapContainsValueLike} + +Впервые появилась в версии: v25.5 + +Проверяет, содержит ли отображение (map) значение, соответствующее шаблону `LIKE`. + +**Синтаксис** + +```sql +mapContainsValueLike(map, pattern) +``` + +**Аргументы** + +* `map` — карта, в которой выполняется поиск. [`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — шаблон для сопоставления значений. [`const String`](/sql-reference/data-types/string) + +**Возвращаемое значение** + +Возвращает `1`, если `map` содержит значение, соответствующее `pattern`, иначе `0`. 
[`UInt8`](/sql-reference/data-types/int-uint) + +**Примеры** + +**Пример использования** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapContainsValueLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapContainsV⋯ke(a, 'a%')─┐ +│ 1 │ +│ 0 │ +└──────────────────────────┘ +``` + +## mapExists {#mapExists} + +Введена в версии: v23.4 + +Проверяет, выполняется ли условие хотя бы для одной пары ключ–значение в типе данных `Map`. +`mapExists` — это функция высшего порядка. +В качестве первого аргумента ей можно передать лямбда-функцию. + +**Синтаксис** + +```sql +mapExists([func,] map) +``` + +**Аргументы** + +* `func` — необязательный параметр. Лямбда-функция. [`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — отображение для проверки. [`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает `1`, если хотя бы одна пара ключ-значение удовлетворяет условию, иначе `0`. [`UInt8`](/sql-reference/data-types/int-uint) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapExists((k, v) -> v = 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +1 +``` + +## mapExtractKeyLike {#mapExtractKeyLike} + +Добавлена в версии v23.4 + +Для карты со строковыми ключами и шаблоном `LIKE` эта функция возвращает карту с элементами, ключи которых соответствуют шаблону. + +**Синтаксис** + +```sql +mapExtractKeyLike(map, pattern) +``` + +**Аргументы** + +* `map` — Карта, из которой выполняется извлечение. [`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — Шаблон для сопоставления с ключами карты. [`const String`](/sql-reference/data-types/string) + +**Возвращаемое значение** + +Возвращает карту, содержащую элементы, ключ которых соответствует указанному шаблону. 
Если ни один элемент не соответствует шаблону, возвращается пустая карта. [`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapExtractKeyLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapExtractKeyLike(a, 'a%')─┐ +│ {'abc':'abc'} │ +│ {} │ +└────────────────────────────┘ +``` + +## mapExtractValueLike {#mapExtractValueLike} + +Впервые появилась в: v25.5 + +Для заданного `map` со строковыми значениями и шаблоном `LIKE` эта функция возвращает `map` с элементами, значения которых соответствуют шаблону. + +**Синтаксис** + +```sql +mapExtractValueLike(map, pattern) +``` + +**Аргументы** + +* `map` — карта, из которой выполняется извлечение. [`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — шаблон для сопоставления значений. [`const String`](/sql-reference/data-types/string) + +**Возвращаемое значение** + +Возвращает карту, содержащую элементы, значение которых соответствует указанному шаблону. Если ни один элемент не соответствует шаблону, возвращается пустая карта. [`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapExtractValueLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapExtractValueLike(a, 'a%')─┐ +│ {'abc':'abc'} │ +│ {} │ +└──────────────────────────────┘ +``` + +## mapFilter {#mapFilter} + +Появилась в версии: v22.3 + +Фильтрует `map`, применяя функцию к каждому её элементу. + +**Синтаксис** + +```sql +mapFilter(func, map) +``` + +**Аргументы** + +* `func` — лямбда-функция. 
[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — отображение (map), которое нужно отфильтровать. [`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает отображение (map), содержащее только те элементы, для которых `func` возвращает значение, отличное от `0`. [`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapFilter((k, v) -> v > 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +{'k2':2} +``` + +## mapFromArrays {#mapFromArrays} + +Введена в версии: v23.3 + +Создаёт Map из массива или Map с ключами и массива или Map со значениями. +Функция является удобной альтернативой синтаксису `CAST([...], 'Map(key_type, value_type)')`. + +**Синтаксис** + +```sql +mapFromArrays(keys, values) +``` + +**Псевдонимы**: `MAP_FROM_ARRAYS` + +**Аргументы** + +* `keys` — Массив или `Map` с ключами, из которых создаётся отображение. [`Array`](/sql-reference/data-types/array) или [`Map`](/sql-reference/data-types/map) +* `values` — Массив или `Map` со значениями, из которых создаётся отображение. [`Array`](/sql-reference/data-types/array) или [`Map`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает отображение с ключами и значениями, построенными на основе массива ключей и массива/`Map` значений. [`Map`](/sql-reference/data-types/map) + +**Примеры** + +**Базовое использование** + +```sql title=Query +SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]) +``` + +```response title=Response +{'a':1,'b':2,'c':3} +``` + +**Для входных данных типа map** + +```sql title=Query +SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3)) +``` + +```response title=Response +{1:('a', 1), 2:('b', 2), 3:('c', 3)} +``` + +## mapKeys {#mapKeys} + +Добавлено в версии: v21.2 + +Возвращает ключи указанного столбца типа Map. 
+Эта функция может быть оптимизирована путём включения настройки [`optimize_functions_to_subcolumns`](/operations/settings/settings#optimize_functions_to_subcolumns). +При включённой настройке функция читает только подстолбец `keys` вместо всего столбца Map. +Запрос `SELECT mapKeys(m) FROM table` преобразуется в `SELECT m.keys FROM table`. + +**Синтаксис** + +```sql +mapKeys(map) +``` + +**Аргументы** + +* `map` — отображение, из которого извлекаются ключи. [`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает массив, содержащий все ключи отображения. [`Array(T)`](/sql-reference/data-types/array) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapKeys(map('k1', 'v1', 'k2', 'v2')) +``` + +```response title=Response +['k1','k2'] +``` + +## mapPartialReverseSort {#mapPartialReverseSort} + +Добавлена в версии: v23.4 + +Сортирует элементы map по убыванию с дополнительным аргументом `limit`, который позволяет выполнять частичную сортировку. +Если указана функция `func`, порядок сортировки определяется результатом применения функции `func` к ключам и значениям map. + +**Синтаксис** + +```sql +mapPartialReverseSort([func,] limit, map) +``` + +**Аргументы** + +* `func` — Необязательный параметр. Лямбда-функция. [`Лямбда-функция`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `limit` — Сортируются элементы в диапазоне `[1..limit]`. [`(U)Int*`](/sql-reference/data-types/int-uint) +* `map` — Отображение (map), которое требуется отсортировать. [`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает частично отсортированное отображение (map) по убыванию. 
[`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k1':3,'k3':2,'k2':1} +``` + +## mapPartialSort {#mapPartialSort} + +Введена в версии v23.4 + +Сортирует элементы map по возрастанию с дополнительным аргументом limit, который позволяет выполнять частичную сортировку. +Если указана функция func, порядок сортировки определяется результатом применения функции func к ключам и значениям map. + +**Синтаксис** + +```sql +mapPartialSort([func,] limit, map) +``` + +**Аргументы** + +* `func` — Необязательный аргумент. Лямбда-функция. [`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `limit` — Сортируются элементы в диапазоне `[1..limit]`. [`(U)Int*`](/sql-reference/data-types/int-uint) +* `map` — Отображение (map) для сортировки. [`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает частично отсортированное отображение. [`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k2':1,'k3':2,'k1':3} +``` + +## mapPopulateSeries {#mapPopulateSeries} + +Введена в: v20.10 + +Заполняет отсутствующие пары ключ-значение в `map` с целочисленными ключами. +Чтобы можно было продолжить последовательность ключей за пределы наибольшего значения, можно указать максимальный ключ. +Более точно, функция возвращает `map`, в котором ключи образуют последовательность от наименьшего до наибольшего ключа (или до аргумента `max`, если он указан) с шагом 1 и соответствующими значениями. +Если для ключа не задано значение, используется значение по умолчанию. +Если ключи повторяются, с ключом связывается только первое значение (в порядке появления). 
+ +**Синтаксис** + +```sql +mapPopulateSeries(map[, max]) | mapPopulateSeries(keys, values[, max]) +``` + +**Аргументы** + +* `map` — Map с целочисленными ключами. [`Map((U)Int*, V)`](/sql-reference/data-types/map) +* `keys` — Массив ключей. [`Array(T)`](/sql-reference/data-types/array) +* `values` — Массив значений. [`Array(T)`](/sql-reference/data-types/array) +* `max` — Необязательный параметр. Максимальное значение ключа. [`Int8`](/sql-reference/data-types/int-uint) или [`Int16`](/sql-reference/data-types/int-uint) или [`Int32`](/sql-reference/data-types/int-uint) или [`Int64`](/sql-reference/data-types/int-uint) или [`Int128`](/sql-reference/data-types/int-uint) или [`Int256`](/sql-reference/data-types/int-uint) + +**Возвращаемое значение** + +Возвращает Map или кортеж из двух массивов, в котором первый содержит ключи в отсортированном порядке, а второй — значения для соответствующих ключей. [`Map(K, V)`](/sql-reference/data-types/map) или [`Tuple(Array(UInt*), Array(Any))`](/sql-reference/data-types/tuple) + +**Примеры** + +**С типом Map** + +```sql title=Query +SELECT mapPopulateSeries(map(1, 10, 5, 20), 6) +``` + +```response title=Response +{1:10, 2:0, 3:0, 4:0, 5:20, 6:0} +``` + +**С сопоставленными массивами** + +```sql title=Query +SELECT mapPopulateSeries([1, 2, 4], [11, 22, 44], 5) +``` + +```response title=Response +([1, 2, 3, 4, 5], [11, 22, 0, 44, 0]) +``` + +## mapReverseSort {#mapReverseSort} + +Введена в версии: v23.4 + +Сортирует элементы `map` в порядке убывания. +Если указана функция `func`, порядок сортировки определяется результатом применения функции `func` к ключам и значениям `map`. + +**Синтаксис** + +```sql +mapReverseSort([func,] map) +``` + +**Аргументы** + +* `func` — необязательная лямбда‑функция. [`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — отображение для сортировки. 
[`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает отображение, отсортированное по убыванию. [`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapReverseSort((k, v) -> v, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k1':3,'k3':2,'k2':1} +``` + +## mapSort {#mapSort} + +Впервые добавлена в: v23.4 + +Сортирует элементы map по возрастанию. +Если указана функция func, порядок сортировки определяется результатом применения функции func к ключам и значениям map. + +**Синтаксис** + +```sql +mapSort([func,] map) +``` + +**Аргументы** + +* `func` — Необязательная лямбда-функция. [`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — Map для сортировки. [`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает Map, отсортированный по возрастанию. [`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapSort((k, v) -> v, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k2':1,'k3':2,'k1':3} +``` + +## mapSubtract {#mapSubtract} + +Появилась в версии v20.7 + +Собирает все ключи и вычитает соответствующие им значения. + +**Синтаксис** + +```sql +mapSubtract(arg1[, arg2, ...]) +``` + +**Аргументы** + +* `arg1[, arg2, ...]` — значения типа `Map` или кортежи из двух массивов, в которых элементы первого массива являются ключами, а второй массив содержит значения, соответствующие каждому ключу. [`Map(K, V)`](/sql-reference/data-types/map) или [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**Возвращаемое значение** + +Возвращает одно значение типа Map или кортеж, где первый массив содержит отсортированные ключи, а второй массив — соответствующие им значения. 
[`Map(K, V)`](/sql-reference/data-types/map) или [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**Примеры** + +**С типом Map** + +```sql title=Query +SELECT mapSubtract(map(1, 1), map(1, 1)) +``` + +```response title=Response +{1:0} +``` + +**С отображением с кортежами в качестве ключей** + +```sql title=Query +SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) +``` + +```response title=Response +([1, 2], [-1, 0]) +``` + +## mapUpdate {#mapUpdate} + +Впервые появилась в: v22.3 + +Для двух `map` возвращает первую `map`, в которой значения заменены на значения из второй `map` для соответствующих ключей. + +**Синтаксис** + +```sql +mapUpdate(map1, map2) +``` + +**Аргументы** + +* `map1` — Отображение, которое нужно обновить. [`Map(K, V)`](/sql-reference/data-types/map) +* `map2` — Отображение, используемое для обновления. [`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает `map1`, в котором значения для соответствующих ключей обновлены значениями из `map2`. [`Map(K, V)`](/sql-reference/data-types/map) + +**Примеры** + +**Базовое использование** + +```sql title=Query +SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) +``` + +```response title=Response +{'key3':0,'key1':10,'key2':10} +``` + +## mapValues {#mapValues} + +Введена в версии: v21.2 + +Возвращает значения заданной карты. +Эту функцию можно оптимизировать, включив настройку [`optimize_functions_to_subcolumns`](/operations/settings/settings#optimize_functions_to_subcolumns). +При включённой настройке функция читает только подстолбец `values` вместо всей карты. +Запрос `SELECT mapValues(m) FROM table` преобразуется в `SELECT m.values FROM table`. + +**Синтаксис** + +```sql +mapValues(map) +``` + +**Аргументы** + +* `map` — отображение, из которого извлекаются значения. 
[`Map(K, V)`](/sql-reference/data-types/map) + +**Возвращаемое значение** + +Возвращает массив, содержащий все значения из отображения. [`Array(T)`](/sql-reference/data-types/array) + +**Примеры** + +**Пример использования** + +```sql title=Query +SELECT mapValues(map('k1', 'v1', 'k2', 'v2')) +``` + +```response title=Response +['v1','v2'] +``` + {/*AUTOGENERATED_END*/ } diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md index f996c531469..c9dd3e9ec74 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md @@ -6,12 +6,8 @@ title: 'Функции преобразования типов данных' doc_type: 'reference' --- - - # Функции преобразования типов {#type-conversion-functions} - - ## Общие проблемы при преобразовании данных {#common-issues-with-data-conversion} ClickHouse в целом использует [то же поведение, что и программы на C++](https://en.cppreference.com/w/cpp/language/implicit_conversion). @@ -54,7 +50,6 @@ SETTINGS cast_keep_nullable = 1 └──────────────────┴─────────────────────┴──────────────────┘ ``` - ## Примечания к функциям `toString` {#to-string-functions} Семейство функций `toString` позволяет выполнять преобразования между числами, строками (но не фиксированными строками), датами и датами со временем. @@ -65,8 +60,6 @@ SETTINGS cast_keep_nullable = 1 - При преобразовании дат со временем в числа или наоборот дате со временем соответствует количество секунд, прошедших с начала эпохи Unix. - Функция `toString` с аргументом типа `DateTime` может принимать второй аргумент типа `String`, содержащий имя часового пояса, например: `Europe/Amsterdam`. В этом случае время форматируется в соответствии с указанным часовым поясом. 
- - ## Примечания о функциях `toDate`/`toDateTime` {#to-date-and-date-time-functions} Форматы даты и даты со временем для функций `toDate`/`toDateTime` определены следующим образом: @@ -116,7 +109,6 @@ LIMIT 10 См. также функцию [`toUnixTimestamp`](#toUnixTimestamp). - ## toBool {#tobool} Преобразует входное значение в значение типа [`Bool`](../data-types/boolean.md). В случае ошибки вызывает исключение. @@ -167,7 +159,6 @@ toBool('false'): false toBool('FALSE'): false ``` - ## toInt8 {#toint8} Преобразует входное значение в значение типа [`Int8`](../data-types/int-uint.md). В случае ошибки выбрасывает исключение. @@ -234,7 +225,6 @@ toInt8('-8'): -8 * [`toInt8OrNull`](#toInt8OrNull). * [`toInt8OrDefault`](#toint8ordefault). - ## toInt8OrZero {#toint8orzero} Как и функция [`toInt8`](#toint8), эта функция преобразует входное значение в значение типа [Int8](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -297,7 +287,6 @@ toInt8OrZero('abc'): 0 * [`toInt8OrNull`](#toInt8OrNull). * [`toInt8OrDefault`](#toint8ordefault). - ## toInt8OrNull {#toInt8OrNull} Подобно функции [`toInt8`](#toint8), эта функция преобразует входное значение к типу [Int8](../data-types/int-uint.md), но в случае ошибки возвращает `NULL`. @@ -360,7 +349,6 @@ toInt8OrNull('abc'): ᴺᵁᴸᴸ * [`toInt8OrZero`](#toint8orzero). * [`toInt8OrDefault`](#toint8ordefault). - ## toInt8OrDefault {#toint8ordefault} Как и [`toInt8`](#toint8), эта функция преобразует входное значение в значение типа [Int8](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -428,7 +416,6 @@ toInt8OrDefault('abc', CAST('-1', 'Int8')): -1 * [`toInt8OrZero`](#toint8orzero). * [`toInt8OrNull`](#toInt8OrNull). - ## toInt16 {#toint16} Преобразует входное значение в значение типа [`Int16`](../data-types/int-uint.md). В случае ошибки выбрасывает исключение. @@ -495,7 +482,6 @@ toInt16('-16'): -16 * [`toInt16OrNull`](#toint16ornull). * [`toInt16OrDefault`](#toint16ordefault). 
- ## toInt16OrZero {#toint16orzero} Аналогично функции [`toInt16`](#toint16), эта функция преобразует входное значение в значение типа [Int16](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -558,7 +544,6 @@ toInt16OrZero('abc'): 0 * [`toInt16OrNull`](#toint16ornull). * [`toInt16OrDefault`](#toint16ordefault). - ## toInt16OrNull {#toint16ornull} Как и [`toInt16`](#toint16), эта функция преобразует входное значение в значение типа [Int16](../data-types/int-uint.md), но при ошибке возвращает `NULL`. @@ -621,7 +606,6 @@ toInt16OrNull('abc'): ᴺᵁᴸᴸ * [`toInt16OrZero`](#toint16orzero). * [`toInt16OrDefault`](#toint16ordefault). - ## toInt16OrDefault {#toint16ordefault} Подобно функции [`toInt16`](#toint16), эта функция преобразует входное значение в значение типа [Int16](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -689,7 +673,6 @@ toInt16OrDefault('abc', CAST('-1', 'Int16')): -1 * [`toInt16OrZero`](#toint16orzero). * [`toInt16OrNull`](#toint16ornull). - ## toInt32 {#toint32} Преобразует входное значение в значение типа [`Int32`](../data-types/int-uint.md). В случае ошибки вызывает исключение. @@ -756,7 +739,6 @@ toInt32('-32'): -32 * [`toInt32OrNull`](#toint32ornull). * [`toInt32OrDefault`](#toint32ordefault). - ## toInt32OrZero {#toint32orzero} Подобно функции [`toInt32`](#toint32), эта функция преобразует входное значение к типу [Int32](../data-types/int-uint.md), но возвращает `0` в случае ошибки. @@ -819,7 +801,6 @@ toInt32OrZero('abc'): 0 * [`toInt32OrNull`](#toint32ornull). * [`toInt32OrDefault`](#toint32ordefault). - ## toInt32OrNull {#toint32ornull} Аналогично функции [`toInt32`](#toint32), эта функция преобразует входное значение в значение типа [Int32](../data-types/int-uint.md), но в случае ошибки возвращает `NULL`. @@ -882,7 +863,6 @@ toInt32OrNull('abc'): ᴺᵁᴸᴸ * [`toInt32OrZero`](#toint32orzero). * [`toInt32OrDefault`](#toint32ordefault). 
- ## toInt32OrDefault {#toint32ordefault} Подобно функции [`toInt32`](#toint32), эта функция преобразует входное значение в значение типа [Int32](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -950,7 +930,6 @@ toInt32OrDefault('abc', CAST('-1', 'Int32')): -1 * [`toInt32OrZero`](#toint32orzero). * [`toInt32OrNull`](#toint32ornull). - ## toInt64 {#toint64} Преобразует входное значение в значение типа [`Int64`](../data-types/int-uint.md). В случае ошибки выбрасывает исключение. @@ -1017,7 +996,6 @@ toInt64('-64'): -64 * [`toInt64OrNull`](#toint64ornull). * [`toInt64OrDefault`](#toint64ordefault). - ## toInt64OrZero {#toint64orzero} Как и [`toInt64`](#toint64), эта функция преобразует входное значение в значение типа [Int64](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -1080,7 +1058,6 @@ toInt64OrZero('abc'): 0 * [`toInt64OrNull`](#toint64ornull). * [`toInt64OrDefault`](#toint64ordefault). - ## toInt64OrNull {#toint64ornull} Аналогично функции [`toInt64`](#toint64), эта функция преобразует входное значение в значение типа [Int64](../data-types/int-uint.md), но в случае ошибки возвращает `NULL`. @@ -1143,7 +1120,6 @@ toInt64OrNull('abc'): ᴺᵁᴸᴸ * [`toInt64OrZero`](#toint64orzero). * [`toInt64OrDefault`](#toint64ordefault). - ## toInt64OrDefault {#toint64ordefault} Как и [`toInt64`](#toint64), эта функция преобразует входное значение в значение типа [Int64](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -1211,7 +1187,6 @@ toInt64OrDefault('abc', CAST('-1', 'Int64')): -1 * [`toInt64OrZero`](#toint64orzero). * [`toInt64OrNull`](#toint64ornull). - ## toInt128 {#toint128} Преобразует входное значение в значение типа [`Int128`](../data-types/int-uint.md). В случае ошибки выбрасывает исключение. @@ -1277,7 +1252,6 @@ toInt128('-128'): -128 * [`toInt128OrNull`](#toint128ornull). * [`toInt128OrDefault`](#toint128ordefault). 
- ## toInt128OrZero {#toint128orzero} Подобно функции [`toInt128`](#toint128), преобразует входное значение в значение типа [Int128](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -1340,7 +1314,6 @@ toInt128OrZero('abc'): 0 * [`toInt128OrNull`](#toint128ornull). * [`toInt128OrDefault`](#toint128ordefault). - ## toInt128OrNull {#toint128ornull} Как и [`toInt128`](#toint128), эта функция преобразует входное значение в значение типа [Int128](../data-types/int-uint.md), но возвращает `NULL` в случае ошибки. @@ -1403,7 +1376,6 @@ toInt128OrNull('abc'): ᴺᵁᴸᴸ * [`toInt128OrZero`](#toint128orzero). * [`toInt128OrDefault`](#toint128ordefault). - ## toInt128OrDefault {#toint128ordefault} Как и [`toInt128`](#toint128), эта функция преобразует входное значение в значение типа [Int128](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -1472,7 +1444,6 @@ toInt128OrDefault('abc', CAST('-1', 'Int128')): -1 * [`toInt128OrZero`](#toint128orzero). * [`toInt128OrNull`](#toint128ornull). - ## toInt256 {#toint256} Преобразует входное значение в значение типа [`Int256`](../data-types/int-uint.md). В случае ошибки генерирует исключение. @@ -1538,7 +1509,6 @@ toInt256('-256'): -256 * [`toInt256OrNull`](#toint256ornull). * [`toInt256OrDefault`](#toint256ordefault). - ## toInt256OrZero {#toint256orzero} Как и [`toInt256`](#toint256), эта функция преобразует входное значение в значение типа [Int256](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -1601,7 +1571,6 @@ toInt256OrZero('abc'): 0 * [`toInt256OrNull`](#toint256ornull). * [`toInt256OrDefault`](#toint256ordefault). - ## toInt256OrNull {#toint256ornull} Подобно функции [`toInt256`](#toint256), эта функция преобразует входное значение в значение типа [Int256](../data-types/int-uint.md), но возвращает `NULL` в случае ошибки. @@ -1664,7 +1633,6 @@ toInt256OrNull('abc'): ᴺᵁᴸᴸ * [`toInt256OrZero`](#toint256orzero). * [`toInt256OrDefault`](#toint256ordefault). 
- ## toInt256OrDefault {#toint256ordefault} Как и [`toInt256`](#toint256), эта функция преобразует входное значение в значение типа [Int256](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -1732,7 +1700,6 @@ toInt256OrDefault('abc', CAST('-1', 'Int256')): -1 * [`toInt256OrZero`](#toint256orzero). * [`toInt256OrNull`](#toint256ornull). - ## toUInt8 {#touint8} Преобразует входное значение в значение типа [`UInt8`](../data-types/int-uint.md). В случае ошибки выбрасывает исключение. @@ -1799,7 +1766,6 @@ toUInt8('8'): 8 * [`toUInt8OrNull`](#touint8ornull). * [`toUInt8OrDefault`](#touint8ordefault). - ## toUInt8OrZero {#touint8orzero} Как и [`toUInt8`](#touint8), эта функция преобразует входное значение в значение типа [UInt8](../data-types/int-uint.md), но при ошибке возвращает `0`. @@ -1862,7 +1828,6 @@ toUInt8OrZero('abc'): 0 * [`toUInt8OrNull`](#touint8ornull). * [`toUInt8OrDefault`](#touint8ordefault). - ## toUInt8OrNull {#touint8ornull} Подобно функции [`toUInt8`](#touint8), эта функция преобразует входное значение в тип [UInt8](../data-types/int-uint.md), но в случае ошибки возвращает `NULL`. @@ -1925,7 +1890,6 @@ toUInt8OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt8OrZero`](#touint8orzero). * [`toUInt8OrDefault`](#touint8ordefault). - ## toUInt8OrDefault {#touint8ordefault} Как и функция [`toUInt8`](#touint8), эта функция преобразует входное значение в значение типа [UInt8](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -1993,7 +1957,6 @@ toUInt8OrDefault('abc', CAST('0', 'UInt8')): 0 * [`toUInt8OrZero`](#touint8orzero). * [`toUInt8OrNull`](#touint8ornull). - ## toUInt16 {#touint16} Преобразует входное значение в значение типа [`UInt16`](../data-types/int-uint.md). В случае ошибки выбрасывает исключение. @@ -2060,7 +2023,6 @@ toUInt16('16'): 16 * [`toUInt16OrNull`](#touint16ornull). * [`toUInt16OrDefault`](#touint16ordefault). 
- ## toUInt16OrZero {#touint16orzero} Аналогично функции [`toUInt16`](#touint16), эта функция преобразует входное значение в значение типа [UInt16](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -2123,7 +2085,6 @@ toUInt16OrZero('abc'): 0 * [`toUInt16OrNull`](#touint16ornull). * [`toUInt16OrDefault`](#touint16ordefault). - ## toUInt16OrNull {#touint16ornull} Аналогично функции [`toUInt16`](#touint16), эта функция преобразует входное значение в значение типа [UInt16](../data-types/int-uint.md), но в случае ошибки возвращает `NULL`. @@ -2186,7 +2147,6 @@ toUInt16OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt16OrZero`](#touint16orzero). * [`toUInt16OrDefault`](#touint16ordefault). - ## toUInt16OrDefault {#touint16ordefault} Подобно [`toUInt16`](#touint16), эта функция преобразует входное значение в значение типа [UInt16](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -2254,7 +2214,6 @@ toUInt16OrDefault('abc', CAST('0', 'UInt16')): 0 * [`toUInt16OrZero`](#touint16orzero). * [`toUInt16OrNull`](#touint16ornull). - ## toUInt32 {#touint32} Преобразует входное значение в значение типа [`UInt32`](../data-types/int-uint.md). В случае ошибки выбрасывает исключение. @@ -2321,7 +2280,6 @@ toUInt32('32'): 32 * [`toUInt32OrNull`](#touint32ornull). * [`toUInt32OrDefault`](#touint32ordefault). - ## toUInt32OrZero {#touint32orzero} Аналогично функции [`toUInt32`](#touint32), преобразует входное значение в тип [UInt32](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -2385,7 +2343,6 @@ toUInt32OrZero('abc'): 0 * [`toUInt32OrNull`](#touint32ornull). * [`toUInt32OrDefault`](#touint32ordefault). - ## toUInt32OrNull {#touint32ornull} Как и [`toUInt32`](#touint32), эта функция преобразует входное значение в значение типа [UInt32](../data-types/int-uint.md), но в случае ошибки возвращает `NULL`. @@ -2449,7 +2406,6 @@ toUInt32OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt32OrZero`](#touint32orzero). * [`toUInt32OrDefault`](#touint32ordefault). 
- ## toUInt32OrDefault {#touint32ordefault} Как и функция [`toUInt32`](#touint32), эта функция преобразует входное значение в значение типа [UInt32](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -2517,7 +2473,6 @@ toUInt32OrDefault('abc', CAST('0', 'UInt32')): 0 * [`toUInt32OrZero`](#touint32orzero). * [`toUInt32OrNull`](#touint32ornull). - ## toUInt64 {#touint64} Преобразует входное значение в значение типа [`UInt64`](../data-types/int-uint.md). В случае ошибки вызывает исключение. @@ -2584,7 +2539,6 @@ toUInt64('64'): 64 * [`toUInt64OrNull`](#touint64ornull). * [`toUInt64OrDefault`](#touint64ordefault). - ## toUInt64OrZero {#touint64orzero} Как и [`toUInt64`](#touint64), эта функция преобразует входное значение в значение типа [UInt64](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -2647,7 +2601,6 @@ toUInt64OrZero('abc'): 0 * [`toUInt64OrNull`](#touint64ornull). * [`toUInt64OrDefault`](#touint64ordefault). - ## toUInt64OrNull {#touint64ornull} Как и функция [`toUInt64`](#touint64), эта функция преобразует входное значение в значение типа [UInt64](../data-types/int-uint.md), но в случае ошибки возвращает `NULL`. @@ -2710,7 +2663,6 @@ toUInt64OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt64OrZero`](#touint64orzero). * [`toUInt64OrDefault`](#touint64ordefault). - ## toUInt64OrDefault {#touint64ordefault} Как и [`toUInt64`](#touint64), эта функция преобразует входное значение к типу [UInt64](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -2778,7 +2730,6 @@ toUInt64OrDefault('abc', CAST('0', 'UInt64')): 0 * [`toUInt64OrZero`](#touint64orzero). * [`toUInt64OrNull`](#touint64ornull). - ## toUInt128 {#touint128} Преобразует входное значение в значение типа [`UInt128`](../data-types/int-uint.md). В случае ошибки вызывает исключение. @@ -2844,7 +2795,6 @@ toUInt128('128'): 128 * [`toUInt128OrNull`](#touint128ornull). * [`toUInt128OrDefault`](#touint128ordefault). 
- ## toUInt128OrZero {#touint128orzero} Подобно функции [`toUInt128`](#touint128), эта функция преобразует входное значение к типу [UInt128](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -2907,7 +2857,6 @@ toUInt128OrZero('abc'): 0 * [`toUInt128OrNull`](#touint128ornull). * [`toUInt128OrDefault`](#touint128ordefault). - ## toUInt128OrNull {#touint128ornull} Аналогично функции [`toUInt128`](#touint128), эта функция преобразует входное значение в значение типа [UInt128](../data-types/int-uint.md), но при возникновении ошибки возвращает `NULL`. @@ -2970,7 +2919,6 @@ toUInt128OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt128OrZero`](#touint128orzero). * [`toUInt128OrDefault`](#touint128ordefault). - ## toUInt128OrDefault {#touint128ordefault} Аналогично функции [`toUInt128`](#toint128), эта функция преобразует входное значение в значение типа [UInt128](../data-types/int-uint.md), но при ошибке возвращает значение по умолчанию. @@ -3039,7 +2987,6 @@ toUInt128OrDefault('abc', CAST('0', 'UInt128')): 0 * [`toUInt128OrZero`](#touint128orzero). * [`toUInt128OrNull`](#touint128ornull). - ## toUInt256 {#touint256} Преобразует входное значение в значение типа [`UInt256`](../data-types/int-uint.md). В случае ошибки выбрасывает исключение. @@ -3105,7 +3052,6 @@ toUInt256('256'): 256 * [`toUInt256OrNull`](#touint256ornull). * [`toUInt256OrDefault`](#touint256ordefault). - ## toUInt256OrZero {#touint256orzero} Как и функция [`toUInt256`](#touint256), эта функция преобразует входное значение в значение типа [UInt256](../data-types/int-uint.md), но в случае ошибки возвращает `0`. @@ -3168,7 +3114,6 @@ toUInt256OrZero('abc'): 0 * [`toUInt256OrNull`](#touint256ornull). * [`toUInt256OrDefault`](#touint256ordefault). - ## toUInt256OrNull {#touint256ornull} Как и функция [`toUInt256`](#touint256), эта функция преобразует входное значение в значение типа [UInt256](../data-types/int-uint.md), но при ошибке возвращает `NULL`. 
@@ -3231,7 +3176,6 @@ toUInt256OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt256OrZero`](#touint256orzero). * [`toUInt256OrDefault`](#touint256ordefault). - ## toUInt256OrDefault {#touint256ordefault} Как и [`toUInt256`](#touint256), эта функция преобразует входное значение в значение типа [UInt256](../data-types/int-uint.md), но в случае ошибки возвращает значение по умолчанию. @@ -3299,7 +3243,6 @@ toUInt256OrDefault('abc', CAST('0', 'UInt256')): 0 * [`toUInt256OrZero`](#touint256orzero). * [`toUInt256OrNull`](#touint256ornull). - ## toFloat32 {#tofloat32} Преобразует входное значение в значение типа [`Float32`](../data-types/float.md). В случае ошибки выбрасывает исключение. @@ -3357,7 +3300,6 @@ toFloat32('NaN'): nan * [`toFloat32OrNull`](#tofloat32ornull). * [`toFloat32OrDefault`](#tofloat32ordefault). - ## toFloat32OrZero {#tofloat32orzero} Как и функция [`toFloat32`](#tofloat32), эта функция преобразует входное значение к значению типа [Float32](../data-types/float.md), но в случае ошибки возвращает `0`. @@ -3410,7 +3352,6 @@ toFloat32OrZero('abc'): 0 * [`toFloat32OrNull`](#tofloat32ornull). * [`toFloat32OrDefault`](#tofloat32ordefault). - ## toFloat32OrNull {#tofloat32ornull} Как и [`toFloat32`](#tofloat32), эта функция преобразует входное значение в тип [Float32](../data-types/float.md), но возвращает `NULL` в случае ошибки. @@ -3463,7 +3404,6 @@ toFloat32OrNull('abc'): ᴺᵁᴸᴸ * [`toFloat32OrZero`](#tofloat32orzero). * [`toFloat32OrDefault`](#tofloat32ordefault). - ## toFloat32OrDefault {#tofloat32ordefault} Как и [`toFloat32`](#tofloat32), эта функция преобразует входное значение в значение типа [Float32](../data-types/float.md), но в случае ошибки возвращает значение по умолчанию. @@ -3521,7 +3461,6 @@ toFloat32OrDefault('abc', CAST('0', 'Float32')): 0 * [`toFloat32OrZero`](#tofloat32orzero). * [`toFloat32OrNull`](#tofloat32ornull). - ## toFloat64 {#tofloat64} Преобразует входное значение в значение типа [`Float64`](../data-types/float.md). 
В случае ошибки выбрасывает исключение. @@ -3579,7 +3518,6 @@ toFloat64('NaN'): nan * [`toFloat64OrNull`](#tofloat64ornull). * [`toFloat64OrDefault`](#tofloat64ordefault). - ## toFloat64OrZero {#tofloat64orzero} Аналогично функции [`toFloat64`](#tofloat64), эта функция преобразует входное значение в значение типа [Float64](../data-types/float.md), но в случае ошибки возвращает `0`. @@ -3632,7 +3570,6 @@ toFloat64OrZero('abc'): 0 * [`toFloat64OrNull`](#tofloat64ornull). * [`toFloat64OrDefault`](#tofloat64ordefault). - ## toFloat64OrNull {#tofloat64ornull} Подобно [`toFloat64`](#tofloat64), эта функция преобразует входное значение в значение типа [Float64](../data-types/float.md), но в случае ошибки возвращает `NULL`. @@ -3685,7 +3622,6 @@ toFloat64OrNull('abc'): ᴺᵁᴸᴸ * [`toFloat64OrZero`](#tofloat64orzero). * [`toFloat64OrDefault`](#tofloat64ordefault). - ## toFloat64OrDefault {#tofloat64ordefault} Как и функция [`toFloat64`](#tofloat64), эта функция преобразует входное значение в значение типа [Float64](../data-types/float.md), но в случае ошибки возвращает значение по умолчанию. @@ -3743,7 +3679,6 @@ toFloat64OrDefault('abc', CAST('0', 'Float64')): 0 * [`toFloat64OrZero`](#tofloat64orzero). * [`toFloat64OrNull`](#tofloat64ornull). - ## toBFloat16 {#tobfloat16} Преобразует входное значение в значение типа [`BFloat16`](/sql-reference/data-types/float#bfloat16). @@ -3791,7 +3726,6 @@ SELECT toBFloat16('42.7'); * [`toBFloat16OrZero`](#tobfloat16orzero). * [`toBFloat16OrNull`](#tobfloat16ornull). - ## toBFloat16OrZero {#tobfloat16orzero} Преобразует строковое входное значение в значение типа [`BFloat16`](/sql-reference/data-types/float#bfloat16). @@ -3845,7 +3779,6 @@ SELECT toBFloat16OrZero('12.3456789'); * [`toBFloat16`](#tobfloat16). * [`toBFloat16OrNull`](#tobfloat16ornull). 
- ## toBFloat16OrNull {#tobfloat16ornull} Преобразует входное строковое значение в значение типа [`BFloat16`](/sql-reference/data-types/float#bfloat16), @@ -3899,7 +3832,6 @@ SELECT toBFloat16OrNull('12.3456789'); * [`toBFloat16`](#tobfloat16). * [`toBFloat16OrZero`](#tobfloat16orzero). - ## toDate {#todate} Приводит аргумент к типу данных [Date](../data-types/date.md). @@ -4003,7 +3935,6 @@ SELECT toDate(10000000000.) Функцию `toDate` можно также записать другими способами: - ```sql SELECT now() AS time, @@ -4018,7 +3949,6 @@ SELECT └─────────────────────┴───────────────┴─────────────┴─────────────────────┘ ``` - ## toDateOrZero {#todateorzero} То же, что и [toDate](#todate), но при получении некорректного аргумента возвращает нижнюю границу типа [Date](../data-types/date.md). Поддерживается только аргумент типа [String](../data-types/string.md). @@ -4039,7 +3969,6 @@ SELECT toDateOrZero('2022-12-30'), toDateOrZero(''); └────────────────────────────┴──────────────────┘ ``` - ## toDateOrNull {#todateornull} То же, что и [toDate](#todate), но возвращает `NULL`, если передан некорректный аргумент. Поддерживается только аргумент типа [String](../data-types/string.md). @@ -4060,7 +3989,6 @@ SELECT toDateOrNull('2022-12-30'), toDateOrNull(''); └────────────────────────────┴──────────────────┘ ``` - ## toDateOrDefault {#todateordefault} Аналог функции [toDate](#todate), но при неуспешном преобразовании возвращает значение по умолчанию — либо второй аргумент (если он указан), либо нижнюю границу типа [Date](../data-types/date.md). @@ -4087,7 +4015,6 @@ SELECT toDateOrDefault('2022-12-30'), toDateOrDefault('', '2023-01-01'::Date); └───────────────────────────────┴─────────────────────────────────────────────────┘ ``` - ## toDateTime {#todatetime} Преобразует входное значение в значение типа [DateTime](../data-types/datetime.md). 
@@ -4129,7 +4056,6 @@ SELECT toDateTime('2022-12-30 13:44:17'), toDateTime(1685457500, 'UTC'); └───────────────────────────────────┴───────────────────────────────┘ ``` - ## toDateTimeOrZero {#todatetimeorzero} То же, что и [toDateTime](#todatetime), но возвращает нижнюю границу типа [DateTime](../data-types/datetime.md), если передан недопустимый аргумент. Поддерживается только аргумент типа [String](../data-types/string.md). @@ -4150,7 +4076,6 @@ SELECT toDateTimeOrZero('2022-12-30 13:44:17'), toDateTimeOrZero(''); └─────────────────────────────────────────┴──────────────────────┘ ``` - ## toDateTimeOrNull {#todatetimeornull} Такая же функция, как [toDateTime](#todatetime), но возвращает `NULL`, если передан некорректный аргумент. Поддерживается только аргумент типа [String](../data-types/string.md). @@ -4171,7 +4096,6 @@ SELECT toDateTimeOrNull('2022-12-30 13:44:17'), toDateTimeOrNull(''); └─────────────────────────────────────────┴──────────────────────┘ ``` - ## toDateTimeOrDefault {#todatetimeordefault} Аналогично [toDateTime](#todatetime), но в случае неуспешного преобразования возвращает значение по умолчанию — либо третий аргумент (если он указан), либо нижнюю границу типа [DateTime](../data-types/datetime.md). @@ -4198,7 +4122,6 @@ SELECT toDateTimeOrDefault('2022-12-30 13:44:17'), toDateTimeOrDefault('', 'UTC' └────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────┘ ``` - ## toDate32 {#todate32} Преобразует аргумент в тип данных [Date32](../data-types/date32.md). Если значение выходит за допустимый диапазон, `toDate32` возвращает граничные значения, поддерживаемые [Date32](../data-types/date32.md). Если аргумент имеет тип [Date](../data-types/date.md), учитываются его границы диапазона. 
@@ -4255,7 +4178,6 @@ SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value); └────────────┴────────────────────────────────────────────┘ ``` - ## toDate32OrZero {#todate32orzero} То же, что и [toDate32](#todate32), но возвращает минимальное значение типа [Date32](../data-types/date32.md), если передан некорректный аргумент. @@ -4276,7 +4198,6 @@ SELECT toDate32OrZero('1899-01-01'), toDate32OrZero(''); └──────────────────────────────┴────────────────────┘ ``` - ## toDate32OrNull {#todate32ornull} То же, что и [toDate32](#todate32), но возвращает `NULL`, если получен некорректный аргумент. @@ -4297,7 +4218,6 @@ SELECT toDate32OrNull('1955-01-01'), toDate32OrNull(''); └──────────────────────────────┴────────────────────┘ ``` - ## toDate32OrDefault {#todate32ordefault} Преобразует аргумент к типу данных [Date32](../data-types/date32.md). Если значение выходит за допустимый диапазон, `toDate32OrDefault` возвращает нижнюю границу диапазона значений, поддерживаемого [Date32](../data-types/date32.md). Если аргумент имеет тип [Date](../data-types/date.md), его границы также учитываются. Возвращает значение по умолчанию, если получен некорректный аргумент. @@ -4320,7 +4240,6 @@ SELECT └─────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ ``` - ## toDateTime64 {#todatetime64} Преобразует входной аргумент в значение типа [DateTime64](../data-types/datetime64.md). @@ -4391,7 +4310,6 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN └─────────────────────────┴─────────────────────────────────────────────────────────────────────┘ ``` - ## toDateTime64OrZero {#todatetime64orzero} Подобно функции [toDateTime64](#todatetime64), эта функция преобразует входное значение в значение типа [DateTime64](../data-types/datetime64.md), но при некорректном аргументе возвращает минимальное значение [DateTime64](../data-types/datetime64.md). 
@@ -4434,7 +4352,6 @@ SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg * [toDateTime64OrNull](#todatetime64ornull). * [toDateTime64OrDefault](#todatetime64ordefault). - ## toDateTime64OrNull {#todatetime64ornull} Подобно функции [toDateTime64](#todatetime64), эта функция преобразует входное значение в значение типа [DateTime64](../data-types/datetime64.md), но возвращает `NULL`, если передан некорректный аргумент. @@ -4479,7 +4396,6 @@ SELECT * [toDateTime64OrZero](#todatetime64orzero). * [toDateTime64OrDefault](#todatetime64ordefault). - ## toDateTime64OrDefault {#todatetime64ordefault} Как и [toDateTime64](#todatetime64), эта функция преобразует входное значение в значение типа [DateTime64](../data-types/datetime64.md), @@ -4527,7 +4443,6 @@ SELECT * [toDateTime64OrZero](#todatetime64orzero). * [toDateTime64OrNull](#todatetime64ornull). - ## toDecimal32 {#todecimal32} Преобразует входное значение в значение типа [`Decimal(9, S)`](../data-types/decimal.md) с масштабом `S`. В случае ошибки выбрасывает исключение. @@ -4600,7 +4515,6 @@ type_c: Decimal(9, 3) * [`toDecimal32OrNull`](#todecimal32ornull). * [`toDecimal32OrDefault`](#todecimal32ordefault). - ## toDecimal32OrZero {#todecimal32orzero} Как и [`toDecimal32`](#todecimal32), эта функция преобразует входное значение в значение типа [Decimal(9, S)](../data-types/decimal.md), но в случае ошибки возвращает `0`. @@ -4666,7 +4580,6 @@ toTypeName(b): Decimal(9, 5) * [`toDecimal32OrNull`](#todecimal32ornull). * [`toDecimal32OrDefault`](#todecimal32ordefault). - ## toDecimal32OrNull {#todecimal32ornull} Как и [`toDecimal32`](#todecimal32), эта функция преобразует входное значение в значение типа [Nullable(Decimal(9, S))](../data-types/decimal.md), но в случае ошибки возвращает `0`. @@ -4732,7 +4645,6 @@ toTypeName(b): Nullable(Decimal(9, 5)) * [`toDecimal32OrZero`](#todecimal32orzero). * [`toDecimal32OrDefault`](#todecimal32ordefault). 
- ## toDecimal32OrDefault {#todecimal32ordefault} Подобно [`toDecimal32`](#todecimal32), эта функция преобразует входное значение в значение типа [Decimal(9, S)](../data-types/decimal.md), но в случае ошибки возвращает значение по умолчанию. @@ -4805,7 +4717,6 @@ toTypeName(b): Decimal(9, 0) * [`toDecimal32OrZero`](#todecimal32orzero). * [`toDecimal32OrNull`](#todecimal32ornull). - ## toDecimal64 {#todecimal64} Преобразует входное значение в значение типа [`Decimal(18, S)`](../data-types/decimal.md) с масштабом `S`. В случае ошибки выбрасывает исключение. @@ -4878,7 +4789,6 @@ type_c: Decimal(18, 3) * [`toDecimal64OrNull`](#todecimal64ornull). * [`toDecimal64OrDefault`](#todecimal64ordefault). - ## toDecimal64OrZero {#todecimal64orzero} Как и [`toDecimal64`](#todecimal64), эта функция преобразует входное значение в значение типа [Decimal(18, S)](../data-types/decimal.md), но при ошибке возвращает `0`. @@ -4944,7 +4854,6 @@ toTypeName(b): Decimal(18, 18) * [`toDecimal64OrNull`](#todecimal64ornull). * [`toDecimal64OrDefault`](#todecimal64ordefault). - ## toDecimal64OrNull {#todecimal64ornull} Как и функция [`toDecimal64`](#todecimal64), эта функция преобразует входное значение в значение типа [Nullable(Decimal(18, S))](../data-types/decimal.md), но в случае ошибки возвращает `0`. @@ -5010,7 +4919,6 @@ toTypeName(b): Nullable(Decimal(18, 18)) * [`toDecimal64OrZero`](#todecimal64orzero). * [`toDecimal64OrDefault`](#todecimal64ordefault). - ## toDecimal64OrDefault {#todecimal64ordefault} Как и [`toDecimal64`](#todecimal64), эта функция преобразует входное значение в значение типа [Decimal(18, S)](../data-types/decimal.md), но при ошибке возвращает значение по умолчанию. @@ -5083,7 +4991,6 @@ toTypeName(b): Decimal(18, 0) * [`toDecimal64OrZero`](#todecimal64orzero). * [`toDecimal64OrNull`](#todecimal64ornull). - ## toDecimal128 {#todecimal128} Преобразует входное значение в значение типа [`Decimal(38, S)`](../data-types/decimal.md) с масштабом `S`. 
В случае ошибки выбрасывает исключение. @@ -5156,7 +5063,6 @@ type_c: Decimal(38, 3) * [`toDecimal128OrNull`](#todecimal128ornull). * [`toDecimal128OrDefault`](#todecimal128ordefault). - ## toDecimal128OrZero {#todecimal128orzero} Аналогично функции [`toDecimal128`](#todecimal128), эта функция преобразует входное значение в тип [Decimal(38, S)](../data-types/decimal.md), но в случае ошибки возвращает `0`. @@ -5222,7 +5128,6 @@ toTypeName(b): Decimal(38, 38) * [`toDecimal128OrNull`](#todecimal128ornull). * [`toDecimal128OrDefault`](#todecimal128ordefault). - ## toDecimal128OrNull {#todecimal128ornull} Аналогично функции [`toDecimal128`](#todecimal128), эта функция преобразует входное значение в значение типа [Nullable(Decimal(38, S))](../data-types/decimal.md), однако в случае ошибки возвращает `0`. @@ -5288,7 +5193,6 @@ toTypeName(b): Nullable(Decimal(38, 38)) * [`toDecimal128OrZero`](#todecimal128orzero). * [`toDecimal128OrDefault`](#todecimal128ordefault). - ## toDecimal128OrDefault {#todecimal128ordefault} Как и функция [`toDecimal128`](#todecimal128), эта функция преобразует входное значение в тип [Decimal(38, S)](../data-types/decimal.md), но в случае ошибки возвращает значение по умолчанию. @@ -5361,7 +5265,6 @@ toTypeName(b): Decimal(38, 0) * [`toDecimal128OrZero`](#todecimal128orzero). * [`toDecimal128OrNull`](#todecimal128ornull). - ## toDecimal256 {#todecimal256} Преобразует входное значение в значение типа [`Decimal(76, S)`](../data-types/decimal.md) с масштабом `S`. В случае ошибки выбрасывает исключение. @@ -5434,7 +5337,6 @@ type_c: Decimal(76, 3) * [`toDecimal256OrNull`](#todecimal256ornull). * [`toDecimal256OrDefault`](#todecimal256ordefault). - ## toDecimal256OrZero {#todecimal256orzero} Аналогично [`toDecimal256`](#todecimal256), эта функция преобразует входное значение в значение типа [Decimal(76, S)](../data-types/decimal.md), но в случае ошибки возвращает `0`. 
@@ -5500,7 +5402,6 @@ toTypeName(b): Decimal(76, 76) * [`toDecimal256OrNull`](#todecimal256ornull). * [`toDecimal256OrDefault`](#todecimal256ordefault). - ## toDecimal256OrNull {#todecimal256ornull} Как и [`toDecimal256`](#todecimal256), эта функция преобразует входное значение в значение типа [Nullable(Decimal(76, S))](../data-types/decimal.md), но при ошибке возвращает `0`. @@ -5566,7 +5467,6 @@ toTypeName(b): Nullable(Decimal(76, 76)) * [`toDecimal256OrZero`](#todecimal256orzero). * [`toDecimal256OrDefault`](#todecimal256ordefault). - ## toDecimal256OrDefault {#todecimal256ordefault} Как и функция [`toDecimal256`](#todecimal256), эта функция преобразует входное значение в значение типа [Decimal(76, S)](../data-types/decimal.md), но в случае ошибки возвращает значение по умолчанию. @@ -5639,7 +5539,6 @@ toTypeName(b): Decimal(76, 0) * [`toDecimal256OrZero`](#todecimal256orzero). * [`toDecimal256OrNull`](#todecimal256ornull). - ## toString {#tostring} Преобразует значения в их строковое представление. @@ -5684,7 +5583,6 @@ LIMIT 10; └─────────────────────┴───────────────────┴─────────────────────┘ ``` - ## toFixedString {#tofixedstring} Преобразует аргумент типа [String](../data-types/string.md) в тип [FixedString(N)](../data-types/fixedstring.md) (строку фиксированной длины N). @@ -5721,7 +5619,6 @@ SELECT toFixedString('foo', 8) AS s; └───────────────┘ ``` - ## toStringCutToZero {#tostringcuttozero} Принимает аргумент типа String или FixedString. Возвращает строку типа String, усечённую на первом найденном нулевом байте. @@ -5762,7 +5659,6 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut; └────────────┴───────┘ ``` - ## toDecimalString {#todecimalstring} Преобразует числовое значение в значение типа String с количеством дробных знаков в результирующей строке, заданным пользователем. 
@@ -5801,7 +5697,6 @@ SELECT toDecimalString(CAST('64.32', 'Float64'), 5); └─────────────────────────────────────────────┘ ``` - ## reinterpretAsUInt8 {#reinterpretasuint8} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа UInt8. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить входное значение, результат не имеет смысла. @@ -5840,7 +5735,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt16 {#reinterpretasuint16} Выполняет байтовую переинтерпретацию, рассматривая входное значение как значение типа UInt16. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить тип входного значения, результат не имеет осмысленного значения. @@ -5879,7 +5773,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt32 {#reinterpretasuint32} Выполняет переинтерпретацию байтов, рассматривая входное значение как значение типа UInt32. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить значение входного типа, результат не имеет смысла. @@ -5918,7 +5811,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt64 {#reinterpretasuint64} Выполняет побайтовую переинтерпретацию, трактуя входное значение как значение типа UInt64. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить входной тип, результат не имеет смысла. @@ -5957,7 +5849,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt128 {#reinterpretasuint128} Выполняет побайтовую переинтерпретацию, трактуя входное значение как значение типа UInt128. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить тип входного значения, результат не имеет смысла. 
@@ -5996,7 +5887,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt256 {#reinterpretasuint256} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа UInt256. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить входное значение, результат не имеет смысла. @@ -6035,7 +5925,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt8 {#reinterpretasint8} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа Int8. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить значение входного типа, результат не имеет смысла. @@ -6074,7 +5963,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt16 {#reinterpretasint16} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа Int16. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить тип входного значения, результат будет произвольным. @@ -6113,7 +6001,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt32 {#reinterpretasint32} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа Int32. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить тип входного значения, результат становится бессмысленным. @@ -6152,7 +6039,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt64 {#reinterpretasint64} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа Int64. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить входное значение, результат будет бессмысленным. 
@@ -6191,7 +6077,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt128 {#reinterpretasint128} Выполняет переинтерпретацию байтов, рассматривая входное значение как значение типа Int128. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить значение исходного типа, результат не имеет смысла. @@ -6230,7 +6115,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt256 {#reinterpretasint256} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа Int256. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить входное значение, полученное значение не имеет смысла. @@ -6269,7 +6153,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsFloat32 {#reinterpretasfloat32} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа Float32. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может корректно представить исходное, результат не имеет смысла. @@ -6304,7 +6187,6 @@ SELECT reinterpretAsUInt32(toFloat32(0.2)) AS x, reinterpretAsFloat32(x); └────────────┴─────────────────────────┘ ``` - ## reinterpretAsFloat64 {#reinterpretasfloat64} Выполняет побайтовую переинтерпретацию, рассматривая входное значение как значение типа Float64. В отличие от [`CAST`](#cast), функция не пытается сохранить исходное значение — если целевой тип не может представить входной тип, результат будет некорректным. @@ -6339,7 +6221,6 @@ SELECT reinterpretAsUInt64(toFloat64(0.2)) AS x, reinterpretAsFloat64(x); └─────────────────────┴─────────────────────────┘ ``` - ## reinterpretAsDate {#reinterpretasdate} Принимает значение типа String, FixedString или числовое значение и интерпретирует байты как число в порядке байтов хоста (little-endian). 
Возвращает дату, соответствующую количеству дней, прошедших с начала эпохи Unix. @@ -6380,7 +6261,6 @@ SELECT reinterpretAsDate(65), reinterpretAsDate('A'); └───────────────────────┴────────────────────────┘ ``` - ## reinterpretAsDateTime {#reinterpretasdatetime} Эти функции принимают строку и интерпретируют байты, расположенные в начале строки, как число в порядке байтов хоста (little endian). Возвращают дату и время, интерпретируемые как количество секунд с начала эпохи Unix. @@ -6421,7 +6301,6 @@ SELECT reinterpretAsDateTime(65), reinterpretAsDateTime('A'); └───────────────────────────┴────────────────────────────┘ ``` - ## reinterpretAsString {#reinterpretasstring} Эта функция принимает число, дату или дату со временем и возвращает строку, содержащую байты, представляющие соответствующее значение в порядке байт хоста (little-endian). Нулевые байты удаляются с конца. Например, значение типа UInt32, равное 255, представляется строкой длиной один байт. @@ -6458,7 +6337,6 @@ SELECT └────────────────────────────────────────────────────────┴───────────────────────────────────────────┘ ``` - ## reinterpretAsFixedString {#reinterpretasfixedstring} Эта функция принимает на вход число, дату или дату со временем и возвращает значение типа FixedString, содержащее байты, представляющие соответствующее значение в порядке байтов хоста (little-endian). Нулевые байты отбрасываются с конца. Например, значение типа UInt32, равное 255, — это FixedString длиной в один байт. @@ -6495,7 +6373,6 @@ SELECT └─────────────────────────────────────────────────────────────┴────────────────────────────────────────────────┘ ``` - ## reinterpretAsUUID {#reinterpretasuuid} :::note @@ -6556,7 +6433,6 @@ SELECT uuid = uuid2; └─────────────────────┘ ``` - ## reinterpret {#reinterpret} Использует ту же последовательность байт в памяти для значения `x` и интерпретирует её как значение целевого типа. 
@@ -6608,7 +6484,6 @@ SELECT reinterpret(x'3108b4403108d4403108b4403108d440', 'Array(Float32)') AS str └────────────────────────────┘ ``` - ## CAST {#cast} Преобразует входное значение к указанному типу данных. В отличие от функции [reinterpret](#reinterpret), `CAST` пытается представить то же значение, используя новый тип данных. Если преобразование невозможно, выбрасывается исключение. @@ -6714,7 +6589,6 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null; * Настройка [cast_keep_nullable](../../operations/settings/settings.md/#cast_keep_nullable) - ## accurateCast(x, T) {#accuratecastx-t} Преобразует `x` к типу данных `T`. @@ -6749,7 +6623,6 @@ SELECT accurateCast(-1, 'UInt8') AS uint8; Код: 70. DB::Exception: Получено от localhost:9000. DB::Exception: Значение в столбце Int8 невозможно безопасно преобразовать в тип UInt8: При обработке accurateCast(-1, 'UInt8') AS uint8. ``` - ## accurateCastOrNull(x, T) {#accuratecastornullx-t} Преобразует входное значение `x` в указанный тип данных `T`. Всегда возвращает тип [Nullable](../data-types/nullable.md) и значение [NULL](/sql-reference/syntax#null), если результат приведения не может быть представлен в целевом типе. @@ -6802,7 +6675,6 @@ SELECT └───────┴──────┴──────────────┘ ``` - ## accurateCastOrDefault(x, T[, default_value]) {#accuratecastordefaultx-t-default_value} Преобразует входное значение `x` в указанный тип данных `T`. Возвращает значение типа по умолчанию или `default_value` (если оно задано), если результат приведения не может быть представлен в целевом типе. @@ -6859,7 +6731,6 @@ SELECT └───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘ ``` - ## toInterval {#toInterval} Создаёт значение типа данных [Interval](../../sql-reference/data-types/special-data-types/interval.md) из числового значения и единицы измерения интервала (например, 'second' или 'day'). 
@@ -6907,7 +6778,6 @@ SELECT toDateTime('2025-01-01 00:00:00') + toInterval(1, 'hour') └────────────────────────────────────────────────────────────┘ ``` - ## toIntervalYear {#tointervalyear} Возвращает интервал продолжительностью `n` лет типа данных [IntervalYear](../data-types/special-data-types/interval.md). @@ -6945,7 +6815,6 @@ SELECT date + interval_to_year AS result └────────────┘ ``` - ## toIntervalQuarter {#tointervalquarter} Возвращает интервал продолжительностью `n` кварталов типа данных [IntervalQuarter](../data-types/special-data-types/interval.md). @@ -6983,7 +6852,6 @@ SELECT date + interval_to_quarter AS result └────────────┘ ``` - ## toIntervalMonth {#tointervalmonth} Возвращает интервал длительностью `n` месяцев типа данных [IntervalMonth](../data-types/special-data-types/interval.md). @@ -7021,7 +6889,6 @@ SELECT date + interval_to_month AS result └────────────┘ ``` - ## toIntervalWeek {#tointervalweek} Возвращает интервал продолжительностью `n` недель типа данных [IntervalWeek](../data-types/special-data-types/interval.md). @@ -7059,7 +6926,6 @@ SELECT date + interval_to_week AS result └────────────┘ ``` - ## toIntervalDay {#tointervalday} Возвращает интервал в `n` дней с типом данных [IntervalDay](../data-types/special-data-types/interval.md). @@ -7097,7 +6963,6 @@ SELECT date + interval_to_days AS result └────────────┘ ``` - ## toIntervalHour {#tointervalhour} Возвращает интервал продолжительностью `n` часов типа данных [IntervalHour](../data-types/special-data-types/interval.md). @@ -7135,7 +7000,6 @@ SELECT date + interval_to_hours AS result └─────────────────────┘ ``` - ## toIntervalMinute {#tointervalminute} Возвращает интервал в `n` минут типа данных [IntervalMinute](../data-types/special-data-types/interval.md). 
@@ -7173,7 +7037,6 @@ SELECT date + interval_to_minutes AS result └─────────────────────┘ ``` - ## toIntervalSecond {#tointervalsecond} Возвращает интервал длительностью `n` секунд типа данных [IntervalSecond](../data-types/special-data-types/interval.md). @@ -7211,7 +7074,6 @@ SELECT date + interval_to_seconds AS result └─────────────────────┘ ``` - ## toIntervalMillisecond {#tointervalmillisecond} Возвращает интервал продолжительностью `n` миллисекунд типа данных [IntervalMillisecond](../data-types/special-data-types/interval.md). @@ -7249,7 +7111,6 @@ SELECT date + interval_to_milliseconds AS result └─────────────────────────┘ ``` - ## toIntervalMicrosecond {#tointervalmicrosecond} Возвращает интервал длительностью `n` микросекунд типа данных [IntervalMicrosecond](../data-types/special-data-types/interval.md). @@ -7287,7 +7148,6 @@ SELECT date + interval_to_microseconds AS result └────────────────────────────┘ ``` - ## toIntervalNanosecond {#tointervalnanosecond} Возвращает интервал в `n` наносекунд типа данных [IntervalNanosecond](../data-types/special-data-types/interval.md). @@ -7325,7 +7185,6 @@ SELECT date + interval_to_nanoseconds AS result └───────────────────────────────┘ ``` - ## parseDateTime {#parsedatetime} Преобразует [String](../data-types/string.md) в [DateTime](../data-types/datetime.md) в соответствии со [строкой формата MySQL](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format). @@ -7366,21 +7225,16 @@ SELECT parseDateTime('2021-01-04+23:00:00', '%Y-%m-%d+%H:%i:%s') Псевдоним: `TO_TIMESTAMP`. - ## parseDateTimeOrZero {#parsedatetimeorzero} Аналогично функции [parseDateTime](#parsedatetime), за исключением того, что при встрече формата даты, который не может быть обработан, возвращает нулевую дату. - - ## parseDateTimeOrNull {#parsedatetimeornull} То же, что и [parseDateTime](#parsedatetime), за исключением того, что при встрече неподдерживаемого формата даты функция возвращает `NULL`. 
Псевдоним: `str_to_date`. - - ## parseDateTimeInJodaSyntax {#parsedatetimeinjodasyntax} Аналогична функции [parseDateTime](#parsedatetime), за исключением того, что строка формата задаётся в синтаксисе [Joda](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html), а не MySQL. @@ -7421,19 +7275,14 @@ SELECT parseDateTimeInJodaSyntax('2023-02-24 14:53:31', 'yyyy-MM-dd HH:mm:ss', ' └─────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## parseDateTimeInJodaSyntaxOrZero {#parsedatetimeinjodasyntaxorzero} То же, что и [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax), за исключением того, что при встрече с форматом даты, который не удаётся обработать, возвращает нулевую дату. - - ## parseDateTimeInJodaSyntaxOrNull {#parsedatetimeinjodasyntaxornull} Аналогично [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax), за исключением того, что при обнаружении неподдерживаемого формата даты возвращает `NULL`. - - ## parseDateTime64 {#parsedatetime64} Преобразует [String](../data-types/string.md) в тип [DateTime64](../data-types/datetime64.md) в соответствии со [строкой формата MySQL](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format). @@ -7455,19 +7304,14 @@ parseDateTime64(str[, format[, timezone]]) Возвращает значение типа [DateTime64](../data-types/datetime64.md), разобранное из входной строки в соответствии со строкой формата в стиле MySQL. Точность возвращаемого значения равна 6. - ## parseDateTime64OrZero {#parsedatetime64orzero} То же, что и [parseDateTime64](#parsedatetime64), но при обнаружении неподдерживаемого формата даты возвращает нулевую дату. - - ## parseDateTime64OrNull {#parsedatetime64ornull} То же, что и [parseDateTime64](#parsedatetime64), но возвращает `NULL`, если встречает формат даты, который не может быть обработан. 
- - ## parseDateTime64InJodaSyntax {#parsedatetime64injodasyntax} Преобразует [String](../data-types/string.md) в [DateTime64](../data-types/datetime64.md) в соответствии с [форматной строкой Joda](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html). @@ -7489,19 +7333,14 @@ parseDateTime64InJodaSyntax(str[, format[, timezone]]) Возвращает значение типа [DateTime64](../data-types/datetime64.md), полученное разбором входной строки в соответствии со строкой формата в стиле Joda. Точность возвращаемого значения равна количеству символов `S` в строке формата (но не более 6). - ## parseDateTime64InJodaSyntaxOrZero {#parsedatetime64injodasyntaxorzero} Работает так же, как [parseDateTime64InJodaSyntax](#parsedatetime64injodasyntax), за исключением того, что при обнаружении формата даты, который не может быть обработан, возвращает нулевую дату. - - ## parseDateTime64InJodaSyntaxOrNull {#parsedatetime64injodasyntaxornull} То же, что и [parseDateTime64InJodaSyntax](#parsedatetime64injodasyntax), но возвращает `NULL`, если формат даты не может быть обработан. - - ## parseDateTimeBestEffort {#parsedatetimebesteffort} ## parseDateTime32BestEffort {#parsedatetime32besteffort} @@ -7607,7 +7446,6 @@ SELECT toYear(now()) AS year, parseDateTimeBestEffort('10 20:19'); Результат: - ```response ┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐ │ 2023 │ 2023-01-10 20:19:00 │ @@ -7644,39 +7482,28 @@ FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around); * [Анонс ISO 8601 от @xkcd](https://xkcd.com/1179/) * [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2) - ## parseDateTimeBestEffortUS {#parsedatetimebesteffortus} Эта функция ведёт себя как [parseDateTimeBestEffort](#parsedatetimebesteffort) для ISO-форматов даты, например `YYYY-MM-DD hh:mm:ss`, и других форматов даты, в которых компоненты месяца и дня могут быть однозначно извлечены, например `YYYYMMDDhhmmss`, `YYYY-MM`, `DD hh` или `YYYY-MM-DD hh:mm:ss ±h:mm`. 
Если компоненты месяца и дня не могут быть однозначно извлечены, например `MM/DD/YYYY`, `MM-DD-YYYY` или `MM-DD-YY`, по умолчанию используется американский формат даты вместо `DD/MM/YYYY`, `DD-MM-YYYY` или `DD-MM-YY`. В качестве исключения из последнего, если значение месяца больше 12 и не превышает 31, функция переходит к поведению [parseDateTimeBestEffort](#parsedatetimebesteffort), например `15/08/2020` интерпретируется как `2020-08-15`. - - ## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} ## parseDateTime32BestEffortOrNull {#parsedatetime32besteffortornull} Аналогично [parseDateTimeBestEffort](#parsedatetimebesteffort), за исключением того, что возвращает `NULL`, если встречает формат даты, который невозможно обработать. - - ## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} ## parseDateTime32BestEffortOrZero {#parsedatetime32besteffortorzero} То же, что и [parseDateTimeBestEffort](#parsedatetimebesteffort), за исключением того, что при встрече с форматом даты, который невозможно обработать, возвращается нулевая дата или нулевое значение даты-времени. - - ## parseDateTimeBestEffortUSOrNull {#parsedatetimebesteffortusornull} То же, что и функция [parseDateTimeBestEffortUS](#parsedatetimebesteffortus), за исключением того, что она возвращает `NULL`, если встречает формат даты, который нельзя обработать. - - ## parseDateTimeBestEffortUSOrZero {#parsedatetimebesteffortusorzero} То же, что и функция [parseDateTimeBestEffortUS](#parsedatetimebesteffortus), за исключением того, что она возвращает нулевую дату (`1970-01-01`) или нулевую дату со временем (`1970-01-01 00:00:00`), если встречается формат даты, который не удаётся обработать. - - ## parseDateTime64BestEffort {#parsedatetime64besteffort} То же, что и функция [parseDateTimeBestEffort](#parsedatetimebesteffort), но дополнительно обрабатывает миллисекунды и микросекунды и возвращает тип данных [DateTime](/sql-reference/data-types/datetime). 
@@ -7723,37 +7550,26 @@ FORMAT PrettyCompactMonoBlock; └────────────────────────────┴────────────────────────────────┘ ``` - ## parseDateTime64BestEffortUS {#parsedatetime64besteffortus} Аналогична функции [parseDateTime64BestEffort](#parsedatetime64besteffort), за исключением того, что при неоднозначности она отдаёт предпочтение американскому формату даты (`MM/DD/YYYY` и т.д.). - - ## parseDateTime64BestEffortOrNull {#parsedatetime64besteffortornull} Работает так же, как [parseDateTime64BestEffort](#parsedatetime64besteffort), но возвращает `NULL`, если встречает формат даты, который не может быть обработан. - - ## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero} То же, что и [parseDateTime64BestEffort](#parsedatetime64besteffort), за исключением того, что при встрече с форматом даты, который не удаётся обработать, возвращает нулевую дату или нулевое значение даты и времени. - - ## parseDateTime64BestEffortUSOrNull {#parsedatetime64besteffortusornull} Аналогична функции [parseDateTime64BestEffort](#parsedatetime64besteffort), за исключением того, что при неоднозначности эта функция отдаёт предпочтение формату даты, принятому в США (`MM/DD/YYYY` и т. д.), и возвращает `NULL`, если встречает формат даты, который не может быть обработан. - - ## parseDateTime64BestEffortUSOrZero {#parsedatetime64besteffortusorzero} То же, что и [parseDateTime64BestEffort](#parsedatetime64besteffort), за исключением того, что при неоднозначной интерпретации эта функция предпочитает американский формат даты (`MM/DD/YYYY` и т. д.) и возвращает нулевую дату или нулевое значение дата-время, если встречает формат даты, который невозможно обработать. - - ## toLowCardinality {#tolowcardinality} Преобразует входной параметр в вариант типа данных [LowCardinality](../data-types/lowcardinality.md) на основе исходного типа. 
@@ -7790,7 +7606,6 @@ SELECT toLowCardinality('1'); └───────────────────────┘ ``` - ## toUnixTimestamp {#toUnixTimestamp} Преобразует `String`, `Date` или `DateTime` в Unix-метку времени (количество секунд с `1970-01-01 00:00:00 UTC`) типа `UInt32`. @@ -7838,7 +7653,6 @@ from_date: 1509840000 from_date32: 1509840000 ``` - ## toUnixTimestamp64Second {#tounixtimestamp64second} Преобразует `DateTime64` в значение типа `Int64` с фиксированной точностью до секунды. Входное значение масштабируется вверх или вниз в зависимости от его точности. @@ -7878,7 +7692,6 @@ SELECT toUnixTimestamp64Second(dt64); └───────────────────────────────┘ ``` - ## toUnixTimestamp64Milli {#tounixtimestamp64milli} Преобразует `DateTime64` в значение типа `Int64` с фиксированной точностью до миллисекунд. Входное значение при необходимости увеличивается или уменьшается в зависимости от его исходной точности. @@ -7918,7 +7731,6 @@ SELECT toUnixTimestamp64Milli(dt64); └──────────────────────────────┘ ``` - ## toUnixTimestamp64Micro {#tounixtimestamp64micro} Преобразует `DateTime64` в значение типа `Int64` с фиксированной микросекундной точностью. Входное значение соответствующим образом масштабируется (увеличивается или уменьшается) в зависимости от его точности. @@ -7958,7 +7770,6 @@ SELECT toUnixTimestamp64Micro(dt64); └──────────────────────────────┘ ``` - ## toUnixTimestamp64Nano {#tounixtimestamp64nano} Преобразует `DateTime64` в значение типа `Int64` с фиксированной наносекундной точностью. Входное значение масштабируется вверх или вниз в зависимости от его точности. @@ -7998,7 +7809,6 @@ SELECT toUnixTimestamp64Nano(dt64); └─────────────────────────────┘ ``` - ## fromUnixTimestamp64Second {#fromunixtimestamp64second} Преобразует значение типа `Int64` в `DateTime64` с фиксированной точностью до секунд и необязательным указанием часового пояса. Входное значение масштабируется (увеличивается или уменьшается) в зависимости от его точности. 
@@ -8041,7 +7851,6 @@ SELECT └─────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Milli {#fromunixtimestamp64milli} Преобразует значение типа `Int64` в значение типа `DateTime64` с фиксированной точностью до миллисекунд и необязательным указанием часового пояса. Входное значение соответствующим образом масштабируется вверх или вниз в зависимости от его точности. @@ -8084,7 +7893,6 @@ SELECT └─────────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Micro {#fromunixtimestamp64micro} Преобразует значение типа `Int64` в `DateTime64` с фиксированной микросекундной точностью и необязательным часовым поясом. Входное значение масштабируется в большую или меньшую сторону в зависимости от его точности. @@ -8127,7 +7935,6 @@ SELECT └────────────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Nano {#fromunixtimestamp64nano} Преобразует значение типа `Int64` в `DateTime64` с фиксированной точностью до наносекунд и необязательным указанием часового пояса. Входное значение масштабируется (увеличивается или уменьшается) в зависимости от его исходной точности. @@ -8170,7 +7977,6 @@ SELECT └───────────────────────────────┴──────────────────────┘ ``` - ## formatRow {#formatrow} Преобразует произвольные выражения в строку по указанному формату. @@ -8242,7 +8048,6 @@ SETTINGS format_custom_result_before_delimiter='\n', format_custom_resul Примечание: в этой функции поддерживаются только строчно-ориентированные форматы. - ## formatRowNoNewline {#formatrownonewline} Преобразует произвольные выражения в строку через заданный формат. Отличается от formatRow тем, что эта функция удаляет последний символ `\n`, если он есть. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md index 0dd7f5c82c1..09d19cc0e80 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md @@ -10,7 +10,6 @@ import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # Пользовательская функция (UDF) {#executable-user-defined-functions} @@ -48,12 +47,8 @@ ClickHouse может вызывать любую внешнюю исполня Команда должна читать аргументы из `STDIN` и выводить результат в `STDOUT`. Команда должна обрабатывать аргументы итеративно, то есть после обработки блока аргументов она должна ожидать следующий блок. - - ## Выполнимые пользовательские функции {#executable-user-defined-functions} - - ## Примеры {#examples} ### UDF из inline-скрипта {#udf-inline} @@ -193,7 +188,6 @@ SELECT test_function_python(toUInt64(2)); Создайте `test_function_sum_json` с именованными аргументами и форматом [JSONEachRow](/interfaces/formats/JSONEachRow), используя конфигурацию в формате XML или YAML. - Файл `test_function.xml` (`/etc/clickhouse-server/test_function.xml` при использовании путей по умолчанию). @@ -332,7 +326,6 @@ if __name__ == "__main__": SELECT test_function_parameter_python(1)(2); ``` - ```text title="Result" ┌─test_function_parameter_python(1)(2)─┐ │ Параметр 1, значение 2 │ @@ -412,15 +405,12 @@ SELECT test_shell(number) FROM numbers(10); └────────────────────┘ ``` - ## Обработка ошибок {#error-handling} Некоторые функции могут выбрасывать исключение, если данные некорректны. В этом случае запрос отменяется, а клиенту возвращается текст ошибки. При распределённой обработке, когда исключение происходит на одном из серверов, остальные серверы также пытаются прервать выполнение запроса. 
- - ## Вычисление выражений аргументов {#evaluation-of-argument-expressions} Почти во всех языках программирования один из аргументов может не вычисляться для некоторых операторов. @@ -428,8 +418,6 @@ SELECT test_shell(number) FROM numbers(10); В ClickHouse аргументы функций (операторов) всегда вычисляются. Это связано с тем, что целые фрагменты столбцов обрабатываются сразу, а не вычисляется каждая строка по отдельности. - - ## Выполнение функций при распределённой обработке запросов {#performing-functions-for-distributed-query-processing} При распределённой обработке запросов как можно больше стадий обработки выполняется на удалённых серверах, а оставшиеся стадии (слияние промежуточных результатов и всё последующее) выполняются на сервере, инициировавшем запрос. @@ -446,13 +434,9 @@ SELECT test_shell(number) FROM numbers(10); Если функция в запросе выполняется на сервере, инициировавшем запрос, но вам нужно выполнить её на удалённых серверах, вы можете обернуть её в агрегатную функцию `any` или добавить её в ключ группировки в `GROUP BY`. - - ## Определяемые пользователем SQL-функции {#sql-user-defined-functions} Пользовательские функции на основе лямбда-выражений можно создавать с помощью оператора [CREATE FUNCTION](../statements/create/function.md). Чтобы удалить эти функции, используйте оператор [DROP FUNCTION](../statements/drop.md#drop-function). 
- - ## Связанные материалы {#related-content} - [Пользовательские функции (UDF) в ClickHouse Cloud](https://clickhouse.com/blog/user-defined-functions-clickhouse-udfs) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md index 1e4d2f1204f..9d781aedd90 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md @@ -6,8 +6,6 @@ title: 'Функции для работы с ULID' doc_type: 'reference' --- - - # Функции для работы с ULID {#functions-for-working-with-ulids} :::note @@ -20,7 +18,6 @@ doc_type: 'reference' См.: https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } - {/*AUTOGENERATED_START*/ } ## ULIDStringToDateTime {#ULIDStringToDateTime} @@ -58,7 +55,6 @@ SELECT ULIDStringToDateTime('01GNB2S2FGN2P93QPXDNB4EN2R') └────────────────────────────────────────────────────┘ ``` - ## generateULID {#generateULID} Введена в версии v23.2 @@ -107,7 +103,6 @@ SELECT generateULID(1), generateULID(2) {/*AUTOGENERATED_END*/ } - ## См. также {#see-also} - [UUID](../../sql-reference/functions/uuid-functions.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md index 0574f2b2aa7..dfb1d046b87 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md @@ -130,7 +130,6 @@ SELECT └──────────────────────────┴──────────────────────────┘ ``` - ## Операторы для работы с наборами данных {#operators-for-working-with-data-sets} См. 
[операторы IN](../../sql-reference/operators/in.md) и оператор [EXISTS](../../sql-reference/operators/exists.md). @@ -205,7 +204,6 @@ SELECT number AS a FROM numbers(10) WHERE a > ANY (SELECT number FROM numbers(3, └───┘ ``` - ## Операторы для работы с датами и временем {#operators-for-working-with-dates-and-times} ### EXTRACT {#extract} @@ -272,7 +270,6 @@ FROM test.Orders; Дополнительные примеры можно найти в [tests](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). - ### INTERVAL {#interval} Создает значение типа [Interval](../../sql-reference/data-types/special-data-types/interval.md), которое следует использовать в арифметических операциях со значениями типов [Date](../../sql-reference/data-types/date.md) и [DateTime](../../sql-reference/data-types/datetime.md). @@ -347,7 +344,6 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul') AS time, time + 60 * 6 * [Interval](../../sql-reference/data-types/special-data-types/interval.md) — тип данных * функции преобразования типов [toInterval](/sql-reference/functions/type-conversion-functions#tointervalyear) - ## Оператор логического AND {#logical-and-operator} Синтаксис `SELECT a AND b` — вычисляет логическую конъюнкцию выражений `a` и `b` с помощью функции [and](/sql-reference/functions/logical-functions#and). @@ -384,7 +380,6 @@ END Функция `transform` не поддерживает значение `NULL`. - ## Оператор конкатенации {#concatenation-operator} `s1 || s2` – Функция `concat(s1, s2)`. 
@@ -435,7 +430,6 @@ SELECT x+100 FROM t_null WHERE y IS NULL └──────────────┘ ``` - ### IS NOT NULL {#is_not_null} * Для значений типа [Nullable](../../sql-reference/data-types/nullable.md) оператор `IS NOT NULL` возвращает: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md index 0f82d900611..7d95272dbd7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md @@ -32,7 +32,6 @@ ALTER [TEMPORARY] TABLE [db].name [ON CLUSTER cluster] ADD|DROP|RENAME|CLEAR|COM * [MATERIALIZE COLUMN](#materialize-column) — Материализует столбец в частях таблицы, где этот столбец отсутствует. Эти действия подробно описаны ниже. - ## ADD COLUMN {#add-column} ```sql @@ -69,7 +68,6 @@ ToDrop UInt32 Added3 UInt32 ``` - ## Удаление столбца {#drop-column} ```sql @@ -90,7 +88,6 @@ DROP COLUMN [IF EXISTS] name ALTER TABLE visits DROP COLUMN browser ``` - ## ПЕРЕИМЕНОВАТЬ СТОЛБЕЦ {#rename-column} ```sql @@ -107,7 +104,6 @@ RENAME COLUMN [IF EXISTS] имя_столбца TO новое_имя ALTER TABLE visits RENAME COLUMN webBrowser TO browser ``` - ## ОЧИСТИТЬ СТОЛБЕЦ {#clear-column} ```sql @@ -124,7 +120,6 @@ CLEAR COLUMN [IF EXISTS] имя IN PARTITION имя_раздела ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() ``` - ## Столбец COMMENT {#comment-column} ```sql @@ -143,7 +138,6 @@ COMMENT COLUMN [IF EXISTS] имя 'Текстовый комментарий' ALTER TABLE visits COMMENT COLUMN browser 'В этом столбце указан браузер, используемый для доступа к сайту.' ``` - ## ИЗМЕНЕНИЕ СТОЛБЦА {#modify-column} ```sql @@ -223,7 +217,6 @@ DESCRIBE users; Будьте осторожны при изменении столбца типа Nullable на Non-Nullable. Убедитесь, что он не содержит значений NULL, в противном случае это приведёт к проблемам при чтении из него. 
В таком случае обходным решением будет остановить мутацию (KILL MUTATION) и вернуть столбец к типу Nullable. ::: - ## MODIFY COLUMN REMOVE {#modify-column-remove} Удаляет одно из следующих свойств столбца: `DEFAULT`, `ALIAS`, `MATERIALIZED`, `CODEC`, `COMMENT`, `TTL`, `SETTINGS`. @@ -246,7 +239,6 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; * [REMOVE TTL](ttl.md). - ## MODIFY COLUMN MODIFY SETTING {#modify-column-modify-setting} Изменяет параметр столбца. @@ -265,7 +257,6 @@ ALTER TABLE table_name MODIFY COLUMN column_name MODIFY SETTING name=value,...; ALTER TABLE table_name MODIFY COLUMN column_name MODIFY SETTING max_compress_block_size = 1048576; ``` - ## MODIFY COLUMN RESET SETTING {#modify-column-reset-setting} Сбрасывает настройку столбца и удаляет объявление этой настройки в определении столбца в запросе CREATE таблицы. @@ -284,7 +275,6 @@ ALTER TABLE имя_таблицы MODIFY COLUMN имя_столбца RESET SETT ALTER TABLE имя_таблицы MODIFY COLUMN имя_столбца RESET SETTING max_compress_block_size; ``` - ## MATERIALIZE COLUMN {#materialize-column} Материализует столбец с выражением значения `DEFAULT` или `MATERIALIZED`. При добавлении материализованного столбца с помощью `ALTER TABLE table_name ADD COLUMN column_name MATERIALIZED` существующие строки без материализованных значений не заполняются автоматически. Инструкцию `MATERIALIZE COLUMN` можно использовать для перезаписи данных существующего столбца после того, как выражение `DEFAULT` или `MATERIALIZED` было добавлено или обновлено (что обновляет только метаданные, но не изменяет существующие данные). Обратите внимание, что материализация столбца в ключе сортировки является недопустимой операцией, поскольку это может нарушить порядок сортировки. @@ -345,7 +335,6 @@ SELECT groupArray(x), groupArray(s) FROM tmp; * [MATERIALIZED](/sql-reference/statements/create/view#materialized-view). 
- ## Ограничения {#limitations} Запрос `ALTER` позволяет создавать и удалять отдельные элементы (столбцы) во вложенных структурах данных, но не целые вложенные структуры данных. Чтобы добавить вложенную структуру данных, вы можете добавить столбцы с именем вида `name.nested_name` и типом `Array(T)`. Вложенная структура данных эквивалентна нескольким столбцам-массивам с именами с одинаковым префиксом до точки. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md index f1220b68fbd..24dbad65005 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md @@ -9,21 +9,16 @@ keywords: ['ALTER TABLE', 'MODIFY COMMENT'] doc_type: 'reference' --- - - # ALTER TABLE ... MODIFY COMMENT {#alter-table-modify-comment} Добавляет, изменяет или удаляет комментарий к таблице, независимо от того, был ли он задан ранее или нет. Изменение комментария отображается как в [`system.tables`](../../../operations/system-tables/tables.md), так и в результате запроса `SHOW CREATE TABLE`. - - ## Синтаксис {#syntax} ```sql ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Комментарий' ``` - ## Примеры {#examples} Чтобы создать таблицу с комментарием: @@ -79,7 +74,6 @@ WHERE database = currentDatabase() AND name = 'table_with_comment'; └─────────┘ ``` - ## Ограничения {#caveats} Для таблиц Replicated комментарий может отличаться на разных репликах. @@ -88,8 +82,6 @@ WHERE database = currentDatabase() AND name = 'table_with_comment'; Эта возможность доступна, начиная с версии 23.9. В предыдущих версиях ClickHouse она не работает. 
- - ## Связанные материалы {#related-content} - предложение [`COMMENT`](/sql-reference/statements/create/table#comment-clause) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md index 30a57b4b4b1..c220c9cde83 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md @@ -9,21 +9,16 @@ keywords: ['ALTER DATABASE', 'MODIFY COMMENT'] doc_type: 'reference' --- - - # ALTER DATABASE ... MODIFY COMMENT {#alter-database-modify-comment} Добавляет, изменяет или удаляет комментарий к базе данных, независимо от того, был ли он задан ранее. Изменение комментария отражается как в [`system.databases`](/operations/system-tables/databases.md), так и в результате запроса `SHOW CREATE DATABASE`. - - ## Синтаксис {#syntax} ```sql ALTER DATABASE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment' ``` - ## Примеры {#examples} Чтобы создать базу данных (`DATABASE`) с комментарием: @@ -74,7 +69,6 @@ WHERE name = 'database_with_comment'; └─────────┘ ``` - ## См. также {#related-content} - Предложение [`COMMENT`](/sql-reference/statements/create/table#comment-clause) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md index deba18c03a4..7536db944bb 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md @@ -7,8 +7,6 @@ title: 'Оператор ALTER TABLE ... DELETE' doc_type: 'reference' --- - - # Инструкция ALTER TABLE ... 
DELETE {#alter-table-delete-statement} ```sql @@ -33,7 +31,6 @@ ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr * [Синхронность запросов ALTER](/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) * Настройка [mutations_sync](/operations/settings/settings.md/#mutations_sync) - ## Связанные материалы {#related-content} - Блог: [Обработка обновлений и удалений в ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md index 66e8919fc40..bede6f3a2f4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md @@ -7,8 +7,6 @@ title: 'ALTER' doc_type: 'reference' --- - - # ALTER {#alter} Большинство запросов `ALTER TABLE` изменяют настройки таблицы или данные: @@ -51,8 +49,6 @@ doc_type: 'reference' | [ALTER TABLE ... MODIFY COMMENT](/sql-reference/statements/alter/comment.md) | Добавляет, изменяет или удаляет комментарии к таблице, независимо от того, были ли они заданы ранее. | | [ALTER NAMED COLLECTION](/sql-reference/statements/alter/named-collection.md) | Изменяет [именованные коллекции](/operations/named-collections.md). | - - ## Мутации {#mutations} `ALTER`-запросы, предназначенные для изменения данных таблицы, реализованы с помощью механизма, называемого «мутациями», в первую очередь [ALTER TABLE ... DELETE](/sql-reference/statements/alter/delete.md) и [ALTER TABLE ... UPDATE](/sql-reference/statements/alter/update.md). Это асинхронные фоновые процессы, подобные слияниям в таблицах [MergeTree](/engines/table-engines/mergetree-family/index.md), которые создают новые «мутированные» версии частей данных. 
@@ -66,8 +62,6 @@ doc_type: 'reference' Записи о завершенных мутациях не удаляются сразу (количество сохраняемых записей определяется параметром движка хранения `finished_mutations_to_keep`). Более старые записи о мутациях удаляются. - - ## Синхронность запросов ALTER {#synchronicity-of-alter-queries} Для нереплицируемых таблиц все запросы `ALTER` выполняются синхронно. Для реплицируемых таблиц запрос лишь добавляет инструкции для соответствующих действий в `ZooKeeper`, а сами действия выполняются как можно скорее. Однако запрос может ожидать завершения этих действий на всех репликах. @@ -82,8 +76,6 @@ doc_type: 'reference' Для всех запросов `ALTER`, если `alter_sync = 2` и некоторые реплики неактивны дольше времени, указанного в настройке `replication_wait_for_inactive_replica_timeout`, будет сгенерировано исключение `UNFINISHED`. ::: - - ## Связанные материалы {#related-content} - Блог: [Обработка обновлений и удалений в ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md index ce8fa4a941c..c796a270443 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md @@ -21,8 +21,6 @@ doc_type: 'reference' Более технические подробности об устройстве проекций можно найти на этой [странице](/guides/best-practices/sparse-primary-indexes.md/#option-3-projections). 
- - ## Пример фильтрации без использования первичного ключа {#example-filtering-without-using-primary-keys} Создание таблицы: @@ -79,7 +77,6 @@ LIMIT 2 SELECT query, projections FROM system.query_log WHERE query_id='' ``` - ## Пример запроса предварительной агрегации {#example-pre-aggregation-query} Создание таблицы с проекцией: @@ -157,7 +154,6 @@ GROUP BY user_agent SELECT query, projections FROM system.query_log WHERE query_id='' ``` - ## Обычная проекция с полем `_part_offset` {#normal-projection-with-part-offset-field} Создание таблицы с обычной проекцией, использующей поле `_part_offset`: @@ -202,31 +198,22 @@ WHERE _part_starting_offset + _part_offset IN ( SETTINGS enable_shared_storage_snapshot_in_query = 1 ``` - # Управление проекциями {#manipulating-projections} Доступны следующие операции с [проекциями](/engines/table-engines/mergetree-family/mergetree.md/#projections): - - ## ДОБАВИТЬ ПРОЕКЦИЮ {#add-projection} `ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` — добавляет в метаданные таблицы описание проекции. - - ## DROP PROJECTION {#drop-projection} `ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` — удаляет из метаданных таблицы описание проекции и соответствующие файлы проекции на диске. Реализовано как [мутация](/sql-reference/statements/alter/index.md#mutations). - - ## MATERIALIZE PROJECTION {#materialize-projection} `ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - запрос перестраивает проекцию `name` в партиции `partition_name`. Реализован как [мутация](/sql-reference/statements/alter/index.md#mutations). - - ## CLEAR PROJECTION {#clear-projection} `ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` — удаляет файлы проекции с диска, не удаляя её описания. 
Эта операция реализована как [мутация](/sql-reference/statements/alter/index.md#mutations). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md index 0677d5bfc9c..fda54a90e4a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md @@ -8,32 +8,22 @@ toc_hidden_folder: true doc_type: 'reference' --- - - # Работа с индексами пропуска данных {#manipulating-data-skipping-indices} Доступны следующие операции: - - ## ADD INDEX {#add-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Добавляет описание индекса в метаданные таблицы. - - ## DROP INDEX {#drop-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` — удаляет описание индекса из метаданных таблицы и файлы индекса с диска. Реализована как [мутация](/sql-reference/statements/alter/index.md#mutations). - - ## MATERIALIZE INDEX {#materialize-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` — перестраивает вторичный индекс `name` для указанного `partition_name`. Операция реализована как [мутация](/sql-reference/statements/alter/index.md#mutations). Если часть `IN PARTITION` опущена, индекс перестраивается для данных всей таблицы. - - ## CLEAR INDEX {#clear-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` — удаляет с диска файлы вторичного индекса, при этом не удаляя его описание. Эта операция реализована как [мутация](/sql-reference/statements/alter/index.md#mutations). 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md index 4df162c04e6..6a514a8456a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md @@ -7,8 +7,6 @@ title: 'Команды ALTER TABLE ... UPDATE' doc_type: 'reference' --- - - # Команды ALTER TABLE ... UPDATE {#alter-table-update-statements} ```sql @@ -33,7 +31,6 @@ ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] [IN P * [Синхронность ALTER-запросов](/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) * Настройка [mutations_sync](/operations/settings/settings.md/#mutations_sync) - ## Связанные материалы {#related-content} - Блог: [Обработка обновлений и удалений в ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md index 8169ed033ef..b174bba82a1 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md @@ -30,7 +30,6 @@ ALTER USER [IF EXISTS] name1 [RENAME TO new_name |, name2 [,...]] Чтобы использовать `ALTER USER`, необходимо иметь привилегию [ALTER USER](../../../sql-reference/statements/grant.md#access-management). 
- ## Клауза GRANTEES {#grantees-clause} Определяет пользователей или роли, которым разрешено получать [привилегии](../../../sql-reference/statements/grant.md#privileges) от этого пользователя при условии, что этому пользователю также выданы все необходимые привилегии с [GRANT OPTION](../../../sql-reference/statements/grant.md#granting-privilege-syntax). Параметры клаузы `GRANTEES`: @@ -42,8 +41,6 @@ ALTER USER [IF EXISTS] name1 [RENAME TO new_name |, name2 [,...]] Вы можете исключить любого пользователя или роль с помощью выражения `EXCEPT`. Например, `ALTER USER user1 GRANTEES ANY EXCEPT user2`. Это означает, что если у `user1` есть какие‑то привилегии, выданные с `GRANT OPTION`, то он сможет выдавать эти привилегии кому угодно, кроме `user2`. - - ## Примеры {#examples} Сделайте назначенные роли ролями по умолчанию: @@ -104,7 +101,6 @@ ALTER USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by ' ALTER USER user1 RESET AUTHENTICATION METHODS TO NEW ``` - ## Оператор VALID UNTIL {#valid-until-clause} Позволяет задать дату окончания срока действия и, при необходимости, время для метода аутентификации. Принимает строку в качестве параметра. Рекомендуется использовать формат `YYYY-MM-DD [hh:mm:ss] [timezone]` для значения даты и времени. По умолчанию этот параметр равен `'infinity'`. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md index 1d5b148dd76..e84e0dc5db7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md @@ -7,8 +7,6 @@ title: 'Оператор ALTER TABLE ... MODIFY QUERY' doc_type: 'reference' --- - - # Оператор ALTER TABLE ... 
MODIFY QUERY {#alter-table-modify-query-statement} Вы можете изменить запрос `SELECT`, который был указан при создании [материализованного представления](/sql-reference/statements/create/view#materialized-view), с помощью оператора `ALTER TABLE ... MODIFY QUERY` без прерывания процесса ингестии. @@ -92,7 +90,6 @@ ALTER TABLE mv MODIFY QUERY GROUP BY ts, event_type, browser; ``` - INSERT INTO events SELECT Date '2020-01-03' + interval number * 900 second, ['imp', 'click'][number%2+1], @@ -172,7 +169,6 @@ browser Возможности приложения сильно ограничены, так как вы можете изменять только раздел `SELECT`, не добавляя новые столбцы. ``` - ```sql CREATE TABLE src_table (`a` UInt32) ENGINE = MergeTree ORDER BY a; CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table; @@ -204,7 +200,6 @@ SELECT * FROM mv; └───┘ ``` - ## Оператор ALTER TABLE ... MODIFY REFRESH {#alter-table--modify-refresh-statement} Оператор `ALTER TABLE ... MODIFY REFRESH` изменяет параметры обновления для [обновляемого материализованного представления](../create/view.md#refreshable-materialized-view). См. [Изменение параметров обновления](../create/view.md#changing-refresh-parameters). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md index c98d498f4e7..354c9001238 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md @@ -9,8 +9,6 @@ doc_type: 'reference' Запрос `CHECK GRANT` используется для проверки, была ли текущему пользователю или роли предоставлена определённая привилегия. - - ## Синтаксис {#syntax} Основной синтаксис запроса следующий: @@ -21,7 +19,6 @@ CHECK GRANT privilege[(column_name [,...])] [,...] ON {db.table[*]|db[*].*|*.*|t * `privilege` — тип права доступа. 
- ## Примеры {#examples} Если пользователю ранее была предоставлена привилегия, значение поля `check_grant` в ответе будет равно `1`. В противном случае значение `check_grant` будет равно `0`. @@ -50,6 +47,5 @@ CHECK GRANT SELECT(col2) ON table_2; └────────┘ ``` - ## Подстановочные символы {#wildcard} При указании привилегий можно использовать звездочку (`*`) вместо имени таблицы или базы данных. Правила использования подстановочных символов описаны в разделе [WILDCARD GRANTS](../../sql-reference/statements/grant.md#wildcard-grants). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md index 8fd534bff82..6490e289278 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md @@ -17,8 +17,6 @@ doc_type: 'reference' Этот запрос не улучшит производительность системы, и вам не следует выполнять его, если вы не уверены в своих действиях. ::: - - ## Синтаксис {#syntax} Основной синтаксис запроса выглядит следующим образом: @@ -55,7 +53,6 @@ doc_type: 'reference' Движки из семейства `*Log` не обеспечивают автоматическое восстановление данных при сбое. Используйте запрос `CHECK TABLE`, чтобы своевременно отслеживать потерю данных. - ## Примеры {#examples} По умолчанию запрос `CHECK TABLE` показывает общий результат проверки таблицы: @@ -152,7 +149,6 @@ FORMAT PrettyCompactMonoBlock SETTINGS check_query_single_value_result = 0 ``` - ```text ┌─база_данных─┬─таблица──┬─путь_части───┬─пройдено─┬─сообщение─┐ │ default │ t2 │ all_1_95_3 │ 1 │ │ @@ -168,7 +164,6 @@ SETTINGS check_query_single_value_result = 0 └──────────┴──────────┴─────────────┴───────────┴─────────┘ ``` - ## Если данные повреждены {#if-the-data-is-corrupted} Если таблица повреждена, вы можете скопировать неповреждённые данные в другую таблицу. 
Для этого: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md index 56d84c85c85..2591dc6419b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md @@ -17,7 +17,6 @@ CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | PROFILE 'profile_name'] [,...] ``` - ## Управление ролями {#managing-roles} Пользователю может быть назначено несколько ролей. Пользователи могут применять назначенные им роли в произвольных комбинациях с помощью оператора [SET ROLE](../../../sql-reference/statements/set-role.md). Итоговый набор привилегий представляет собой объединение всех привилегий всех применённых ролей. Если пользователю были выданы привилегии непосредственно на его учётную запись, они также объединяются с привилегиями, предоставленными ролями. @@ -28,8 +27,6 @@ CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus Чтобы удалить роль, используйте оператор [DROP ROLE](/sql-reference/statements/drop#drop-role). Удалённая роль автоматически отзывается у всех пользователей и ролей, которым она была назначена. 
- - ## Примеры {#examples} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md index a0245855eca..aec613fb73f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md @@ -24,13 +24,10 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}] ``` - ## Предложение USING {#using-clause} Позволяет задать условие для фильтрации строк. Пользователь увидит строку, если при вычислении условия для этой строки получается ненулевое значение. - - ## Клауза TO {#to-clause} В секции `TO` вы можете указать список пользователей и ролей, для которых должна действовать эта политика. Например, `CREATE ROW POLICY ... TO accountant, john@localhost`. @@ -49,8 +46,6 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter` ::: - - ## Оператор AS {#as-clause} Допускается одновременное включение нескольких политик для одной и той же таблицы и одного и того же пользователя. Поэтому нужен способ комбинировать условия из нескольких политик. @@ -96,13 +91,10 @@ CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio разрешить пользователю `peter` видеть строки таблицы table1 только при одновременном выполнении условий `b=1` И `c=2`, тогда как для всех остальных таблиц в mydb для этого пользователя будет применяться только политика `b=1`. - ## Предложение ON CLUSTER {#on-cluster-clause} Позволяет создавать политики доступа к строкам на кластере, см. [Distributed DDL](../../../sql-reference/distributed-ddl.md). 
- - ## Примеры {#examples} `CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost` diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md index 64e15a71bdb..a20af493e44 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md @@ -16,7 +16,6 @@ import TabItem from '@theme/TabItem'; По умолчанию таблицы создаются только на текущем сервере. Распределенные DDL-запросы реализованы с помощью предложения `ON CLUSTER`, которое [описано отдельно](../../../sql-reference/distributed-ddl.md). - ## Синтаксические формы {#syntax-forms} ### С явной схемой {#with-explicit-schema} @@ -100,7 +99,6 @@ SELECT x, toTypeName(x) FROM t1; └───┴───────────────┘ ``` - ## Модификаторы NULL и NOT NULL {#null-or-not-null-modifiers} Модификаторы `NULL` и `NOT NULL` после типа данных в определении столбца соответственно разрешают или запрещают делать его [Nullable](/sql-reference/data-types/nullable). @@ -109,8 +107,6 @@ SELECT x, toTypeName(x) FROM t1; См. также настройку [data_type_default_nullable](../../../operations/settings/settings.md#data_type_default_nullable). - - ## Значения по умолчанию {#default_values} Описание столбца может задавать выражение значения по умолчанию в виде `DEFAULT expr`, `MATERIALIZED expr` или `ALIAS expr`. Пример: `URLDomain String DEFAULT domain(URL)`. @@ -217,7 +213,6 @@ FROM test FORMAT Vertical; ``` - Строка 1: ────── id: 1 @@ -263,7 +258,6 @@ SELECT * FROM test SETTINGS asterisk_include_alias_columns=1; └────┴────────────┴──────────┘ ```` - ## Первичный ключ {#primary-key} Вы можете задать [первичный ключ](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries) при создании таблицы. 
Первичный ключ можно указать двумя способами: @@ -294,7 +288,6 @@ PRIMARY KEY(expr1[, expr2,...]); Нельзя совмещать оба подхода в одном запросе. ::: - ## Ограничения {#constraints} Наряду с описаниями столбцов можно задать ограничения: @@ -339,13 +332,10 @@ ORDER BY (name_len, name); `ASSUME CONSTRAINT` **не обеспечивает выполнение ограничения**, он лишь информирует оптимизатор, что ограничение соблюдается. Если ограничение на самом деле не выполняется, результаты запросов могут быть некорректными. Поэтому следует использовать `ASSUME CONSTRAINT` только в том случае, если вы уверены, что ограничение действительно выполняется. - ## Выражение TTL {#ttl-expression} Определяет срок хранения значений. Может быть задано только для таблиц семейства MergeTree. Для подробного описания см. раздел [TTL для столбцов и таблиц](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). - - ## Кодеки сжатия столбцов {#column_compression_codec} По умолчанию ClickHouse использует сжатие `lz4` в самостоятельной (self-managed) установке и `zstd` в ClickHouse Cloud. @@ -427,7 +417,6 @@ ClickHouse поддерживает кодеки как общего, так и `DEFLATE_QPL` — [алгоритм сжатия Deflate](https://github.com/intel/qpl), реализованный с помощью Intel® Query Processing Library. Применяются некоторые ограничения: - - DEFLATE_QPL отключен по умолчанию и может использоваться только после включения параметра конфигурации [enable_deflate_qpl_codec](../../../operations/settings/settings.md#enable_deflate_qpl_codec). - DEFLATE_QPL требует сборку ClickHouse, скомпилированную с использованием инструкций SSE 4.2 (по умолчанию это так). Подробнее см. в разделе [Сборка ClickHouse с DEFLATE_QPL](/development/building_and_benchmarking_deflate_qpl). - DEFLATE_QPL работает наилучшим образом, если в системе есть устройство разгрузки Intel® IAA (In-Memory Analytics Accelerator). Подробнее см. 
[Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) и [Benchmark with DEFLATE_QPL](/development/building_and_benchmarking_deflate_qpl). @@ -455,8 +444,6 @@ ClickHouse поддерживает кодеки как общего, так и #### FPC {#fpc} - - `FPC(level, float_size)` — последовательно предсказывает следующее значение с плавающей запятой в последовательности, выбирая лучший из двух предикторов, затем выполняет XOR фактического значения с предсказанным и сжимает результат, обрезая ведущие нули. Аналогично алгоритму Gorilla, это эффективно при хранении последовательности значений с плавающей запятой, которые изменяются медленно. Для 64-битных значений (`double`) FPC работает быстрее, чем Gorilla, для 32-битных значений производительность может отличаться. Возможные значения `level`: 1–28, значение по умолчанию — 12. Возможные значения `float_size`: 4, 8, значение по умолчанию — `sizeof(type)`, если тип — `Float`. Во всех остальных случаях — 4. Подробное описание алгоритма см. в статье [High Throughput Compression of Double-Precision Floating-Point Data](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf). #### T64 {#t64} @@ -522,7 +509,6 @@ CREATE TABLE mytable ENGINE = MergeTree ORDER BY x; ``` - ## Временные таблицы {#temporary-tables} :::note @@ -553,7 +539,6 @@ CREATE [OR REPLACE] TEMPORARY TABLE [IF NOT EXISTS] table_name Вместо временных таблиц можно использовать таблицы с движком [ENGINE = Memory](../../../engines/table-engines/special/memory.md). - ## REPLACE TABLE {#replace-table} Оператор `REPLACE` позволяет [атомарно](/concepts/glossary#atomicity) обновлять таблицу. @@ -720,7 +705,6 @@ WHERE CounterID <12345; - ## Предложение COMMENT {#comment-clause} При создании таблицы вы можете добавить к ней комментарий. 
@@ -753,7 +737,6 @@ SELECT name, comment FROM system.tables WHERE name = 't1'; └──────┴──────────────────────┘ ``` - ## Похожие материалы {#related-content} - Блог: [Оптимизация ClickHouse с помощью схем и кодеков](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md index ffbc32f1811..180a840b58c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md @@ -26,7 +26,6 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus Предложение `ON CLUSTER` позволяет создавать пользователей на кластере, см. [Распределённый DDL](../../../sql-reference/distributed-ddl.md). - ## Идентификация {#identification} Существует несколько способов идентификации пользователя: @@ -73,7 +72,6 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus * Содержать как минимум 1 специальный символ ::: - ## Примеры {#examples} 1. Следующее имя пользователя — `name1`, и для него не требуется пароль — что, очевидно, практически не обеспечивает безопасность: @@ -162,15 +160,11 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus CREATE USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'' ``` - - Примечания: 1. Более старые версии ClickHouse могут не поддерживать синтаксис с несколькими методами аутентификации. Поэтому, если на сервере ClickHouse есть пользователи с такими настройками и сервер понижен до версии, которая этого не поддерживает, эти пользователи станут недоступны, а некоторые операции, связанные с пользователями, перестанут работать. 
Чтобы выполнить понижение версии корректно, необходимо перед понижением настроить всех пользователей так, чтобы у каждого был только один метод аутентификации. Либо, если сервер был понижен без соблюдения предусмотренной процедуры, проблемных пользователей следует удалить. 2. `no_password` не может сосуществовать с другими методами аутентификации по соображениям безопасности. Поэтому вы можете указать `no_password` только если это единственный метод аутентификации в запросе. - - ## Хост пользователя {#user-host} Хост пользователя — это хост, с которого может быть установлено соединение с сервером ClickHouse. Хост может быть указан в секции запроса `HOST` следующими способами: @@ -192,8 +186,6 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus ClickHouse рассматривает `user_name@'address'` как имя пользователя целиком. Таким образом, технически вы можете создать несколько пользователей с одинаковым `user_name` и разными конструкциями после `@`. Однако мы не рекомендуем делать это. ::: - - ## Оператор VALID UNTIL {#valid-until-clause} Позволяет задать дату окончания срока действия и, при необходимости, время для метода аутентификации. В качестве параметра принимает строку. Рекомендуется использовать формат `YYYY-MM-DD [hh:mm:ss] [timezone]` для даты и времени. По умолчанию этот параметр имеет значение `'infinity'`. 
@@ -207,8 +199,6 @@ ClickHouse рассматривает `user_name@'address'` как имя пол - ```CREATE USER name1 VALID UNTIL '2025-01-01 12:00:00 `Asia/Tokyo`'``` - `CREATE USER name1 IDENTIFIED WITH plaintext_password BY 'no_expiration', bcrypt_password BY 'expiration_set' VALID UNTIL '2025-01-01''` - - ## Клауза GRANTEES {#grantees-clause} Указывает пользователей или роли, которым разрешено получать [привилегии](../../../sql-reference/statements/grant.md#privileges) от этого пользователя при условии, что этому пользователю также выданы все требуемые привилегии с [GRANT OPTION](../../../sql-reference/statements/grant.md#granting-privilege-syntax). Варианты клаузы `GRANTEES`: @@ -220,8 +210,6 @@ ClickHouse рассматривает `user_name@'address'` как имя пол Вы можете исключить любого пользователя или роль с помощью выражения `EXCEPT`. Например, `CREATE USER user1 GRANTEES ANY EXCEPT user2`. Это означает, что если у `user1` есть некоторые привилегии, выданные с `GRANT OPTION`, он сможет выдавать эти привилегии кому угодно, кроме `user2`. - - ## Примеры {#examples-1} Создайте учетную запись пользователя `mira` с паролем `qwerty`: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md index 918fabf1fb8..98b04b975ce 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md @@ -11,13 +11,10 @@ import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import DeprecatedBadge from '@theme/badges/DeprecatedBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # CREATE VIEW {#create-view} Создает новое представление. 
Представления бывают [обычными](#normal-view), [материализованными](#materialized-view), [обновляемыми материализованными](#refreshable-materialized-view) и [оконными](/sql-reference/statements/create/view#window-view). - - ## Обычный вид {#normal-view} Синтаксис: @@ -49,7 +46,6 @@ SELECT a, b, c FROM view SELECT a, b, c FROM (SELECT ...) ``` - ## Параметризованное представление {#parameterized-view} Параметризованные представления похожи на обычные представления, но могут создаваться с параметрами, которые не подставляются (не разрешаются) сразу. Эти представления можно использовать с табличными функциями, где имя представления выступает в роли имени функции, а значения параметров передаются как её аргументы. @@ -64,7 +60,6 @@ CREATE VIEW view AS SELECT * FROM TABLE WHERE Column1={column1:datatype1} and Co SELECT * FROM view(column1=value1, column2=value2 ...) ``` - ## Материализованное представление {#materialized-view} ```sql @@ -119,7 +114,6 @@ AS SELECT ... Чтобы удалить представление, используйте [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). Хотя `DROP TABLE` также работает для представлений (VIEW). - ## SQL-безопасность {#sql_security} `DEFINER` и `SQL SECURITY` позволяют указать, под каким пользователем ClickHouse будет выполняться базовый запрос представления. @@ -166,7 +160,6 @@ SQL SECURITY INVOKER AS SELECT ... ``` - ## Live View {#live-view} @@ -175,8 +168,6 @@ AS SELECT ... Для вашего удобства старая документация доступна [здесь](https://pastila.nl/?00f32652/fdf07272a7b54bda7e13b919264e449f.md) - - ## Обновляемое материализованное представление {#refreshable-materialized-view} ```sql @@ -247,7 +238,6 @@ REFRESH EVERY 1 DAY OFFSET 2 HOUR RANDOMIZE FOR 1 HOUR -- ежедневно в В режиме `APPEND` координацию можно отключить с помощью `SETTINGS all_replicas = 1`. Тогда реплики выполняют обновления независимо друг от друга и ReplicatedMergeTree не требуется. 
- В режиме, отличном от `APPEND`, поддерживается только координированное обновление. Для некоординированного используйте базу данных `Atomic` и запрос `CREATE ... ON CLUSTER`, чтобы создать обновляемые материализованные представления на всех репликах. Координация осуществляется через Keeper. Путь znode определяется серверным параметром [default_replica_path](../../../operations/server-configuration-parameters/settings.md#default_replica_path). @@ -320,7 +310,6 @@ ALTER TABLE [db.]имя_таблицы MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE ### Другие операции {#other-operations} - Состояние всех обновляемых материализованных представлений доступно в таблице [`system.view_refreshes`](../../../operations/system-tables/view_refreshes.md). В частности, она содержит прогресс обновления (если оно выполняется), время последнего и следующего обновления, сообщение об исключении, если обновление завершилось с ошибкой. Чтобы вручную остановить, запустить, инициировать или отменить обновление, используйте [`SYSTEM STOP|START|REFRESH|WAIT|CANCEL VIEW`](../system.md#refreshable-materialized-views). @@ -331,8 +320,6 @@ ALTER TABLE [db.]имя_таблицы MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE Интересный факт: запрос обновления может читать из обновляемого представления, видя версию данных до обновления. Это означает, что вы можете реализовать игру «Жизнь» Конвея: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA== ::: - - ## Оконное представление {#window-view} @@ -394,7 +381,6 @@ CREATE WINDOW VIEW test.wv TO test.dst WATERMARK=ASCENDING ALLOWED_LATENESS=INTE Обратите внимание, что элементы, выдаваемые при позднем срабатывании, следует рассматривать как обновлённые результаты предыдущего вычисления. Вместо срабатывания в конце окон представление окна будет срабатывать сразу при поступлении запоздалого события. В результате для одного и того же окна может быть получено несколько выходных результатов. 
Пользователям необходимо учитывать эти дублирующиеся результаты или удалять дубликаты. - Вы можете изменить `SELECT`-запрос, указанный в оконном представлении, с помощью оператора `ALTER TABLE ... MODIFY QUERY`. Структура данных, формируемая новым `SELECT`-запросом, должна совпадать со структурой исходного `SELECT`-запроса как при использовании предложения `TO [db.]name`, так и без него. Обратите внимание, что данные в текущем окне будут потеряны, поскольку промежуточное состояние не может быть повторно использовано. ### Мониторинг новых окон {#monitoring-new-windows} @@ -465,14 +451,11 @@ Window View полезен в следующих сценариях: * **Мониторинг**: Агрегировать и вычислять метрики логов по времени, выводя результаты в целевую таблицу. Панель мониторинга может использовать целевую таблицу в качестве источника данных. * **Аналитика**: Автоматически агрегировать и предварительно обрабатывать данные во временном окне. Это может быть полезно при анализе больших объемов логов. Предварительная обработка устраняет повторные вычисления в нескольких запросах и снижает задержку их выполнения. - ## Связанные материалы {#related-content} - Блог: [Работа с временными рядами в ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) - Блог: [Построение системы наблюдаемости с помощью ClickHouse — часть 2. 
Трейсы](https://clickhouse.com/blog/storing-traces-and-spans-open-telemetry-in-clickhouse) - - ## Временные представления {#temporary-views} ClickHouse поддерживает **временные представления** со следующими характеристиками (по возможности аналогичными временным таблицам): diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md index 3f4b5f5ad0a..7b6571bc3dc 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md @@ -16,7 +16,6 @@ DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE Это называется "облегчённым `DELETE`", чтобы противопоставить его команде [ALTER TABLE ... DELETE](/sql-reference/statements/alter/delete), которая является тяжеловесным процессом. - ## Примеры {#examples} ```sql @@ -24,7 +23,6 @@ DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE DELETE FROM hits WHERE Title LIKE '%hello%'; ``` - ## Облегчённый `DELETE` не удаляет данные немедленно {#lightweight-delete-does-not-delete-data-immediately} Облегчённый `DELETE` реализован как [мутация](/sql-reference/statements/alter#mutations), которая помечает строки как удалённые, но не удаляет их физически сразу. @@ -35,24 +33,18 @@ DELETE FROM hits WHERE Title LIKE '%hello%'; Если вам необходимо гарантировать удаление данных из хранилища в предсказуемые сроки, рассмотрите использование настройки таблицы [`min_age_to_force_merge_seconds`](/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds). Либо вы можете использовать команду [ALTER TABLE ... DELETE](/sql-reference/statements/alter/delete). Обратите внимание, что удаление данных с помощью `ALTER TABLE ... DELETE` может потреблять значительные ресурсы, так как все затронутые части пересоздаются. 
- - ## Удаление больших объёмов данных {#deleting-large-amounts-of-data} Массовое удаление данных может негативно сказаться на производительности ClickHouse. Если вы хотите удалить все строки из таблицы, рассмотрите возможность использования команды [`TRUNCATE TABLE`](/sql-reference/statements/truncate). Если вы ожидаете частые операции удаления, рассмотрите возможность использования [пользовательского ключа партиционирования](/engines/table-engines/mergetree-family/custom-partitioning-key). В этом случае вы можете воспользоваться командой [`ALTER TABLE ... DROP PARTITION`](/sql-reference/statements/alter/partition#drop-partitionpart), чтобы быстро удалить все строки, относящиеся к этой партиции. - - ## Ограничения легковесного `DELETE` {#limitations-of-lightweight-delete} ### Легковесные `DELETE` с проекциями {#lightweight-deletes-with-projections} По умолчанию `DELETE` не работает для таблиц с проекциями. Это связано с тем, что строки в проекции могут быть затронуты операцией `DELETE`. Однако существует [настройка MergeTree](/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode`, которая позволяет изменить это поведение. - - ## Особенности производительности при использовании легковесного `DELETE` {#performance-considerations-when-using-lightweight-delete} **Удаление больших объемов данных с помощью легковесного оператора `DELETE` может негативно сказаться на производительности запросов `SELECT`.** @@ -64,8 +56,6 @@ DELETE FROM hits WHERE Title LIKE '%hello%'; - В затронутой таблице очень большое количество кусков данных (data parts). - Большой объем данных хранится в компактных частях. В компактной части (Compact part) все столбцы хранятся в одном файле. - - ## Права на удаление {#delete-permissions} Для выполнения `DELETE` требуется привилегия `ALTER DELETE`. 
Чтобы разрешить выполнение операторов `DELETE` для определённой таблицы и пользователя, выполните следующую команду: @@ -74,7 +64,6 @@ DELETE FROM hits WHERE Title LIKE '%hello%'; GRANT ALTER DELETE ON db.table TO username; ``` - ## Как легковесные операции DELETE работают внутри ClickHouse {#how-lightweight-deletes-work-internally-in-clickhouse} 1. **К затронутым строкам применяется «маска»** @@ -103,8 +92,6 @@ GRANT ALTER DELETE ON db.table TO username; Из описанных выше шагов видно, что легковесный `DELETE`, использующий технику маскирования, повышает производительность по сравнению с традиционным `ALTER TABLE ... DELETE`, поскольку не перезаписывает файлы всех столбцов для затронутых частей. - - ## Связанные материалы {#related-content} - Блог: [Обновление и удаление данных в ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md index 3335442d03f..8dd4be00400 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md @@ -58,7 +58,6 @@ Union ReadFromStorage (SystemNumbers) ``` - ## Типы EXPLAIN {#explain-types} - `AST` — абстрактное синтаксическое дерево. @@ -101,7 +100,6 @@ EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today(); ExpressionList ``` - ### EXPLAIN SYNTAX {#explain-syntax} Показывает абстрактное синтаксическое дерево (AST) запроса после синтаксического анализа. 
@@ -146,7 +144,6 @@ ALL INNER JOIN system.numbers AS __table2 ON __table1.number = __table2.number ALL INNER JOIN system.numbers AS __table3 ON __table2.number = __table3.number ``` - ### EXPLAIN QUERY TREE {#explain-query-tree} Настройки: @@ -176,21 +173,21 @@ QUERY id: 0 TABLE id: 3, table_name: default.test_table ``` - ### EXPLAIN PLAN {#explain-plan} -Выводит шаги плана выполнения запроса. +Выводит шаги плана запроса. -Параметры: +Настройки: -* `header` — Выводит заголовок результата для шага. По умолчанию: 0. -* `description` — Выводит описание шага. По умолчанию: 1. -* `indexes` — Показывает используемые индексы, количество отфильтрованных частей и количество отфильтрованных гранул для каждого применённого индекса. По умолчанию: 0. Поддерживается для таблиц [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). Начиная с ClickHouse >= v25.9, этот запрос выдаёт осмысленный результат только при использовании с `SETTINGS use_query_condition_cache = 0, use_skip_indexes_on_data_read = 0`. -* `projections` — Показывает все проанализированные проекции и их влияние на фильтрацию на уровне частей на основе условий первичного ключа проекции. Для каждой проекции этот раздел включает статистику, такую как количество частей, строк, меток и диапазонов, которые были проанализированы с использованием первичного ключа проекции. Он также показывает, сколько частей с данными было пропущено благодаря этой фильтрации без чтения самой проекции. По полю `description` можно определить, была ли проекция фактически использована для чтения или только проанализирована для фильтрации. По умолчанию: 0. Поддерживается для таблиц [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). -* `actions` — Выводит подробную информацию о действиях шага. По умолчанию: 0. -* `json` — Выводит шаги плана запроса как строку в формате [JSON](/interfaces/formats/JSON). По умолчанию: 0. 
Рекомендуется использовать формат [TabSeparatedRaw (TSVRaw)](/interfaces/formats/TabSeparatedRaw), чтобы избежать лишнего экранирования. -* `input_headers` — Выводит входные заголовки для шага. По умолчанию: 0. В основном полезно только для разработчиков для отладки проблем, связанных с несоответствием входных и выходных заголовков. -* `column_structure` — Дополнительно выводит структуру столбцов в заголовках, помимо их имени и типа. По умолчанию: 0. В основном полезно только для разработчиков для отладки проблем, связанных с несоответствием входных и выходных заголовков. +* `header` — Печатает заголовок вывода для шага. По умолчанию: 0. +* `description` — Печатает описание шага. По умолчанию: 1. +* `indexes` — Показывает используемые индексы, количество отфильтрованных частей и количество отфильтрованных гранул для каждого применённого индекса. По умолчанию: 0. Поддерживается для таблиц [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). Начиная с ClickHouse >= v25.9, эта команда даёт содержательный вывод только при использовании вместе с `SETTINGS use_query_condition_cache = 0, use_skip_indexes_on_data_read = 0`. +* `projections` — Показывает все проанализированные проекции и их влияние на фильтрацию на уровне частей на основе условий по первичному ключу проекции. Для каждой проекции этот раздел включает статистику, такую как количество частей, строк, меток и диапазонов, которые были обработаны с использованием её первичного ключа. Также показывает, сколько частей данных было пропущено благодаря этой фильтрации, без чтения из самой проекции. То, была ли проекция фактически использована для чтения или только проанализирована для фильтрации, можно определить по полю `description`. По умолчанию: 0. Поддерживается для таблиц [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). +* `actions` — Печатает подробную информацию о действиях шага. По умолчанию: 0. 
+* `json` — Печатает шаги плана запроса как строку в формате [JSON](/interfaces/formats/JSON). По умолчанию: 0. Рекомендуется использовать формат [TabSeparatedRaw (TSVRaw)](/interfaces/formats/TabSeparatedRaw), чтобы избежать лишнего экранирования. +* `input_headers` - Печатает входные заголовки для шага. По умолчанию: 0. В основном полезно только разработчикам для отладки проблем, связанных с несоответствием входных и выходных заголовков. +* `column_structure` - Дополнительно печатает структуру столбцов в заголовках, помимо их имени и типа. По умолчанию: 0. В основном полезно только разработчикам для отладки проблем, связанных с несоответствием входных и выходных заголовков. +* `distributed` — Показывает планы запросов, выполняемые на удалённых узлах для distributed таблиц или параллельных реплик. По умолчанию: 0. Когда `json=1`, имена шагов будут содержать дополнительный суффикс с уникальным идентификатором шага. @@ -211,10 +208,10 @@ Union ``` :::note -Оценка стоимости шагов и всего запроса не поддерживается. +Оценка стоимости шагов и запросов не поддерживается. ::: -Когда `json = 1`, план запроса представлен в формате JSON. Каждый узел — это словарь, который всегда содержит ключи `Node Type` и `Plans`. `Node Type` — это строка с именем шага. `Plans` — это массив с описаниями дочерних шагов. В зависимости от типа узла и настроек могут быть добавлены другие необязательные ключи. +Когда `json = 1`, план запроса представляется в формате JSON. Каждый узел представляет собой словарь, который всегда содержит ключи `Node Type` и `Plans`. `Node Type` — строка с именем шага. `Plans` — массив с описаниями дочерних шагов. В зависимости от типа узла и настроек могут быть добавлены дополнительные необязательные ключи. Пример: @@ -264,7 +261,7 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw; } ``` -При `header` = 1 к шагу добавляется ключ `Header` в виде массива столбцов. 
+При значении `header` = 1 ключ `Header` добавляется к шагу в виде массива столбцов. Пример: @@ -401,8 +398,7 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy; ] ``` - -При `actions` = 1 добавленные ключи зависят от типа шага. +Если `actions` = 1, добавляемые ключи зависят от типа шага. Пример: @@ -461,6 +457,50 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw; ] ``` +При `distributed` = 1 вывод включает не только локальный план запроса, но и планы запросов, которые будут выполняться на удалённых узлах. Это полезно для анализа и отладки распределённых запросов. + +Пример с distributed таблицей: + +```sql +EXPLAIN distributed=1 SELECT * FROM remote('127.0.0.{1,2}', numbers(2)) WHERE number = 1; +``` + +```sql +Union + Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + Projection))))) + Filter ((WHERE + Change column names to column identifiers)) + ReadFromSystemNumbers + Expression ((Project names + (Projection + Change column names to column identifiers))) + ReadFromRemote (Read from remote replica) + Expression ((Project names + Projection)) + Filter ((WHERE + Change column names to column identifiers)) + ReadFromSystemNumbers +``` + +Пример с параллельными репликами: + +```sql +SET enable_parallel_replicas = 2, max_parallel_replicas = 2, cluster_for_parallel_replicas = 'default'; + +EXPLAIN distributed=1 SELECT sum(number) FROM test_table GROUP BY number % 4; +``` + +```sql +Expression ((Project names + Projection)) + MergingAggregated + Union + Aggregating + Expression ((Before GROUP BY + Change column names to column identifiers)) + ReadFromMergeTree (default.test_table) + ReadFromRemoteParallelReplicas + BlocksMarshalling + Aggregating + Expression ((Before GROUP BY + Change column names to column identifiers)) + ReadFromMergeTree (default.test_table) +``` + +В обоих примерах план запроса показывает полный процесс выполнения, включая 
локальные и удалённые этапы. + ### EXPLAIN PIPELINE {#explain-pipeline} @@ -494,7 +534,6 @@ ExpressionTransform NumbersRange × 2 0 → 1 ``` - ### EXPLAIN ESTIMATE {#explain-estimate} Показывает приблизительное количество строк, меток и частей, которые нужно прочитать из таблиц при обработке запроса. Работает с таблицами семейства [MergeTree](/engines/table-engines/mergetree-family/mergetree). @@ -523,7 +562,6 @@ EXPLAIN ESTIMATE SELECT * FROM ttt; └──────────┴───────┴───────┴──────┴───────┘ ``` - ### EXPLAIN TABLE OVERRIDE {#explain-table-override} Показывает результат применения переопределения таблицы к схеме таблицы, доступной через табличную функцию. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md index b2c7ef69302..979ec48eb66 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md @@ -9,7 +9,6 @@ doc_type: 'reference' import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # Команда GRANT {#grant-statement} - Предоставляет [привилегии](#privileges) учетным записям пользователей ClickHouse или ролям. @@ -30,7 +29,6 @@ GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.ta Предложение `WITH GRANT OPTION` предоставляет `user` или `role` право выполнять запрос `GRANT`. Пользователи могут предоставлять привилегии того же уровня и уже по охвату, чем те, которыми они обладают. Предложение `WITH REPLACE OPTION` заменяет старые привилегии новыми для `user` или `role`; если оно не указано, привилегии добавляются. - ## Синтаксис назначения роли {#assigning-role-syntax} ```sql @@ -43,7 +41,6 @@ GRANT [ON CLUSTER cluster_name] role [,...] 
TO {user | another_role | CURRENT_US Предложение `WITH ADMIN OPTION` предоставляет привилегию [ADMIN OPTION](#admin-option) для `user` или `role`. Предложение `WITH REPLACE OPTION` заменяет старые роли новыми для `user` или `role`; если оно не указано, новые роли добавляются к существующим. - ## Синтаксис оператора GRANT CURRENT GRANTS {#grant-current-grants-syntax} ```sql @@ -57,7 +54,6 @@ GRANT CURRENT GRANTS{(privilege[(column_name [,...])] [,...] ON {db.table|db.*|* Использование оператора `CURRENT GRANTS` позволяет выдать все указанные привилегии заданному пользователю или роли. Если ни одна привилегия не была указана, заданный пользователь или роль получит все доступные привилегии текущего пользователя (`CURRENT_USER`). - ## Использование {#usage} Чтобы использовать `GRANT`, ваша учетная запись должна иметь привилегию `GRANT OPTION`. Вы можете выдавать привилегии только в рамках привилегий вашей учетной записи. @@ -87,7 +83,6 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION Вы можете выдать несколько привилегий нескольким учётным записям в одном запросе. Запрос `GRANT SELECT, INSERT ON *.* TO john, robin` позволяет учётным записям `john` и `robin` выполнять запросы `INSERT` и `SELECT` ко всем таблицам во всех базах данных на сервере. - ## Права с использованием подстановочных символов {#wildcard-grants} При указании привилегий вы можете использовать звёздочку (`*`) вместо имени таблицы или базы данных. Например, запрос `GRANT SELECT ON db.* TO john` позволяет пользователю `john` выполнять запрос `SELECT` для всех таблиц в базе данных `db`. @@ -139,7 +134,6 @@ GRANT SELECT ON *suffix TO john -- некорректно GRANT SELECT(foo) ON db.table* TO john -- некорректно ``` - ## Привилегии {#privileges} Привилегия — это право, предоставляемое пользователю на выполнение определённых видов запросов. 
@@ -399,7 +393,6 @@ GRANT SELECT(x,y) ON db.table TO john Эта привилегия позволяет `john` выполнять любые запросы `SELECT`, которые обращаются к данным из столбцов `x` и/или `y` таблицы `db.table`, например `SELECT x FROM db.table`. `john` не может выполнять `SELECT z FROM db.table`. Запрос `SELECT * FROM db.table` также недоступен. При выполнении этого запроса ClickHouse не возвращает никаких данных, даже `x` и `y`. Единственное исключение — если таблица содержит только столбцы `x` и `y`, в таком случае ClickHouse возвращает все данные. - ### INSERT {#insert} Позволяет выполнять запросы [INSERT](../../sql-reference/statements/insert-into.md). @@ -418,7 +411,6 @@ GRANT INSERT(x,y) ON db.table TO john Предоставленная привилегия позволяет пользователю `john` вставлять данные в столбцы `x` и/или `y` таблицы `db.table`. - ### ALTER {#alter} Позволяет выполнять запросы [ALTER](../../sql-reference/statements/alter/index.md) в соответствии со следующей иерархией привилегий: @@ -510,7 +502,6 @@ GRANT CLUSTER ON *.* TO ``` - ### DROP {#drop} Позволяет выполнять запросы [DROP](../../sql-reference/statements/drop.md) и [DETACH](../../sql-reference/statements/detach.md) в соответствии со следующей иерархией прав доступа: @@ -737,7 +728,6 @@ GRANT CURRENT GRANTS(READ ON S3) TO alice * **Частичный отзыв прав не допускается:** вы не можете отозвать только часть ранее выданного шаблона фильтра. Необходимо отозвать весь `GRANT` и при необходимости выдать его заново с новыми шаблонами. * **Выдача прав с использованием только `wildcard` не допускается:** вы не можете использовать `GRANT READ ON *('regexp')` или аналогичные шаблоны, состоящие только из `wildcard`. Должен быть указан конкретный источник. - ### dictGet {#dictget} - `dictGet`. 
Алиасы: `dictHas`, `dictGetHierarchy`, `dictIsIn` diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md index f3bacec6ed6..fbb586f3c71 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md @@ -7,8 +7,6 @@ title: 'Оператор INSERT INTO' doc_type: 'reference' --- - - # Оператор INSERT INTO {#insert-into-statement} Вставляет данные в таблицу. @@ -105,13 +103,10 @@ INSERT INTO table SETTINGS ... FORMAT format_name набор_данных ::: - ## Ограничения {#constraints} Если у таблицы есть [ограничения](../../sql-reference/statements/create/table.md#constraints), их выражения проверяются для каждой строки вставляемых данных. Если какое-либо из этих ограничений не удовлетворено, сервер выбросит исключение с именем ограничения и его выражением, а выполнение запроса будет прекращено. - - ## Вставка результатов запроса SELECT {#inserting-the-results-of-select} **Синтаксис** @@ -138,7 +133,6 @@ INSERT INTO x WITH y AS (SELECT * FROM numbers(10)) SELECT * FROM y; WITH y AS (SELECT * FROM numbers(10)) INSERT INTO x SELECT * FROM y; ``` - ## Вставка данных из файла {#inserting-data-from-a-file} **Синтаксис** @@ -197,7 +191,6 @@ INSERT INTO infile_globs FROM INFILE 'input_?.csv' FORMAT CSV; ::: - ## Вставка с использованием табличной функции {#inserting-using-a-table-function} Данные можно вставлять в таблицы, на которые ссылаются [табличные функции](../../sql-reference/table-functions/index.md). @@ -227,7 +220,6 @@ SELECT * FROM simple_table; └─────┴───────────────────────┘ ``` - ## Вставка в ClickHouse Cloud {#inserting-into-clickhouse-cloud} По умолчанию сервисы ClickHouse Cloud предоставляют несколько реплик для обеспечения высокой доступности. 
При подключении к сервису устанавливается соединение с одной из этих реплик. @@ -242,15 +234,12 @@ SELECT .... SETTINGS select_sequential_consistency = 1; Обратите внимание, что использование `select_sequential_consistency` увеличит нагрузку на ClickHouse Keeper (который используется в ClickHouse Cloud) и может привести к снижению производительности в зависимости от нагрузки на сервис. Мы не рекомендуем включать эту настройку без необходимости. Рекомендуемый подход — выполнять операции чтения и записи в рамках одного сеанса или использовать клиентский драйвер, который работает по нативному протоколу (и, соответственно, поддерживает «липкие» подключения). - ## Вставка в реплицируемую конфигурацию {#inserting-into-a-replicated-setup} В реплицируемой конфигурации данные становятся видимыми на других репликах после того, как они были реплицированы. Репликация данных (загрузка на другие реплики) начинается сразу после выполнения `INSERT`. Это отличается от ClickHouse Cloud, где данные немедленно записываются в общее хранилище, а реплики подписываются на изменения метаданных. Обратите внимание, что в реплицируемых конфигурациях операции `INSERT` иногда могут занимать заметное время (порядка одной секунды), так как требуется фиксация в ClickHouse Keeper для достижения распределённого консенсуса. Использование S3 в качестве хранилища также добавляет дополнительную задержку. - - ## Особенности производительности {#performance-considerations} `INSERT` сортирует входные данные по первичному ключу и разбивает их на партиции по ключу партиционирования. Если вы вставляете данные сразу в несколько партиций, это может значительно снизить производительность запроса `INSERT`. 
Чтобы этого избежать: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md index 0723e5b1c90..cbda61d11d2 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md @@ -7,14 +7,10 @@ title: 'Предложение PARALLEL WITH' doc_type: 'reference' --- - - # Предложение PARALLEL WITH {#parallel-with-clause} Позволяет выполнять несколько операторов параллельно. - - ## Синтаксис {#syntax} ```sql @@ -25,7 +21,6 @@ doc_type: 'reference' Параллельное выполнение операторов во многих случаях может быть быстрее, чем их последовательный запуск. Например, `statement1 PARALLEL WITH statement2 PARALLEL WITH statement3` скорее всего будет выполнено быстрее, чем `statement1; statement2; statement3`. - ## Примеры {#examples} Создаёт две таблицы одновременно: @@ -44,13 +39,10 @@ PARALLEL WITH DROP TABLE table2; ``` - ## Настройки {#settings} Параметр [max_threads](../../operations/settings/settings.md#max_threads) определяет, сколько потоков будет создано. - - ## Сравнение с UNION {#comparison-with-union} Конструкция `PARALLEL WITH` немного похожа на [UNION](select/union.md), который также выполняет свои операнды параллельно. Однако есть некоторые отличия: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md index 0036a315417..71987faf2e6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md @@ -7,14 +7,10 @@ title: 'Оператор REVOKE' doc_type: 'reference' --- - - # Оператор REVOKE {#revoke-statement} Отзывает привилегии у пользователей или ролей. 
- - ## Синтаксис {#syntax} **Отмена привилегий для пользователей** @@ -29,7 +25,6 @@ REVOKE [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.t OTMENITЬ [ON CLUSTER cluster_name] [ADMIN OPTION FOR] role [,...] FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] ``` - ## Описание {#description} Чтобы отозвать какую‑либо привилегию, вы можете использовать привилегию более широкого уровня, чем та, которую планируете отозвать. Например, если у пользователя есть привилегия `SELECT (x,y)`, администратор может выполнить запрос `REVOKE SELECT(x,y) ...`, или `REVOKE SELECT * ...`, или даже `REVOKE ALL PRIVILEGES ...`, чтобы отозвать эту привилегию. @@ -38,8 +33,6 @@ OTMENITЬ [ON CLUSTER cluster_name] [ADMIN OPTION FOR] role [,...] FROM {user | Вы можете отозвать часть привилегии. Например, если у пользователя есть привилегия `SELECT *.*`, вы можете отозвать у него привилегию на чтение данных из некоторой таблицы или базы данных. - - ## Примеры {#examples} Предоставьте учётной записи пользователя `john` привилегию SELECT для всех баз данных, кроме базы данных `accounts`: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md index 2fc449d0a76..7db0ef78642 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md @@ -7,21 +7,16 @@ keywords: ['APPLY', 'modifier'] doc_type: 'reference' --- - - # Модификатор APPLY {#apply} > Позволяет выполнить функцию для каждой строки, возвращаемой внешним табличным выражением запроса. 
- - ## Синтаксис {#syntax} ```sql SELECT APPLY( ) FROM [db.]table_name ``` - ## Пример {#example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md index 2d6302b1f83..8c15aab6216 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md @@ -6,8 +6,6 @@ title: 'Оператор ARRAY JOIN' doc_type: 'reference' --- - - # Оператор ARRAY JOIN {#array-join-clause} Для таблиц, содержащих столбец-массив, часто требуется получить новую таблицу, в которой для каждого отдельного элемента массива исходного столбца создаётся отдельная строка, а значения остальных столбцов дублируются. Это базовый случай работы оператора `ARRAY JOIN`. @@ -29,7 +27,6 @@ FROM * `ARRAY JOIN` — по умолчанию пустые массивы не включаются в результат `JOIN`. * `LEFT ARRAY JOIN` — результат `JOIN` содержит строки с пустыми массивами. Значение для пустого массива устанавливается в значение по умолчанию для типа элемента массива (обычно 0, пустая строка или NULL). - ## Базовые примеры ARRAY JOIN {#basic-array-join-examples} ### ARRAY JOIN и LEFT ARRAY JOIN {#array-join-left-array-join-examples} @@ -151,7 +148,6 @@ ORDER BY Reaches DESC LIMIT 10 ``` - ```text ┌──GoalID─┬─Reaches─┬─Visits─┐ │ 53225 │ 3214 │ 1097 │ @@ -167,7 +163,6 @@ LIMIT 10 └─────────┴─────────┴────────┘ ``` - ## Использование псевдонимов {#using-aliases} Для массива можно задать псевдоним в предложении `ARRAY JOIN`. В этом случае к элементу массива можно обратиться по этому псевдониму, но сам массив по‑прежнему доступен по исходному имени. 
Пример: @@ -254,7 +249,6 @@ FROM arrays_test ARRAY JOIN arr AS a, [['a','b'],['c']] AS b SETTINGS enable_unaligned_array_join = 1; ``` - ```response ┌─s───────┬─arr─────┬─a─┬─b─────────┐ │ Привет │ [1,2] │ 1 │ ['a','b'] │ @@ -267,7 +261,6 @@ SETTINGS enable_unaligned_array_join = 1; └─────────┴─────────┴───┴───────────┘ ``` - ## ARRAY JOIN с вложенной структурой данных {#array-join-with-nested-data-structure} `ARRAY JOIN` также работает с [вложенными структурами данных](../../../sql-reference/data-types/nested-data-structures/index.md): @@ -371,7 +364,6 @@ FROM nested_test ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; ``` - ```response ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ │ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ @@ -382,7 +374,6 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; └───────┴─────┴─────┴─────────┴────────────┴─────┘ ``` - ## Подробности реализации {#implementation-details} Порядок выполнения запроса оптимизируется при использовании `ARRAY JOIN`. Хотя `ARRAY JOIN` всегда должен указываться в запросе перед секцией [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md), технически они могут выполняться в любом порядке, если только результат `ARRAY JOIN` не используется для фильтрации. Порядок обработки контролируется оптимизатором запросов. @@ -393,8 +384,6 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; `arrayJoin` всегда выполняется и не поддерживает вычисление функций с коротким замыканием. Это связано с тем, что это особая функция, которая обрабатывается отдельно от всех прочих функций при анализе и выполнении запроса и требует дополнительной логики, несовместимой с коротким замыканием при вычислении. Причина в том, что количество строк в результате зависит от результата `arrayJoin`, и реализация отложенного (lazy) выполнения `arrayJoin` была бы слишком сложной и ресурсоёмкой. 
- - ## Связанные материалы {#related-content} - Блог: [Работа с временными рядами в ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md index 372d8d19d47..4d0c570bb79 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md @@ -6,8 +6,6 @@ title: 'Оператор DISTINCT' doc_type: 'reference' --- - - # Оператор DISTINCT {#distinct-clause} Если указан `SELECT DISTINCT`, в результате запроса останутся только уникальные строки. Таким образом, для каждого набора полностью совпадающих строк в результате останется только одна строка. @@ -56,7 +54,6 @@ SELECT DISTINCT ON (a,b) * FROM t1; └───┴───┴───┘ ``` - ## DISTINCT и ORDER BY {#distinct-and-order-by} ClickHouse поддерживает использование конструкций `DISTINCT` и `ORDER BY` для разных столбцов в одном запросе. Конструкция `DISTINCT` выполняется раньше, чем `ORDER BY`. @@ -104,13 +101,10 @@ SELECT DISTINCT a FROM t1 ORDER BY b DESC; При написании запросов учитывайте эту особенность реализации. - ## Обработка NULL {#null-processing} `DISTINCT` работает с [NULL](/sql-reference/syntax#null) так, как будто `NULL` является обычным конкретным значением и при этом `NULL == NULL`. Другими словами, в результатах `DISTINCT` каждая комбинация с `NULL` появляется только один раз. Это отличается от обработки `NULL` в большинстве других контекстов. - - ## Альтернативы {#alternatives} Тот же результат можно получить, применяя [GROUP BY](/sql-reference/statements/select/group-by) к тому же набору значений, который указан в предложении `SELECT`, без использования агрегатных функций. 
Но есть несколько отличий по сравнению с подходом, основанным на `GROUP BY`: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md index cf6cef55f97..04e5edad0e2 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md @@ -7,21 +7,16 @@ keywords: ['EXCEPT', 'modifier'] doc_type: 'reference' --- - - # Модификатор EXCEPT {#except} > Указывает имена одного или нескольких столбцов, которые следует исключить из результата. Все столбцы с такими именами исключаются из вывода. - - ## Синтаксис {#syntax} ```sql SELECT EXCEPT ( col_name1 [, col_name2, col_name3, ...] ) FROM [db.]table_name ``` - ## Примеры {#examples} ```sql title="Query" diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md index 962651ab5bc..0d0124def0f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md @@ -6,22 +6,16 @@ title: 'Клауза FORMAT' doc_type: 'reference' --- - - # Оператор FORMAT {#format-clause} ClickHouse поддерживает широкий спектр [форматов сериализации](../../../interfaces/formats.md), которые, среди прочего, могут использоваться для результатов запросов. Существует несколько способов выбрать формат для вывода результата `SELECT`, один из них — указать `FORMAT format` в конце запроса, чтобы получить данные в нужном формате. Определённый формат может использоваться для удобства, интеграции с другими системами или повышения производительности. 
- - ## Формат по умолчанию {#default-format} Если предложение `FORMAT` не указано, используется формат по умолчанию, который зависит как от настроек, так и от интерфейса, используемого для доступа к серверу ClickHouse. Для [HTTP-интерфейса](../../../interfaces/http.md) и [клиента командной строки](../../../interfaces/cli.md) в пакетном режиме форматом по умолчанию является `TabSeparated`. Для клиента командной строки в интерактивном режиме форматом по умолчанию является `PrettyCompact` (он выводит компактные, удобочитаемые таблицы). - - ## Подробности реализации {#implementation-details} При использовании клиентского приложения командной строки данные всегда передаются по сети во внутреннем эффективном формате (`Native`). Клиент самостоятельно интерпретирует оператор `FORMAT` в запросе и сам форматирует данные (тем самым разгружая сеть и сервер от дополнительной нагрузки). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md index 5ff87e15dcd..d692742042b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md @@ -6,8 +6,6 @@ title: 'Предложение FROM' doc_type: 'reference' --- - - # Секция FROM {#from-clause} Секция `FROM` задаёт источник, из которого читаются данные: @@ -29,7 +27,6 @@ FROM table SELECT * ``` - ## Модификатор FINAL {#final-modifier} Когда указан `FINAL`, ClickHouse полностью объединяет данные перед возвратом результата. При этом также выполняются все преобразования данных, которые происходят во время слияний для данного движка таблицы. @@ -78,7 +75,6 @@ SET final = 1; SELECT x, y FROM mytable WHERE x > 1; ``` - ## Подробности реализации {#implementation-details} Если предложение `FROM` опущено, данные будут прочитаны из таблицы `system.one`. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md index e9023c78a3c..0e0d7abf207 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md @@ -6,8 +6,6 @@ title: 'Оператор GROUP BY' doc_type: 'reference' --- - - # Оператор GROUP BY {#group-by-clause} Оператор `GROUP BY` переводит запрос `SELECT` в режим агрегации, который работает следующим образом: @@ -22,8 +20,6 @@ doc_type: 'reference' Существует дополнительный способ выполнения агрегации над таблицей. Если в запросе столбцы таблицы встречаются только внутри агрегатных функций, оператор `GROUP BY` можно опустить, и будет подразумеваться агрегация по пустому набору ключей. Такие запросы всегда возвращают ровно одну строку. ::: - - ## Обработка NULL {#null-processing} При группировке ClickHouse интерпретирует [NULL](/sql-reference/syntax#null) как значение, и `NULL == NULL`. Это отличается от обработки `NULL` в большинстве других контекстов. @@ -56,7 +52,6 @@ doc_type: 'reference' Если указать в `GROUP BY` несколько ключей, в результате вы получите все комбинации выборки, как если бы `NULL` рассматривался как конкретное значение. - ## Модификатор ROLLUP {#rollup-modifier} Модификатор `ROLLUP` используется для вычисления промежуточных итогов для ключевых выражений в соответствии с их порядком в списке `GROUP BY`. Строки с промежуточными итогами добавляются после результирующей таблицы. @@ -130,7 +125,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH ROLLUP; * Параметр [group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) для обеспечения совместимости со стандартом SQL. 
- ## Модификатор CUBE {#cube-modifier} Модификатор `CUBE` используется для вычисления промежуточных итогов для каждой комбинации ключевых выражений в списке `GROUP BY`. Строки с промежуточными итогами добавляются после результирующей таблицы. @@ -175,7 +169,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY CUBE(year, month, day); Столбцы, не включённые в `GROUP BY`, заполняются нулями. - ```text ┌─year─┬─month─┬─day─┬─count()─┐ │ 2020 │ 10 │ 15 │ 1 │ @@ -229,7 +222,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH CUBE; * Настройка [group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) для совместимости со стандартом SQL. - ## Модификатор WITH TOTALS {#with-totals-modifier} Если указан модификатор `WITH TOTALS`, будет вычислена дополнительная строка. В этой строке ключевые столбцы будут содержать значения по умолчанию (нули или пустые строки), а столбцы с агрегирующими функциями — значения, вычисленные по всем строкам (итоговые значения). @@ -266,8 +258,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH CUBE; Вы можете использовать `WITH TOTALS` во вложенных подзапросах, включая подзапросы в предложении [JOIN](/sql-reference/statements/select/join.md) (в этом случае соответствующие итоговые значения объединяются). - - ## GROUP BY ALL {#group-by-all} `GROUP BY ALL` эквивалентен перечислению в предложении SELECT всех выражений, которые не являются агрегатными функциями. @@ -316,7 +306,6 @@ FROM t GROUP BY substring(a, 4, 2), substring(a, 1, 2) ``` - ## Примеры {#examples} Пример: @@ -344,7 +333,6 @@ GROUP BY domain Для каждого различного значения ключа оператор `GROUP BY` вычисляет набор значений агрегатных функций. - ## Модификатор GROUPING SETS {#grouping-sets-modifier} Это самый общий модификатор. @@ -382,7 +370,6 @@ GROUPING SETS * настройку [group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) для обеспечения совместимости со стандартом SQL. 
- ## Подробности реализации {#implementation-details} Агрегация — одна из важнейших функций колоночной СУБД, и, следовательно, её реализация является одной из наиболее оптимизированных частей ClickHouse. По умолчанию агрегация выполняется в памяти с использованием хеш-таблицы. Для неё существует более 40 специализаций, которые выбираются автоматически в зависимости от типов данных «ключа группировки». diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md index 768c4c0caca..c9bbb092f21 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md @@ -7,8 +7,6 @@ keywords: ['INNER JOIN', 'LEFT JOIN', 'LEFT OUTER JOIN', 'RIGHT JOIN', 'RIGHT OU doc_type: 'справочник' --- - - # Оператор JOIN {#join-clause} Оператор `JOIN` формирует новую таблицу, объединяя столбцы из одной или нескольких таблиц по общим для них значениям. Это распространённая операция в базах данных с поддержкой SQL, которая соответствует операции соединения в [реляционной алгебре](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators). Особый случай соединения таблицы с самой собой часто называют «self-join». @@ -24,7 +22,6 @@ FROM Выражения из предложения `ON` и столбцы из предложения `USING` называются «ключами соединения». Если не указано иное, оператор `JOIN` формирует [декартово произведение](https://en.wikipedia.org/wiki/Cartesian_product) строк с совпадающими «ключами соединения», что может приводить к получению результата с гораздо большим количеством строк, чем в исходных таблицах. 
- ## Поддерживаемые типы JOIN {#supported-types-of-join} Поддерживаются все стандартные типы [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)): @@ -55,8 +52,6 @@ FROM Когда [join_algorithm](../../../operations/settings/settings.md#join_algorithm) установлен в значение `partial_merge`, `RIGHT JOIN` и `FULL JOIN` поддерживаются только со строгостью `ALL` (`SEMI`, `ANTI`, `ANY` и `ASOF` не поддерживаются). ::: - - ## Настройки {#settings} Тип соединения по умолчанию можно переопределить с помощью настройки [`join_default_strictness`](../../../operations/settings/settings.md#join_default_strictness). @@ -74,8 +69,6 @@ FROM Используйте настройку `cross_to_inner_join_rewrite`, чтобы задать поведение на случай, если ClickHouse не может переписать `CROSS JOIN` в `INNER JOIN`. Значение по умолчанию — `1`, при котором соединение продолжает выполняться, но будет работать медленнее. Установите `cross_to_inner_join_rewrite` в `0`, если вы хотите, чтобы генерировалась ошибка, и в `2` — чтобы не выполнять операции `CROSS JOIN`, а вместо этого принудительно переписывать все соединения через запятую/`CROSS JOIN`. Если при значении `2` переписать не удаётся, вы получите сообщение об ошибке: «Please, try to simplify `WHERE` section». - - ## Условия в секции ON {#on-section-conditions} Секция `ON` может содержать несколько условий, объединённых операторами `AND` и `OR`. Условия, определяющие ключи соединения, должны: @@ -167,7 +160,6 @@ SELECT a, b, val FROM t1 INNER JOIN t2 ON t1.a = t2.key OR t1.b = t2.key; :::note - По умолчанию условия с операторами неравенства поддерживаются, если в них используются столбцы из одной и той же таблицы. Например, `t1.a = t2.key AND t1.b > 0 AND t2.b > t2.c`, поскольку `t1.b > 0` использует столбцы только из `t1`, а `t2.b > t2.c` использует столбцы только из `t2`. Однако вы можете включить экспериментальную поддержку условий вида `t1.a = t2.key AND t1.b > t2.key`; подробности см. в разделе ниже. 
@@ -188,7 +180,6 @@ SELECT a, b, val FROM t1 INNER JOIN t2 ON t1.a = t2.key OR t1.b = t2.key AND t2. └───┴────┴─────┘ ``` - ## JOIN с условиями неравенства для столбцов из разных таблиц {#join-with-inequality-conditions-for-columns-from-different-tables} ClickHouse в настоящее время поддерживает `ALL/ANY/SEMI/ANTI INNER/LEFT/RIGHT/FULL JOIN` с условиями неравенства в дополнение к условиям равенства. Условия неравенства поддерживаются только для алгоритмов соединения `hash` и `grace_hash`. Условия неравенства не поддерживаются при `join_use_nulls`. @@ -239,7 +230,6 @@ key2 a2 1 1 1 0 0 \N key4 f 2 3 4 0 0 \N ``` - ## Значения NULL в ключах JOIN {#null-values-in-join-keys} `NULL` не равно ни одному значению, включая само себя. Это означает, что если ключ `JOIN` содержит значение `NULL` в одной таблице, оно не будет соответствовать значению `NULL` в другой таблице. @@ -294,7 +284,6 @@ SELECT A.name, B.score FROM A LEFT JOIN B ON isNotDistinctFrom(A.id, B.id) └─────────┴───────┘ ``` - ## Использование ASOF JOIN {#asof-join-usage} `ASOF JOIN` полезен, когда нужно соединить записи, для которых нет точного совпадения. @@ -349,7 +338,6 @@ USING (столбец_равенства1, ... столбец_равенства Он **не** поддерживается в табличном движке [Join](../../../engines/table-engines/special/join.md). ::: - ## Использование PASTE JOIN {#paste-join-usage} Результат `PASTE JOIN` — таблица, содержащая все столбцы из левого подзапроса, за которыми следуют все столбцы из правого подзапроса. @@ -408,7 +396,6 @@ SETTINGS max_block_size = 2; └───┴──────┘ ``` - ## Распределённый JOIN {#distributed-join} Существует два способа выполнить JOIN с участием распределённых таблиц: @@ -418,8 +405,6 @@ SETTINGS max_block_size = 2; Будьте осторожны при использовании `GLOBAL`. Дополнительную информацию см. в разделе [Распределённые подзапросы](/sql-reference/operators/in#distributed-subqueries). 
- - ## Неявное преобразование типов {#implicit-type-conversion} Запросы `INNER JOIN`, `LEFT JOIN`, `RIGHT JOIN` и `FULL JOIN` поддерживают неявное преобразование типов для «ключей соединения». Однако запрос не может быть выполнен, если ключи соединения из левой и правой таблиц не могут быть приведены к одному типу (например, не существует типа данных, который может содержать все значения как из `UInt64`, так и из `Int64`, или из `String` и `Int32`). @@ -462,7 +447,6 @@ SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b); └────┴──────┴───────────────┴─────────────────┘ ``` - ## Рекомендации по использованию {#usage-recommendations} ### Обработка пустых или NULL-ячеек {#processing-of-empty-or-null-cells} @@ -510,8 +494,6 @@ SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b); Когда достигается любой из этих лимитов, ClickHouse действует в соответствии с настройкой [join_overflow_mode](/operations/settings/settings#join_overflow_mode). - - ## Примеры {#examples} Пример: @@ -555,7 +537,6 @@ LIMIT 10 └───────────┴────────┴────────┘ ``` - ## Связанные материалы {#related-content} - Блог: [ClickHouse: молниеносно быстрая СУБД с полной поддержкой операторов SQL JOIN — часть 1](https://clickhouse.com/blog/clickhouse-fully-supports-joins) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md index d1d5f9c82c1..5c758b1bcf6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md @@ -42,7 +42,6 @@ LIMIT n, m В обоих вариантах `n` и `m` должны быть неотрицательными целыми числами. 
- ## Отрицательные ограничения {#negative-limits} Выбирайте строки с *конца* набора результатов, используя отрицательные значения: @@ -81,7 +80,6 @@ LIMIT 10 OFFSET 0.5 -- 10 строк, начиная с половины LIMIT 10 OFFSET -20 -- 10 строк после пропуска последних 20 строк ``` - ## LIMIT ... WITH TIES {#limit--with-ties-modifier} Модификатор `WITH TIES` включает дополнительные строки, имеющие те же значения `ORDER BY`, что и последняя строка в установленном лимите. @@ -129,7 +127,6 @@ SELECT * FROM ( Этот модификатор можно комбинировать с модификатором [`ORDER BY ... WITH FILL`](/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier). - ## Особенности {#considerations} **Недетерминированные результаты:** Без предложения [`ORDER BY`](../../../sql-reference/statements/select/order-by.md) возвращаемые строки могут быть произвольными и отличаться от выполнения к выполнению одного и того же запроса. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md index f21502612a1..e52a067bd23 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md @@ -6,8 +6,6 @@ title: 'Оператор ORDER BY' doc_type: 'reference' --- - - # Оператор ORDER BY {#order-by-clause} Оператор `ORDER BY` содержит: @@ -27,8 +25,6 @@ doc_type: 'reference' Строки с одинаковыми значениями сортировочных выражений возвращаются в произвольном и недетерминированном порядке. Если оператор `ORDER BY` опущен в операторе `SELECT`, порядок строк также является произвольным и недетерминированным. 
- - ## Сортировка специальных значений {#sorting-of-special-values} Существует два варианта порядка сортировки значений `NaN` и `NULL`: @@ -74,7 +70,6 @@ doc_type: 'reference' При сортировке чисел с плавающей запятой значения NaN отделяются от остальных. Независимо от порядка сортировки значения NaN всегда оказываются в конце. Другими словами, при сортировке по возрастанию они ведут себя так, как будто больше всех остальных чисел, а при сортировке по убыванию — так, как будто меньше всех остальных. - ## Поддержка collation {#collation-support} Для сортировки по значениям типа [String](../../../sql-reference/data-types/string.md) вы можете указать collation (правила сравнения). Пример: `ORDER BY SearchPhrase COLLATE 'tr'` — сортировка по ключевому слову по возрастанию с использованием турецкого алфавита, без учета регистра, при условии, что строки закодированы в UTF-8. `COLLATE` может быть указан или не указан для каждого выражения в ORDER BY независимо. Если указано `ASC` или `DESC`, то `COLLATE` указывается после него. При использовании `COLLATE` сортировка всегда выполняется без учета регистра. @@ -83,8 +78,6 @@ Collation поддерживается для типов [LowCardinality](../../ Мы рекомендуем использовать `COLLATE` только для окончательной сортировки небольшого количества строк, так как сортировка с `COLLATE` менее эффективна, чем обычная сортировка по байтам. - - ## Примеры сравнения строк {#collation-examples} Пример только со значениями [String](../../../sql-reference/data-types/string.md): @@ -229,7 +222,6 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; Пример с типом [Tuple](../../../sql-reference/data-types/tuple.md): - ```response ┌─x─┬─s───────┐ │ 1 │ (1,'Z') │ @@ -262,7 +254,6 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; └───┴─────────┘ ``` - ## Детали реализации {#implementation-details} ОЗУ расходуется меньше, если помимо `ORDER BY` указано достаточно маленькое значение [LIMIT](../../../sql-reference/statements/select/limit.md). 
В противном случае объём используемой памяти пропорционален объёму данных для сортировки. При распределённой обработке запросов, если [GROUP BY](/sql-reference/statements/select/group-by) опущен, сортировка частично выполняется на удалённых серверах, а результаты объединяются на сервере, инициировавшем запрос. Это означает, что при распределённой сортировке объём данных для сортировки может превышать объём памяти одного сервера. @@ -273,8 +264,6 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; Внешняя сортировка работает значительно менее эффективно, чем сортировка в ОЗУ. - - ## Оптимизация чтения данных {#optimization-of-data-reading} Если выражение `ORDER BY` имеет префикс, который совпадает с ключом сортировки таблицы, вы можете оптимизировать запрос с помощью настройки [optimize_read_in_order](../../../operations/settings/settings.md#optimize_read_in_order). @@ -295,8 +284,6 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; В таблицах с движком `MaterializedView` оптимизация работает с представлениями вида `SELECT ... FROM merge_tree_table ORDER BY pk`. Однако она не поддерживается в запросах вида `SELECT ... FROM view ORDER BY pk`, если запрос представления не содержит оператора `ORDER BY`. - - ## Модификатор ORDER BY Expr WITH FILL {#order-by-expr-with-fill-modifier} Этот модификатор также может быть использован совместно с [модификатором LIMIT ... WITH TIES](/sql-reference/statements/select/limit#limit--with-ties-modifier). 
@@ -385,7 +372,6 @@ ORDER BY Результат: - ```text ┌───d1───────┬───d2───────┬─source───┐ │ 1970-01-11 │ 1970-01-02 │ оригинал │ @@ -448,7 +434,6 @@ ORDER BY d2 WITH FILL; ``` - Результат: ```response @@ -615,7 +600,6 @@ SELECT n, source, inter FROM ( Результат: - ```text ┌───n─┬─source───┬─inter─┐ │ 0 │ │ 0 │ @@ -634,7 +618,6 @@ SELECT n, source, inter FROM ( └─────┴──────────┴───────┘ ``` - ## Заполнение, сгруппированное по сортировочному префиксу {#filling-grouped-by-sorting-prefix} Иногда полезно заполнять строки, которые имеют одинаковые значения в определённых столбцах, независимо друг от друга — хороший пример — заполнение пропущенных значений во временных рядах. @@ -687,7 +670,6 @@ INTERPOLATE ( value AS 9999 ) Здесь столбец `value` был заполнен значением `9999`, чтобы заполненные строки были более заметны. Это поведение управляется параметром `use_with_fill_by_sorting_prefix` (включен по умолчанию). - ## Связанные материалы {#related-content} - Блог: [Работа с временными рядами в ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md index eea6cc1366b..6aff1479961 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md @@ -6,16 +6,12 @@ title: 'Предложение PREWHERE' doc_type: 'reference' --- - - # Оператор PREWHERE {#prewhere-clause} Prewhere — это оптимизация, позволяющая применять фильтрацию более эффективно. Она включена по умолчанию, даже если оператор `PREWHERE` явно не указан. Оптимизация работает за счёт автоматического переноса части условия [WHERE](../../../sql-reference/statements/select/where.md) на этап PREWHERE. 
Роль оператора `PREWHERE` состоит только в управлении этой оптимизацией, если вы считаете, что можете настроить её лучше, чем это делается по умолчанию. С оптимизацией PREWHERE сначала считываются только те столбцы, которые необходимы для вычисления выражения PREWHERE. Затем считываются остальные столбцы, которые нужны для выполнения оставшейся части запроса, но только для тех блоков, где выражение PREWHERE равно `true` хотя бы для некоторых строк. Если есть много блоков, где выражение PREWHERE равно `false` для всех строк, и PREWHERE требует меньше столбцов, чем другие части запроса, это часто позволяет считать с диска значительно меньше данных при выполнении запроса. - - ## Ручное управление PREWHERE {#controlling-prewhere-manually} Эта конструкция имеет то же значение, что и предложение `WHERE`. Разница заключается в том, какие данные читаются из таблицы. Ручное управление `PREWHERE` целесообразно, когда условия фильтрации используются лишь для небольшой части столбцов в запросе, но обеспечивают сильную фильтрацию данных. Это уменьшает объем читаемых данных. @@ -30,14 +26,10 @@ Prewhere — это оптимизация, позволяющая примен Часть запроса `PREWHERE` выполняется до `FINAL`, поэтому результаты запросов `FROM ... FINAL` могут быть искажены при использовании `PREWHERE` с полями, которых нет в секции `ORDER BY` таблицы. ::: - - ## Ограничения {#limitations} `PREWHERE` можно использовать только с таблицами из семейства [*MergeTree](../../../engines/table-engines/mergetree-family/index.md). 
- - ## Пример {#example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md index 387875cf15e..e160b40135d 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md @@ -6,22 +6,16 @@ title: 'Предложение QUALIFY' doc_type: 'reference' --- - - # Оператор QUALIFY {#qualify-clause} Позволяет фильтровать результаты оконных функций. Аналогичен предложению [WHERE](../../../sql-reference/statements/select/where.md), но отличие в том, что `WHERE` применяется до вычисления оконных функций, тогда как `QUALIFY` — после. В `QUALIFY` можно по псевдониму ссылаться на результаты оконных функций из предложения `SELECT`. Либо предложение `QUALIFY` может фильтровать по результатам дополнительных оконных функций, которые не возвращаются в результатах запроса. - - ## Ограничения {#limitations} `QUALIFY` нельзя использовать, если в запросе нет оконных функций. Используйте вместо него `WHERE`. - - ## Примеры {#examples} Пример: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md index 021d35ea25d..339832c8eb6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md @@ -7,8 +7,6 @@ doc_type: 'reference' keywords: ['WHERE'] --- - - # Условие WHERE {#where-clause} Условие `WHERE` позволяет отфильтровать данные, полученные из предложения [`FROM`](../../../sql-reference/statements/select/from.md) запроса `SELECT`. 
@@ -26,8 +24,6 @@ PREWHERE — это оптимизация для более эффективн Она включена по умолчанию, даже если конструкция `PREWHERE` явно не указана. ::: - - ## Проверка на `NULL` {#testing-for-null} Если вам нужно проверить значение на [`NULL`](/sql-reference/syntax#null), используйте: @@ -36,8 +32,6 @@ PREWHERE — это оптимизация для более эффективн В противном случае выражение с `NULL` никогда не будет истинным. - - ## Фильтрация данных с помощью логических операторов {#filtering-data-with-logical-operators} Вы можете использовать следующие [логические функции](/sql-reference/functions/logical-functions#and) в сочетании с предложением `WHERE` для объединения нескольких условий: @@ -47,15 +41,11 @@ PREWHERE — это оптимизация для более эффективн - [`or()`](/sql-reference/functions/logical-functions#or) или `NOT` - [`xor()`](/sql-reference/functions/logical-functions#xor) - - ## Использование столбцов UInt8 в качестве условия {#using-uint8-columns-as-a-condition} В ClickHouse столбцы `UInt8` могут напрямую использоваться в булевых условиях, где `0` — это `false`, а любое ненулевое значение (обычно `1`) — `true`. Пример этого приведён в разделе [ниже](#example-uint8-column-as-condition). - - ## Использование операторов сравнения {#using-comparison-operators} Можно использовать следующие [операторы сравнения](/sql-reference/operators#comparison-operators): @@ -76,8 +66,6 @@ PREWHERE — это оптимизация для более эффективн | `a BETWEEN b AND c` | `a >= b AND a <= c` | Проверка вхождения в диапазон (включительно) | `price BETWEEN 100 AND 500` | | `a NOT BETWEEN b AND c` | `a < b OR a > c` | Проверка выхода за пределы диапазона | `price NOT BETWEEN 100 AND 500` | - - ## Сопоставление по шаблону и условные выражения {#pattern-matching-and-conditional-expressions} Помимо операторов сравнения, в предложении `WHERE` можно использовать сопоставление по шаблону и условные выражения. @@ -92,8 +80,6 @@ PREWHERE — это оптимизация для более эффективн См. 
раздел ["Сопоставление по шаблону и условные выражения"](#examples-pattern-matching-and-conditional-expressions) с примерами использования. - - ## Выражение с литералами, столбцами или подзапросами {#expressions-with-literals-columns-subqueries} Выражение после оператора `WHERE` также может включать [литералы](/sql-reference/syntax#literals), столбцы или подзапросы — вложенные операторы `SELECT`, которые возвращают значения, используемые в условиях. @@ -119,7 +105,6 @@ WHERE category = 'Electronics' AND id IN (SELECT product_id FROM bestsellers) ``` - -- Все три условия с логическими операторами WHERE (price > 100 OR category IN (SELECT category FROM featured)) AND in_stock = true @@ -240,7 +225,6 @@ WHERE (category = 'Электроника' OR category = 'Мебель') AND price < 400; ``` - ```response ┌─id─┬─name────┬─price─┬─category────┬─in_stock─┐ 1. │ 2 │ Mouse │ 25.5 │ Электроника │ true │ @@ -366,7 +350,6 @@ WHERE category = 'Электроника' AND in_stock = true; #### Примеры LIKE {#like-examples} - ```sql -- Найти продукты с буквой 'o' в названии SELECT * FROM products WHERE name LIKE '%o%'; diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md index a79f48c09e3..a285e6f1e8e 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md @@ -17,8 +17,6 @@ doc_type: 'reference' Кроме того, у пользователя должна быть привилегия [`displaySecretsInShowAndSelect`](grant.md/#displaysecretsinshowandselect). ::: - - ## SHOW CREATE TABLE | DICTIONARY | VIEW | DATABASE {#show-create-table--dictionary--view--database} Эти операторы возвращают один столбец типа String, @@ -36,7 +34,6 @@ SHOW [CREATE] TABLE | TEMPORARY TABLE | DICTIONARY | VIEW | DATABASE [db.]table| и не может быть использован для создания таблицы. 
::: - ## SHOW DATABASES {#show-databases} Эта команда выводит список всех баз данных. @@ -111,7 +108,6 @@ SHOW DATABASES LIMIT 2 * [`CREATE DATABASE`](/sql-reference/statements/create/database) - ## SHOW TABLES {#show-tables} Оператор `SHOW TABLES` отображает список таблиц. @@ -190,7 +186,6 @@ SHOW TABLES FROM system LIMIT 2 * [`CREATE TABLE`](/sql-reference/statements/create/table) * [`SHOW CREATE TABLE`](#show-create-table--dictionary--view--database) - ## SHOW COLUMNS {#show_columns} Оператор `SHOW COLUMNS` отображает список столбцов. @@ -243,7 +238,6 @@ SHOW COLUMNS FROM 'orders' LIKE 'delivery_%' * [`system.columns`](../../operations/system-tables/columns.md) - ## SHOW DICTIONARIES {#show-dictionaries} Оператор `SHOW DICTIONARIES` отображает список [словарей](../../sql-reference/dictionaries/index.md). @@ -277,7 +271,6 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2 └──────────────┘ ``` - ## SHOW INDEX {#show-index} Отображает список первичных и индексов пропуска данных таблицы. @@ -325,7 +318,6 @@ SHOW [EXTENDED] {INDEX | INDEXES | INDICES | KEYS } {FROM | IN}
[{FROM | SHOW INDEX FROM 'tbl' ``` - ```text title="Response" ┌─table─┬─non_unique─┬─key_name─┬─seq_in_index─┬─column_name─┬─collation─┬─cardinality─┬─sub_part─┬─packed─┬─null─┬─index_type───┬─comment─┬─index_comment─┬─visible─┬─expression─┐ │ tbl │ 1 │ blf_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ BLOOM_FILTER │ │ │ YES │ d, b │ @@ -342,7 +334,6 @@ SHOW INDEX FROM 'tbl' * [`system.tables`](../../operations/system-tables/tables.md) * [`system.data_skipping_indices`](../../operations/system-tables/data_skipping_indices.md) - ## SHOW PROCESSLIST {#show-processlist} Выводит содержимое таблицы [`system.processes`](/operations/system-tables/processes), в которой хранится список запросов, обрабатываемых в данный момент, за исключением запросов `SHOW PROCESSLIST`. @@ -364,7 +355,6 @@ $ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" ::: - ## SHOW GRANTS {#show-grants} Оператор `SHOW GRANTS` отображает привилегии, предоставленные пользователю. @@ -381,7 +371,6 @@ SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL] Модификатор `FINAL` объединяет все привилегии, выданные пользователю и его ролям (с учетом наследования). - ## SHOW CREATE USER {#show-create-user} Оператор `SHOW CREATE USER` выводит параметры, которые были заданы при [создании пользователя](../../sql-reference/statements/create/user.md). @@ -392,7 +381,6 @@ SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL] SHOW CREATE USER [имя1 [, имя2 ...] | CURRENT_USER] ``` - ## SHOW CREATE ROLE {#show-create-role} Оператор `SHOW CREATE ROLE` выводит параметры, использованные при [создании роли](../../sql-reference/statements/create/role.md). @@ -403,7 +391,6 @@ SHOW CREATE USER [имя1 [, имя2 ...] | CURRENT_USER] SHOW CREATE ROLE имя1 [, имя2 ...] ``` - ## SHOW CREATE ROW POLICY {#show-create-row-policy} Оператор `SHOW CREATE ROW POLICY` выводит параметры, которые были использованы при [создании политики строк](../../sql-reference/statements/create/row-policy.md). 
@@ -414,7 +401,6 @@ SHOW CREATE ROLE имя1 [, имя2 ...] SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...] ``` - ## SHOW CREATE QUOTA {#show-create-quota} Оператор `SHOW CREATE QUOTA` отображает параметры, использованные при [создании квоты](../../sql-reference/statements/create/quota.md). @@ -425,7 +411,6 @@ SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...] SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT] ``` - ## SHOW CREATE SETTINGS PROFILE {#show-create-settings-profile} Оператор `SHOW CREATE SETTINGS PROFILE` выводит параметры, которые были использованы при [создании профиля настроек](../../sql-reference/statements/create/settings-profile.md). @@ -436,7 +421,6 @@ SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT] SHOW CREATE [SETTINGS] PROFILE имя1 [, имя2 ...] ``` - ## SHOW USERS {#show-users} Оператор `SHOW USERS` возвращает список имен [учетных записей пользователей](../../guides/sre/user-management/index.md#user-account-management). @@ -448,7 +432,6 @@ SHOW CREATE [SETTINGS] PROFILE имя1 [, имя2 ...] SHOW USERS ``` - ## SHOW ROLES {#show-roles} Оператор `SHOW ROLES` возвращает список [ролей](../../guides/sre/user-management/index.md#role-management). @@ -457,8 +440,6 @@ SHOW USERS ### Синтаксис {#syntax-14} - - ```sql title="Syntax" SHOW [CURRENT|ENABLED] ROLES ``` @@ -474,7 +455,6 @@ SHOW [CURRENT|ENABLED] ROLES SHOW [SETTINGS] PROFILES ``` - ## SHOW POLICIES {#show-policies} Оператор `SHOW POLICIES` возвращает список [политик строк](../../guides/sre/user-management/index.md#row-policy-management) для указанной таблицы. @@ -486,7 +466,6 @@ SHOW [SETTINGS] PROFILES SHOW [ROW] POLICIES [ON [db.]table] ``` - ## SHOW QUOTAS {#show-quotas} Оператор `SHOW QUOTAS` возвращает список [квот](../../guides/sre/user-management/index.md#quotas-management). 
@@ -498,7 +477,6 @@ SHOW [ROW] POLICIES [ON [db.]table] SHOW QUOTAS ``` - ## SHOW QUOTA {#show-quota} Оператор `SHOW QUOTA` возвращает информацию об использовании [квот](../../operations/quotas.md) для всех пользователей или только для текущего пользователя. @@ -506,8 +484,6 @@ SHOW QUOTAS ### Синтаксис {#syntax-18} - - ```sql title="Syntax" SHOW [CURRENT] QUOTA ``` @@ -522,7 +498,6 @@ SHOW [CURRENT] QUOTA SHOW ACCESS ``` - ## SHOW CLUSTER(S) {#show-clusters} Оператор `SHOW CLUSTER(S)` возвращает список кластеров. @@ -581,7 +556,6 @@ host_address: 127.0.0.1 port: 9000 ``` - ## SHOW SETTINGS {#show-settings} Оператор `SHOW SETTINGS` возвращает список системных настроек и их значений. @@ -639,7 +613,6 @@ SHOW CHANGED SETTINGS ILIKE '%MEMORY%' └──────────────────┴────────┴─────────────┘ ``` - ## SHOW SETTING {#show-setting} Оператор `SHOW SETTING` выводит значение указанной настройки. @@ -654,7 +627,6 @@ SHOW SETTING <имя> * таблица [`system.settings`](../../operations/system-tables/settings.md) - ## Просмотр кэшей файловой системы {#show-filesystem-caches} ### Примеры {#examples-7} @@ -673,7 +645,6 @@ SHOW FILESYSTEM CACHES * таблица [`system.settings`](../../operations/system-tables/settings.md) - ## SHOW ENGINES {#show-engines} Оператор `SHOW ENGINES` выводит содержимое таблицы [`system.table_engines`](../../operations/system-tables/table_engines.md), @@ -689,7 +660,6 @@ SHOW ENGINES [INTO OUTFILE имя_файла] [FORMAT формат] * таблица [system.table_engines](../../operations/system-tables/table_engines.md) - ## SHOW FUNCTIONS {#show-functions} Оператор `SHOW FUNCTIONS` выводит содержимое таблицы [`system.functions`](../../operations/system-tables/functions.md). @@ -706,7 +676,6 @@ SHOW FUNCTIONS [LIKE | ILIKE ''] * Таблица [`system.functions`](../../operations/system-tables/functions.md) - ## SHOW MERGES {#show-merges} Оператор `SHOW MERGES` возвращает список слияний. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md index 065863b123b..905add3e354 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md @@ -290,8 +290,9 @@ SYSTEM INSTRUMENT ADD `QueryMetricLog::startQuery` SLEEP ENTRY 0 1 #### PROFILE {#instrument-add-profile} -Измеряет время, проходящее между `ENTRY` и `EXIT` функции. -Результат профилирования сохраняется в таблице [`system.trace_log`](../../operations/system-tables/trace_log.md). +Измеряет время, прошедшее между `ENTRY` и `EXIT` функции. +Результаты профилирования сохраняются в [`system.trace_log`](../../operations/system-tables/trace_log.md) и могут быть преобразованы +в [Chrome Event Trace Format](../../operations/system-tables/trace_log.md#chrome-event-trace-format). ```sql SYSTEM INSTRUMENT ADD `QueryMetricLog::startQuery` PROFILE diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md index ab330407162..0c0e7413156 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md @@ -28,7 +28,6 @@ UPDATE [db.]table [ON CLUSTER cluster] SET column1 = expr1 [, ...] [IN PARTITION `filter_expr` должен иметь тип `UInt8`. Этот запрос обновляет значения указанных столбцов, устанавливая их равными значениям соответствующих выражений в строках, для которых `filter_expr` принимает ненулевое значение. Значения приводятся к типу столбца с помощью оператора `CAST`. Обновление столбцов, используемых при вычислении первичного ключа или ключа партиционирования, не поддерживается. 
- ## Примеры {#examples} ```sql @@ -37,15 +36,12 @@ UPDATE hits SET Title = 'Новый заголовок' WHERE EventDate = today( UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; ``` - ## Облегчённые обновления не обновляют данные немедленно {#lightweight-update-does-not-update-data-immediately} Облегчённый `UPDATE` реализован с использованием **патч‑частей** (patch parts) — специального типа частей данных, которые содержат только обновлённые столбцы и строки. Облегчённый `UPDATE` создаёт патч‑части, но не изменяет исходные данные физически в хранилище немедленно. Процесс обновления аналогичен запросу `INSERT ... SELECT ...`, но запрос `UPDATE` возвращается только после завершения создания патч‑частей. - - Обновлённые значения: - **Сразу видны** в запросах `SELECT` благодаря применению патчей - **Физически материализуются** только во время последующих слияний и мутаций @@ -56,14 +52,10 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; Чтобы использовать лёгкие обновления, необходимо включить материализацию столбцов `_block_number` и `_block_offset` с помощью настроек таблицы [`enable_block_number_column`](/operations/settings/merge-tree-settings#enable_block_number_column) и [`enable_block_offset_column`](/operations/settings/merge-tree-settings#enable_block_offset_column). - - ## Легковесные операции удаления {#lightweight-delete} Запрос [легковесного `DELETE`](/sql-reference/statements/delete) может быть выполнен как легковесный `UPDATE` вместо мутации `ALTER UPDATE`. Поведение легковесного `DELETE` определяется настройкой [`lightweight_delete_mode`](/operations/settings/settings#lightweight_delete_mode). - - ## Особенности производительности {#performance-considerations} **Преимущества легковесных обновлений:** @@ -78,15 +70,11 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; - Слишком частые небольшие обновления могут привести к ошибке «too many parts» («слишком много частей»). 
Рекомендуется объединять несколько обновлений в один запрос, например, поместив идентификаторы для обновления в один оператор `IN` в условии `WHERE` - Легковесные обновления предназначены для обновления небольшого количества строк (до примерно 10% таблицы). Если необходимо обновить больший объём, рекомендуется использовать мутацию [`ALTER TABLE ... UPDATE`](/sql-reference/statements/alter/update) - - ## Одновременные операции {#concurrent-operations} Легковесные обновления не дожидаются завершения уже выполняющихся слияний и мутаций, в отличие от тяжёлых мутаций. Согласованность одновременных легковесных обновлений контролируется настройками [`update_sequential_consistency`](/operations/settings/settings#update_sequential_consistency) и [`update_parallel_mode`](/operations/settings/settings#update_parallel_mode). - - ## Права на выполнение UPDATE {#update-permissions} `UPDATE` требует привилегии `ALTER UPDATE`. Чтобы разрешить выполнение операторов `UPDATE` для конкретной таблицы определённому пользователю, выполните: @@ -95,7 +83,6 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; GRANT ALTER UPDATE ON db.table TO username; ``` - ## Подробности реализации {#details-of-the-implementation} Patch parts аналогичны обычным партам, но содержат только обновлённые столбцы и несколько системных столбцов: @@ -131,8 +118,6 @@ Patch parts могут сливаться друг с другом, чтобы Режим join медленнее и требует больше памяти, чем режим merge, но используется реже. 
- - ## Связанные материалы {#related-content} - [`ALTER UPDATE`](/sql-reference/statements/alter/update) — «тяжёлые» операции `UPDATE` diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md index a2ecdad2989..ee5db533ab6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md @@ -12,20 +12,16 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # Табличная функция azureBlobStorage {#azureblobstorage-table-function} Предоставляет табличный интерфейс для чтения и записи файлов в [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs). Эта табличная функция аналогична [функции s3](../../sql-reference/table-functions/s3.md). - - ## Синтаксис {#syntax} ```sql azureBlobStorage(- connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure, partition_strategy, partition_columns_in_data_file, extra_credentials(client_id=, tenant_id=)]) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -42,14 +38,10 @@ azureBlobStorage(- connection_string|storage_account_url, container_name, blobpa | `partition_columns_in_data_file` | Необязательный параметр. Используется только со стратегией партиционирования `HIVE`. Указывает ClickHouse, следует ли ожидать, что столбцы партиции будут записаны в файл данных. Значение по умолчанию — `false`. | | `extra_credentials` | Используйте `client_id` и `tenant_id` для аутентификации. Если указаны `extra_credentials`, они имеют приоритет над `account_name` и `account_key`. 
- - ## Возвращаемое значение {#returned_value} Таблица заданной структуры для чтения данных из указанного файла или записи их в него. - - ## Примеры {#examples} Аналогично движку таблиц [AzureBlobStorage](/engines/table-engines/integrations/azureBlobStorage), пользователи могут использовать эмулятор Azurite для локальной разработки с использованием Azure Storage. Дополнительные сведения см. [здесь](https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub%2Cblob-storage). Ниже предполагается, что Azurite доступен по имени хоста `azurite1`. @@ -89,7 +81,6 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam └─────────┘ ``` - ## Виртуальные столбцы {#virtual-columns} - `_path` — Путь к файлу. Тип: `LowCardinality(String)`. @@ -97,8 +88,6 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam - `_size` — Размер файла в байтах. Тип: `Nullable(UInt64)`. Если размер файла неизвестен, значение — `NULL`. - `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. - - ## Запись с партиционированием {#partitioned-write} ### Стратегия партиционирования {#partition-strategy} @@ -124,7 +113,6 @@ select _path, * from azureBlobStorage(azure_conf2, storage_account_url = 'http:/ └─────────────────────────────────────────────────────────────────────────────────┴────┴──────┴─────────┘ ``` - ## настройка use_hive_partitioning {#hive-style-partitioning} Это указание для ClickHouse при разборе файлов, партиционированных в стиле Hive, во время чтения. Оно не влияет на запись. Для симметричного чтения и записи используйте аргумент `partition_strategy`. 
@@ -139,7 +127,6 @@ select _path, * from azureBlobStorage(azure_conf2, storage_account_url = 'http:/ SELECT * FROM azureBlobStorage(config, storage_account_url='...', container='...', blob_path='http://data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## Использование Shared Access Signatures (SAS) {#using-shared-access-signatures-sas-sas-tokens} Shared Access Signature (SAS) — это URI, который предоставляет ограниченный доступ к контейнеру или файлу в Azure Storage. Используйте его, чтобы предоставить ограниченный по времени доступ к ресурсам учетной записи хранения без передачи ключа учетной записи хранения. Подробнее [здесь](https://learn.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature). @@ -172,6 +159,5 @@ FROM azureBlobStorage('https://clickhousedocstest.blob.core.windows.net/?sp=r&st Получена 1 строка. Прошло: 0,153 сек. ``` - ## См. также {#related} - [Движок таблицы AzureBlobStorage](engines/table-engines/integrations/azureBlobStorage.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md index 44bcf1f40a6..61ff92f8875 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md @@ -7,22 +7,17 @@ title: 'azureBlobStorageCluster' doc_type: 'reference' --- - - # Табличная функция azureBlobStorageCluster {#azureblobstoragecluster-table-function} Позволяет обрабатывать файлы из [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) параллельно на множестве узлов в указанном кластере. 
На узле-инициаторе создаётся подключение ко всем узлам кластера, раскрываются звёздочки в пути к файлу S3, и каждый файл динамически распределяется между узлами. Рабочий узел запрашивает у инициатора следующую задачу и обрабатывает её. Это повторяется до тех пор, пока все задачи не будут завершены. Эта табличная функция аналогична [функции s3Cluster](../../sql-reference/table-functions/s3Cluster.md). - - ## Синтаксис {#syntax} ```sql azureBlobStorageCluster(cluster_name, connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure]) ``` - ## Аргументы {#arguments} | Argument | Description | @@ -37,14 +32,10 @@ azureBlobStorageCluster(cluster_name, connection_string|storage_account_url, con | `compression` | Поддерживаемые значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. По умолчанию тип сжатия определяется автоматически по расширению файла (то же, что установка значения `auto`). | | `structure` | Структура таблицы. Формат: `'column1_name column1_type, column2_name column2_type, ...'`. | - - ## Возвращаемое значение {#returned_value} Таблица с указанной структурой для чтения данных из указанного файла или записи данных в него. - - ## Примеры {#examples} Аналогично табличному движку [AzureBlobStorage](/engines/table-engines/integrations/azureBlobStorage) пользователи могут использовать эмулятор Azurite для локальной разработки с использованием Azure Storage. Дополнительные сведения см. [здесь](https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub%2Cblob-storage). Ниже мы предполагаем, что Azurite доступен по имени хоста `azurite1`. @@ -58,13 +49,10 @@ SELECT count(*) FROM azureBlobStorageCluster( 'auto', 'key UInt64') ``` - ## Использование подписей общего доступа (Shared Access Signatures, SAS) {#using-shared-access-signatures-sas-sas-tokens} См. 
примеры в разделе [azureBlobStorage](/sql-reference/table-functions/azureBlobStorage#using-shared-access-signatures-sas-sas-tokens). - - ## См. также {#related} - [Движок AzureBlobStorage](../../engines/table-engines/integrations/azureBlobStorage.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md index 9ab09eeb1be..0362eb25b16 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md @@ -8,8 +8,6 @@ title: 'clusterAllReplicas' doc_type: 'reference' --- - - # Табличная функция clusterAllReplicas {#clusterallreplicas-table-function} Позволяет обращаться ко всем шардам кластера (настроенным в разделе `remote_servers`) без создания таблицы [Distributed](../../engines/table-engines/special/distributed.md). Запрашивается только одна реплика каждого шарда. @@ -20,12 +18,8 @@ doc_type: 'reference' Все доступные кластеры перечислены в таблице [system.clusters](../../operations/system-tables/clusters.md). ::: - - ## Синтаксис {#syntax} - - ```sql cluster(['cluster_name', db.table, sharding_key]) cluster(['cluster_name', db, table, sharding_key]) @@ -41,13 +35,10 @@ clusterAllReplicas(['cluster_name', db, table, sharding_key]) | `db.table` или `db`, `table` | Имя базы данных и таблицы. | | `sharding_key` | Ключ шардинга. Необязательный параметр. Должен быть указан, если кластер содержит более одного шарда. | - ## Возвращаемое значение {#returned_value} Набор данных, полученный из кластеров. - - ## Использование макросов {#using_macros} `cluster_name` может содержать макросы — подстановки в фигурных скобках. Значение подстановки берётся из раздела [macros](../../operations/server-configuration-parameters/settings.md#macros) файла конфигурации сервера. 
@@ -58,7 +49,6 @@ clusterAllReplicas(['cluster_name', db, table, sharding_key]) SELECT * FROM cluster('{cluster}', default.example_table); ``` - ## Использование и рекомендации {#usage_recommendations} Использование табличных функций `cluster` и `clusterAllReplicas` менее эффективно, чем создание таблицы `Distributed`, потому что в этом случае соединение с сервером заново устанавливается для каждого запроса. При обработке большого количества запросов всегда заранее создавайте таблицу `Distributed` и не используйте табличные функции `cluster` и `clusterAllReplicas`. @@ -71,8 +61,6 @@ SELECT * FROM cluster('{cluster}', default.example_table); Параметры подключения, такие как `host`, `port`, `user`, `password`, `compression`, `secure`, берутся из секции конфигурации ``. Подробности см. в описании [движка Distributed](../../engines/table-engines/special/distributed.md). - - ## См. также {#related} - [skip_unavailable_shards](../../operations/settings/settings.md#skip_unavailable_shards) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md index bf2ab363f8e..defbbed8c17 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md @@ -8,14 +8,10 @@ title: 'deltaLake' doc_type: 'reference' --- - - # Табличная функция deltaLake {#deltalake-table-function} Предоставляет табличный интерфейс только для чтения к таблицам [Delta Lake](https://github.com/delta-io/delta) в Amazon S3, Azure Blob Storage или локально смонтированной файловой системе. - - ## Синтаксис {#syntax} `deltaLake` — это псевдоним `deltaLakeS3` и поддерживается для обеспечения совместимости. 
@@ -30,20 +26,15 @@ deltaLakeAzure(connection_string|storage_account_url, container_name, blobpath, deltaLakeLocal(path, [,format]) ``` - ## Аргументы {#arguments} Описание аргументов совпадает с описанием аргументов табличных функций `s3`, `azureBlobStorage`, `HDFS` и `file` соответственно. `format` обозначает формат файлов данных в таблице Delta Lake. - - ## Возвращаемое значение {#returned_value} Таблица с указанной структурой для чтения данных из указанной таблицы Delta Lake. - - ## Примеры {#examples} Выборка строк из таблицы в S3 `https://clickhouse-public-datasets.s3.amazonaws.com/delta_lake/hits/`: @@ -64,7 +55,6 @@ LIMIT 2 └───────────────────────────────────────────────────────────────────────┴───────────┘ ``` - ## Виртуальные столбцы {#virtual-columns} - `_path` — Путь к файлу. Тип: `LowCardinality(String)`. @@ -73,8 +63,6 @@ LIMIT 2 - `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. - `_etag` — ETag файла. Тип: `LowCardinality(String)`. Если ETag неизвестен, значение — `NULL`. - - ## Связанные разделы {#related} - [Движок DeltaLake](engines/table-engines/integrations/deltalake.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md index f359c17be11..8e1d08b5b19 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md @@ -7,16 +7,12 @@ title: 'deltaLakeCluster' doc_type: 'reference' --- - - # Табличная функция deltaLakeCluster {#deltalakecluster-table-function} Это расширение табличной функции [deltaLake](sql-reference/table-functions/deltalake.md). 
Позволяет параллельно обрабатывать файлы из таблиц [Delta Lake](https://github.com/delta-io/delta) в Amazon S3 одновременно с нескольких узлов заданного кластера. На инициаторе создаётся подключение ко всем узлам в кластере, и каждый файл динамически распределяется между ними. Рабочий узел запрашивает у инициатора следующую задачу и обрабатывает её. Это повторяется до тех пор, пока все задачи не будут выполнены. - - ## Синтаксис {#syntax} ```sql @@ -32,21 +28,16 @@ deltaLakeAzureCluster(cluster_name, named_collection[, option=value [,..]]) `deltaLakeS3Cluster` — это псевдоним `deltaLakeCluster`, оба используются с S3. - ## Аргументы {#arguments} - `cluster_name` — имя кластера, которое используется для формирования набора адресов и параметров подключения к удалённым и локальным серверам. - Описание всех остальных аргументов аналогично описанию аргументов в эквивалентной табличной функции [deltaLake](sql-reference/table-functions/deltalake.md). - - ## Возвращаемое значение {#returned_value} Таблица с указанной структурой для чтения данных с кластера из указанной таблицы Delta Lake в S3. - - ## Виртуальные столбцы {#virtual-columns} - `_path` — путь к файлу. Тип: `LowCardinality(String)`. @@ -55,8 +46,6 @@ deltaLakeAzureCluster(cluster_name, named_collection[, option=value [,..]]) - `_time` — время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. - `_etag` — ETag файла. Тип: `LowCardinality(String)`. Если ETag неизвестен, значение — `NULL`. - - ## См. 
также {#related} - [движок Delta Lake](engines/table-engines/integrations/deltalake.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md index 94f21c2e11a..c9d76662d95 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md @@ -8,33 +8,24 @@ title: 'dictionary' doc_type: 'reference' --- - - # Табличная функция dictionary {#dictionary-table-function} Отображает данные словаря [dictionary](../../sql-reference/dictionaries/index.md) в виде таблицы ClickHouse. Работает так же, как движок [Dictionary](../../engines/table-engines/special/dictionary.md). - - ## Синтаксис {#syntax} ```sql dictionary('dict') ``` - ## Аргументы {#arguments} - `dict` — Имя словаря. [String](../../sql-reference/data-types/string.md). - - ## Возвращаемое значение {#returned_value} Таблица ClickHouse. - - ## Примеры {#examples} Исходная таблица `dictionary_source_table`: @@ -68,7 +59,6 @@ SELECT * FROM dictionary('new_dictionary'); └────┴───────┘ ``` - ## См. 
также {#related} - [Движок Dictionary](/engines/table-engines/special/dictionary) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md index f131d64c189..32be302b685 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md @@ -8,8 +8,6 @@ title: 'executable' doc_type: 'reference' --- - - # табличная функция `executable` для UDF {#executable-table-function-for-udfs} Табличная функция `executable` создаёт таблицу на основе вывода пользовательской функции (UDF), которую вы определяете в скрипте, выводящем строки в **stdout**. Исполняемый скрипт хранится в директории `users_scripts` и может читать данные из любого источника. Убедитесь, что на вашем сервере ClickHouse установлены все необходимые пакеты для запуска исполняемого скрипта. Например, если это скрипт на Python, убедитесь, что на сервере установлены необходимые пакеты Python. @@ -20,8 +18,6 @@ doc_type: 'reference' Ключевое преимущество по сравнению с обычными UDF-функциями у табличной функции `executable` и движка таблицы `Executable` заключается в том, что обычные UDF-функции не могут изменять количество строк. Например, если на вход подаётся 100 строк, то результат также должен содержать 100 строк. При использовании табличной функции `executable` или движка таблицы `Executable` ваш скрипт может выполнять любые необходимые преобразования данных, включая сложные агрегации. 
::: - - ## Синтаксис {#syntax} Табличная функция `executable` принимает три обязательных параметра и необязательный список входных запросов: @@ -90,7 +86,6 @@ SELECT * FROM executable('generate_random.py', TabSeparated, 'id UInt32, random └────┴────────────┘ ``` - ## Настройки {#settings} - `send_chunk_header` — управляет тем, нужно ли отправлять количество строк перед отправкой блока данных на обработку. Значение по умолчанию — `false`. @@ -100,8 +95,6 @@ SELECT * FROM executable('generate_random.py', TabSeparated, 'id UInt32, random - `command_read_timeout` — таймаут чтения данных из stdout команды в миллисекундах. Значение по умолчанию — 10000. - `command_write_timeout` — таймаут записи данных в stdin команды в миллисекундах. Значение по умолчанию — 10000. - - ## Передача результатов запроса в скрипт {#passing-query-results-to-a-script} Обязательно ознакомьтесь с примером в табличном движке `Executable` о том, [как передать результаты запроса в скрипт](../../engines/table-engines/special/executable.md#passing-query-results-to-a-script). Ниже показано, как выполнить тот же скрипт из этого примера с помощью табличной функции `executable`: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md index 7be0a0e1fd9..8cff0510381 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md @@ -10,22 +10,18 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # Табличная функция file {#file-table-function} Табличный движок, который предоставляет табличный интерфейс для выполнения SELECT из файлов и INSERT в файлы, аналогично табличной функции [s3](/sql-reference/table-functions/url.md). 
Используйте `file()` при работе с локальными файлами и `s3()` при работе с бакетами в объектном хранилище, например S3, GCS или MinIO. Функция `file` может использоваться в запросах `SELECT` и `INSERT` для чтения из файлов или записи в файлы. - - ## Синтаксис {#syntax} ```sql file([путь_к_архиву ::] путь [,формат] [,структура] [,сжатие]) ``` - ## Аргументы {#arguments} | Параметр | Описание | @@ -36,14 +32,10 @@ file([путь_к_архиву ::] путь [,формат] [,структура | `structure` | Структура таблицы. Формат: `'column1_name column1_type, column2_name column2_type, ...'`. | | `compression` | Тип существующего сжатия при использовании в запросе `SELECT` или требуемый тип сжатия при использовании в запросе `INSERT`. Поддерживаемые типы сжатия: `gz`, `br`, `xz`, `zst`, `lz4` и `bz2`. | - - ## Возвращаемое значение {#returned_value} Таблица для чтения данных из файла или записи в файл. - - ## Примеры записи в файл {#examples-for-writing-to-a-file} ### Запись в файл в формате TSV {#write-to-a-tsv-file} @@ -56,7 +48,6 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) В результате данные будут записаны в файл `test.tsv`: - ```bash # cat /var/lib/clickhouse/user_files/test.tsv {#cat-varlibclickhouseuser_filestesttsv} 1 2 3 @@ -77,18 +68,14 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) В результате данные записываются в три файла: `test_1.tsv`, `test_2.tsv` и `test_3.tsv`. 
- ```bash # cat /var/lib/clickhouse/user_files/test_1.tsv {#cat-varlibclickhouseuser_filestest_1tsv} 3 2 1 ``` - # cat /var/lib/clickhouse/user_files/test_2.tsv {#cat-varlibclickhouseuser_filestest_2tsv} 1 3 2 - - # cat /var/lib/clickhouse/user_files/test_3.tsv {#cat-varlibclickhouseuser_filestest_3tsv} 1 2 3 @@ -96,7 +83,6 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) ``` ``` - ## Примеры чтения из файла {#examples-for-reading-from-a-file} ### SELECT из CSV-файла {#select-from-a-csv-file} @@ -154,7 +140,6 @@ file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32'); SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); ``` - ## Глоб-шаблоны в пути {#globs-in-path} В путях можно использовать глоб-шаблоны. Файлы должны соответствовать всему шаблону пути, а не только суффиксу или префиксу. Есть одно исключение: если путь указывает на существующий каталог и не использует глоб-шаблоны, к пути неявно добавляется `*`, чтобы были выбраны все файлы в каталоге. @@ -167,8 +152,6 @@ SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); Конструкции с `{}` аналогичны табличным функциям [remote](remote.md) и [hdfs](hdfs.md). - - ## Примеры {#examples} **Пример** @@ -228,7 +211,6 @@ SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32'); SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32'); ``` - ## Виртуальные столбцы {#virtual-columns} - `_path` — путь к файлу. Тип: `LowCardinality(String)`. @@ -236,8 +218,6 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 - `_size` — размер файла в байтах. Тип: `Nullable(UInt64)`. Если размер файла неизвестен, значение равно `NULL`. - `_time` — время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение равно `NULL`. 
- - ## настройка use_hive_partitioning {#hive-style-partitioning} Когда настройка `use_hive_partitioning` имеет значение 1, ClickHouse будет обнаруживать секционирование в стиле Hive в пути (`/name=value/`) и позволит использовать столбцы секций как виртуальные столбцы в запросе. Эти виртуальные столбцы будут иметь те же имена, что и в секционированном пути, но с префиксом `_`. @@ -250,7 +230,6 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 SELECT * FROM file('data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## Настройки {#settings} | Настройка | Описание | @@ -261,8 +240,6 @@ SELECT * FROM file('data/path/date=*/country=*/code=*/*.parquet') WHERE _date > | [engine_file_skip_empty_files](operations/settings/settings.md#engine_file_skip_empty_files) | позволяет пропускать пустые файлы при чтении. По умолчанию отключено. | | [storage_file_read_method](/operations/settings/settings#engine_file_empty_if_not_exists) | метод чтения данных из файла хранилища, один из: `read`, `pread`, `mmap` (только для `clickhouse-local`). Значение по умолчанию: `pread` для `clickhouse-server`, `mmap` для `clickhouse-local`. | - - ## См. 
также {#related} - [Виртуальные столбцы](engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md index bf14bd4d328..a67989753ff 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md @@ -7,8 +7,6 @@ title: 'fileCluster' doc_type: 'reference' --- - - # Табличная функция fileCluster {#filecluster-table-function} Позволяет одновременно обрабатывать файлы, соответствующие заданному пути, на нескольких узлах кластера. Инициатор устанавливает соединения с рабочими узлами, разворачивает glob-шаблоны в пути к файлам и делегирует задачи чтения файлов рабочим узлам. Каждый рабочий узел запрашивает у инициатора следующий файл для обработки и повторяет это до тех пор, пока все задачи не будут выполнены (все файлы не будут прочитаны). @@ -18,15 +16,12 @@ doc_type: 'reference' Если эти файлы различаются между узлами, возвращаемое значение нельзя заранее предсказать, так как оно зависит от порядка, в котором рабочие узлы запрашивают задачи у инициатора. ::: - - ## Синтаксис {#syntax} ```sql fileCluster(cluster_name, path[, format, structure, compression_method]) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -37,8 +32,6 @@ fileCluster(cluster_name, path[, format, structure, compression_method]) | `structure` | Структура таблицы в формате `'UserID UInt64, Name String'`. Определяет имена и типы столбцов. Тип: [String](../../sql-reference/data-types/string.md). | | `compression_method` | Метод сжатия. Поддерживаемые типы сжатия: `gz`, `br`, `xz`, `zst`, `lz4` и `bz2`. | - - ## Возвращаемое значение {#returned_value} Таблица указанного формата и структуры с данными из файлов, путь к которым соответствует указанному. 
@@ -88,13 +81,10 @@ SELECT * FROM fileCluster('my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s Str └────┴────────┘ ``` - ## Глоб-шаблоны в пути {#globs-in-path} Все шаблоны, поддерживаемые табличной функцией [File](../../sql-reference/table-functions/file.md#globs-in-path), также поддерживаются функцией FileCluster. - - ## Смотрите также {#related} - [Табличная функция `file`](../../sql-reference/table-functions/file.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md index 272f201afec..9b808352bb2 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md @@ -7,35 +7,26 @@ title: 'format' doc_type: 'reference' --- - - # Табличная функция format {#format-table-function} Разбирает данные из аргументов в соответствии с указанным входным форматом. Если аргумент структуры не указан, структура определяется по данным. - - ## Синтаксис {#syntax} ```sql format(format_name, [structure], data) ``` - ## Аргументы {#arguments} - `format_name` — [формат](/sql-reference/formats) данных. - `structure` — структура таблицы. Необязательный параметр. Формат: `column1_name column1_type, column2_name column2_type, ...`. - `data` — строковый литерал или константное выражение, которое возвращает строку с данными в заданном формате. - - ## Возвращаемое значение {#returned_value} Таблица с данными, полученными при разборе аргумента `data` в соответствии с указанным форматом и заданной или определённой структурой. - - ## Примеры {#examples} Без аргумента `structure`: @@ -109,7 +100,6 @@ $$) └───────┴─────┘ ``` - ## См. 
также {#related} - [Форматы](../../interfaces/formats.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md index 77b6d457565..87d474d333e 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md @@ -7,21 +7,16 @@ title: 'fuzzJSON' doc_type: 'reference' --- - - # Табличная функция fuzzJSON {#fuzzjson-table-function} Вносит случайные искажения в строку JSON. - - ## Синтаксис {#syntax} ```sql fuzzJSON({ named_collection [, option=value [,..]] | json_str[, random_seed] }) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -41,14 +36,10 @@ fuzzJSON({ named_collection [, option=value [,..]] | json_str[, random_seed] }) | `min_key_length` (UInt64) | Минимальная длина ключа. Должна быть не менее 1. | | `max_key_length` (UInt64) | Максимальная длина ключа. Должна быть больше или равна `min_key_length`, если задана. | - - ## Возвращаемое значение {#returned_value} Объект таблицы с одним столбцом, содержащим модифицированные JSON-строки. - - ## Пример использования {#usage-example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md index bd28e4db2c0..56b1ce8fecb 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md @@ -7,21 +7,16 @@ title: 'fuzzQuery' doc_type: 'reference' --- - - # Табличная функция fuzzQuery {#fuzzquery-table-function} Модифицирует указанную строку запроса, внося в неё случайные изменения. 
- - ## Синтаксис {#syntax} ```sql fuzzQuery(query[, max_query_length[, random_seed]]) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -30,14 +25,10 @@ fuzzQuery(query[, max_query_length[, random_seed]]) | `max_query_length`| (UInt64) — максимальная длина запроса в процессе фаззинга. | | `random_seed` | (UInt64) — начальное значение генератора случайных чисел для стабильных результатов. | - - ## Возвращаемое значение {#returned_value} Объект таблицы с одним столбцом, содержащим изменённые строки запросов. - - ## Пример использования {#usage-example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md index c5aba32672d..eb6e956f315 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md @@ -8,8 +8,6 @@ title: 'gcs' doc_type: 'reference' --- - - # Табличная функция gcs {#gcs-table-function} Предоставляет табличный интерфейс для выполнения `SELECT` и `INSERT` данных из [Google Cloud Storage](https://cloud.google.com/storage/). Требуется роль IAM [`Storage Object User`](https://cloud.google.com/storage/docs/access-control/iam-roles). @@ -18,8 +16,6 @@ doc_type: 'reference' Если в вашем кластере несколько реплик, вы можете использовать [функцию s3Cluster](../../sql-reference/table-functions/s3Cluster.md) (которая работает с GCS) для параллельной вставки данных. - - ## Синтаксис {#syntax} ```sql @@ -32,7 +28,6 @@ gcs(named_collection[, option=value [,..]]) Дополнительные сведения об endpoint и HMAC см. в [документации по совместимости Google](https://cloud.google.com/storage/docs/interoperability). ::: - ## Аргументы {#arguments} | Аргумент | Описание | @@ -65,13 +60,10 @@ and not ~~[https://storage.cloud.google.com](https://storage.cloud.google.com)~~ | `no_sign_request` | Отключён по умолчанию. 
| | `expiration_window_seconds` | Значение по умолчанию — 120. | - ## Возвращаемое значение {#returned_value} Таблица с указанной структурой для чтения данных из указанного файла или записи данных в него. - - ## Примеры {#examples} Выбор первых двух строк таблицы из файла в GCS `https://storage.googleapis.com/my-test-bucket-768/data.csv`: @@ -104,7 +96,6 @@ LIMIT 2; └─────────┴─────────┴─────────┘ ``` - ## Использование {#usage} Предположим, что у нас есть несколько файлов со следующими URI в GCS: @@ -198,7 +189,6 @@ SELECT count(*) FROM gcs(creds, url='https://s3-object-url.csv') ``` - ## Партиционированная запись {#partitioned-write} Если при вставке данных в таблицу `GCS` указано выражение `PARTITION BY`, для каждого значения партиции создаётся отдельный файл. Разделение данных на отдельные файлы помогает повысить эффективность операций чтения. @@ -225,7 +215,6 @@ INSERT INTO TABLE FUNCTION В результате данные записываются в три файла в разных бакетах: `my_bucket_1/file.csv`, `my_bucket_10/file.csv` и `my_bucket_20/file.csv`. - ## См. также {#related} - [Табличная функция S3](s3.md) - [Движок таблицы S3](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md index 7821d6fea3d..5de40a2f299 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md @@ -7,23 +7,18 @@ title: 'generateRandom' doc_type: 'reference' --- - - # Табличная функция generateRandom {#generaterandom-table-function} Генерирует случайные данные с заданной схемой. Позволяет заполнять тестовые таблицы этими данными. Поддерживаются не все типы данных. 
- - ## Синтаксис {#syntax} ```sql generateRandom(['name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]]) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -34,14 +29,10 @@ generateRandom(['name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_str | `max_string_length` | Максимальная длина для всех сгенерированных строк. По умолчанию `10`. | | `max_array_length` | Максимальное количество элементов для всех сгенерированных массивов или map-ов. По умолчанию `10`. | - - ## Возвращаемое значение {#returned_value} Объект таблицы с запрошенной схемой. - - ## Пример использования {#usage-example} ```sql @@ -89,7 +80,6 @@ SELECT * FROM generateRandom(generateRandomStructure(4, 101), 101) LIMIT 3; SELECT * FROM generateRandom() LIMIT 3; ``` - ```text ┌───c1─┬─────────c2─┬─────────────────────c3─┬──────────────────────c4─┬─c5───────┐ │ -128 │ 317300854 │ 2030-08-16 08:22:20.65 │ 1994-08-16 12:08:56.745 │ R0qgiC46 │ @@ -104,7 +94,6 @@ SELECT * FROM generateRandom() LIMIT 3; SELECT * FROM generateRandom(11) LIMIT 3; ``` - ```text ┌───────────────────────────────────────c1─┬─────────────────────────────────────────────────────────────────────────────c2─┬─────────────────────────────────────────────────────────────────────────────c3─┬─────────c4─┬─────────────────────────────────────────────────────────────────────────────c5─┬──────────────────────c6─┬─c7──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─c8──────────────────────────────────────┬─────────c9─┐ │ -77422512305044606600216318673365695785 │ 636812099959807642229.503817849012019401335326013846687285151335352272727523 │ -34944452809785978175157829109276115789694605299387223845886143311647505037529 │ 544473976 │ 
111220388331710079615337037674887514156741572807049614590010583571763691328563 │ 22016.22623506465 │ {'2052-01-31 20:25:33':4306400876908509081044405485378623663,'1993-04-16 15:58:49':164367354809499452887861212674772770279,'2101-08-19 03:07:18':-60676948945963385477105077735447194811,'2039-12-22 22:31:39':-59227773536703059515222628111999932330} │ a7b2:8f58:4d07:6707:4189:80cf:92f5:902d │ 1950-07-14 │ @@ -117,6 +106,5 @@ SELECT * FROM generateRandom(11) LIMIT 3; `generateRandom(generateRandomStructure(), [random seed], max_string_length, max_array_length)` с достаточно большим значением `max_array_length` может сгенерировать очень большой результат из-за потенциально большой глубины вложенности (до 16) составных типов (`Array`, `Tuple`, `Map`, `Nested`). ::: - ## Связанные материалы {#related-content} - Блог: [Генерация случайных данных в ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md index 685f2e74011..f08f621d927 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md @@ -11,20 +11,16 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # Табличная функция hdfs {#hdfs-table-function} Создает таблицу из файлов в HDFS. Эта табличная функция аналогична табличным функциям [url](../../sql-reference/table-functions/url.md) и [file](../../sql-reference/table-functions/file.md). - - ## Синтаксис {#syntax} ```sql hdfs(URI, format, structure) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -33,8 +29,6 @@ hdfs(URI, format, structure) | `format` | [Формат](/sql-reference/formats) файла. 
| | `structure`| Структура таблицы. В формате `'column1_name column1_type, column2_name column2_type, ...'`. | - - ## Возвращаемое значение {#returned_value} Таблица заданной структуры для чтения или записи данных в указанный файл. @@ -56,7 +50,6 @@ LIMIT 2 └─────────┴─────────┴─────────┘ ``` - ## Глоб-шаблоны в пути {#globs_in_path} В путях можно использовать глоб-шаблоны. Файлы должны соответствовать всему шаблону пути, а не только суффиксу или префиксу. @@ -111,7 +104,6 @@ SELECT count(*) FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') ``` - ## Виртуальные столбцы {#virtual-columns} - `_path` — Путь к файлу. Тип: `LowCardinality(String)`. @@ -119,8 +111,6 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin - `_size` — Размер файла в байтах. Тип: `Nullable(UInt64)`. Если размер неизвестен, значение — `NULL`. - `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. - - ## параметр use_hive_partitioning {#hive-style-partitioning} Когда параметр `use_hive_partitioning` установлен в значение 1, ClickHouse будет обнаруживать секционирование в стиле Hive в пути (`/name=value/`) и позволит использовать столбцы секций как виртуальные столбцы в запросе. Эти виртуальные столбцы будут иметь те же имена, что и в секционированном пути, но с префиксом `_`. @@ -133,15 +123,12 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin SELECT * FROM HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## Настройки хранилища {#storage-settings} - [hdfs_truncate_on_insert](operations/settings/settings.md#hdfs_truncate_on_insert) — позволяет усекать файл перед вставкой данных в него. По умолчанию отключено. 
- [hdfs_create_new_file_on_insert](operations/settings/settings.md#hdfs_create_new_file_on_insert) — позволяет создавать новый файл при каждой вставке, если формат имеет суффикс. По умолчанию отключено. - [hdfs_skip_empty_files](operations/settings/settings.md#hdfs_skip_empty_files) — позволяет пропускать пустые файлы при чтении. По умолчанию отключено. - - ## См. также {#related} - [Виртуальные столбцы](../../engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md index 621c90d2e5f..c7fbcbd4554 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md @@ -7,21 +7,16 @@ title: 'hdfsCluster' doc_type: 'reference' --- - - # Табличная функция hdfsCluster {#hdfscluster-table-function} Позволяет обрабатывать файлы из HDFS параллельно с множества узлов в указанном кластере. На инициирующем узле создаётся соединение со всеми узлами кластера, раскрываются символы `*` в пути к файлам HDFS, и каждый файл динамически распределяется по узлам. Рабочий узел запрашивает у инициирующего узла следующую задачу и обрабатывает её. Это повторяется до тех пор, пока все задачи не будут выполнены. - - ## Синтаксис {#syntax} ```sql hdfsCluster(cluster_name, URI, format, structure) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -31,14 +26,10 @@ hdfsCluster(cluster_name, URI, format, structure) | `format` | [Формат](/sql-reference/formats) файла. | | `structure` | Структура таблицы. Формат: `'column1_name column1_type, column2_name column2_type, ...'`. | - - ## Возвращаемое значение {#returned_value} Таблица с указанной структурой, предназначенная для чтения данных из указанного файла. - - ## Примеры {#examples} 1. 
Предположим, что у нас есть кластер ClickHouse с именем `cluster_simple` и несколько файлов со следующими URI в HDFS: @@ -68,7 +59,6 @@ FROM hdfsCluster('cluster_simple', 'hdfs://hdfs1:9000/{some,another}_dir/*', 'TS Если в списке файлов встречаются числовые диапазоны с ведущими нулями, используйте фигурные скобки для каждой цифры по отдельности или символ `?`. ::: - ## См. также {#related} - [Движок HDFS](../../engines/table-engines/integrations/hdfs.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md index 6d3de0a21a7..ae9b2c76711 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md @@ -7,21 +7,16 @@ title: 'hudi' doc_type: 'reference' --- - - # Табличная функция hudi {#hudi-table-function} Предоставляет интерфейс только для чтения, аналогичный таблице, для работы с таблицами Apache [Hudi](https://hudi.apache.org/) в Amazon S3. - - ## Синтаксис {#syntax} ```sql hudi(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -32,14 +27,10 @@ hudi(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,co | `structure` | Структура таблицы. Формат: `'column1_name column1_type, column2_name column2_type, ...'`. | | `compression` | Параметр является необязательным. Поддерживаемые значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. По умолчанию тип сжатия автоматически определяется по расширению файла. | - - ## Возвращаемое значение {#returned_value} Таблица с заданной структурой для чтения данных из указанной таблицы Hudi в S3. - - ## Виртуальные столбцы {#virtual-columns} - `_path` — Путь к файлу. Тип: `LowCardinality(String)`. 
@@ -48,8 +39,6 @@ hudi(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,co - `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. - `_etag` — ETag файла. Тип: `LowCardinality(String)`. Если ETag неизвестен, значение — `NULL`. - - ## Связанные материалы {#related} - [Движок Hudi](/engines/table-engines/integrations/hudi.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md index 2592f321ee2..6c2ace52975 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md @@ -7,23 +7,18 @@ title: 'Табличная функция hudiCluster' doc_type: 'reference' --- - - # Табличная функция hudiCluster {#hudicluster-table-function} Это расширение табличной функции [hudi](sql-reference/table-functions/hudi.md). Позволяет параллельно обрабатывать файлы из таблиц Apache [Hudi](https://hudi.apache.org/) в Amazon S3 на многих узлах заданного кластера. На инициирующем узле создаётся соединение со всеми узлами кластера, и каждый файл динамически распределяется между ними. Рабочий узел запрашивает у инициирующего узла следующую задачу для обработки и выполняет её. Это повторяется до тех пор, пока все задачи не будут завершены. - - ## Синтаксис {#syntax} ```sql hudiCluster(cluster_name, url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -35,14 +30,10 @@ hudiCluster(cluster_name, url [,aws_access_key_id, aws_secret_access_key] [,form | `structure` | Структура таблицы. Формат: `'column1_name column1_type, column2_name column2_type, ...'`. | | `compression` | Параметр является необязательным. 
Поддерживаемые значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. По умолчанию тип сжатия автоматически определяется по расширению файла. | - - ## Возвращаемое значение {#returned_value} Таблица с указанной структурой для чтения данных кластера из заданной таблицы Hudi в S3. - - ## Виртуальные столбцы {#virtual-columns} - `_path` — Путь к файлу. Тип: `LowCardinality(String)`. @@ -51,8 +42,6 @@ hudiCluster(cluster_name, url [,aws_access_key_id, aws_secret_access_key] [,form - `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. - `_etag` — ETag файла. Тип: `LowCardinality(String)`. Если ETag неизвестен, значение — `NULL`. - - ## Смотрите также {#related} - [Движок Hudi](engines/table-engines/integrations/hudi.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md index 19fefe71613..4c528c0e2d9 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md @@ -1,5 +1,5 @@ --- -description: 'Предоставляет табличный интерфейс только для чтения к таблицам Apache Iceberg, размещённым в Amazon S3, Azure, HDFS или хранящимся локально.' +description: 'Предоставляет табличный интерфейс в режиме только для чтения к таблицам Apache Iceberg, размещённым в Amazon S3, Azure, HDFS или локально.' sidebar_label: 'iceberg' sidebar_position: 90 slug: /sql-reference/table-functions/iceberg @@ -7,13 +7,9 @@ title: 'iceberg' doc_type: 'reference' --- - - # Табличная функция iceberg {#iceberg-table-function} -Предоставляет табличный интерфейс только для чтения к таблицам Apache [Iceberg](https://iceberg.apache.org/) в Amazon S3, Azure, HDFS или локальном хранилище. 
- - +Предоставляет табличный интерфейс только для чтения к таблицам Apache [Iceberg](https://iceberg.apache.org/), размещённым в Amazon S3, Azure, HDFS или в локальном хранилище. ## Синтаксис {#syntax} @@ -34,7 +30,7 @@ icebergLocal(named_collection[, option=value [,..]]) ## Аргументы {#arguments} -Описание аргументов аналогично описанию аргументов в табличных функциях `s3`, `azureBlobStorage`, `HDFS` и `file` соответственно. +Описание аргументов совпадает с описанием аргументов табличных функций `s3`, `azureBlobStorage`, `HDFS` и `file` соответственно. `format` обозначает формат файлов с данными в таблице Iceberg. ### Возвращаемое значение {#returned-value} @@ -48,7 +44,7 @@ SELECT * FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_tab ``` :::important -ClickHouse в настоящее время поддерживает чтение формата Iceberg версий v1 и v2 с помощью табличных функций `icebergS3`, `icebergAzure`, `icebergHDFS` и `icebergLocal`, а также табличных движков `IcebergS3`, `icebergAzure`, `IcebergHDFS` и `IcebergLocal`. +На данный момент ClickHouse поддерживает чтение версий v1 и v2 формата Iceberg с помощью табличных функций `icebergS3`, `icebergAzure`, `icebergHDFS` и `icebergLocal`, а также табличных движков `IcebergS3`, `icebergAzure`, `IcebergHDFS` и `IcebergLocal`. ::: @@ -76,105 +72,143 @@ DESCRIBE icebergS3(iceberg_conf, filename = 'test_table') ``` -## Эволюция схемы {#schema-evolution} +## Использование каталога данных {#iceberg-writes-catalogs} -На данный момент с помощью ClickHouse вы можете читать таблицы Iceberg, схема которых изменялась со временем. Мы поддерживаем чтение таблиц, в которых столбцы добавлялись и удалялись, а их порядок изменялся. Вы также можете изменить столбец с обязательным значением на столбец, в котором допускается значение NULL. 
Дополнительно мы поддерживаем допустимое приведение типов для простых типов, а именно:   +Таблицы Iceberg также можно использовать с различными каталогами данных, такими как [REST Catalog](https://iceberg.apache.org/rest-catalog-spec/), [AWS Glue Data Catalog](https://docs.aws.amazon.com/prescriptive-guidance/latest/serverless-etl-aws-glue/aws-glue-data-catalog.html) и [Unity Catalog](https://www.unitycatalog.io/). -* int -> long -* float -> double -* decimal(P, S) -> decimal(P', S), где P' > P. +:::important +При использовании каталога большинству пользователей следует использовать движок базы данных `DataLakeCatalog`, который подключает ClickHouse к вашему каталогу для обнаружения ваших таблиц. Вы можете использовать этот движок базы данных вместо ручного создания отдельных таблиц с движком таблиц `IcebergS3`. +::: -В настоящее время невозможно изменять вложенные структуры или типы элементов внутри массивов и структур map. +Чтобы использовать такие каталоги, создайте таблицу с движком `IcebergS3` и укажите необходимые настройки. +Например, использование REST Catalog с хранилищем MinIO: +```sql +CREATE TABLE `database_name.table_name` +ENGINE = IcebergS3( + 'http://minio:9000/warehouse-rest/table_name/', + 'minio_access_key', + 'minio_secret_key' +) +SETTINGS + storage_catalog_type="rest", + storage_warehouse="demo", + object_storage_endpoint="http://minio:9000/warehouse-rest", + storage_region="us-east-1", + storage_catalog_url="http://rest:8181/v1" +``` -## Отсечение партиций {#partition-pruning} +Либо с использованием AWS Glue Data Catalog и S3: -ClickHouse поддерживает отсечение партиций при выполнении запросов SELECT к таблицам Iceberg, что помогает оптимизировать производительность запросов за счёт пропуска нерелевантных файлов данных. Чтобы включить отсечение партиций, установите `use_iceberg_partition_pruning = 1`. Для получения дополнительной информации об отсечении партиций в Iceberg см. 
https://iceberg.apache.org/spec/#partitioning +```sql +CREATE TABLE `my_database.my_table` +ENGINE = IcebergS3( + 's3://my-data-bucket/warehouse/my_database/my_table/', + 'aws_access_key', + 'aws_secret_key' +) +SETTINGS + storage_catalog_type = 'glue', + storage_warehouse = 'my_database', + object_storage_endpoint = 's3://my-data-bucket/', + storage_region = 'us-east-1', + storage_catalog_url = 'https://glue.us-east-1.amazonaws.com/iceberg/v1' +``` +## Эволюция схемы {#schema-evolution} -## Time Travel {#time-travel} +На данный момент в ClickHouse можно читать таблицы Iceberg, схема которых со временем изменялась. Поддерживается чтение таблиц, в которых были добавлены и удалены столбцы, а также изменён их порядок. Можно также изменить столбец с обязательным значением на столбец, где допускается значение NULL. Кроме того, поддерживается допустимое приведение простых типов, а именно:   + +* int -> long +* float -> double +* decimal(P, S) -> decimal(P', S), где P' > P. -ClickHouse поддерживает механизм Time Travel для таблиц Iceberg, позволяющий выполнять запросы к историческим данным на указанную метку времени или по идентификатору снимка (snapshot). +Пока невозможно изменять вложенные структуры или типы элементов внутри массивов и map. +## Отсечение партиций {#partition-pruning} + +ClickHouse поддерживает отсечение партиций при выполнении запросов SELECT к таблицам Iceberg, что помогает оптимизировать производительность запросов за счёт пропуска не относящихся к делу файлов данных. Чтобы включить отсечение партиций, установите `use_iceberg_partition_pruning = 1`. Для получения дополнительной информации об отсечении партиций в Iceberg см. https://iceberg.apache.org/spec/#partitioning + +## Time Travel {#time-travel} +ClickHouse поддерживает механизм time travel для таблиц Iceberg, позволяющий выполнять запросы к историческим данным по заданной метке времени или идентификатору снимка. 
## Обработка таблиц с удалёнными строками {#deleted-rows} -В настоящее время поддерживаются только таблицы Iceberg, использующие [position deletes](https://iceberg.apache.org/spec/#position-delete-files). +В настоящее время поддерживаются только таблицы Iceberg, использующие [position deletes](https://iceberg.apache.org/spec/#position-delete-files). Следующие методы удаления **не поддерживаются**: -* [Equality deletes](https://iceberg.apache.org/spec/#equality-delete-files) -* [Deletion vectors](https://iceberg.apache.org/spec/#deletion-vectors) (добавлены в v3) +- [Equality deletes](https://iceberg.apache.org/spec/#equality-delete-files) +- [Deletion vectors](https://iceberg.apache.org/spec/#deletion-vectors) (появились в версии 3) -### Базовое использование {#basic-usage} +### Основы использования {#basic-usage} ```sql -SELECT * FROM example_table ORDER BY 1 -SETTINGS iceberg_timestamp_ms = 1714636800000 + SELECT * FROM example_table ORDER BY 1 + SETTINGS iceberg_timestamp_ms = 1714636800000 ``` ```sql -SELECT * FROM example_table ORDER BY 1 -SETTINGS iceberg_snapshot_id = 3547395809148285433 + SELECT * FROM example_table ORDER BY 1 + SETTINGS iceberg_snapshot_id = 3547395809148285433 ``` -Note: Нельзя указывать параметры `iceberg_timestamp_ms` и `iceberg_snapshot_id` в одном и том же запросе. +Примечание: Нельзя указывать параметры `iceberg_timestamp_ms` и `iceberg_snapshot_id` одновременно в одном запросе. + ### Важные замечания {#important-considerations} * **Снимки (snapshots)** обычно создаются, когда: - * В таблицу записываются новые данные +* Выполняется операция компактации (compaction) данных -* Выполняется операция по уплотнению данных (compaction) - -* **Изменения схемы обычно не создают новых снимков** — это приводит к важным особенностям поведения при использовании time travel для таблиц, в которых происходила эволюция схемы. 
+* **Изменения схемы обычно не создают снимки** — это приводит к важным особенностям поведения при использовании time travel для таблиц, схему которых со временем изменяли. ### Примеры сценариев {#example-scenarios} -Все сценарии приведены в Spark, так как ClickHouse пока не поддерживает запись в таблицы Iceberg. +Во всех сценариях используется Spark, так как ClickHouse пока не поддерживает запись в таблицы Iceberg. -#### Сценарий 1: изменения схемы без новых снимков {#scenario-1} +#### Сценарий 1: изменение схемы без создания новых снимков {#scenario-1} Рассмотрим следующую последовательность операций: ```sql --- Создание таблицы с двумя столбцами - CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example ( - order_number bigint, - product_code string - ) - USING iceberg - OPTIONS ('format-version'='2') - -- - Вставка данных в таблицу - INSERT INTO spark_catalog.db.time_travel_example VALUES - (1, 'Mars') + -- Создать таблицу с двумя столбцами + CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example ( + order_number bigint, + product_code string + ) + USING iceberg + OPTIONS ('format-version'='2') - ts1 = now() // Фрагмент псевдокода +-- Вставить данные в таблицу + INSERT INTO spark_catalog.db.time_travel_example VALUES + (1, 'Mars') -- - Изменение таблицы для добавления нового столбца - ALTER TABLE spark_catalog.db.time_travel_example ADD COLUMN (price double) + ts1 = now() // Фрагмент псевдокода - ts2 = now() +-- Изменить таблицу, добавив новый столбец + ALTER TABLE spark_catalog.db.time_travel_example ADD COLUMN (price double) + + ts2 = now() -- - Вставка данных в таблицу - INSERT INTO spark_catalog.db.time_travel_example VALUES (2, 'Venus', 100) +-- Вставить данные в таблицу + INSERT INTO spark_catalog.db.time_travel_example VALUES (2, 'Venus', 100) - ts3 = now() + ts3 = now() -- - Запрос таблицы для каждой временной метки - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts1; +-- Выполнить запрос к таблице для каждой 
временной метки + SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts1; +------------+------------+ |order_number|product_code| +------------+------------+ | 1| Mars| +------------+------------+ - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts2; + SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts2; +------------+------------+ |order_number|product_code| @@ -182,7 +216,7 @@ Note: Нельзя указывать параметры `iceberg_timestamp_ms` | 1| Mars| +------------+------------+ - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts3; + SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts3; +------------+------------+-----+ |order_number|product_code|price| @@ -192,17 +226,18 @@ Note: Нельзя указывать параметры `iceberg_timestamp_ms` +------------+------------+-----+ ``` -Результаты запроса на разных временных метках: +Результаты запроса в разные моменты времени: + +* На ts1 и ts2: отображаются только исходные два столбца +* На ts3: отображаются все три столбца; для цены первой строки указано значение NULL -* В моменты ts1 и ts2: отображаются только исходные два столбца -* В момент ts3: отображаются все три столбца, при этом для цены в первой строке указано значение NULL #### Сценарий 2: различия между исторической и текущей схемой {#scenario-2} -Запрос time travel, выполненный в текущий момент, может показать схему, отличающуюся от схемы текущей таблицы: +Запрос time travel, выполненный в текущий момент времени, может показать схему, отличающуюся от текущей схемы таблицы: ```sql --- Создать таблицу +-- Создание таблицы CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example_2 ( order_number bigint, product_code string @@ -210,15 +245,15 @@ Note: Нельзя указывать параметры `iceberg_timestamp_ms` USING iceberg OPTIONS ('format-version'='2') --- Вставить начальные данные в таблицу +-- Вставка начальных данных в таблицу INSERT INTO spark_catalog.db.time_travel_example_2 VALUES 
(2, 'Venus'); --- Изменить таблицу, добавив новый столбец +-- Изменение таблицы для добавления нового столбца ALTER TABLE spark_catalog.db.time_travel_example_2 ADD COLUMN (price double); ts = now(); --- Запросить таблицу в текущий момент времени, используя синтаксис временной метки +-- Запрос таблицы на текущий момент с использованием синтаксиса временной метки SELECT * FROM spark_catalog.db.time_travel_example_2 TIMESTAMP AS OF ts; @@ -228,7 +263,7 @@ Note: Нельзя указывать параметры `iceberg_timestamp_ms` | 2| Venus| +------------+------------+ --- Запросить таблицу в текущий момент времени +-- Запрос таблицы на текущий момент SELECT * FROM spark_catalog.db.time_travel_example_2; +------------+------------+-----+ |order_number|product_code|price| @@ -237,12 +272,12 @@ Note: Нельзя указывать параметры `iceberg_timestamp_ms` +------------+------------+-----+ ``` -Это происходит потому, что `ALTER TABLE` не создаёт новый снимок, но для текущей таблицы Spark использует значение `schema_id` из последнего файла метаданных, а не из снимка. +Это происходит потому, что `ALTER TABLE` не создаёт новый snapshot, а при работе с текущей таблицей Spark берёт значение `schema_id` из последнего файла метаданных, а не из snapshot. 
-#### Сценарий 3: различия между исторической и текущей схемами {#scenario-3} +#### Сценарий 3: различия между исторической и текущей схемой {#scenario-3} -Второй момент заключается в том, что при использовании механизма time travel вы не можете получить состояние таблицы на момент до записи в неё каких-либо данных: +Второе ограничение состоит в том, что при использовании механизма time travel нельзя получить состояние таблицы до того, как в неё были записаны какие‑либо данные: ```sql -- Создание таблицы @@ -256,90 +291,85 @@ Note: Нельзя указывать параметры `iceberg_timestamp_ms` ts = now(); -- Запрос таблицы на определённую временную метку - SELECT * FROM spark_catalog.db.time_travel_example_3 TIMESTAMP AS OF ts; -- Завершается ошибкой: Cannot find a snapshot older than ts. + SELECT * FROM spark_catalog.db.time_travel_example_3 TIMESTAMP AS OF ts; -- Завершается с ошибкой: Cannot find a snapshot older than ts. ``` -В ClickHouse поведение аналогично Spark. Вы можете мысленно заменить запросы SELECT в Spark на запросы SELECT в ClickHouse — и всё будет работать так же. +В ClickHouse поведение такое же, как в Spark. Вы можете мысленно заменить запросы SELECT в Spark на запросы SELECT в ClickHouse — и всё будет работать так же. ## Определение файла метаданных {#metadata-file-resolution} -При использовании табличной функции `iceberg` в ClickHouse система должна найти корректный файл metadata.json, который описывает структуру таблицы Iceberg. Ниже описано, как работает этот процесс определения: +При использовании табличной функции `iceberg` в ClickHouse системе необходимо найти нужный файл metadata.json, который описывает структуру таблицы Iceberg. Ниже описано, как работает процесс его определения: ### Поиск кандидатов (в порядке приоритета) {#candidate-search} 1. 
**Явное указание пути**: - *Если вы задаёте `iceberg_metadata_file_path`, система будет использовать этот точный путь, объединяя его с путём к директории таблицы Iceberg.* +*Если вы задаёте `iceberg_metadata_file_path`, система будет использовать именно этот путь, добавляя его к пути каталога таблицы Iceberg. -* При наличии этого параметра все остальные параметры выбора игнорируются. +* При наличии этого параметра все остальные параметры разрешения пути игнорируются. -2. **Соответствие UUID таблицы**: - *Если указан `iceberg_metadata_table_uuid`, система будет:* - *Просматривать только файлы `.metadata.json` в директории `metadata`* - *Фильтровать файлы, содержащие поле `table-uuid`, совпадающее с указанным вами UUID (без учёта регистра)* +2. **Сопоставление UUID таблицы**: +*Если указан `iceberg_metadata_table_uuid`, система будет: + *Смотреть только файлы `.metadata.json` в каталоге `metadata` + *Отбирать файлы, содержащие поле `table-uuid` со значением, совпадающим с указанным UUID (без учёта регистра) 3. 
**Поиск по умолчанию**: - *Если ни один из вышеперечисленных параметров не задан, все файлы `.metadata.json` в директории `metadata` становятся кандидатами.* +*Если ни один из вышеперечисленных параметров не задан, все файлы `.metadata.json` в каталоге `metadata` рассматриваются как кандидаты ### Выбор самого нового файла {#most-recent-file} -После определения файлов-кандидатов по приведённым выше правилам система выбирает самый новый: +После определения кандидатов на основе приведённых выше правил система выбирает, какой файл является самым новым: -* Если включён `iceberg_recent_metadata_file_by_last_updated_ms_field`: +* Если `iceberg_recent_metadata_file_by_last_updated_ms_field` включён: -* Выбирается файл с максимальным значением `last-updated-ms` +* Выбирается файл с наибольшим значением `last-updated-ms` * В противном случае: * Выбирается файл с наибольшим номером версии -* (Версия представлена как `V` в именах файлов формата `V.metadata.json` или `V-uuid.metadata.json`) +* (Версия обозначается как `V` в именах файлов формата `V.metadata.json` или `V-uuid.metadata.json`) -**Примечание**: Все упомянутые параметры являются параметрами табличной функции (а не глобальными или параметрами уровня запроса) и должны указываться так, как показано ниже: +**Примечание**: Все упомянутые настройки являются настройками табличной функции (а не глобальными или на уровне запроса) и должны указываться как показано ниже: ```sql SELECT * FROM iceberg('s3://bucket/path/to/iceberg_table', SETTINGS iceberg_metadata_table_uuid = 'a90eed4c-f74b-4e5b-b630-096fb9d09021'); ``` -**Примечание**: Хотя Iceberg Catalogs обычно отвечают за разрешение метаданных, табличная функция `iceberg` в ClickHouse напрямую интерпретирует файлы, хранящиеся в S3, как таблицы Iceberg, поэтому важно понимать эти правила разрешения метаданных. 
+**Примечание**: Хотя каталоги Iceberg обычно отвечают за разрешение метаданных, табличная функция `iceberg` в ClickHouse напрямую интерпретирует файлы, хранящиеся в S3, как таблицы Iceberg, поэтому важно понимать эти правила разрешения метаданных. -## Кэш метаданных {#metadata-cache} - -Движок таблицы и табличная функция `Iceberg` поддерживают кэш метаданных, в котором хранится информация о файлах манифеста, списке манифестов и JSON-файле метаданных. Кэш хранится в памяти. Эта возможность управляется настройкой `use_iceberg_metadata_files_cache`, которая по умолчанию включена. - +## Metadata cache {#metadata-cache} +Движок таблиц `Iceberg` и табличная функция `Iceberg` поддерживают кэш метаданных, в котором хранится информация о файлах manifest, списках manifest и JSON-файлах с метаданными. Кэш хранится в памяти. Этот функционал управляется настройкой `use_iceberg_metadata_files_cache`, которая по умолчанию включена. ## Псевдонимы {#aliases} -Табличная функция `iceberg` теперь является алиасом функции `icebergS3`. - - +Табличная функция `iceberg` теперь является псевдонимом для `icebergS3`. ## Виртуальные столбцы {#virtual-columns} - `_path` — Путь к файлу. Тип: `LowCardinality(String)`. - `_file` — Имя файла. Тип: `LowCardinality(String)`. -- `_size` — Размер файла в байтах. Тип: `Nullable(UInt64)`. Если размер файла неизвестен, значение равно `NULL`. -- `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение равно `NULL`. -- `_etag` — ETag файла. Тип: `LowCardinality(String)`. Если ETag неизвестен, значение равно `NULL`. - - +- `_size` — Размер файла в байтах. Тип: `Nullable(UInt64)`. Если размер файла неизвестен, значение — `NULL`. +- `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. +- `_etag` — ETag файла. Тип: `LowCardinality(String)`. Если ETag неизвестен, значение — `NULL`. 
## Запись в таблицы Iceberg {#writes-into-iceberg-table} -Начиная с версии 25.7, ClickHouse поддерживает изменение пользовательских таблиц Iceberg. +Начиная с версии 25.7, ClickHouse поддерживает модификацию пользовательских таблиц Iceberg. -В настоящее время это экспериментальная функциональность, поэтому её сначала необходимо включить: +В настоящее время это экспериментальная функция, поэтому сначала её нужно включить: ```sql SET allow_experimental_insert_into_iceberg = 1; ``` + ### Создание таблицы {#create-iceberg-table} -Чтобы создать собственную пустую таблицу Iceberg, используйте те же команды, что и для чтения, но явно задайте схему. +Чтобы создать собственную пустую таблицу Iceberg, используйте те же команды, что и для чтения, но явно укажите схему. Операции записи поддерживают все форматы данных из спецификации Iceberg, такие как Parquet, Avro и ORC. ### Пример {#example-iceberg-writes-create} @@ -354,11 +384,12 @@ ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') ``` Примечание: чтобы создать файл подсказки версии, включите настройку `iceberg_use_version_hint`. -Если вы хотите сжать файл metadata.json, укажите имя кодека в настройке `iceberg_metadata_compression_method`. +Если нужно сжать файл metadata.json, укажите имя кодека в настройке `iceberg_metadata_compression_method`. + ### INSERT {#writes-inserts} -После создания новой таблицы вы можете вставлять данные, используя обычный синтаксис ClickHouse. +После создания новой таблицы вы можете добавить данные, используя стандартный синтаксис ClickHouse. ### Пример {#example-iceberg-writes-insert} @@ -380,13 +411,14 @@ x: Ivanov y: 993 ``` -### УДАЛЕНИЕ {#iceberg-writes-delete} -В ClickHouse также поддерживается удаление строк в формате merge-on-read. -Этот запрос создаст новый снимок с файлами позиционного удаления. +### DELETE {#iceberg-writes-delete} + +Удаление избыточных строк в формате merge-on-read также поддерживается в ClickHouse. 
+Этот запрос создаст новый снимок (snapshot) с файлами position delete. -ПРИМЕЧАНИЕ: если в будущем вы захотите читать свои таблицы с помощью других движков Iceberg (таких как Spark), вам нужно отключить настройки `output_format_parquet_use_custom_encoder` и `output_format_parquet_parallel_encoding`. -Это связано с тем, что Spark читает эти файлы по идентификаторам полей Parquet (field-ids), в то время как ClickHouse в настоящее время не поддерживает запись идентификаторов полей при включённых этих флагах. +ПРИМЕЧАНИЕ: Если вы хотите в дальнейшем читать свои таблицы с использованием других движков Iceberg (таких как Spark), необходимо отключить настройки `output_format_parquet_use_custom_encoder` и `output_format_parquet_parallel_encoding`. +Это связано с тем, что Spark читает эти файлы по идентификаторам полей Parquet (field-id), в то время как ClickHouse в настоящее время не поддерживает запись этих идентификаторов при включённых флагах. Мы планируем исправить это поведение в будущем. ### Пример {#example-iceberg-writes-delete} @@ -399,14 +431,15 @@ FROM iceberg_writes_example FORMAT VERTICAL; Строка 1: -───────── +────── x: Ivanov y: 993 ``` + ### Эволюция схемы {#iceberg-writes-schema-evolution} -ClickHouse позволяет добавлять, удалять или изменять столбцы с простыми типами (не tuple, не array, не map). +ClickHouse позволяет добавлять, удалять или изменять столбцы с простыми типами данных (не типа `Tuple`, `Array` или `Map`). ### Пример {#example-iceberg-writes-evolution} @@ -440,40 +473,37 @@ SELECT * FROM iceberg_writes_example FORMAT VERTICAL; -Строка 1: +Row 1: ────── -x: Иванов +x: Ivanov y: 993 z: ᴺᵁᴸᴸ -``` - -ALTER TABLE iceberg_writes_example DROP COLUMN z; -SHOW CREATE TABLE iceberg_writes_example; -┌─statement─────────────────────────────────────────────────┐ - -1. 
│ CREATE TABLE default.iceberg_writes_example ↴│ +ALTER TABLE iceberg_writes_example DROP COLUMN z; +SHOW CREATE TABLE iceberg_writes_example; + ┌─statement─────────────────────────────────────────────────┐ +1. │ CREATE TABLE default.iceberg_writes_example ↴│ │↳( ↴│ │↳ `x` Nullable(String), ↴│ │↳ `y` Nullable(Int64) ↴│ │↳) ↴│ - │↳ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') │ + │↳ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') │ └───────────────────────────────────────────────────────────┘ SELECT * -FROM iceberg_writes_example +FROM iceberg_writes_example FORMAT VERTICAL; -Строка 1: +Row 1: ────── x: Ivanov y: 993 +``` -```` -### Уплотнение {#iceberg-writes-compaction} +### Компакция {#iceberg-writes-compaction} -ClickHouse поддерживает уплотнение таблиц Iceberg. В настоящее время можно объединять файлы позиционного удаления с файлами данных при обновлении метаданных. Идентификаторы и временные метки предыдущих снимков остаются неизменными, поэтому функция путешествия во времени продолжает работать с теми же значениями. +ClickHouse поддерживает компактацию таблиц Iceberg. В данный момент он может объединять файлы position delete с файлами данных с одновременным обновлением метаданных. Идентификаторы и метки времени предыдущих snapshot остаются без изменений, поэтому возможность time-travel по-прежнему доступна с теми же значениями. Как использовать: @@ -490,21 +520,10 @@ Row 1: ────── x: Ivanov y: 993 -```` - - -## Таблица с каталогами {#iceberg-writes-catalogs} - -Все описанные выше возможности записи также доступны с REST- и Glue‑каталогами. 
-Чтобы использовать их, создайте таблицу с табличным движком `IcebergS3` и укажите необходимые настройки: - -```sql -CREATE TABLE `database_name.table_name` ENGINE = IcebergS3('http://minio:9000/warehouse-rest/table_name/', 'minio_access_key', 'minio_secret_key') -SETTINGS storage_catalog_type="rest", storage_warehouse="demo", object_storage_endpoint="http://minio:9000/warehouse-rest", storage_region="us-east-1", storage_catalog_url="http://rest:8181/v1", ``` ## См. также {#see-also} -* [Движок таблиц Iceberg](/engines/table-engines/integrations/iceberg.md) -* [Табличная функция `icebergCluster`](/sql-reference/table-functions/icebergCluster.md) +* [Движок Iceberg](/engines/table-engines/integrations/iceberg.md) +* [Табличная функция icebergCluster](/sql-reference/table-functions/icebergCluster.md) \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md index 285e53ea490..d06c67d7335 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md @@ -8,16 +8,12 @@ title: 'icebergCluster' doc_type: 'reference' --- - - # Табличная функция icebergCluster {#icebergcluster-table-function} Это расширение табличной функции [iceberg](/sql-reference/table-functions/iceberg.md). Позволяет параллельно обрабатывать файлы Apache [Iceberg](https://iceberg.apache.org/) на многих узлах в заданном кластере. На узле-инициаторе создаётся соединение со всеми узлами кластера, и каждый файл динамически распределяется между ними. Рабочий узел запрашивает у инициатора следующую задачу для обработки и выполняет её. Это повторяется до тех пор, пока все задачи не будут завершены. 
- - ## Синтаксис {#syntax} ```sql @@ -31,7 +27,6 @@ icebergHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) icebergHDFSCluster(cluster_name, named_collection[, option=value [,..]]) ``` - ## Аргументы {#arguments} * `cluster_name` — имя кластера, которое используется для построения набора адресов и параметров подключения к удалённым и локальным серверам. @@ -47,7 +42,6 @@ icebergHDFSCluster(cluster_name, named_collection[, option=value [,..]]) SELECT * FROM icebergS3Cluster('cluster_simple', 'http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test') ``` - ## Виртуальные столбцы {#virtual-columns} - `_path` — путь к файлу. Тип: `LowCardinality(String)`. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md index caf840bb200..32522dffef0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md @@ -5,12 +5,8 @@ title: 'loop' doc_type: 'reference' --- - - # Табличная функция loop {#loop-table-function} - - ## Синтаксис {#syntax} ```sql @@ -20,7 +16,6 @@ SELECT ... FROM loop(table); SELECT ... FROM loop(other_table_function(...)); ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -29,14 +24,10 @@ SELECT ... FROM loop(other_table_function(...)); | `table` | имя таблицы. | | `other_table_function(...)` | другая табличная функция. Пример: `SELECT * FROM loop(numbers(10));` здесь `other_table_function(...)` — это `numbers(10)`. | - - ## Возвращаемые значения {#returned_values} Бесконечный цикл, возвращающий результаты запроса. 
- - ## Примеры {#examples} Получение данных из ClickHouse: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md index 6a9f05b1e62..2c3b007fd6a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md @@ -7,20 +7,14 @@ title: 'merge' doc_type: 'reference' --- - - # Табличная функция merge {#merge-table-function} Создаёт временную таблицу [Merge](../../engines/table-engines/special/merge.md). Схема таблицы выводится из базовых таблиц путём объединения их столбцов и вывода общих типов. Доступны те же виртуальные столбцы, что и для движка таблиц [Merge](../../engines/table-engines/special/merge.md). - - ## Синтаксис {#syntax} - - ```sql merge(['db_name',] 'tables_regexp') ``` @@ -32,7 +26,6 @@ merge(['db_name',] 'tables_regexp') | `db_name` | Возможные значения (необязательный параметр, по умолчанию — `currentDatabase()`):
- имя базы данных,
- константное выражение, которое возвращает строку с именем базы данных, например `currentDatabase()`,
- `REGEXP(expression)`, где `expression` — регулярное выражение для сопоставления имен БД. | | `tables_regexp` | Регулярное выражение для сопоставления имен таблиц в указанной БД или нескольких БД. | - ## См. также {#related} - [Merge](../../engines/table-engines/special/merge.md) — движок таблиц diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md index 67b3d9caed5..07d31fb88ef 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md @@ -8,21 +8,16 @@ title: 'mergeTreeIndex' doc_type: 'reference' --- - - # Табличная функция mergeTreeIndex {#mergetreeindex-table-function} Предоставляет доступ к содержимому файлов индексов и меток таблиц MergeTree. Может использоваться для интроспекции. - - ## Синтаксис {#syntax} ```sql mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -32,8 +27,6 @@ mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) | `with_marks` | Включать ли в результат столбцы с метками. | | `with_minmax` | Включать ли в результат индекс min-max. | - - ## Возвращаемое значение {#returned_value} Объект-таблица со столбцами, содержащими значения первичного индекса и индекса min-max (если включён) исходной таблицы, столбцы со значениями меток (если включены) для всех возможных файлов в частях данных исходной таблицы и виртуальные столбцы: @@ -44,8 +37,6 @@ mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) Столбец меток может содержать значение `(NULL, NULL)`, если столбец отсутствует в части данных или метки для одного из его подпотоков не записаны (например, в компактных частях). 
- - ## Пример использования {#usage-example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md index f292217a8cd..760da8987a7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md @@ -8,21 +8,16 @@ title: 'mergeTreeProjection' doc_type: 'reference' --- - - # Табличная функция mergeTreeProjection {#mergetreeprojection-table-function} Представляет содержимое некоторой проекции в таблицах MergeTree. Может использоваться для интроспекции. - - ## Синтаксис {#syntax} ```sql mergeTreeProjection(база_данных, таблица, проекция) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -31,14 +26,10 @@ mergeTreeProjection(база_данных, таблица, проекция) | `table` | Имя таблицы, из которой считывается проекция. | | `projection` | Проекция, из которой выполняется чтение. | - - ## Возвращаемое значение {#returned_value} Объект таблицы с набором столбцов, определённых указанной проекцией. - - ## Пример использования {#usage-example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md index bf8efdbe87f..b87d4b97e83 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md @@ -7,21 +7,16 @@ title: 'mongodb' doc_type: 'reference' --- - - # Табличная функция MongoDB {#mongodb-table-function} Позволяет выполнять `SELECT`-запросы к данным, хранящимся на удалённом сервере MongoDB. 
- - ## Синтаксис {#syntax} ```sql mongodb(хост:порт, база_данных, коллекция, пользователь, пароль, структура[, опции[, oid_столбцы]]) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -57,13 +52,10 @@ mongodb(uri, collection, structure[, oid_columns]) | `structure` | Схема таблицы ClickHouse, возвращаемой этой функцией. | | `oid_columns` | Список столбцов, разделённых запятыми, которые в предложении WHERE должны интерпретироваться как `oid`. По умолчанию — `_id`. | - ## Возвращаемое значение {#returned_value} Объект таблицы с теми же столбцами, что и исходная таблица MongoDB. - - ## Примеры {#examples} Предположим, у нас есть коллекция `my_collection` в базе данных MongoDB `test`, и мы вставляем в неё пару документов: @@ -106,7 +98,6 @@ SELECT * FROM mongodb( ) ``` - ## См. также {#related} - [Движок таблицы `MongoDB`](engines/table-engines/integrations/mongodb.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md index 5cb94914db1..ea94a1e3f55 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md @@ -8,21 +8,16 @@ title: 'mysql' doc_type: 'reference' --- - - # Табличная функция `mysql` {#mysql-table-function} Позволяет выполнять запросы `SELECT` и `INSERT` к данным, хранящимся на удалённом сервере MySQL. 
- - ## Синтаксис {#syntax} ```sql mysql({host:port, database, table, user, password[, replace_query, on_duplicate_clause] | named_collection[, option=value [,..]]}) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -53,7 +48,6 @@ SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'us SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password'); ``` - ## Возвращаемое значение {#returned_value} Объект таблицы с теми же столбцами, что и исходная таблица MySQL. @@ -66,8 +60,6 @@ SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', В запросе `INSERT` чтобы отличить табличную функцию `mysql(...)` от имени таблицы со списком имён столбцов, необходимо использовать ключевые слова `FUNCTION` или `TABLE FUNCTION`. См. примеры ниже. ::: - - ## Примеры {#examples} Таблица в MySQL: @@ -151,7 +143,6 @@ SELECT * FROM mysql('host:port', 'database', 'table', 'user', 'password') WHERE id > (SELECT max(id) FROM mysql_copy); ``` - ## См. также {#related} - [Движок таблицы MySQL](../../engines/table-engines/integrations/mysql.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md index c48e5bf52da..b191953ef42 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md @@ -7,33 +7,24 @@ title: 'null' doc_type: 'reference' --- - - # Функция таблицы null {#null-table-function} Создает временную таблицу указанной структуры с движком таблицы [Null](../../engines/table-engines/special/null.md). В соответствии со свойствами движка `Null` данные таблицы игнорируются, а сама таблица немедленно удаляется после выполнения запроса. Функция используется для удобства при написании тестов и проведении демонстраций. 
- - ## Синтаксис {#syntax} ```sql null('structure') ``` - ## Аргумент {#argument} - `structure` — список столбцов и их типов, строка типа [String](../../sql-reference/data-types/string.md). - - ## Возвращаемое значение {#returned_value} Временная таблица движка `Null` с указанной структурой. - - ## Пример {#example} Запрос с использованием функции `null`: @@ -50,7 +41,6 @@ INSERT INTO t SELECT * FROM numbers_mt(1000000000); DROP TABLE IF EXISTS t; ``` - ## Связанные разделы {#related} - [Движок таблицы Null](../../engines/table-engines/special/null.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md index 94698d52ea8..a84a5466a2a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md @@ -7,14 +7,10 @@ title: 'odbc' doc_type: 'reference' --- - - # Табличная функция ODBC {#odbc-table-function} Возвращает таблицу, подключённую через [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). - - ## Синтаксис {#syntax} ```sql @@ -23,7 +19,6 @@ odbc(источник_данных, внешняя_таблица) odbc(именованная_коллекция) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -38,8 +33,6 @@ odbc(именованная_коллекция) Поля со значениями `NULL` из внешней таблицы преобразуются в значения по умолчанию для базового типа данных. Например, если поле удалённой таблицы MySQL имеет тип `INT NULL`, оно преобразуется в 0 (значение по умолчанию для типа данных ClickHouse `Int32`). - - ## Пример использования {#usage-example} **Получение данных из локального экземпляра MySQL через ODBC** @@ -117,7 +110,6 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') └────────┴──────────────┴───────┴────────────────┘ ``` - ## См. 
также {#see-also} - [Словари ODBC](/sql-reference/dictionaries#dbms) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md index e5b43d21f69..d78bcf1520a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md @@ -9,15 +9,12 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # Табличная функция paimon {#paimon-table-function} Предоставляет интерфейс только для чтения к таблицам Apache [Paimon](https://paimon.apache.org/), хранящимся в Amazon S3, Azure, HDFS или локально, аналогичный работе с обычной таблицей. - - ## Синтаксис {#syntax} ```sql @@ -32,7 +29,6 @@ paimonHDFS(path_to_table, [,format] [,compression_method]) paimonLocal(path_to_table, [,format] [,compression_method]) ``` - ## Аргументы {#arguments} Описание аргументов совпадает с описанием аргументов в табличных функциях `s3`, `azureBlobStorage`, `HDFS` и `file` соответственно. @@ -42,8 +38,6 @@ paimonLocal(path_to_table, [,format] [,compression_method]) Таблица с заданной структурой для чтения данных из указанной таблицы Paimon. - - ## Определение именованной коллекции {#defining-a-named-collection} Ниже приведён пример настройки именованной коллекции для хранения URL-адреса и учётных данных: @@ -67,13 +61,10 @@ SELECT * FROM paimonS3(paimon_conf, filename = 'test_table') DESCRIBE paimonS3(paimon_conf, filename = 'test_table') ``` - ## Псевдонимы {#aliases} Табличная функция `paimon` теперь является псевдонимом для `paimonS3`. - - ## Виртуальные столбцы {#virtual-columns} - `_path` — путь к файлу. Тип: `LowCardinality(String)`. @@ -82,8 +73,6 @@ DESCRIBE paimonS3(paimon_conf, filename = 'test_table') - `_time` — время последнего изменения файла. Тип: `Nullable(DateTime)`. 
Если время неизвестно, значение равно `NULL`. - `_etag` — ETag файла. Тип: `LowCardinality(String)`. Если ETag неизвестен, значение равно `NULL`. - - ## Поддерживаемые типы данных {#data-types-supported} | Тип данных Paimon | Тип данных ClickHouse @@ -106,8 +95,6 @@ DESCRIBE paimonS3(paimon_conf, filename = 'test_table') |ARRAY |Array | |MAP |Map | - - ## Поддерживаемые партиции {#partition-supported} Типы данных, поддерживаемые в ключах партиций Paimon: * `CHAR` @@ -125,8 +112,6 @@ DESCRIBE paimonS3(paimon_conf, filename = 'test_table') * `FLOAT` * `DOUBLE` - - ## См. также {#see-also} * [Табличная функция Paimon Cluster](/sql-reference/table-functions/paimonCluster.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md index cddef529a9f..95d55299420 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md @@ -9,7 +9,6 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # Табличная функция paimonCluster {#paimoncluster-table-function} @@ -18,8 +17,6 @@ import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; Позволяет обрабатывать файлы из Apache [Paimon](https://paimon.apache.org/) параллельно на множестве узлов заданного кластера. На инициаторе создаётся подключение ко всем узлам кластера, и каждый файл динамически распределяется между ними. Рабочий узел запрашивает у инициатора следующую задачу для обработки и выполняет её. Это повторяется до тех пор, пока все задачи не будут выполнены. 
- - ## Синтаксис {#syntax} ```sql @@ -30,7 +27,6 @@ paimonAzureCluster(cluster_name, connection_string|storage_account_url, containe paimonHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) ``` - ## Аргументы {#arguments} - `cluster_name` — имя кластера, которое используется для построения набора адресов и параметров подключения к удалённым и локальным серверам. @@ -40,8 +36,6 @@ paimonHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) Таблица с указанной структурой для чтения данных из кластера из указанной таблицы Paimon. - - ## Виртуальные столбцы {#virtual-columns} - `_path` — путь к файлу. Тип: `LowCardinality(String)`. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md index 7b385ba275b..d2b7c5605e4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md @@ -7,21 +7,16 @@ title: 'postgresql' doc_type: 'reference' --- - - # Табличная функция PostgreSQL {#postgresql-table-function} Позволяет выполнять запросы `SELECT` и `INSERT` к данным, которые хранятся на удалённом сервере PostgreSQL. - - ## Синтаксис {#syntax} ```sql postgresql({host:port, database, table, user, password[, schema, [, on_conflict]] | named_collection[, option=value [,..]]}) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -36,8 +31,6 @@ postgresql({host:port, database, table, user, password[, schema, [, on_conflict] Аргументы также могут быть переданы с использованием [именованных коллекций](operations/named-collections.md). В этом случае `host` и `port` должны быть указаны отдельно. Такой подход рекомендуется для продакшен-среды. - - ## Возвращаемое значение {#returned_value} Объект таблицы с теми же столбцами, что и исходная таблица PostgreSQL. 
@@ -46,8 +39,6 @@ postgresql({host:port, database, table, user, password[, schema, [, on_conflict] В запросе `INSERT`, чтобы отличить табличную функцию `postgresql(...)` от имени таблицы со списком имён столбцов, необходимо использовать ключевые слова `FUNCTION` или `TABLE FUNCTION`. См. примеры ниже. ::: - - ## Детали реализации {#implementation-details} Запросы `SELECT` на стороне PostgreSQL выполняются в виде `COPY (SELECT ...) TO STDOUT` внутри транзакции PostgreSQL только для чтения с фиксацией (commit) после каждого запроса `SELECT`. @@ -78,7 +69,6 @@ SELECT name FROM postgresql(`postgres1:5431|postgres2:5432`, 'postgres_database' Поддерживаются приоритеты реплик для источника словаря PostgreSQL. Чем больше число в отображении, тем ниже приоритет. Наивысший приоритет — `0`. - ## Примеры {#examples} Таблица в PostgreSQL: @@ -157,7 +147,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgrsql_user', 'password', 'nice.schema'); ``` - ## Связанные материалы {#related} - [Движок таблиц PostgreSQL](../../engines/table-engines/integrations/postgresql.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md index afcb9bf2c8d..db5d60b78e7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md @@ -7,14 +7,10 @@ title: 'prometheusQuery' doc_type: 'reference' --- - - # Табличная функция prometheusQuery {#prometheusquery-table-function} Выполняет запрос Prometheus по данным таблицы TimeSeries. 
- - ## Синтаксис {#syntax} ```sql @@ -23,7 +19,6 @@ prometheusQuery(db_name.time_series_table, 'promql_query', evaluation_time) prometheusQuery('time_series_table', 'promql_query', evaluation_time) ``` - ## Аргументы {#arguments} - `db_name` — имя базы данных, в которой находится таблица TimeSeries. @@ -31,8 +26,6 @@ prometheusQuery('time_series_table', 'promql_query', evaluation_time) - `promql_query` — запрос, написанный в [синтаксисе PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/). - `evaluation_time` — метка времени вычисления. Чтобы вычислить запрос на текущий момент времени, используйте `now()` в качестве значения `evaluation_time`. - - ## Возвращаемое значение {#returned_value} Функция может возвращать различные наборы столбцов в зависимости от типа результата запроса, переданного в параметр `promql_query`: @@ -44,8 +37,6 @@ prometheusQuery('time_series_table', 'promql_query', evaluation_time) | scalar | scalar ValueType | prometheusQuery(mytable, '1h30m') | | string | string String | prometheusQuery(mytable, '"abc"') | - - ## Пример {#example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md index 9e38ba647e9..e41673b0d44 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md @@ -7,14 +7,10 @@ title: 'prometheusQueryRange' doc_type: 'reference' --- - - # Табличная функция prometheusQuery {#prometheusquery-table-function} Вычисляет запрос Prometheus, используя данные из таблицы TimeSeries в заданном интервале времени оценки. 
- - ## Синтаксис {#syntax} ```sql @@ -23,7 +19,6 @@ prometheusQueryRange(db_name.time_series_table, 'promql_query', start_time, end_ prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, step) ``` - ## Аргументы {#arguments} - `db_name` - имя базы данных, в которой находится таблица TimeSeries. @@ -33,8 +28,6 @@ prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, - `end_time` - время окончания диапазона вычисления. - `step` - шаг, используемый для перебора времени вычисления от `start_time` до `end_time` (включительно). - - ## Возвращаемое значение {#returned_value} Функция может возвращать различные столбцы в зависимости от типа результата запроса, переданного в параметре `promql_query`: @@ -46,8 +39,6 @@ prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, | scalar | scalar ValueType | prometheusQuery(mytable, '1h30m') | | string | string String | prometheusQuery(mytable, '"abc"') | - - ## Пример {#example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md index b73acd275be..384cf36cd2c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md @@ -7,21 +7,16 @@ title: 'redis' doc_type: 'reference' --- - - # Табличная функция redis {#redis-table-function} Эта табличная функция предназначена для интеграции ClickHouse с [Redis](https://redis.io/). 
- - ## Синтаксис {#syntax} ```sql redis(host:port, key, structure[, db_index[, password[, pool_size]]]) ``` - ## Аргументы {#arguments} | Argument | Description | @@ -39,14 +34,10 @@ redis(host:port, key, structure[, db_index[, password[, pool_size]]]) [Именованные коллекции](/operations/named-collections.md) в настоящий момент не поддерживаются для табличной функции `redis`. - - ## Возвращаемое значение {#returned_value} Объект-таблица, в котором ключ — это ключ Redis, а остальные столбцы вместе образуют значение Redis. - - ## Пример использования {#usage-example} Чтение данных из Redis: @@ -68,7 +59,6 @@ INSERT INTO TABLE FUNCTION redis( 'key String, v1 String, v2 UInt32') values ('1', '1', 1); ``` - ## См. также {#related} - [Табличный движок `Redis`](/engines/table-engines/integrations/redis.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md index 00727edb8c4..821fc5ae1f4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md @@ -7,16 +7,12 @@ title: 'remote, remoteSecure' doc_type: 'reference' --- - - # Табличные функции remote, remoteSecure {#remote-remotesecure-table-function} Табличная функция `remote` позволяет получать доступ к удалённым серверам «на лету», то есть без создания таблицы [Distributed](../../engines/table-engines/special/distributed.md). Табличная функция `remoteSecure` аналогична `remote`, но использует защищённое соединение. Обе функции могут использоваться в запросах `SELECT` и `INSERT`. 
- - ## Синтаксис {#syntax} ```sql @@ -28,7 +24,6 @@ remoteSecure(addresses_expr, [db.table, user [, password], sharding_key]) remoteSecure(named_collection[, option=value [,..]]) ``` - ## Параметры {#parameters} | Аргумент | Описание | @@ -42,14 +37,10 @@ remoteSecure(named_collection[, option=value [,..]]) Аргументы также могут передаваться с помощью [именованных коллекций](operations/named-collections.md). - - ## Возвращаемое значение {#returned-value} Таблица, расположенная на удалённом сервере. - - ## Использование {#usage} Поскольку табличные функции `remote` и `remoteSecure` заново устанавливают соединение для каждого запроса, рекомендуется вместо них использовать таблицу `Distributed`. Кроме того, если заданы имена хостов, они разрешаются, и ошибки разрешения имён не учитываются при работе с различными репликами. При обработке большого числа запросов всегда создавайте таблицу `Distributed` заранее и не используйте табличную функцию `remote`. @@ -81,7 +72,6 @@ localhost example01-01-1,example01-02-1 ``` - ## Примеры {#examples} ### Выборка данных с удалённого сервера: {#selecting-data-from-a-remote-server} @@ -171,7 +161,6 @@ remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD') SELECT * from imdb.actors ``` - ## Глоббинг {#globs-in-addresses} Шаблоны в фигурных скобках `{ }` используются для генерации набора шардов и для указания реплик. Если фигурных скобок несколько пар, генерируется декартово произведение соответствующих наборов. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md index c769af4799d..e8028e150f5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md @@ -13,7 +13,6 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # Табличная функция s3 {#s3-table-function} Предоставляет табличный интерфейс для чтения и записи файлов в [Amazon S3](https://aws.amazon.com/s3/) и [Google Cloud Storage](https://cloud.google.com/storage/). Эта табличная функция аналогична [функции hdfs](../../sql-reference/table-functions/hdfs.md), но поддерживает возможности, специфичные для S3. @@ -22,8 +21,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; При использовании табличной функции `s3` с [`INSERT INTO...SELECT`](../../sql-reference/statements/insert-into#inserting-the-results-of-select) данные читаются и вставляются в потоковом режиме. В памяти находятся только несколько блоков данных, пока блоки непрерывно читаются из S3 и отправляются в целевую таблицу. - - ## Синтаксис {#syntax} ```sql @@ -74,13 +71,10 @@ URL для GCS имеет следующий формат, так как endpoin | `no_sign_request` | по умолчанию отключен. | | `expiration_window_seconds` | значение по умолчанию — 120. | - ## Возвращаемое значение {#returned_value} Таблица заданной структуры, предназначенная для чтения или записи данных в указанный файл. 
- - ## Примеры {#examples} Выбор первых 5 строк таблицы из файла в S3 `https://datasets-documentation.s3.eu-west-3.amazonaws.com/aapl_stock.csv`: @@ -135,7 +129,6 @@ FROM s3( ::: - ## Использование {#usage} Предположим, что у нас есть несколько файлов со следующими URI в S3: @@ -218,7 +211,6 @@ SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bu SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip'); ``` - Примечание. В файле конфигурации сервера можно указать пользовательские сопоставления URL-адресов. Пример: ```sql @@ -254,7 +246,6 @@ SELECT count(*) FROM s3(creds, url='https://s3-object-url.csv') ``` - ## Партиционированная запись {#partitioned-write} ### Стратегия разбиения {#partition-strategy} @@ -301,7 +292,6 @@ INSERT INTO TABLE FUNCTION В результате данные записываются в три файла в разных бакетах: `my_bucket_1/file.csv`, `my_bucket_10/file.csv` и `my_bucket_20/file.csv`. - ## Доступ к публичным бакетам {#accessing-public-buckets} ClickHouse пытается получить учетные данные из множества разных источников. @@ -318,7 +308,6 @@ FROM s3( LIMIT 5; ``` - ## Использование учетных данных S3 (ClickHouse Cloud) {#using-s3-credentials-clickhouse-cloud} Для непубличных бакетов пользователи могут передать `aws_access_key_id` и `aws_secret_access_key` функции. Например: @@ -339,7 +328,6 @@ SELECT count() FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.co Дополнительные примеры можно найти [здесь](/cloud/data-sources/secure-s3#access-your-s3-bucket-with-the-clickhouseaccess-role) - ## Работа с архивами {#working-with-archives} Предположим, что у нас есть несколько архивных файлов со следующими URI в S3: @@ -365,13 +353,10 @@ TAR Архивы ZIP и TAR можно читать из любого поддерживаемого хранилища, а архивы 7Z — только с локальной файловой системы, на которой установлен ClickHouse. 
::: - ## Вставка данных {#inserting-data} Обратите внимание, что строки можно вставлять только в новые файлы. Операции слияния или разбиения файлов не выполняются. После того как файл записан, последующие вставки в него завершатся с ошибкой. Подробнее см. [здесь](/integrations/s3#inserting-data). - - ## Виртуальные столбцы {#virtual-columns} - `_path` — Путь к файлу. Тип: `LowCardinality(String)`. В случае архива показывает путь в формате: `"{path_to_archive}::{path_to_file_inside_archive}"`. @@ -379,8 +364,6 @@ TAR - `_size` — Размер файла в байтах. Тип: `Nullable(UInt64)`. Если размер файла неизвестен, значение — `NULL`. В случае архива показывает несжатый размер файла внутри архива. - `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. - - ## настройка use_hive_partitioning {#hive-style-partitioning} Эта настройка подсказывает ClickHouse, что при чтении нужно разбирать файлы с секционированием в стиле Hive. На операцию записи она не влияет. Для симметричных операций чтения и записи используйте аргумент `partition_strategy`. @@ -393,7 +376,6 @@ TAR SELECT * FROM s3('s3://data/path/date=*/country=*/code=*/*.parquet') WHERE date > '2020-01-01' AND country = 'Netherlands' AND code = 42; ``` - ## Доступ к бакетам с оплатой по запросам (requester pays) {#accessing-requester-pays-buckets} Чтобы получить доступ к бакету с оплатой по запросам (requester pays), во всех запросах нужно передавать заголовок `x-amz-request-payer = requester`. Это можно сделать, передав параметр `headers('x-amz-request-payer' = 'requester')` в функцию s3. Например: @@ -412,15 +394,12 @@ FROM s3('https://coiled-datasets-rp.s3.us-east-1.amazonaws.com/1trc/measurements Пиковое использование памяти: 192.27 КиБ. ``` - ## Настройки хранения {#storage-settings} - [s3_truncate_on_insert](operations/settings/settings.md#s3_truncate_on_insert) - позволяет усечь файл перед вставкой в него. По умолчанию отключено. 
- [s3_create_new_file_on_insert](operations/settings/settings.md#s3_create_new_file_on_insert) - позволяет создавать новый файл при каждой вставке, если формат имеет суффикс. По умолчанию отключено. - [s3_skip_empty_files](operations/settings/settings.md#s3_skip_empty_files) - позволяет пропускать пустые файлы при чтении. По умолчанию включено. - - ## Вложенные схемы Avro {#nested-avro-schemas} При чтении файлов Avro, содержащих **вложенные записи**, которые различаются между файлами (например, в некоторых файлах есть дополнительное поле внутри вложенного объекта), ClickHouse может вернуть ошибку вида: @@ -449,7 +428,6 @@ FROM s3('https://bucket-name/*.avro', 'Avro') SETTINGS schema_inference_mode='union'; ``` - ## Связанные материалы {#related} - [Движок S3](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md index 2139ba153c3..8c937d200b0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md @@ -9,16 +9,12 @@ title: 's3Cluster' doc_type: 'reference' --- - - # Табличная функция s3Cluster {#s3cluster-table-function} Это расширение табличной функции [s3](sql-reference/table-functions/s3.md). Позволяет обрабатывать файлы из [Amazon S3](https://aws.amazon.com/s3/) и [Google Cloud Storage](https://cloud.google.com/storage/) параллельно на нескольких узлах заданного кластера. На узле-инициаторе она устанавливает соединение со всеми узлами кластера, раскрывает шаблоны с использованием символа `*` в путях к файлам S3 и динамически распределяет каждый файл. На рабочем узле она запрашивает у инициатора следующую задачу для обработки и обрабатывает её. Это повторяется до тех пор, пока все задачи не будут выполнены. 
- - ## Синтаксис {#syntax} ```sql @@ -26,7 +22,6 @@ s3Cluster(cluster_name, url[, NOSIGN | access_key_id, secret_access_key,[session s3Cluster(cluster_name, named_collection[, option=value [,..]]) ``` - ## Аргументы {#arguments} | Argument | Description | @@ -51,14 +46,10 @@ s3Cluster(cluster_name, named_collection[, option=value [,..]]) | `no_sign_request` | По умолчанию отключён. | | `expiration_window_seconds` | Значение по умолчанию — 120. | - - ## Возвращаемое значение {#returned_value} Таблица заданной структуры, используемая для чтения или записи данных в указанный файл. - - ## Примеры {#examples} Выберите данные из всех файлов в каталогах `/root/data/clickhouse` и `/root/data/database/`, используя все узлы кластера `cluster_simple`: @@ -93,19 +84,14 @@ SELECT count(*) FROM s3Cluster( ) ``` - ## Доступ к приватным и публичным бакетам {#accessing-private-and-public-buckets} Пользователи могут использовать те же подходы, что и описанные для функции S3 [здесь](/sql-reference/table-functions/s3#accessing-public-buckets). - - ## Оптимизация производительности {#optimizing-performance} Подробнее об оптимизации производительности функции s3 читайте в [нашем подробном руководстве](/integrations/s3/performance). - - ## Связанные разделы {#related} - [Движок S3](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md index 37a23f0dc8a..9ef87619974 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md @@ -7,34 +7,25 @@ title: 'sqlite' doc_type: 'reference' --- - - # Табличная функция SQLite {#sqlite-table-function} Позволяет выполнять запросы к данным, хранящимся в базе данных [SQLite](../../engines/database-engines/sqlite.md). 
- - ## Синтаксис {#syntax} ```sql sqlite('db_path', 'table_name') ``` - ## Аргументы {#arguments} - `db_path` — Путь к файлу базы данных SQLite. [String](../../sql-reference/data-types/string.md). - `table_name` — Имя таблицы в базе данных SQLite. [String](../../sql-reference/data-types/string.md). - - ## Возвращаемое значение {#returned_value} - Объект таблицы с теми же столбцами, что и в исходной таблице `SQLite`. - - ## Пример {#example} Запрос: @@ -53,7 +44,6 @@ SELECT * FROM sqlite('sqlite.db', 'table1') ORDER BY col2; └───────┴──────┘ ``` - ## См. также {#related} - [SQLite](../../engines/table-engines/integrations/sqlite.md) — движок таблиц diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md index 6eb366d36c5..4a6ae6504f5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md @@ -7,15 +7,11 @@ title: 'timeSeriesSelector' doc_type: 'reference' --- - - # Табличная функция timeSeriesSelector {#timeseriesselector-table-function} Считывает временные ряды из таблицы TimeSeries, отфильтрованные селектором и ограниченные временными метками указанного интервала. Эта функция аналогична [range selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#range-vector-selectors), но также используется для реализации [instant selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#instant-vector-selectors). - - ## Синтаксис {#syntax} ```sql @@ -24,7 +20,6 @@ timeSeriesSelector(db_name.time_series_table, 'instant_query', min_time, max_tim timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) ``` - ## Аргументы {#arguments} - `db_name` — имя базы данных, в которой находится таблица TimeSeries. 
@@ -33,8 +28,6 @@ timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) - `min_time` — начальная метка времени (включительно). - `max_time` — конечная метка времени (включительно). - - ## Возвращаемое значение {#returned_value} Функция возвращает три столбца: @@ -44,8 +37,6 @@ timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) Порядок возвращаемых данных не гарантируется. - - ## Пример {#example} ```sql diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md index b9260cb2235..2eb2349b61e 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md @@ -10,22 +10,18 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # Табличная функция url {#url-table-function} Функция `url` создаёт таблицу на основе `URL` с заданными `format` и `structure`. Функция `url` может использоваться в запросах `SELECT` и `INSERT` к данным в таблицах [URL](../../engines/table-engines/special/url.md). - - ## Синтаксис {#syntax} ```sql url(URL [,format] [,structure] [,headers]) ``` - ## Параметры {#parameters} | Параметр | Описание | @@ -35,14 +31,10 @@ url(URL [,format] [,structure] [,headers]) | `structure` | Структура таблицы в формате `'UserID UInt64, Name String'`. Определяет имена и типы столбцов. Тип: [String](../../sql-reference/data-types/string.md). | | `headers` | Заголовки в формате `'headers('key1'='value1', 'key2'='value2')'`. Позволяет задать заголовки для HTTP-запроса. | - - ## Возвращаемое значение {#returned_value} Таблица с указанным форматом и структурой, содержащая данные из заданного `URL`-адреса. 
- - ## Примеры {#examples} Получение первых трёх строк таблицы, содержащей столбцы типов `String` и [UInt32](../../sql-reference/data-types/int-uint.md), с HTTP-сервера, который отвечает в формате [CSV](/interfaces/formats/CSV). @@ -59,14 +51,11 @@ INSERT INTO FUNCTION url('http://127.0.0.1:8123/?query=INSERT+INTO+test_table+FO SELECT * FROM test_table; ``` - ## Глоб-шаблоны в URL {#globs-in-url} Шаблоны в фигурных скобках `{ }` используются для формирования набора шардов или указания резервных адресов. Поддерживаемые типы шаблонов и примеры см. в описании функции [remote](remote.md#globs-in-addresses). Символ `|` внутри шаблонов используется для указания резервных адресов. Они перебираются в том же порядке, в котором перечислены в шаблоне. Количество сгенерированных адресов ограничивается настройкой [glob_expansion_max_elements](../../operations/settings/settings.md#glob_expansion_max_elements). - - ## Виртуальные столбцы {#virtual-columns} - `_path` — Путь к `URL`. Тип: `LowCardinality(String)`. @@ -75,8 +64,6 @@ SELECT * FROM test_table; - `_time` — Время последнего изменения файла. Тип: `Nullable(DateTime)`. Если время неизвестно, значение — `NULL`. - `_headers` — Заголовки HTTP-ответа. Тип: `Map(LowCardinality(String), LowCardinality(String))`. - - ## настройка use_hive_partitioning {#hive-style-partitioning} Когда настройка `use_hive_partitioning` установлена в 1, ClickHouse будет распознавать секционирование в стиле Hive в пути (`/name=value/`) и позволит использовать столбцы секций как виртуальные столбцы в запросе. Эти виртуальные столбцы будут иметь те же имена, что и в пути секционирования, но с префиксом `_`. 
@@ -89,20 +76,15 @@ SELECT * FROM test_table; SELECT * FROM url('http://data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## Настройки хранения {#storage-settings} - [engine_url_skip_empty_files](/operations/settings/settings.md#engine_url_skip_empty_files) — позволяет пропускать пустые файлы при чтении. По умолчанию отключено. - [enable_url_encoding](/operations/settings/settings.md#enable_url_encoding) — позволяет включать или отключать декодирование/кодирование пути в URI. По умолчанию включено. - - ## Разрешения {#permissions} Функция `url` требует прав `CREATE TEMPORARY TABLE`. Поэтому она не будет работать для пользователей с настройкой [`readonly`](/operations/settings/permissions-for-queries#readonly) = 1. Требуется как минимум `readonly` = 2. - - ## См. также {#related} - [Виртуальные столбцы](/engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md index 1af09b3da36..077b58ec0be 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md @@ -7,21 +7,16 @@ title: 'urlCluster' doc_type: 'reference' --- - - # Функция таблицы urlCluster {#urlcluster-table-function} Позволяет обрабатывать файлы из URL параллельно с нескольких узлов в указанном кластере. На узле-инициаторе она устанавливает соединение со всеми узлами кластера, раскрывает символ «звёздочка» в пути URL к файлам и динамически распределяет каждый файл. На рабочем узле она запрашивает у инициатора следующую задачу и обрабатывает её. Это повторяется до тех пор, пока все задачи не будут выполнены. 
- - ## Синтаксис {#syntax} ```sql urlCluster(cluster_name, URL, format, structure) ``` - ## Аргументы {#arguments} | Аргумент | Описание | @@ -31,14 +26,10 @@ urlCluster(cluster_name, URL, format, structure) | `format` | [Формат](/sql-reference/formats) данных. Тип: [String](../../sql-reference/data-types/string.md). | | `structure` | Структура таблицы в формате `'UserID UInt64, Name String'`. Определяет имена и типы столбцов. Тип: [String](../../sql-reference/data-types/string.md). | - - ## Возвращаемое значение {#returned_value} Таблица заданного формата и структуры с данными из указанного `URL`. - - ## Примеры {#examples} Получение первых трёх строк таблицы со столбцами типов `String` и [UInt32](../../sql-reference/data-types/int-uint.md) от HTTP-сервера, который отвечает в формате [CSV](/interfaces/formats/CSV). @@ -65,14 +56,11 @@ if __name__ == "__main__": SELECT * FROM urlCluster('cluster_simple','http://127.0.0.1:12345', CSV, 'column1 String, column2 UInt32') ``` - ## Шаблоны (globs) в URL {#globs-in-url} Шаблоны в фигурных скобках `{ }` используются для генерации набора шардов или указания резервных (failover) адресов. Поддерживаемые типы шаблонов и примеры см. в описании функции [remote](remote.md#globs-in-addresses). Символ `|` внутри шаблонов используется для указания резервных адресов. Они перебираются в том же порядке, в котором перечислены в шаблоне. Количество сгенерированных адресов ограничивается параметром [glob_expansion_max_elements](../../operations/settings/settings.md#glob_expansion_max_elements). - - ## См. 
также {#related} - [Движок HDFS](/engines/table-engines/integrations/hdfs) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md index 8349aee01fa..76f495e4ec3 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md @@ -8,8 +8,6 @@ title: 'values' doc_type: 'reference' --- - - # Табличная функция Values {#values-table-function} Табличная функция `Values` позволяет создать временное хранилище и заполнить @@ -19,8 +17,6 @@ doc_type: 'reference' Values — регистронезависимая функция. То есть `VALUES` и `values` одинаково допустимы. ::: - - ## Синтаксис {#syntax} Базовый синтаксис табличной функции `VALUES` выглядит следующим образом: @@ -40,7 +36,6 @@ VALUES( ) ``` - ## Аргументы {#arguments} - `column1_name Type1, ...` (необязательный аргумент). [String](/sql-reference/data-types/string), @@ -55,14 +50,10 @@ VALUES( для подробностей. ::: - - ## Возвращаемое значение {#returned-value} - Возвращает временную таблицу, содержащую указанные значения. - - ## Примеры {#examples} ```sql title="Query" @@ -200,7 +191,6 @@ FROM VALUES( └──────────┘ ``` - ## См. также {#see-also} - [Формат Values](/interfaces/formats/Values) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md index 25b9a63ea62..4f528a32c2a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md @@ -7,33 +7,24 @@ title: 'view' doc_type: 'reference' --- - - # Табличная функция view {#view-table-function} Преобразует подзапрос в таблицу. Функция реализует представления (см. 
[CREATE VIEW](/sql-reference/statements/create/view)). Результирующая таблица не хранит данные, а содержит только указанный запрос `SELECT`. При чтении из таблицы ClickHouse выполняет этот запрос и удаляет из результата все ненужные столбцы. - - ## Синтаксис {#syntax} ```sql view(подзапрос) ``` - ## Аргументы {#arguments} - `subquery` — запрос типа `SELECT`. - - ## Возвращаемое значение {#returned_value} - Таблица. - - ## Примеры {#examples} Входная таблица: @@ -74,7 +65,6 @@ SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name)); SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)); ``` - ## См. также {#related} - [Табличный движок View](/engines/table-engines/special/view/) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md index e8d262dffdc..c67d8d5fa5a 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md @@ -9,15 +9,12 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # Табличная функция ytsaurus {#ytsaurus-table-function} Табличная функция ytsaurus позволяет считывать данные из кластера YTsaurus. - - ## Синтаксис {#syntax} ```sql @@ -31,7 +28,6 @@ ytsaurus(http_proxy_url, cypress_path, oauth_token, format) Введите команду `set allow_experimental_ytsaurus_table_function = 1`. ::: - ## Аргументы {#arguments} - `http_proxy_url` — URL HTTP-прокси YTsaurus. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/transactions.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/transactions.md index 91befe1f7fe..f4aaf8c1dcc 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/transactions.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/transactions.md @@ -8,11 +8,8 @@ doc_type: 'guide' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # Поддержка транзакционности (ACID) {#transactional-acid-support} - - ## Случай 1: INSERT в один раздел одной таблицы семейства MergeTree* {#case-1-insert-into-one-partition-of-one-table-of-the-mergetree-family} Операция является транзакционной (ACID), если вставляемые строки упакованы и вставляются одним блоком (см. примечания): @@ -22,35 +19,25 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - Durable (долговечность): успешный INSERT записывается в файловую систему до ответа клиенту, на одну реплику или несколько реплик (управляется настройкой `insert_quorum`), и ClickHouse может попросить ОС синхронизировать данные файловой системы с носителем (управляется настройкой `fsync_after_insert`). - INSERT в несколько таблиц одним оператором возможен, если задействованы материализованные представления (INSERT от клиента выполняется в таблицу, у которой есть связанные материализованные представления). 
- - ## Случай 2: INSERT в несколько партиций одной таблицы семейства MergeTree* {#case-2-insert-into-multiple-partitions-of-one-table-of-the-mergetree-family} Аналогично случаю 1 выше, с таким уточнением: - Если таблица имеет много партиций и INSERT затрагивает многие из них, то вставка в каждую партицию является самостоятельной транзакцией - - ## Случай 3: INSERT в одну распределённую таблицу семейства MergeTree* {#case-3-insert-into-one-distributed-table-of-the-mergetree-family} Аналогичен случаю 1 выше, но с одной особенностью: - операция INSERT в таблицу движка Distributed не является транзакционной в целом, тогда как вставка в каждый шард — транзакционная - - ## Случай 4: Использование таблицы Buffer {#case-4-using-a-buffer-table} - вставка в таблицы Buffer не обладает свойствами атомарности, изолированности, согласованности и долговечности - - ## Случай 5: Использование async_insert {#case-5-using-async_insert} То же, что и в случае 1 выше, со следующим уточнением: - атомарность обеспечивается даже если `async_insert` включён и `wait_for_async_insert` установлен в 1 (значение по умолчанию), но если `wait_for_async_insert` установлен в 0, то атомарность не гарантируется. - - ## Примечания {#notes} - строки, вставленные клиентом в некотором формате данных, упаковываются в один блок, когда: - формат вставки построчный (например, CSV, TSV, Values, JSONEachRow и т. д.), а данные содержат меньше, чем `max_insert_block_size` строк (~1 000 000 по умолчанию) или меньше, чем `min_chunk_bytes_for_parallel_parsing` байт (10 МБ по умолчанию), если используется параллельный разбор (он включён по умолчанию) @@ -63,8 +50,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - «согласованность» в терминах ACID не охватывает семантику распределённых систем, см. 
https://jepsen.io/consistency; она управляется другими настройками (select_sequential_consistency) - это объяснение не охватывает новую функциональность транзакций, которая позволяет использовать полнофункциональные транзакции над несколькими таблицами, материализованными представлениями, для нескольких SELECT и т. д. (см. следующий раздел о Transactions, Commit и Rollback) - - ## Транзакции, фиксация (commit) и откат (rollback) {#transactions-commit-and-rollback} @@ -204,7 +189,6 @@ ENGINE = MergeTree ORDER BY n ``` - ```response Ok. ``` @@ -322,7 +306,6 @@ is_readonly: 1 state: RUNNING ``` - ## Подробности {#more-details} Ознакомьтесь с этой [мета‑задачей](https://github.com/ClickHouse/ClickHouse/issues/48794), чтобы найти гораздо более обширные тесты и быть в курсе прогресса. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md index e32dac15bc6..2d66489b83c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md @@ -7,15 +7,11 @@ title: 'Оконные функции' doc_type: 'reference' --- - - # Оконные функции {#window-functions} Оконные функции позволяют выполнять вычисления над набором строк, связанных с текущей строкой. Часть таких вычислений аналогична тем, что можно выполнить с агрегатной функцией, но оконная функция не приводит к объединению строк в единый результирующий набор — отдельные строки по‑прежнему возвращаются. - - ## Стандартные оконные функции {#standard-window-functions} ClickHouse поддерживает стандартную грамматику для определения окон и оконных функций. В таблице ниже указано, поддерживается ли та или иная возможность. @@ -36,8 +32,6 @@ ClickHouse поддерживает стандартную грамматику | `lag/lead(value, offset)` | ✅
Вы также можете использовать одно из следующих обходных решений:
1) `any(value) over (.... rows between preceding and preceding)`, или `following` для `lead`
2) `lagInFrame/leadInFrame`, которые являются аналогами, но учитывают оконный фрейм. Чтобы получить поведение, идентичное `lag/lead`, используйте `rows between unbounded preceding and unbounded following` | | ntile(buckets) | ✅
Задайте окно следующим образом: (partition by x order by y rows between unbounded preceding and unbounded following). | - - ## Оконные функции ClickHouse {#clickhouse-specific-window-functions} Также доступна следующая оконная функция ClickHouse: @@ -51,7 +45,6 @@ ClickHouse поддерживает стандартную грамматику - `0` для первой строки, - ${\text{metric}_i - \text{metric}_{i-1} \over \text{timestamp}_i - \text{timestamp}_{i-1}} * \text{interval}$ для $i$-й строки. - ## Синтаксис {#syntax} ```text @@ -97,7 +90,6 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column] * [`lagInFrame(x)`](./lagInFrame.md) - Возвращает значение, вычисленное для строки, которая находится на заданное количество строк раньше текущей строки в упорядоченном фрейме. * [`leadInFrame(x)`](./leadInFrame.md) - Возвращает значение, вычисленное для строки, которая находится на заданное количество строк позже текущей строки в упорядоченном фрейме. - ## Примеры {#examples} Рассмотрим несколько примеров использования оконных функций. 
@@ -196,7 +188,6 @@ SELECT FROM salaries; ``` - ```text ┌─игрок───────────┬─зарплата─┬─команда───────────────────┬─максКоманды─┬───разница─┐ │ Charles Juarez │ 190000 │ New Coreystad Archdukes │ 190000 │ 0 │ @@ -280,7 +271,6 @@ ORDER BY └──────────┴───────┴───────┴──────────────┘ ``` - ```sql -- краткая форма — без выражения границ, без ORDER BY, -- эквивалент `ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING` @@ -355,7 +345,6 @@ ORDER BY └──────────┴───────┴───────┴────────────────────┴──────────────┘ ``` - ```sql -- фрейм ограничен началом партиции и текущей строкой, но порядок сортировки обратный SELECT @@ -451,7 +440,6 @@ ORDER BY value ASC; ``` - ┌─part_key─┬─value─┬─order─┬─frame_values─┬─rn_1─┬─rn_2─┬─rn_3─┬─rn_4─┐ │ 1 │ 1 │ 1 │ [5,4,3,2,1] │ 5 │ 5 │ 5 │ 2 │ │ 1 │ 2 │ 2 │ [5,4,3,2] │ 4 │ 4 │ 4 │ 2 │ @@ -520,7 +508,6 @@ ORDER BY value ASC; ``` - ┌─frame_values_1─┬─second_value─┐ │ [1] │ ᴺᵁᴸᴸ │ │ [1,2] │ 2 │ @@ -532,7 +519,6 @@ ORDER BY ``` ``` - ## Примеры из реальной практики {#real-world-examples} Ниже приведены примеры, демонстрирующие решения распространённых практических задач. 
@@ -646,7 +632,6 @@ CREATE TABLE sensors ENGINE = Memory; ``` - insert into sensors values('cpu_temp', '2020-01-01 00:00:00', 87), ('cpu_temp', '2020-01-01 00:00:01', 77), ('cpu_temp', '2020-01-01 00:00:02', 93), @@ -725,7 +710,6 @@ CREATE TABLE sensors ENGINE = Memory; ``` - insert into sensors values('ambient_temp', '2020-01-01 00:00:00', 16), ('ambient_temp', '2020-01-01 12:00:00', 16), ('ambient_temp', '2020-01-02 11:00:00', 9), @@ -769,7 +753,6 @@ ORDER BY └──────────────┴─────────────────────┴───────┴─────────────────────────┘ ```` - ## Ссылки {#references} ### GitHub Issues {#github-issues} @@ -804,8 +787,6 @@ https://dev.mysql.com/doc/refman/8.0/en/window-functions-usage.html https://dev.mysql.com/doc/refman/8.0/en/window-functions-frames.html - - ## Связанные материалы {#related-content} - Блог: [Работа с данными временных рядов в ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md b/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md index e4dd3b86b7c..2fb6c0ccebd 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md @@ -19,14 +19,10 @@ title: 'Уроки — рекомендации по отладке' description: 'Найдите решения наиболее распространённых проблем ClickHouse, включая медленные запросы, ошибки, связанные с памятью, а также проблемы с подключением и конфигурацией.' --- - - # Операции ClickHouse: практические советы по отладке от сообщества {#clickhouse-operations-community-debugging-insights} *Это руководство является частью подборки материалов, подготовленной на основе встреч сообщества. 
Для дополнительных практических решений и рекомендаций вы можете [подобрать материалы под конкретную проблему](./community-wisdom.md).* *Столкнулись с высокими операционными затратами? Ознакомьтесь с руководством сообщества по [оптимизации затрат](./cost-optimization.md).* - - ## Основные системные таблицы {#essential-system-tables} Эти системные таблицы являются важнейшими для отладки в продакшене: @@ -86,7 +82,6 @@ GROUP BY database, table ORDER BY count() DESC; ``` - ## Распространённые проблемы в продакшене {#common-production-issues} ### Проблемы с дисковым пространством {#disk-space-problems} @@ -126,7 +121,6 @@ WHERE is_done = 0; Сначала тестируйте изменения схемы на небольших наборах данных. - ## Память и производительность {#memory-and-performance} ### Внешняя агрегация {#external-aggregation} @@ -170,7 +164,6 @@ SETTINGS max_bytes_before_external_group_by = 1000000000; -- порог 1 ГБ * [Пользовательский ключ партиционирования](/engines/table-engines/mergetree-family/custom-partitioning-key) - ## Краткая справка {#quick-reference} | Проблема | Обнаружение | Решение | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md b/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md index 4da87bf5f42..746d9dee251 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md @@ -21,24 +21,18 @@ title: 'Уроки — материализованные представлен description: 'Практические примеры использования материализованных представлений, типичные проблемы и их решения' --- - - # Материализованные представления: как они могут обернуться обоюдоострым мечом {#materialized-views-the-double-edged-sword} *Это руководство — часть серии материалов, подготовленных по результатам митапов сообщества. 
Для получения дополнительных практических решений и рекомендаций вы можете [просматривать материалы по конкретным проблемам](./community-wisdom.md).* *Слишком много частей тормозят вашу базу данных? Ознакомьтесь с руководством сообщества [Too Many Parts](./too-many-parts.md).* *Узнайте больше о [материализованных представлениях](/materialized-views).* - - ## Антипаттерн хранения с 10-кратным ростом {#storage-antipattern} **Реальная проблема в продакшене:** *«У нас было материализованное представление. Таблица сырых логов занимала около 20 ГБ, но представление для этой таблицы разрослось до 190 ГБ, то есть почти в 10 раз больше исходной таблицы. Это произошло потому, что мы создавали по одной строке на каждый атрибут, а в каждом логе может быть до 10 атрибутов.»* **Правило:** Если ваш `GROUP BY` создаёт больше строк, чем сокращает, вы строите дорогой индекс, а не материализованное представление. - - ## Проверка состояния материализованного представления в продакшене {#mv-health-validation} Этот запрос помогает предсказать, будет ли материализованное представление сжимать данные или раздувать их объём до того, как вы его создадите. Запустите его для вашей реальной таблицы и столбцов, чтобы избежать сценария «разрастания до 190 ГБ». @@ -62,7 +56,6 @@ WHERE your_filter_conditions; -- Если aggregation_ratio < 10%, вы получите хорошую степень сжатия ``` - ## Когда материализованные представления становятся проблемой {#mv-problems} **Предупреждающие признаки, за которыми стоит следить:** @@ -73,7 +66,5 @@ WHERE your_filter_conditions; Вы можете сравнить производительность вставки до и после добавления материализованных представлений с помощью `system.query_log`, отслеживая тенденции по длительности выполнения запросов. 
- - ## Источники видео {#video-sources} - [ClickHouse at CommonRoom - Kirill Sapchuk](https://www.youtube.com/watch?v=liTgGiTuhJE) — источник кейса о «чрезмерном энтузиазме вокруг материализованных представлений» и «раздувании объёма данных с 20 GB до 190 GB» \ No newline at end of file diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md b/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md index 5eca382f291..4a99c89671b 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md @@ -21,15 +21,11 @@ title: 'Уроки по оптимизации производительнос description: 'Примеры стратегий оптимизации производительности из реальной практики' --- - - # Оптимизация производительности: стратегии, проверенные сообществом {#performance-optimization} *Это руководство — часть подборки материалов, основанных на результатах встреч сообщества. Для получения дополнительных практических решений и идей вы можете [просматривать материалы по конкретным проблемам](./community-wisdom.md).* *Столкнулись с проблемами материализованных представлений? Ознакомьтесь с руководством сообщества по [Materialized Views](./materialized-views.md).* *Если вы сталкиваетесь с медленными запросами и вам нужно больше примеров, у нас также есть руководство по [оптимизации запросов](/optimize/query-optimization).* - - ## Располагаете столбцы по кардинальности (от низкой к высокой) {#cardinality-ordering} Первичный индекс ClickHouse работает лучше всего, когда в ключе сначала идут столбцы с низкой кардинальностью — это позволяет эффективно пропускать большие блоки данных. Столбцы с высокой кардинальностью, расположенные дальше в ключе, обеспечивают более детальную сортировку внутри этих блоков. 
Начинайте со столбцов с небольшим числом уникальных значений (например, status, category, country) и заканчивайте столбцами с большим числом уникальных значений (например, user_id, timestamp, session_id). @@ -37,8 +33,6 @@ description: 'Примеры стратегий оптимизации прои - [Выбор первичного ключа](/best-practices/choosing-a-primary-key) - [Первичные индексы](/primary-indexes) - - ## Важна временная гранулярность {#time-granularity} При использовании меток времени в предложении ORDER BY учитывайте компромисс между кардинальностью и точностью. Метки времени с микросекундной точностью создают очень высокую кардинальность (почти одно уникальное значение на каждую строку), что снижает эффективность разреженного первичного индекса ClickHouse. Округлённые метки времени создают меньшую кардинальность, что позволяет эффективнее пропускать данные при чтении за счёт индекса, но при этом вы теряете точность для временных запросов. @@ -68,7 +62,6 @@ FROM github.github_events WHERE created_at >= '2024-01-01'; ``` - ## Сосредоточьтесь на отдельных запросах, а не на средних значениях {#focus-on-individual-queries-not-averages} При отладке производительности ClickHouse не полагайтесь на среднее время выполнения запросов или общесистемные метрики. Вместо этого выясняйте, почему конкретные запросы выполняются медленно. Система может демонстрировать хорошую среднюю производительность, в то время как отдельные запросы страдают от нехватки памяти, неэффективной фильтрации или операций с высокой кардинальностью. @@ -77,8 +70,6 @@ WHERE created_at >= '2024-01-01'; Когда запрос выполняется медленно, не ограничивайтесь анализом средних значений. Спросите себя: «Почему ИМЕННО этот запрос был медленным?» и изучите фактический характер использования ресурсов. - - ## Работа с памятью и сканированием строк {#memory-and-row-scanning} Sentry — это ориентированная на разработчиков платформа отслеживания ошибок, ежедневно обрабатывающая миллиарды событий от более чем 4 млн разработчиков. 
Их ключевое наблюдение: *«Именно кардинальность ключа группировки в этой ситуации определяет потребление памяти»* — агрегации с высокой кардинальностью уничтожают производительность из‑за исчерпания памяти, а не из‑за объёма сканируемых строк. @@ -95,7 +86,6 @@ WHERE cityHash64(user_id) % 10 = 0 -- Всегда одни и те же 10% п Это гарантирует, что одни и те же пользователи появляются в каждом запросе, обеспечивая сопоставимые результаты для разных периодов времени. Ключевая идея: `cityHash64()` выдаёт стабильные хэш-значения для одного и того же ввода, поэтому `user_id = 12345` всегда будет хэшироваться в одно и то же значение, гарантируя, что этот пользователь либо всегда будет присутствовать в вашей 10% выборке, либо никогда — без мерцания результатов между запросами. - ## Оптимизация битовых масок в Sentry {#bit-mask-optimization} При агрегации по высококардинальным столбцам (например, URL) каждое уникальное значение создаёт отдельное состояние агрегации в памяти, что может привести к её исчерпанию. Решение Sentry: вместо группировки по фактическим строкам URL выполнять группировку по логическим выражениям, которые сворачиваются в битовые маски. @@ -139,7 +129,6 @@ LIMIT 20 От инженерной команды Sentry: «Эти ресурсоёмкие запросы выполняются более чем в 10 раз быстрее, а использование памяти в 100 раз ниже (и, что ещё важнее, ограничено). Наши крупнейшие клиенты больше не сталкиваются с ошибками при поиске реплеев, и теперь мы можем поддерживать клиентов любого размера, не исчерпывая память». 
- ## Видеоматериалы {#video-sources} - [Lost in the Haystack - Optimizing High Cardinality Aggregations](https://www.youtube.com/watch?v=paK84-EUJCA) - практический опыт Sentry по оптимизации использования памяти в продакшене diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md b/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md index 87d4abfd56e..2a549a3abc7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md @@ -21,14 +21,10 @@ title: 'Уроки — проблема «Too many parts»' description: 'Решения и предотвращение проблемы «Too many parts»' --- - - # Проблема слишком большого количества частей {#the-too-many-parts-problem} *Это руководство является частью сборника выводов, полученных на встречах сообщества. Для получения большего количества практических решений и инсайтов вы можете [подобрать материалы по конкретным проблемам](./community-wisdom.md).* *Нужны дополнительные советы по оптимизации производительности? Ознакомьтесь с руководством с инсайтами от сообщества по теме [Performance Optimization](./performance-optimization.md).* - - ## Понимание проблемы {#understanding-the-problem} ClickHouse выдает ошибку «Too many parts», чтобы предотвратить серьезную деградацию производительности. Мелкие части данных вызывают несколько проблем: низкую производительность запросов из‑за чтения и слияния большего числа файлов во время выполнения запросов, повышенное потребление памяти, поскольку каждая часть требует метаданных в памяти, снижение эффективности сжатия, так как меньшие блоки данных сжимаются менее эффективно, более высокие накладные расходы на операции ввода‑вывода (I/O) из‑за большего количества файловых дескрипторов и операций позиционирования в файлах, а также более медленные фоновые слияния, поскольку планировщик слияний получает больше работы. 
@@ -38,8 +34,6 @@ ClickHouse выдает ошибку «Too many parts», чтобы предот - [Части](/parts) - [Системная таблица parts](/operations/system-tables/parts) - - ## Раннее выявление проблемы {#recognize-parts-problem} Этот запрос отслеживает фрагментацию таблиц, анализируя количество и размеры частей во всех активных таблицах. Он выявляет таблицы с чрезмерным количеством или слишком мелкими частями, которым может потребоваться оптимизация слияния. Используйте его регулярно, чтобы обнаруживать проблемы фрагментации до того, как они начнут влиять на производительность запросов. @@ -76,7 +70,6 @@ ORDER BY total_parts DESC LIMIT 20; ``` - ## Видеоматериалы {#video-sources} - [Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse](https://www.youtube.com/watch?v=AsMPEfN5QtM) — сотрудник команды ClickHouse объясняет асинхронные INSERT и проблему слишком большого числа частей diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md b/i18n/ru/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md index e72ba69a945..dc7bd6de7ab 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md @@ -6,23 +6,18 @@ description: 'Описание утилиты clickhouse-static-files-disk-uploa doc_type: 'guide' --- - - # clickhouse-static-files-disk-uploader {#clickhouse-static-files-disk-uploader} Формирует каталог данных, содержащий метаданные для указанной таблицы ClickHouse. Эти метаданные можно использовать для создания таблицы ClickHouse на другом сервере с набором данных только для чтения, размещённым на диске `web`. Не используйте этот инструмент для миграции данных. Вместо этого используйте [команды `BACKUP` и `RESTORE`](/operations/backup). 
- - ## Использование {#usage} ```bash $ clickhouse static-files-disk-uploader [args] ``` - ## Команды {#commands} |Команда|Описание| @@ -34,8 +29,6 @@ $ clickhouse static-files-disk-uploader [args] |`--url [url]`|URL веб-сервера для режима `test`| |`--output-dir [dir]`|Каталог для вывода файлов в режиме `non-test`| - - ## Получение пути к метаданным для указанной таблицы {#retrieve-metadata-path-for-the-specified-table} При использовании `clickhouse-static-files-disk-uploader` требуется получить путь к метаданным нужной таблицы. @@ -60,7 +53,6 @@ SELECT data_paths └───────────────────────────────────────────────────────┘ ``` - ## Выгрузите каталог метаданных таблицы в локальную файловую систему {#output-table-metadata-directory-to-the-local-filesystem} Используя целевой каталог вывода `output` и заданный путь к метаданным, выполните следующую команду: @@ -75,7 +67,6 @@ $ clickhouse static-files-disk-uploader --output-dir output --metadata-path ./st Путь к данным: "/Users/john/store/bcc/bccc1cfd-d43d-43cf-a5b6-1cda8178f1ee", целевой путь: "output" ``` - ## Выгрузка каталога метаданных таблицы на внешний URL {#output-table-metadata-directory-to-an-external-url} Этот шаг аналогичен выгрузке каталога данных в локальную файловую систему, но с добавлением флага `--test-mode`. Вместо указания выходного каталога необходимо указать целевой URL с помощью флага `--url`. @@ -86,7 +77,6 @@ $ clickhouse static-files-disk-uploader --output-dir output --metadata-path ./st $ clickhouse static-files-disk-uploader --test-mode --url http://nginx:80/test1 --metadata-path ./store/bcc/bccc1cfd-d43d-43cf-a5b6-1cda8178f1ee/ ``` - ## Использование каталога метаданных таблицы для создания таблицы ClickHouse {#using-the-table-metadata-directory-to-create-a-clickhouse-table} Получив каталог метаданных таблицы, вы можете использовать его для создания таблицы ClickHouse на другом сервере. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/tutorial.md b/i18n/ru/docusaurus-plugin-content-docs/current/tutorial.md index 8f6bd24e1b3..f7e45f17f64 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/tutorial.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/tutorial.md @@ -9,12 +9,8 @@ show_related_blogs: true doc_type: 'guide' --- - - # Расширенное руководство {#advanced-tutorial} - - ## Overview {#overview} Узнайте, как выполнять приём и запросы данных в ClickHouse на примере набора данных о такси Нью-Йорка. @@ -25,7 +21,6 @@ doc_type: 'guide' - ## Создание новой таблицы {#create-a-new-table} Набор данных о такси Нью‑Йорка содержит сведения о миллионах поездок, включая такие столбцы, как сумма чаевых, платные дороги, тип оплаты и многое другое. Создайте таблицу для хранения этих данных. @@ -89,8 +84,6 @@ doc_type: 'guide' ORDER BY pickup_datetime; ``` - - ## Добавьте набор данных {#add-the-dataset} Теперь, когда вы создали таблицу, добавьте данные о поездках на такси в Нью‑Йорке из CSV‑файлов в S3. @@ -159,8 +152,6 @@ doc_type: 'guide' Этот запрос должен вернуть 1 999 657 строк. - - ## Анализ данных {#analyze-the-data} Выполните несколько запросов для анализа данных. Изучите приведённые примеры или попробуйте свой собственный SQL-запрос. @@ -267,8 +258,6 @@ doc_type: 'guide' Ожидаемый результат

- - ```response ┌──────────────avg_tip─┬───────────avg_fare─┬──────avg_passenger─┬──count─┬─trip_minutes─┐ │ 1.9600000381469727 │ 8 │ 1 │ 1 │ 27511 │ @@ -340,8 +329,6 @@ doc_type: 'guide'

- - 7. Выберите поездки до аэропортов Ла-Гуардия или JFK: ```sql SELECT @@ -382,8 +369,6 @@ doc_type: 'guide'

- - ## Создание словаря {#create-a-dictionary} Словарь — это отображение пар «ключ-значение», хранящихся в памяти. Подробности см. в разделе [Dictionaries](/sql-reference/dictionaries/index.md). @@ -467,7 +452,6 @@ LAYOUT(HASHED_ARRAY()) ORDER BY total DESC ``` - Этот запрос подсчитывает количество поездок на такси по районам, которые заканчиваются либо в аэропорту LaGuardia, либо в аэропорту JFK. Результат выглядит следующим образом: обратите внимание, что есть довольно много поездок, для которых район посадки неизвестен: ```response @@ -484,7 +468,6 @@ LAYOUT(HASHED_ARRAY()) 7 строк в наборе. Затрачено: 0.019 сек. Обработано 2.00 млн строк, 4.00 МБ (105.70 млн строк/сек., 211.40 МБ/сек.) ``` - ## Выполнение соединения {#perform-a-join} Напишите несколько запросов, которые соединяют `taxi_zone_dictionary` с таблицей `trips`. @@ -537,7 +520,6 @@ LAYOUT(HASHED_ARRAY())
- ## Дальнейшие шаги {#next-steps} Узнайте больше о ClickHouse из следующих разделов документации: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md index ace0c429197..d74bbb1d414 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md @@ -21,20 +21,16 @@ import img_history from '@site/static/images/use-cases/AI_ML/AIChat/5_history.pn import img_result_actions from '@site/static/images/use-cases/AI_ML/AIChat/6_result_actions.png'; import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_editor.png'; - # Использование AI Chat в ClickHouse Cloud {#using-ai-chat-in-clickhouse-cloud} > В этом руководстве описывается, как включить и использовать функцию AI Chat в консоли ClickHouse Cloud. - ## Предварительные требования {#prerequisites} 1. У вас должен быть доступ к организации ClickHouse Cloud с включёнными функциями ИИ (если они недоступны, обратитесь к администратору вашей организации или в службу поддержки). - - ## Откройте панель AI Chat {#open-panel} 1. Перейдите к сервису ClickHouse Cloud. @@ -43,8 +39,6 @@ import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_ed - - ## Примите условия использования данных (первый запуск) {#consent} 1. При первом запуске отобразится диалоговое окно с описанием обработки данных и сторонних субпроцессоров LLM. @@ -52,8 +46,6 @@ import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_ed - - ## Выберите режим чата {#modes} AI Chat в настоящее время поддерживает: @@ -65,30 +57,22 @@ AI Chat в настоящее время поддерживает: - - ## Создание и отправка сообщения {#compose} 1. Введите ваш вопрос (например: «Создайте материализованное представление для агрегации ежедневных событий по пользователю»). 2. 
Нажмите Enter, чтобы отправить (используйте Shift + Enter для перехода на новую строку). 3. Пока модель обрабатывает запрос, вы можете нажать «Stop», чтобы прервать обработку. - - ## Шаги «размышления» в режиме «Агент» {#thinking-steps} В режиме «Агент» могут отображаться разворачиваемые промежуточные шаги «размышления» или планирования. Они показывают, как именно помощник формирует свой ответ. При необходимости сворачивайте или разворачивайте их. - - ## Создание нового чата {#new-chats} Нажмите кнопку «New Chat», чтобы очистить контекст и начать новый сеанс. - - ## Просмотр истории чатов {#history} 1. В нижней части окна отображаются ваши недавние чаты. @@ -97,8 +81,6 @@ AI Chat в настоящее время поддерживает: - - ## Работа с сгенерированным SQL {#sql-actions} Когда ассистент возвращает SQL-запрос: @@ -111,8 +93,6 @@ AI Chat в настоящее время поддерживает: - - ## Остановка или прерывание ответа {#interrupt} Если ответ занимает слишком много времени или отклоняется от темы: @@ -120,8 +100,6 @@ AI Chat в настоящее время поддерживает: 1. Нажмите кнопку «Stop» (она отображается во время обработки). 2. Сообщение будет помечено как прерванное; затем вы сможете уточнить запрос и отправить его снова. - - ## Сочетания клавиш {#shortcuts} | Действие | Сочетание клавиш | diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md index 444d23d3b15..627998afd72 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md @@ -22,14 +22,12 @@ import img5 from '@site/static/images/use-cases/AI_ML/MCP/5connected_mcp_claude. 
import img6 from '@site/static/images/use-cases/AI_ML/MCP/6slash_mcp_claude.png'; import img7 from '@site/static/images/use-cases/AI_ML/MCP/7usage_mcp.png'; - # Включение удалённого MCP‑сервера ClickHouse Cloud {#enabling-the-clickhouse-cloud-remote-mcp-server} > В этом руководстве описано, как включить и использовать удалённый MCP‑сервер ClickHouse Cloud. В качестве примера мы будем использовать Claude Code в роли MCP‑клиента, но можно использовать любой LLM‑клиент с поддержкой MCP. - ## Включение удалённого MCP-сервера для вашего сервиса ClickHouse Cloud {#enable-remote-mcp-server} 1. Подключитесь к сервису ClickHouse Cloud, нажмите кнопку `Connect` и включите удалённый MCP-сервер для этого сервиса @@ -44,7 +42,6 @@ import img7 from '@site/static/images/use-cases/AI_ML/MCP/7usage_mcp.png'; https://mcp.clickhouse.cloud/mcp ``` - ## Добавление сервера ClickHouse MCP в Claude Code {#add-clickhouse-mcp-server-claude-code} 1. В рабочем каталоге выполните следующую команду, чтобы добавить конфигурацию MCP-сервера ClickHouse Cloud в Claude Code. В этом примере мы назвали MCP-сервер в конфигурации Claude Code `clickhouse_cloud`. @@ -71,7 +68,6 @@ claude mcp add --transport http clickhouse_cloud https://mcp.clickhouse.cloud/mc [user@host ~/Documents/repos/mcp_test] $ claude ``` - ## Аутентификация в ClickHouse Cloud через OAuth {#authenticate-via-oauth} 1. При первом сеансе Claude Code откроет окно браузера. В противном случае вы можете инициировать подключение, выполнив команду `/mcp` в Claude Code и выбрав MCP-сервер `clickhouse_cloud`. @@ -82,8 +78,6 @@ claude mcp add --transport http clickhouse_cloud https://mcp.clickhouse.cloud/mc - - ## Использование удалённого сервера MCP ClickHouse Cloud в Claude Code {#use-rempte-mcp-from-claude-code} 1. 
В Claude Code убедитесь, что удалённый сервер MCP подключён diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md index 723911803e7..888b837a394 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md @@ -18,7 +18,6 @@ import FindMCPServers from '@site/static/images/use-cases/AI_ML/MCP/find-mcp-ser import MCPPermission from '@site/static/images/use-cases/AI_ML/MCP/mcp-permission.png'; import ClaudeConversation from '@site/static/images/use-cases/AI_ML/MCP/claude-conversation.png'; - # Использование MCP-сервера ClickHouse с Claude Desktop {#using-clickhouse-mcp-server-with-claude-desktop} > В этом руководстве описывается, как настроить Claude Desktop с MCP-сервером ClickHouse с помощью uv @@ -37,20 +36,15 @@ import ClaudeConversation from '@site/static/images/use-cases/AI_ML/MCP/claude-c - ## Установка uv {#install-uv} Вам необходимо установить [uv](https://docs.astral.sh/uv/), чтобы выполнять инструкции из этого руководства. Если вы не хотите использовать uv, вам потребуется обновить конфигурацию сервера MCP, чтобы использовать другой менеджер пакетов. - - ## Загрузите Claude Desktop {#download-claude-desktop} Вам также потребуется установить приложение Claude Desktop, которое можно загрузить с [веб‑сайта Claude Desktop](https://claude.ai/desktop). - - ## Настройка сервера ClickHouse MCP {#configure-clickhouse-mcp-server} После установки Claude Desktop можно переходить к настройке [сервера ClickHouse MCP](https://github.com/ClickHouse/mcp-clickhouse). @@ -113,7 +107,6 @@ MCP mcp-clickhouse: spawn uv ENOENT Если это произойдёт, вам нужно будет обновить `command`, указав полный путь к `uv`. 
Например, если вы установили его через Cargo, путь будет таким: `/Users/<username>/.cargo/bin/uv` ::: - ## Использование MCP-сервера ClickHouse {#using-clickhouse-mcp-server} После перезапуска Claude Desktop найдите MCP-сервер ClickHouse, нажав на значок `Search and tools`: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md index a8fd56522db..6cbd4f2295e 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md @@ -15,7 +15,6 @@ import Link from '@docusaurus/Link'; import Image from '@theme/IdealImage'; import LibreInterface from '@site/static/images/use-cases/AI_ML/MCP/librechat.png'; - # Использование MCP-сервера ClickHouse с LibreChat {#using-clickhouse-mcp-server-with-librechat} > В данном руководстве описывается настройка LibreChat с MCP-сервером ClickHouse с использованием Docker @@ -23,7 +22,6 @@ import LibreInterface from '@site/static/images/use-cases/AI_ML/MCP/librechat.pn - ## Установите Docker {#install-docker} Вам потребуется Docker для запуска LibreChat и MCP-сервера. Чтобы установить Docker: @@ -34,8 +32,6 @@ import LibreInterface from '@site/static/images/use-cases/AI_ML/MCP/librechat.pn
Для получения дополнительной информации см. [документацию по Docker](https://docs.docker.com/get-docker/). - - ## Клонируйте репозиторий LibreChat {#clone-librechat-repo} Откройте консоль (Command Prompt, терминал или PowerShell) и клонируйте @@ -46,7 +42,6 @@ git clone https://github.com/danny-avila/LibreChat.git cd LibreChat ``` - ## Создайте и отредактируйте файл .env {#create-and-edit-env-file} Скопируйте пример конфигурационного файла из `.env.example` в `.env`: @@ -59,7 +54,6 @@ cp .env.example .env многих популярных провайдеров LLM, включая OpenAI, Anthropic, AWS Bedrock и др., например: - ```text title=".venv" #============# # Anthropic # {#anthropic} @@ -78,7 +72,6 @@ ANTHROPIC_API_KEY=user_provided не изменяйте файл .env и переходите к следующим шагам. ::: - ## Создайте файл librechat.yaml {#create-librechat-yaml-file} Выполните следующую команду, чтобы создать новый файл `librechat.yaml`: @@ -89,7 +82,6 @@ cp librechat.example.yaml librechat.yaml Это создаёт основной [конфигурационный файл](https://www.librechat.ai/docs/configuration/librechat_yaml) для LibreChat. 
- ## Добавление сервера ClickHouse MCP в Docker Compose {#add-clickhouse-mcp-server-to-docker-compose} Теперь мы добавим сервер ClickHouse MCP в файл Docker Compose LibreChat, @@ -142,7 +134,6 @@ services: /> - ## Настройка сервера MCP в librechat.yaml {#configure-mcp-server-in-librechat-yaml} Откройте `librechat.yaml` и разместите следующую конфигурацию в конце файла: @@ -168,7 +159,6 @@ socialLogins: ['github', 'google', 'discord', 'openid', 'facebook', 'apple', 'sa socialLogins: [] ``` - ## Добавление локальной LLM‑модели с помощью Ollama (необязательно) {#add-local-llm-using-ollama} ### Установка Ollama {#install-ollama} @@ -208,7 +198,6 @@ custom: modelDisplayLabel: "Ollama" ``` - ## Запустите все сервисы {#start-all-services} Из корневого каталога проекта LibreChat выполните следующую команду, чтобы запустить сервисы: @@ -219,7 +208,6 @@ docker compose up Дождитесь, пока все сервисы будут полностью запущены. - ## Откройте LibreChat в браузере {#open-librechat-in-browser} После запуска всех сервисов откройте браузер и перейдите по адресу `http://localhost:3080/` diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md index c69cbb7fe55..6ed6cc94053 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md @@ -18,7 +18,6 @@ import Conversation from '@site/static/images/use-cases/AI_ML/MCP/allm_conversat import MCPServers from '@site/static/images/use-cases/AI_ML/MCP/allm_mcp-servers.png'; import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png'; - # Использование сервера MCP ClickHouse с AnythingLLM {#using-clickhouse-mcp-server-with-anythingllm} > В этом руководстве описано, как настроить [AnythingLLM](https://anythingllm.com/) с сервером MCP ClickHouse с использованием Docker @@ 
-26,7 +25,6 @@ import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png' - ## Установите Docker {#install-docker} Вам понадобится Docker, чтобы запустить LibreChat и MCP-сервер. Чтобы установить Docker: @@ -37,8 +35,6 @@ import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png'
Для получения дополнительной информации см. [документацию Docker](https://docs.docker.com/get-docker/). - - ## Загрузка Docker-образа AnythingLLM {#pull-anythingllm-docker-image} Выполните следующую команду, чтобы загрузить Docker-образ AnythingLLM на локальную машину: @@ -47,7 +43,6 @@ import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png' docker pull anythingllm/anythingllm ``` - ## Настройка расположения хранилища {#setup-storage-location} Создайте каталог для хранилища и инициализируйте файл окружения: @@ -58,7 +53,6 @@ mkdir -p $STORAGE_LOCATION && \ touch "$STORAGE_LOCATION/.env" ``` - ## Настройка файла конфигурации сервера MCP {#configure-mcp-server-config-file} Создайте каталог `plugins`: @@ -96,7 +90,6 @@ mkdir -p "$STORAGE_LOCATION/plugins" [host, имя пользователя и пароль](https://clickhouse.com/docs/getting-started/quick-start/cloud#connect-with-your-app) собственного сервиса ClickHouse Cloud. - ## Запустите Docker-контейнер AnythingLLM {#start-anythingllm-docker-container} Запустите Docker-контейнер AnythingLLM следующей командой: @@ -113,7 +106,6 @@ mintplexlabs/anythingllm После запуска откройте в браузере адрес `http://localhost:3001`. Выберите модель, которую хотите использовать, и укажите свой API-ключ. - ## Дождитесь запуска MCP Servers {#wait-for-mcp-servers-to-start-up} Нажмите значок инструмента в левой нижней части интерфейса: @@ -125,8 +117,6 @@ mintplexlabs/anythingllm - - ## Чат с ClickHouse MCP Server в AnythingLLM {#chat-with-clickhouse-mcp-server-with-anythingllm} Теперь мы готовы начать чат. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md index 34c9d1fe79f..12cde81d5cc 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md @@ -25,7 +25,6 @@ import AddConnection from '@site/static/images/use-cases/AI_ML/MCP/7_add_connect import OpenAIModels from '@site/static/images/use-cases/AI_ML/MCP/8_openai_models_more.png'; import Conversation from '@site/static/images/use-cases/AI_ML/MCP/9_conversation.png'; - # Использование MCP-сервера ClickHouse с Open WebUI {#using-clickhouse-mcp-server-with-open-webui} > В данном руководстве описывается настройка [Open WebUI](https://github.com/open-webui/open-webui) с MCP-сервером ClickHouse @@ -33,14 +32,11 @@ import Conversation from '@site/static/images/use-cases/AI_ML/MCP/9_conversation - ## Установите uv {#install-uv} Вам нужно установить [uv](https://docs.astral.sh/uv/), чтобы выполнить инструкции из этого руководства. Если вы не хотите использовать uv, вам потребуется обновить конфигурацию MCP Server, чтобы использовать другой менеджер пакетов. - - ## Запуск Open WebUI {#launch-open-webui} Чтобы запустить Open WebUI, выполните следующую команду: @@ -51,7 +47,6 @@ uv run --with open-webui open-webui serve Перейдите по адресу [http://localhost:8080/](http://localhost:8080/), чтобы открыть веб-интерфейс. - ## Настройка сервера ClickHouse MCP {#configure-clickhouse-mcp-server} Чтобы настроить сервер ClickHouse MCP, нам нужно будет представить интерфейс сервера MCP в виде конечных точек OpenAPI. @@ -93,7 +88,6 @@ uvx mcpo --port 8000 -- uv run --with mcp-clickhouse --python 3.10 mcp-clickhous - ## Настройка OpenAI {#configure-openai} По умолчанию Open WebUI работает с моделями Ollama, но мы также можем добавить конечные точки API, совместимые с OpenAI. 
@@ -109,8 +103,6 @@ uvx mcpo --port 8000 -- uv run --with mcp-clickhouse --python 3.10 mcp-clickhous - - ## Общение с ClickHouse MCP Server через Open WebUI {#chat-to-clickhouse-mcp-server} Теперь можно начать диалог, и Open WebUI при необходимости обратится к MCP Server: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md index 6b977d7edee..d51f10b9716 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md @@ -14,14 +14,12 @@ import {CardHorizontal} from '@clickhouse/click-ui/bundled' import Link from '@docusaurus/Link'; import Image from '@theme/IdealImage'; - # Использование сервера ClickHouse MCP совместно с Ollama {#using-clickhouse-mcp-server-with-ollama} > В этом руководстве объясняется, как использовать сервер ClickHouse MCP совместно с Ollama. - ## Установите Ollama {#install-ollama} Ollama — это библиотека для запуска больших языковых моделей (LLM) на вашем компьютере. @@ -93,7 +91,6 @@ ollama show qwen3 Из этого вывода видно, что у модели qwen3 по умолчанию чуть больше 8 миллиардов параметров. - ## Установите MCPHost {#install-mcphost} На момент написания этой инструкции (июль 2025 года) нет встроенной поддержки использования Ollama с MCP Servers. @@ -108,7 +105,6 @@ go install github.com/mark3labs/mcphost@latest Исполняемый файл будет установлен в `~/go/bin`, поэтому нужно убедиться, что этот каталог входит в переменную окружения `PATH`. - ## Настройка сервера ClickHouse MCP {#configure-clickhouse-mcp-server} Мы можем настраивать серверы MCP с помощью MCPHost в файлах YAML или JSON. @@ -157,7 +153,6 @@ export CLICKHOUSE_PASSWORD="" Теоретически вы должны иметь возможность указать эти переменные под ключом `environment` в конфигурационном файле MCP, но на практике это не работает. 
::: - ## Running MCPHost {#running-mcphost} После того как вы настроили сервер ClickHouse MCP, вы можете запустить MCPHost, выполнив следующую команду: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md index 7fd81a723bd..b9bec682cbd 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md @@ -26,14 +26,12 @@ import ToolsCalled from '@site/static/images/use-cases/AI_ML/MCP/8_janai_tools_c import ToolsCalledExpanded from '@site/static/images/use-cases/AI_ML/MCP/9_janai_tools_called_expanded.png'; import Result from '@site/static/images/use-cases/AI_ML/MCP/10_janai_result.png'; - # Использование MCP-сервера ClickHouse с Jan.ai {#using-clickhouse-mcp-server-with-janai} > В этом руководстве описывается использование MCP-сервера ClickHouse с [Jan.ai](https://jan.ai/docs). - ## Установка Jan.ai {#install-janai} Jan.ai — это открытое приложение, являющееся альтернативой ChatGPT и работающее на 100 % офлайн. @@ -41,8 +39,6 @@ Jan.ai — это открытое приложение, являющееся а Это нативное приложение, поэтому после загрузки вы можете просто запустить его. - - ## Добавить LLM в Jan.ai {#add-llm-to-janai} Мы можем включить модели через меню настроек. @@ -51,8 +47,6 @@ Jan.ai — это открытое приложение, являющееся а - - ## Включение MCP Servers {#enable-mcp-servers} На момент написания этой инструкции MCP Servers являются экспериментальной функцией в Jan.ai. @@ -62,8 +56,6 @@ Jan.ai — это открытое приложение, являющееся а После переключения этого переключателя в левом меню появится пункт `MCP Servers`. 
- - ## Настройка ClickHouse MCP Server {#configure-clickhouse-mcp-server} Если нажать на меню `MCP Servers`, откроется список MCP-серверов, к которым можно подключиться: @@ -84,8 +76,6 @@ Jan.ai — это открытое приложение, являющееся а - - ## Общение с ClickHouse MCP Server через Jan.ai {#chat-to-clickhouse-mcp-server} Пришло время обсудить данные, хранящиеся в ClickHouse! diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md index 6715c954b5d..f40315060da 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать агента ИИ с помощью Agno и ClickHouse MCP Server {#how-to-build-an-ai-agent-with-agno-and-the-clickhouse-mcp-server} В этом руководстве вы узнаете, как создать агента ИИ на базе [Agno](https://github.com/agno-agi/agno), способного взаимодействовать с @@ -21,8 +19,6 @@ doc_type: 'guide' Этот пример доступен в виде ноутбука в [репозитории с примерами](https://github.com/ClickHouse/examples/blob/main/ai/mcp/agno/agno.ipynb). ::: - - ## Предварительные требования {#prerequisites} - В вашей системе должен быть установлен Python. 
@@ -33,7 +29,6 @@ doc_type: 'guide' - ## Установка библиотек {#install-libraries} Установите библиотеку Agno с помощью следующих команд: @@ -44,7 +39,6 @@ pip install -q agno pip install -q ipywidgets ``` - ## Настройка учетных данных {#setup-credentials} Далее необходимо указать свой API-ключ Anthropic: @@ -75,7 +69,6 @@ env = { } ``` - ## Инициализация MCP-сервера и агента Agno {#initialize-mcp-and-agent} Теперь настройте ClickHouse MCP-сервер для подключения к ClickHouse SQL playground diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md index c274526eed6..eaeb757f3a6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать AI-агента с помощью Chainlit и ClickHouse MCP Server {#how-to-build-an-ai-agent-with-chainlit-and-the-clickhouse-mcp-server} В этом руководстве показано, как объединить мощный фреймворк чат-интерфейсов Chainlit @@ -20,14 +18,10 @@ doc_type: 'guide' с минимальным количеством кода, а сервер ClickHouse MCP обеспечивает бесшовную интеграцию с высокопроизводительной колоночной базой данных ClickHouse. 
- - ## Предварительные требования {#prerequisites} - Вам потребуется ключ API Anthropic - У вас должен быть установлен [`uv`](https://docs.astral.sh/uv/getting-started/installation/) - - ## Базовое приложение Chainlit {#basic-chainlit-app} Вы можете увидеть пример простого чат-приложения, запустив следующую команду: @@ -38,7 +32,6 @@ uv run --with anthropic --with chainlit chainlit run chat_basic.py -w -h Затем откройте в браузере `http://localhost:8000` - ## Добавление ClickHouse MCP Server {#adding-clickhouse-mcp-server} Дело становится интереснее, если мы добавим ClickHouse MCP Server. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md index 1a04de70a2f..0edd539e102 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать AI-агента с помощью Claude Agent SDK и ClickHouse MCP Server {#how-to-build-an-ai-agent-with-claude-agent-sdk-and-the-clickhouse-mcp-server} В этом руководстве вы узнаете, как создать AI-агента на базе [Claude Agent SDK](https://docs.claude.com/en/api/agent-sdk/overview), который может взаимодействовать с @@ -21,8 +19,6 @@ doc_type: 'guide' Этот пример доступен в виде блокнота в [репозитории с примерами](https://github.com/ClickHouse/examples/blob/main/ai/mcp/claude-agent/claude-agent.ipynb). ::: - - ## Предварительные требования {#prerequisites} - В вашей системе должен быть установлен Python. 
@@ -33,7 +29,6 @@ doc_type: 'guide' - ## Установка библиотек {#install-libraries} Установите библиотеку Claude Agent SDK, выполнив следующие команды: @@ -44,7 +39,6 @@ pip install -q claude-agent-sdk pip install -q ipywidgets ``` - ## Настройка учетных данных {#setup-credentials} Далее вам нужно будет указать свой ключ API Anthropic: @@ -70,7 +64,6 @@ env = { } ``` - ## Инициализация MCP-сервера и агента Claude Agent SDK {#initialize-mcp-and-agent} Теперь настройте ClickHouse MCP-сервер для подключения к ClickHouse SQL playground diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md index 0938accc952..c5300001a9f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать AI-агента с помощью CopilotKit и ClickHouse MCP Server {#how-to-build-an-ai-agent-with-copilotkit-and-the-clickhouse-mcp-server} Это пример того, как создать агентское приложение, используя данные, хранящиеся в @@ -25,15 +23,11 @@ ClickHouse. В нем используется [ClickHouse MCP Server](https://g Код этого примера доступен в [репозитории с примерами](https://github.com/ClickHouse/examples/edit/main/ai/mcp/copilotkit). ::: - - ## Предварительные требования {#prerequisites} - `Node.js >= 20.14.0` - `uv >= 0.1.0` - - ## Установка зависимостей {#install-dependencies} Клонируйте проект локально: `git clone https://github.com/ClickHouse/examples` и @@ -42,8 +36,6 @@ ClickHouse. В нем используется [ClickHouse MCP Server](https://g Можете пропустить этот раздел и просто запустить скрипт `./install.sh` для установки зависимостей. Если вы хотите установить зависимости вручную, следуйте инструкциям ниже. 
- - ## Ручная установка зависимостей {#install-dependencies-manually} 1. Установите зависимости: @@ -67,13 +59,10 @@ uv sync uv add fastmcp ``` - ## Настройка приложения {#configure-the-application} Скопируйте файл `env.example` в `.env` и отредактируйте его, указав значение `ANTHROPIC_API_KEY`. - - ## Используйте свою LLM {#use-your-own-llm} Если вы предпочитаете использовать другого провайдера LLM вместо Anthropic, вы можете изменить @@ -81,8 +70,6 @@ uv add fastmcp [Здесь](https://docs.copilotkit.ai/guides/bring-your-own-llm) приведён список поддерживаемых провайдеров. - - ## Использование собственного кластера ClickHouse {#use-your-own-clickhouse-cluster} По умолчанию пример настроен на подключение к @@ -95,8 +82,6 @@ uv add fastmcp - `CLICKHOUSE_PASSWORD` - `CLICKHOUSE_SECURE` - - # Запуск приложения {#run-the-application} Выполните `npm run dev`, чтобы запустить сервер разработки. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md index e483fc84299..1310fc706eb 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать AI-агента LlamaIndex с использованием ClickHouse MCP Server {#how-to-build-a-llamaindex-ai-agent-using-clickhouse-mcp-server} В этом руководстве вы узнаете, как создать AI-агента [LlamaIndex](https://docs.llamaindex.ai), который @@ -21,8 +19,6 @@ doc_type: 'guide' Этот пример доступен в виде ноутбука в [репозитории примеров](https://github.com/ClickHouse/examples/blob/main/ai/mcp/llamaindex/llamaindex.ipynb). ::: - - ## Предварительные требования {#prerequisites} - В вашей системе должен быть установлен Python. 
@@ -33,7 +29,6 @@ doc_type: 'guide' - ## Установите библиотеки {#install-libraries} Установите необходимые библиотеки, выполнив следующие команды: @@ -43,7 +38,6 @@ pip install -q --upgrade pip pip install -q llama-index clickhouse-connect llama-index-llms-anthropic llama-index-tools-mcp ``` - ## Настройка учётных данных {#setup-credentials} Далее вам нужно указать свой ключ API Anthropic: @@ -62,7 +56,6 @@ os.environ["ANTHROPIC_API_KEY"] = getpass.getpass("Введите API-ключ A вы можете найти инструкции по настройке учетных данных в [документации LlamaIndex «LLMs»](https://docs.llamaindex.ai/en/stable/examples/) ::: - ## Инициализируйте MCP Server {#initialize-mcp-and-agent} Теперь настройте ClickHouse MCP Server для работы с ClickHouse SQL Playground. @@ -93,7 +86,6 @@ mcp_tool_spec = McpToolSpec( ) ``` - tools = await mcp_tool_spec.to_tool_list_async() ```` @@ -112,7 +104,6 @@ agent_worker = FunctionCallingAgentWorker.from_tools( agent = AgentRunner(agent_worker) ```` - ## Инициализация LLM {#initialize-llm} Инициализируйте модель Claude Sonnet 4.0 следующим кодом: @@ -122,7 +113,6 @@ from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-sonnet-4-0") ``` - ## Запуск агента {#run-agent} Теперь можно задать агенту вопрос: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md index 8ff8788eda8..f7b7a44a3d7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать агента OpenAI с использованием ClickHouse MCP Server {#how-to-build-an-openai-agent-using-clickhouse-mcp-server} В этом руководстве вы узнаете, как создать агента 
[OpenAI](https://github.com/openai/openai-agents-python), который @@ -21,8 +19,6 @@ doc_type: 'guide' Этот пример доступен в виде ноутбука в [репозитории с примерами](https://github.com/ClickHouse/examples/blob/main/ai/mcp/openai-agents/openai-agents.ipynb). ::: - - ## Предварительные требования {#prerequisites} - В вашей системе должен быть установлен Python. @@ -33,7 +29,6 @@ doc_type: 'guide' - ## Установка библиотек {#install-libraries} Установите необходимую библиотеку, выполнив следующие команды: @@ -43,7 +38,6 @@ pip install -q --upgrade pip pip install -q openai-agents ``` - ## Настройка учетных данных {#setup-credentials} Далее вам нужно будет указать свой ключ API OpenAI: @@ -57,7 +51,6 @@ os.environ["OPENAI_API_KEY"] = getpass.getpass("Введите API-ключ Open Введите API-ключ OpenAI: ········ ``` - ## Инициализация MCP Server и агента OpenAI {#initialize-mcp-and-agent} Теперь настройте ClickHouse MCP Server так, чтобы он указывал на ClickHouse SQL playground, @@ -156,7 +149,6 @@ async with MCPServerStdio( simple_render_chunk(chunk) ``` - ```response title="Ответ" Выполняется: Какой самый крупный проект на GitHub на данный момент в 2025 году? 
🔧 Tool: list_databases({}) diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md index 4097dce484b..e43d1c0862d 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать агента PydanticAI с использованием сервера ClickHouse MCP {#how-to-build-a-pydanticai-agent-using-clickhouse-mcp-server} В этом руководстве вы узнаете, как создать агента [PydanticAI](https://ai.pydantic.dev/mcp/client/#__tabbed_1_1), @@ -21,8 +19,6 @@ doc_type: 'guide' Этот пример доступен в виде ноутбука в [репозитории примеров](https://github.com/ClickHouse/examples/blob/main/ai/mcp/pydanticai/pydantic.ipynb). ::: - - ## Предварительные требования {#prerequisites} - В вашей системе должен быть установлен Python. 
@@ -33,7 +29,6 @@ doc_type: 'guide' - ## Установка библиотек {#install-libraries} Установите необходимые библиотеки, выполнив следующие команды: @@ -44,7 +39,6 @@ pip install -q "pydantic-ai-slim[mcp]" pip install -q "pydantic-ai-slim[anthropic]" # замените на соответствующий пакет при использовании другого провайдера LLM ``` - ## Настройка учетных данных {#setup-credentials} Далее необходимо указать ключ API Anthropic: @@ -75,7 +69,6 @@ env = { } ``` - ## Инициализация MCP Server и агента PydanticAI {#initialize-mcp} Теперь настройте ClickHouse MCP Server так, чтобы он использовал песочницу ClickHouse SQL: @@ -98,7 +91,6 @@ server = MCPServerStdio( agent = Agent('anthropic:claude-sonnet-4-0', mcp_servers=[server]) ``` - ## Задайте вопрос агенту {#ask-agent} Наконец, вы можете задать вопрос агенту: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md index 991773a937e..c718620d0e7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать агента SlackBot с помощью ClickHouse MCP Server {#how-to-build-a-slackbot-agent-using-clickhouse-mcp-server} В этом руководстве вы узнаете, как создать агента [SlackBot](https://slack.com/intl/en-gb/help/articles/202026038-An-introduction-to-Slackbot). @@ -22,8 +20,6 @@ doc_type: 'guide' Код этого примера доступен в [репозитории с примерами](https://github.com/ClickHouse/examples/blob/main/ai/mcp/slackbot/README.md). 
::: - - ## Предварительные требования {#prerequisites} - Необходимо установить [`uv`](https://docs.astral.sh/uv/getting-started/installation/) @@ -32,23 +28,18 @@ doc_type: 'guide' - ## Создайте приложение Slack {#create-a-slack-app} 1. Перейдите на [slack.com/apps](https://slack.com/apps) и нажмите `Create New App`. 2. Выберите вариант `From scratch` и задайте имя приложению. 3. Выберите рабочее пространство Slack. - - ## Установите приложение в рабочее пространство {#install-the-app-to-your-workspace} Далее добавьте созданное на предыдущем шаге приложение в рабочее пространство. Следуйте инструкциям из раздела ["Добавление приложений в рабочее пространство Slack"](https://slack.com/intl/en-gb/help/articles/202035138-Add-apps-to-your-Slack-workspace) в документации Slack. - - ## Настройка параметров приложения Slack {#configure-slack-app-settings} - Перейдите в `App Home` @@ -74,8 +65,6 @@ doc_type: 'guide' - `message:im` - Сохраните изменения. - - ## Добавьте переменные окружения (`.env`) {#add-env-vars} Создайте файл `.env` в корне проекта со следующими переменными окружения, чтобы ваше приложение могло подключаться к [SQL-песочнице ClickHouse](https://sql.clickhouse.com/). @@ -94,7 +83,6 @@ CLICKHOUSE_SECURE=true Вы можете настроить переменные ClickHouse для использования собственного сервера ClickHouse или облачного экземпляра, если хотите. - ## Использование бота {#using-the-bot} 1. 
**Запустите бота:** diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md index 2a9573a5966..c02788b31d6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # Как создать агента ИИ на базе ClickHouse с помощью Streamlit {#how-to-build-a-clickhouse-backed-ai-agent-with-streamlit} В этом руководстве вы узнаете, как создать веб-агента ИИ с использованием [Streamlit](https://streamlit.io/), способного взаимодействовать с [SQL-песочницей ClickHouse](https://sql.clickhouse.com/) через [MCP-сервер ClickHouse](https://github.com/ClickHouse/mcp-clickhouse) и [Agno](https://github.com/agno-agi/agno). @@ -21,8 +19,6 @@ doc_type: 'guide' Исходный код этого примера вы можете найти в [репозитории examples](https://github.com/ClickHouse/examples/tree/main/ai/mcp/streamlit). ::: - - ## Предварительные требования {#prerequisites} - В вашей системе должен быть установлен Python. @@ -33,7 +29,6 @@ doc_type: 'guide' - ## Установка библиотек {#install-libraries} Установите необходимые библиотеки, выполнив следующие команды: @@ -42,7 +37,6 @@ doc_type: 'guide' pip install streamlit agno ipywidgets ``` - ## Создайте файл с утилитами {#create-utilities} Создайте файл `utils.py` с двумя вспомогательными функциями. Первая — это @@ -70,7 +64,6 @@ def apply_styles():
""", unsafe_allow_html=True) ``` - ## Настройка учётных данных {#setup-credentials} Установите ключ API Anthropic в переменную окружения: @@ -84,7 +77,6 @@ export ANTHROPIC_API_KEY="ваш_ключ_api" вы можете найти инструкции по настройке учетных данных в документации [Agno «Integrations»](https://docs.agentops.ai/v2/integrations/ag2) ::: - ## Импорт необходимых библиотек {#import-libraries} Начните с создания основного файла приложения Streamlit (например, `app.py`) и добавьте импорты: @@ -109,7 +101,6 @@ import threading from queue import Queue ``` - ## Определите функцию потоковой передачи агента {#define-agent-function} Добавьте основную функцию агента, которая подключается к [SQL-песочнице ClickHouse](https://sql.clickhouse.com/) и осуществляет потоковую передачу ответов: @@ -160,7 +151,6 @@ async def stream_clickhouse_agent(message): yield chunk.content ``` - ## Добавьте синхронные функции-обёртки {#add-wrapper-functions} Добавьте вспомогательные функции для обработки асинхронного стриминга в Streamlit: @@ -183,7 +173,6 @@ async def _agent_stream_to_queue(message, queue): queue.put(chunk) ``` - ## Создайте интерфейс Streamlit {#create-interface} Добавьте компоненты пользовательского интерфейса Streamlit и функции чата: @@ -213,7 +202,6 @@ if prompt := st.chat_input("Чем могу помочь?"): st.session_state.messages.append({"role": "assistant", "content": response}) ``` - ## Запуск приложения {#run-application} Чтобы запустить веб-приложение AI-агента ClickHouse, выполните diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/data-exploration/jupyter-notebook.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/data-exploration/jupyter-notebook.md index 59f5ccc2939..516c92d2a1f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/data-exploration/jupyter-notebook.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/AI_ML/data-exploration/jupyter-notebook.md @@ -18,7 +18,6 @@ import image_7 from 
'@site/static/images/use-cases/AI_ML/jupyter/7.png'; import image_8 from '@site/static/images/use-cases/AI_ML/jupyter/8.png'; import image_9 from '@site/static/images/use-cases/AI_ML/jupyter/9.png'; - # Исследование данных с помощью Jupyter Notebook и chDB {#exploring-data-with-jupyter-notebooks-and-chdb} В этом руководстве вы узнаете, как исследовать данные в ClickHouse Cloud в Jupyter Notebook с помощью [chDB](/chdb) — быстрого встроенного SQL OLAP-движка на базе ClickHouse. @@ -112,7 +111,6 @@ result = chdb.query("SELECT 'Привет, ClickHouse!' as message") print(result) ``` - ## Исследование данных {#exploring-the-data} После того как набор данных UK price paid настроен, а chDB запущен в Jupyter Notebook, мы можем приступить к исследованию наших данных. @@ -237,7 +235,6 @@ df_2 = chdb.query(query, "DataFrame") df_2.head() ``` -
Чтение из нескольких источников за один шаг Также можно читать данные из нескольких источников за один шаг. Для этого вы можете использовать приведённый ниже запрос с `JOIN`: @@ -323,7 +320,6 @@ plt.show() После 2012 года рост существенно ускорился, резко поднявшись примерно с £400 000 до более чем £1 000 000 к 2019 году. В отличие от объёма продаж, цены испытали минимальное влияние кризиса 2008 года и сохранили восходящую тенденцию. Вот это да! - ## Итоги {#summary} В этом руководстве показано, как chDB обеспечивает удобное исследование данных в Jupyter-ноутбуках за счет подключения ClickHouse Cloud к локальным источникам данных. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md index e07cdf79ce9..dd6be1bbe4d 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md @@ -24,7 +24,6 @@ Glue поддерживает множество различных формат интеграции доступны только таблицы Iceberg. 
::: - ## Настройка Glue в AWS {#configuring} Чтобы подключиться к каталогу Glue, необходимо определить регион вашего @@ -49,7 +48,6 @@ SETTINGS aws_secret_access_key = '' ``` - ## Выполнение запросов к каталогу данных Glue с помощью ClickHouse {#query-glue-catalog} Теперь, когда подключение установлено, можно приступать к выполнению запросов к Glue: @@ -88,7 +86,6 @@ SELECT count(*) FROM `iceberg-benchmark.hitsiceberg`; SHOW CREATE TABLE `iceberg-benchmark.hitsiceberg`; ``` - ```sql title="Response" ┌─statement───────────────────────────────────────────────┐ 1.│ CREATE TABLE glue.`iceberg-benchmark.hitsiceberg` │ diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md index 6b0c0901ae2..3d555b7b629 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md @@ -33,7 +33,6 @@ Lakekeeper — это реализация REST-каталога с открыт `SET allow_experimental_database_iceberg = 1;` ::: - ## Локальная среда разработки {#local-development-setup} Для локальной разработки и тестирования вы можете использовать контейнеризованную среду Lakekeeper. Такой подход оптимален для обучения, прототипирования и использования в средах разработки. @@ -231,7 +230,6 @@ docker-compose logs -f Настройка Lakekeeper требует предварительной загрузки примерных данных в таблицы Iceberg. Убедитесь, что в среде уже созданы и заполнены таблицы, прежде чем пытаться выполнять к ним запросы из ClickHouse. Доступность таблиц зависит от конкретной конфигурации docker-compose и скриптов загрузки примерных данных. 
::: - ### Подключение к локальному каталогу Lakekeeper {#connecting-to-local-lakekeeper-catalog} Подключитесь к контейнеру ClickHouse: @@ -250,7 +248,6 @@ ENGINE = DataLakeCatalog('http://lakekeeper:8181/catalog', 'minio', 'ClickHouse_ SETTINGS catalog_type = 'rest', storage_endpoint = 'http://minio:9002/warehouse-rest', warehouse = 'demo' ``` - ## Выполнение запросов к таблицам каталога Lakekeeper с помощью ClickHouse {#querying-lakekeeper-catalog-tables-using-clickhouse} Теперь, когда соединение установлено, вы можете начинать выполнять запросы по каталогу Lakekeeper. Например: @@ -334,7 +331,6 @@ SHOW CREATE TABLE `default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## Загрузка данных из вашего Data Lake в ClickHouse {#loading-data-from-your-data-lake-into-clickhouse} Если вам нужно загрузить данные из каталога Lakekeeper в ClickHouse, начните с создания локальной таблицы в ClickHouse: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md index 73a355a41b3..b400465ee63 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md @@ -34,7 +34,6 @@ Nessie — это транзакционный каталог с открыты `SET allow_experimental_database_iceberg = 1;` ::: - ## Локальная среда разработки {#local-development-setup} Для локальной разработки и тестирования вы можете использовать контейнеризованную среду Nessie. Такой подход идеально подходит для обучения, прототипирования и разработки. @@ -148,7 +147,6 @@ docker-compose logs -f Конфигурация Nessie использует хранилище версий в памяти и требует, чтобы сначала в таблицы Iceberg были загружены примеры данных. 
Перед выполнением запросов к этим таблицам через ClickHouse убедитесь, что в среде они уже созданы и заполнены. ::: - ### Подключение к локальному каталогу Nessie {#connecting-to-local-nessie-catalog} Подключитесь к контейнеру ClickHouse: @@ -167,7 +165,6 @@ ENGINE = DataLakeCatalog('http://nessie:19120/iceberg', 'admin', 'password') SETTINGS catalog_type = 'rest', storage_endpoint = 'http://minio:9002/my-bucket', warehouse = 'warehouse' ``` - ## Запросы к таблицам каталога Nessie с помощью ClickHouse {#querying-nessie-catalog-tables-using-clickhouse} Теперь, когда подключение установлено, вы можете выполнять запросы через каталог Nessie. Например: @@ -251,7 +248,6 @@ SHOW CREATE TABLE `default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## Загрузка данных из вашего хранилища Data Lake в ClickHouse {#loading-data-from-your-data-lake-into-clickhouse} Если вам нужно загрузить данные из каталога Nessie в ClickHouse, сначала создайте локальную таблицу ClickHouse: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md index d64fa50c538..c8204861fef 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md @@ -23,7 +23,6 @@ Microsoft OneLake поддерживает несколько форматов `SET allow_database_iceberg = 1;` ::: - ## Сбор необходимых параметров OneLake {#gathering-requirements} Прежде чем выполнять запросы к вашей таблице в Microsoft Fabric, вам нужно собрать следующую информацию: @@ -43,7 +42,6 @@ Microsoft OneLake поддерживает несколько форматов SET allow_database_iceberg=1 ``` - ### Подключение к OneLake {#connect-onelake} ```sql @@ -59,7 +57,6 @@ onelake_client_id = '', onelake_client_secret = '' ``` - ## Выполнение запросов к OneLake с 
помощью ClickHouse {#querying-onelake-using-clickhouse} Теперь, когда подключение настроено, вы можете выполнять запросы к OneLake: @@ -120,7 +117,6 @@ source_file: green_tripdata_2017-05.parquet Чтобы просмотреть DDL таблицы: - ```sql SHOW CREATE TABLE onelake_catalog.`year_2017.green_tripdata_2017` @@ -155,7 +151,6 @@ Query id: 8bd5bd8e-83be-453e-9a88-32de12ba7f24 └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## Загрузка данных из вашего озера данных (Data Lake) в ClickHouse {#loading-data-from-onelake-into-clickhouse} Если вам нужно загрузить данные из OneLake в ClickHouse: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md index 6b33f7746da..e923c3cb0f0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md @@ -32,7 +32,6 @@ REST Catalog — это стандартизированная специфик `SET allow_experimental_database_iceberg = 1;` ::: - ## Локальная среда разработки {#local-development-setup} Для локальной разработки и тестирования вы можете использовать контейнеризованную установку REST-каталога. Такой подход подходит для обучения, прототипирования и использования в средах разработки. @@ -89,7 +88,6 @@ docker-compose logs -f Настройка REST-каталога требует, чтобы демонстрационные данные сначала были загружены в таблицы Iceberg. Убедитесь, что в среде Spark таблицы созданы и заполнены, прежде чем пытаться выполнять к ним запросы из ClickHouse. Доступность таблиц зависит от конкретной конфигурации docker-compose и скриптов загрузки демонстрационных данных. 
::: - ### Подключение к локальному REST-каталогу {#connecting-to-local-rest-catalog} Подключитесь к своему контейнеру с ClickHouse: @@ -111,7 +109,6 @@ SETTINGS warehouse = 'demo' ``` - ## Выполнение запросов к таблицам REST‑каталога с помощью ClickHouse {#querying-rest-catalog-tables-using-clickhouse} Теперь, когда соединение установлено, вы можете начинать выполнять запросы через REST‑каталог. Например: @@ -195,7 +192,6 @@ SHOW CREATE TABLE `default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## Загрузка данных из вашего озера данных (Data Lake) в ClickHouse {#loading-data-from-your-data-lake-into-clickhouse} Если вам нужно загрузить данные из каталога REST в ClickHouse, сначала создайте локальную таблицу ClickHouse: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md index 080f699d2a1..b2e8156f310 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md @@ -29,7 +29,6 @@ Databricks поддерживает несколько форматов данн `SET allow_experimental_database_unity_catalog = 1;` ::: - ## Настройка Unity в Databricks {#configuring-unity-in-databricks} Чтобы разрешить ClickHouse взаимодействовать с каталогом Unity, необходимо убедиться, что Unity Catalog настроен на взаимодействие с внешним клиентом. Этого можно добиться, следуя руководству ["Enable external data access to Unity Catalog"](https://docs.databricks.com/aws/en/external-access/admin). 
@@ -54,7 +53,6 @@ ENGINE = DataLakeCatalog('https://.cloud.databricks.com/api/2.1/un SETTINGS warehouse = 'CATALOG_NAME', catalog_credential = '', catalog_type = 'unity' ``` - ### Чтение данных из Iceberg {#read-iceberg} ```sql @@ -64,7 +62,6 @@ SETTINGS catalog_type = 'rest', catalog_credential = ':); ``` - ## Загрузка данных из озера данных в ClickHouse {#loading-data-from-your-data-lake-into-clickhouse} Если вам нужно загрузить данные из Databricks в ClickHouse, начните с создания локальной таблицы ClickHouse: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md index e08ae6358d4..7ba5ed1bf85 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md @@ -19,7 +19,6 @@ import observability_23 from '@site/static/images/use-cases/observability/observ import observability_24 from '@site/static/images/use-cases/observability/observability-24.png'; import Image from '@theme/IdealImage'; - # Использование Grafana и ClickHouse для Observability {#using-grafana-and-clickhouse-for-observability} Grafana является предпочтительным инструментом визуализации данных Observability в ClickHouse. Это достигается с помощью официального плагина ClickHouse для Grafana. Пользователи могут следовать инструкциям по установке, приведённым [здесь](/integrations/grafana). @@ -55,7 +54,6 @@ SELECT Timestamp as timestamp, Body as body, SeverityText as level, TraceId as t Конструктор запросов предоставляет простой способ изменения запроса, избавляя пользователей от необходимости писать SQL. Фильтрацию, включая поиск логов, содержащих ключевые слова, можно выполнять прямо в конструкторе запросов. Пользователи, которым нужно писать более сложные запросы, могут переключиться в SQL-редактор. 
Если возвращаются необходимые столбцы и `logs` выбрано в качестве типа запроса (Query Type), результаты будут отображаться как логи. Требуемые столбцы для отображения логов перечислены [здесь](https://grafana.com/developers/plugin-tools/tutorials/build-a-logs-data-source-plugin#logs-data-frame-format). - ### Переход от логов к трассам {#logs-to-traces} Если логи содержат идентификаторы трассировок (trace IDs), пользователи могут переходить к соответствующей трассе для конкретной строки лога. @@ -85,7 +83,6 @@ WHERE ( Timestamp >= $__fromTime AND Timestamp <= $__toTime ) Пользователи, желающие писать более сложные запросы, могут переключиться на `SQL Editor`. - ### Просмотр деталей трейса {#view-trace-details} Как показано выше, идентификаторы трейсов (Trace ID) отображаются как ссылки, по которым можно перейти. При нажатии на идентификатор трейса пользователь может выбрать просмотр связанных спанов по ссылке `View Trace`. При этом выполняется следующий запрос (при условии использования столбцов OTel) для получения спанов в требуемой структуре и отображения результата в виде диаграммы водопада. @@ -120,7 +117,6 @@ LIMIT 1000 - ### Переход от трейсов к логам {#traces-to-logs} Если логи содержат идентификаторы трассировки (trace_id), пользователи могут переходить от трейса к связанным с ним логам. Чтобы просмотреть логи, нажмите на trace_id и выберите `View Logs`. Будет выполнен следующий запрос при условии использования стандартных столбцов OTel. @@ -135,7 +131,6 @@ ORDER BY timestamp ASC LIMIT 1000 - ## Дашборды {#dashboards} Пользователи могут создавать дашборды в Grafana, используя источник данных ClickHouse. Мы рекомендуем [документацию по источнику данных для Grafana и ClickHouse](https://github.com/grafana/clickhouse-datasource) для получения дополнительной информации, в частности разделы о [макросах](https://github.com/grafana/clickhouse-datasource?tab=readme-ov-file#macros) и [переменных](https://grafana.com/docs/grafana/latest/dashboards/variables/). 
@@ -165,7 +160,6 @@ LIMIT 100000 - ### Многолинейные графики {#multi-line-charts} Многолинейные графики будут автоматически построены для запроса, если соблюдаются следующие условия: @@ -191,7 +185,6 @@ LIMIT 100000 - ### Визуализация геоданных {#visualizing-geo-data} Ранее мы рассмотрели обогащение данных наблюдаемости геокоординатами с использованием IP-словарей. Предположим, у вас есть столбцы `latitude` и `longitude`; тогда данные наблюдаемости можно визуализировать с помощью функции `geohashEncode`. Она формирует геохэши, совместимые с диаграммой Geo Map в Grafana. Ниже приведены пример запроса и его визуализация: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md index 770fe86e6c7..2ad5ac0ba3f 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md @@ -16,7 +16,6 @@ import observability_8 from '@site/static/images/use-cases/observability/observa import observability_9 from '@site/static/images/use-cases/observability/observability-9.png'; import Image from '@theme/IdealImage'; - # Интеграция OpenTelemetry для сбора данных {#integrating-opentelemetry-for-data-collection} Любому решению в области наблюдаемости необходим механизм сбора и экспорта логов и трассировок. Для этой цели ClickHouse рекомендует [проект OpenTelemetry (OTel)](https://opentelemetry.io/). @@ -111,7 +110,6 @@ OpenTelemetry состоит из ряда компонентов. Помимо Мы рекомендуем использовать структурированное логирование и по возможности записывать логи в формате JSON (например, ndjson). 
Это упростит необходимую последующую обработку логов — либо до отправки в ClickHouse с помощью [Collector processors](https://opentelemetry.io/docs/collector/configuration/#processors), либо на этапе вставки с использованием материализованных представлений. Структурированные логи в конечном итоге сократят объем последующей обработки и снизят требуемое потребление CPU в вашем решении на базе ClickHouse. - ### Пример {#example} В качестве примера мы предоставляем наборы данных с логами в структурированном (JSON) и неструктурированном виде, каждый примерно по 10 млн строк, доступные по следующим ссылкам: @@ -165,7 +163,6 @@ service: При использовании структурированных логов сообщения на выходе будут иметь следующий вид: - ```response LogRecord #98 ObservedTimestamp: 2024-06-19 13:21:16.414259 +0000 UTC @@ -205,7 +202,6 @@ Flags: 0 Пользователям, которым необходимо собирать локальные файлы логов или логи Kubernetes, мы рекомендуем ознакомиться с параметрами конфигурации, доступными для [filelog receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/filelogreceiver/README.md#configuration), а также с тем, как реализованы [отслеживание смещений (offsets)](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver#offset-tracking) и [обработка многострочных логов](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver#example---multiline-logs-parsing). - ## Сбор логов Kubernetes {#collecting-kubernetes-logs} Для сбора логов Kubernetes мы рекомендуем [руководство по Kubernetes в документации OpenTelemetry](https://opentelemetry.io/docs/kubernetes/). Для обогащения логов и метрик метаданными подов рекомендуется использовать [Kubernetes Attributes Processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor). 
Это может приводить к появлению динамических метаданных, например меток, которые хранятся в столбце `ResourceAttributes`. В настоящее время ClickHouse использует тип `Map(String, String)` для этого столбца. Дополнительные сведения по обработке и оптимизации этого типа см. в разделах [Использование Map](/use-cases/observability/schema-design#using-maps) и [Извлечение из Map](/use-cases/observability/schema-design#extracting-from-maps). @@ -278,7 +274,6 @@ $GOBIN/telemetrygen traces --otlp-insecure --traces 300 Полная схема сообщений трассировки представлена [здесь](https://opentelemetry.io/docs/concepts/signals/traces/). Настоятельно рекомендуем ознакомиться с этой схемой. - ## Обработка — фильтрация, преобразование и обогащение {#processing---filtering-transforming-and-enriching} Как было показано в предыдущем примере с установкой временной метки для события лога, пользователям, как правило, требуется фильтровать, преобразовывать и обогащать сообщения о событиях. Это можно сделать с помощью ряда возможностей в OpenTelemetry: @@ -339,7 +334,6 @@ service: ./otelcol-contrib --config config-unstructured-logs-with-processor.yaml ``` - ## Экспорт в ClickHouse {#exporting-to-clickhouse} Экспортеры отправляют данные в один или несколько бэкендов или целевых назначений. Экспортеры могут работать по pull- или push-модели. Чтобы отправлять события в ClickHouse, пользователям необходимо использовать push-экспортер [ClickHouse exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md). @@ -402,7 +396,6 @@ service: Обратите внимание на следующие важные настройки: - * **pipelines** - Конфигурация выше демонстрирует использование [pipelines](https://opentelemetry.io/docs/collector/configuration/#pipelines), состоящих из набора receivers, processors и exporters, с отдельным конвейером для логов (logs) и трассировок (traces). * **endpoint** - Взаимодействие с ClickHouse настраивается с помощью параметра `endpoint`. 
Строка подключения `tcp://localhost:9000?dial_timeout=10s&compress=lz4&async_insert=1` обеспечивает обмен по протоколу TCP. Если пользователи предпочитают HTTP по причинам, связанным с маршрутизацией трафика, измените эту строку подключения, как описано [здесь](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md#configuration-options). Полные детали подключения, включая возможность указать имя пользователя и пароль в этой строке подключения, описаны [здесь](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md#configuration-options). @@ -430,7 +423,6 @@ $GOBIN/telemetrygen traces --otlp-insecure --traces 300 После запуска убедитесь, что в журнале появились записи логов, выполнив простой запрос: - ```sql SELECT * FROM otel_logs @@ -491,7 +483,6 @@ Links.TraceState: [] Links.Attributes: [] ``` - ## Базовая схема {#out-of-the-box-schema} По умолчанию экспортер ClickHouse создаёт целевую таблицу для журналов, которая используется как для логов, так и для трейсов. Это можно отключить с помощью параметра `create_schema`. Кроме того, имена таблиц для логов и трейсов, по умолчанию `otel_logs` и `otel_traces`, можно изменить через указанные выше настройки. @@ -540,7 +531,6 @@ SETTINGS ttl_only_drop_parts = 1 Несколько важных замечаний по этой схеме: - - По умолчанию таблица разбивается на партиции по дате с помощью `PARTITION BY toDate(Timestamp)`. Это делает удаление устаревших данных эффективным. - `TTL` задаётся через `TTL toDateTime(Timestamp) + toIntervalDay(3)` и соответствует значению, заданному в конфигурации коллектора. [`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts) означает, что удаляются только целые части, когда все строки в них устарели. Это эффективнее, чем удаление строк внутри частей, которое приводит к дорогостоящей операции удаления. Мы рекомендуем всегда устанавливать этот параметр. См. 
раздел [Управление данными с помощью TTL](/observability/managing-data#data-management-with-ttl-time-to-live) для получения дополнительной информации. - Таблица использует классический движок [`MergeTree`](/engines/table-engines/mergetree-family/mergetree). Он рекомендован для логов и трейсов и, как правило, не требует изменения. @@ -593,7 +583,6 @@ SETTINGS ttl_only_drop_parts = 1 Мы рекомендуем пользователям отключить автоматическое создание схемы и создавать таблицы вручную. Это позволяет изменять первичные и вторичные ключи, а также даёт возможность добавлять дополнительные столбцы для оптимизации производительности запросов. Для получения дополнительной информации см. раздел [Schema design](/use-cases/observability/schema-design). - ## Оптимизация вставок {#optimizing-inserts} Чтобы обеспечить высокую производительность вставки при сохранении строгих гарантий согласованности, пользователям следует придерживаться простых правил при вставке данных Observability в ClickHouse через коллектор. При корректной конфигурации OTel collector соблюдение следующих правил не должно вызывать затруднений. Это также позволяет избежать [типичных проблем](https://clickhouse.com/blog/common-getting-started-issues-with-clickhouse), с которыми пользователи сталкиваются при первом использовании ClickHouse. 
@@ -692,7 +681,6 @@ service: exporters: [otlp] ``` - [clickhouse-gateway-config.yaml](https://www.otelbin.io/#config=receivers%3A*N__otlp%3A*N____protocols%3A*N____grpc%3A*N____endpoint%3A_0.0.0.0%3A4317*N*Nprocessors%3A*N__batch%3A*N____timeout%3A_5s*N____send*_batch*_size%3A_10000*N*Nexporters%3A*N__clickhouse%3A*N____endpoint%3A_tcp%3A%2F%2Flocalhost%3A9000*Qdial*_timeout*E10s*Acompress*Elz4*N____ttl%3A_96h*N____traces*_table*_name%3A_otel*_traces*N____logs*_table*_name%3A_otel*_logs*N____create*_schema%3A_true*N____timeout%3A_10s*N____database%3A_default*N____sending*_queue%3A*N____queue*_size%3A_10000*N____retry*_on*_failure%3A*N____enabled%3A_true*N____initial*_interval%3A_5s*N____max*_interval%3A_30s*N____max*_elapsed*_time%3A_300s*N*Nservice%3A*N__pipelines%3A*N____logs%3A*N______receivers%3A_%5Botlp%5D*N______processors%3A_%5Bbatch%5D*N______exporters%3A_%5Bclickhouse%5D%7E\&distro=otelcol-contrib%7E\&distroVersion=v0.103.1%7E) ```yaml @@ -740,7 +728,6 @@ service: В качестве примера управления более крупными архитектурами на основе шлюзов и связанных с этим уроков мы рекомендуем этот [блог‑пост](https://clickhouse.com/blog/building-a-logging-platform-with-clickhouse-and-saving-millions-over-datadog). - ### Добавление Kafka {#adding-kafka} Читатели могут заметить, что приведённые выше архитектуры не используют Kafka в качестве очереди сообщений. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md index d8b3c15537a..0e22f4d34b7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md @@ -11,7 +11,6 @@ import observability_1 from '@site/static/images/use-cases/observability/observa import observability_2 from '@site/static/images/use-cases/observability/observability-2.png'; import Image from '@theme/IdealImage'; - # Использование ClickHouse для обеспечения наблюдаемости {#using-clickhouse-for-observability} ## Введение {#introduction} @@ -86,7 +85,6 @@ import Image from '@theme/IdealImage'; Хотя ClickHouse может использоваться для хранения данных метрик, это направление в ClickHouse развито менее полно; ожидается поддержка таких возможностей, как формат данных Prometheus и язык запросов PromQL. ::: - ### Распределённая трассировка {#distributed-tracing} Распределённая трассировка — критически важная составляющая наблюдаемости. Распределённая трассировка, или просто трейс, отображает путь запроса через систему. Запрос может исходить от конечного пользователя или приложения и распространяться по системе, как правило приводя к последовательности действий между микросервисами. Записывая эту последовательность и позволяя коррелировать последующие события, трассировка даёт пользователю системы наблюдаемости или SRE возможность диагностировать проблемы в потоке обработки приложения независимо от того, насколько сложна архитектура или насколько активно используются serverless-компоненты. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md index fceda76cc06..793798f56c6 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md @@ -10,7 +10,6 @@ doc_type: 'guide' import observability_14 from '@site/static/images/use-cases/observability/observability-14.png'; import Image from '@theme/IdealImage'; - # Управление данными {#managing-data} Развертывания ClickHouse для задач наблюдаемости неизбежно связаны с большими объемами данных, которыми необходимо управлять. ClickHouse предлагает ряд возможностей для управления такими данными. @@ -78,7 +77,6 @@ WHERE `table` = 'otel_logs' У нас может быть дополнительная таблица `otel_logs_archive`, которую мы используем для хранения более старых данных. Данные могут быть эффективно перемещены в эту таблицу по разделам (это всего лишь изменение метаданных). - ```sql CREATE TABLE otel_logs_archive AS otel_logs --перемещаем данные в архивную таблицу @@ -145,7 +143,6 @@ ORDER BY c DESC Эта возможность используется механизмом TTL при включении настройки [`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts). Дополнительную информацию см. в разделе [Управление данными с помощью TTL](#data-management-with-ttl-time-to-live). ::: - ### Применения {#applications} Выше показано, как данные могут эффективно перемещаться и обрабатываться на уровне партиций. 
На практике пользователи чаще всего будут использовать операции с партициями в сценариях наблюдаемости в двух случаях: @@ -193,7 +190,6 @@ TTLs применяются не сразу, а по расписанию, ка **Важно: мы рекомендуем использовать настройку [`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts) ** (применяется в схеме по умолчанию). Когда эта настройка включена, ClickHouse удаляет целую часть, если все строки в ней имеют истёкший TTL. Удаление целых частей вместо частичной очистки строк с истёкшим TTL (достигаемой с помощью ресурсоёмких мутаций при `ttl_only_drop_parts=0`) позволяет использовать меньшие значения `merge_with_ttl_timeout` и снижать влияние на производительность системы. Если данные разбиваются на партиции по той же единице, по которой у вас настроено истечение TTL, например по дням, части естественным образом будут содержать данные только из заданного интервала. Это гарантирует, что `ttl_only_drop_parts=1` может эффективно применяться. - ### TTL на уровне столбца {#column-level-ttl} В приведённом выше примере срок жизни задаётся на уровне таблицы. Пользователи также могут задавать срок жизни данных на уровне столбца. По мере устаревания данных это можно использовать для удаления столбцов, ценность которых для расследований не оправдывает ресурсных затрат на их хранение. Например, мы рекомендуем сохранять столбец `Body` на случай, если будут добавлены новые динамические метаданные, которые не были извлечены во время вставки, например новая метка Kubernetes. После некоторого периода, например одного месяца, может стать очевидно, что эти дополнительные метаданные не полезны — и, следовательно, нет смысла продолжать хранить столбец `Body`. @@ -215,7 +211,6 @@ ORDER BY (ServiceName, Timestamp) Указание TTL на уровне столбца требует от пользователей самостоятельного определения собственной схемы. Это нельзя настроить в OTel collector. 
::: - ## Повторное сжатие данных {#recompressing-data} Хотя для наборов данных наблюдаемости мы обычно рекомендуем `ZSTD(1)`, пользователи могут экспериментировать с другими алгоритмами сжатия или более высокими уровнями сжатия, например `ZSTD(3)`. Помимо возможности указать это при создании схемы, сжатие можно настроить так, чтобы оно изменялось по истечении заданного периода времени. Это может быть целесообразно, если кодек или алгоритм сжатия обеспечивает более высокую степень сжатия, но ухудшает производительность запросов. Такой компромисс может быть приемлем для более старых данных, к которым обращаются реже, но не для свежих данных, которые используются чаще, в том числе при расследованиях инцидентов. @@ -254,7 +249,6 @@ TTL Timestamp + INTERVAL 4 DAY RECOMPRESS CODEC(ZSTD(3)) Дополнительные сведения и примеры по настройке TTL см. [здесь](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes). Примеры добавления и изменения TTL для таблиц и столбцов приведены [здесь](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl). О том, как TTL позволяет реализовывать иерархии хранилища, такие как архитектуры hot‑warm, см. раздел [Уровни хранилища](#storage-tiers). - ## Уровни хранения {#storage-tiers} В ClickHouse пользователи могут создавать уровни хранения на разных дисках, например «горячие»/недавние данные на SSD и более старые данные в S3. Такая архитектура позволяет использовать более дешевое хранилище для старых данных, для которых допустимы более высокие SLA по запросам из‑за их редкого использования при расследованиях. @@ -351,7 +345,6 @@ LIMIT 5 Чтобы обеспечить запись этого значения для всех будущих данных, мы можем изменить наше материализованное представление с помощью синтаксиса `ALTER TABLE`, как показано ниже: - ```sql ALTER TABLE otel_logs_mv MODIFY QUERY @@ -378,7 +371,6 @@ FROM otel_logs Для последующих строк значение в столбце `Size` будет заполняться в момент вставки. 
- ### Создание новых таблиц {#create-new-tables} В качестве альтернативы описанному выше процессу пользователи могут просто создать новую целевую таблицу с новой схемой. Любые материализованные представления затем можно изменить так, чтобы они использовали эту новую таблицу, с помощью вышеупомянутой команды `ALTER TABLE MODIFY QUERY`. При таком подходе пользователи могут версионировать свои таблицы, например `otel_logs_v3`. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md index 89c1a88be99..024939349d5 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md @@ -13,7 +13,6 @@ import observability_12 from '@site/static/images/use-cases/observability/observ import observability_13 from '@site/static/images/use-cases/observability/observability-13.png'; import Image from '@theme/IdealImage'; - # Проектирование схемы для наблюдаемости {#designing-a-schema-for-observability} Мы рекомендуем пользователям всегда создавать собственную схему для логов и трейсов по следующим причинам: @@ -82,7 +81,6 @@ LIMIT 5 В целом мы рекомендуем выполнять разбор JSON в ClickHouse для структурированных логов. Мы уверены, что ClickHouse обеспечивает самое быстрое выполнение разбора JSON. Однако мы понимаем, что пользователи могут хотеть отправлять логи в другие системы и не реализовывать эту логику в SQL. ::: - ```sql SELECT path(JSONExtractString(Body, 'request_path')) AS path, count() AS c FROM otel_logs @@ -156,7 +154,6 @@ LIMIT 5 Пользователи также могут выполнять обработку с использованием процессоров и операторов OTel collector, как описано [здесь](/observability/integrating-opentelemetry#processing---filtering-transforming-and-enriching). 
В большинстве случаев пользователи увидят, что ClickHouse значительно эффективнее по использованию ресурсов и быстрее, чем процессоры OTel collector. Основной недостаток выполнения всей обработки событий с помощью SQL — это привязка вашего решения к ClickHouse. Например, пользователи могут захотеть отправлять обработанные логи из OTel collector в другие системы, например в S3. ::: - ### Материализованные столбцы {#materialized-columns} Материализованные столбцы являются самым простым способом извлечь структуру из других столбцов. Значения таких столбцов всегда вычисляются во время вставки и не могут быть указаны в запросах INSERT. @@ -224,7 +221,6 @@ LIMIT 5 Материализованные столбцы по умолчанию не возвращаются в результате `SELECT *`. Это необходимо для сохранения свойства, что результат `SELECT *` всегда можно вставить обратно в таблицу с помощью команды INSERT. Такое поведение можно отключить, установив `asterisk_include_materialized_columns=1`, а также включить в Grafana (см. `Additional Settings -> Custom Settings` в конфигурации источника данных). ::: - ## Материализованные представления {#materialized-views} [Материализованные представления](/materialized-views) предоставляют более мощный способ применения SQL-фильтрации и преобразований к логам и трейсам. @@ -268,7 +264,6 @@ CREATE TABLE otel_logs Рассмотрим следующий запрос. Он преобразует строки в нужный нам формат, извлекая все столбцы из `LogAttributes` (предполагаем, что они заполняются коллектором с использованием оператора `json_parser`), а также устанавливая `SeverityText` и `SeverityNumber` (на основе некоторых простых условий и определения [этих столбцов](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext)). В данном случае мы также выбираем только те столбцы, про которые знаем, что они будут заполнены, игнорируя такие столбцы, как `TraceId`, `SpanId` и `TraceFlags`. 
- ```sql SELECT Body, @@ -354,7 +349,6 @@ ORDER BY (ServiceName, Timestamp) Обратите внимание, насколько сильно мы изменили нашу схему. На практике у пользователей, вероятно, также будут столбцы трассировок (Trace), которые они захотят сохранить, а также столбец `ResourceAttributes` (обычно он содержит метаданные Kubernetes). Grafana может использовать столбцы трассировок для связывания логов и трассировок — см. ["Using Grafana"](/observability/grafana). ::: - Ниже мы создаём материализованное представление `otel_logs_mv`, которое выполняет указанную выше выборку для таблицы `otel_logs` и отправляет результаты в `otel_logs_v2`. ```sql @@ -417,7 +411,6 @@ SeverityNumber: 9 Эквивалентное материализованное представление, которое опирается на извлечение столбцов из колонки `Body` с помощью JSON-функций, показано ниже: - ```sql CREATE MATERIALIZED VIEW otel_logs_mv TO otel_logs_v2 AS SELECT Body, @@ -440,7 +433,6 @@ SELECT Body, FROM otel_logs ``` - ### Осторожно с типами {#beware-types} Вышеописанные материализованные представления опираются на неявное приведение типов — особенно при использовании map `LogAttributes`. ClickHouse часто прозрачно приводит извлечённое значение к типу целевой таблицы, сокращая необходимый синтаксис. Однако мы рекомендуем всегда тестировать представления, выполняя их оператор `SELECT` совместно с оператором [`INSERT INTO`](/sql-reference/statements/insert-into) в целевую таблицу с той же схемой. Это позволит убедиться, что типы обрабатываются корректно. Особое внимание следует уделить следующим случаям: @@ -495,7 +487,6 @@ groupArrayDistinctArray(mapKeys(LogAttributes)): ['remote_user','run_time','requ Мы не рекомендуем использовать точки в именах столбцов Map и в дальнейшем можем признать такое использование устаревшим. Используйте `_`. ::: - ## Использование алиасов {#using-aliases} Запросы к типам `Map` выполняются медленнее, чем к обычным столбцам — см. раздел ["Ускорение запросов"](#accelerating-queries). 
Кроме того, синтаксис таких запросов более сложен и может быть неудобен для пользователей. Чтобы решить последнюю проблему, мы рекомендуем использовать столбцы типа `ALIAS`. @@ -573,7 +564,6 @@ LIMIT 5 По умолчанию `SELECT *` исключает столбцы типа ALIAS. Это поведение можно изменить, установив `asterisk_include_alias_columns=1`. ::: - ## Оптимизация типов {#optimizing-types} [Общие рекомендации ClickHouse](/data-modeling/schema-design#optimizing-types) по оптимизации типов также относятся к данному сценарию использования ClickHouse. @@ -694,7 +684,6 @@ LIMIT 4; Получено 4 строки. Прошло: 0.259 сек. ``` - :::note В приведённом выше запросе происходит много всего. Тем, кому интересно, рекомендуется прочитать это отличное [объяснение](https://clickhouse.com/blog/geolocating-ips-in-clickhouse-and-grafana#using-bit-functions-to-convert-ip-ranges-to-cidr-notation). Иначе просто считайте, что выше вычисляется CIDR для диапазона IP-адресов. ::: @@ -775,7 +764,6 @@ SELECT dictGet('ip_trie', ('country_code', 'latitude', 'longitude'), CAST('85.24 Возвращаясь к нашему исходному набору логов, мы можем использовать описанное выше, чтобы агрегировать логи по странам. Далее предполагается, что мы используем схему, полученную из нашего ранее созданного материализованного представления, в которой есть извлечённый столбец `RemoteAddress`. - ```sql SELECT dictGet('ip_trie', 'country_code', tuple(RemoteAddress)) AS country, formatReadableQuantity(count()) AS num_requests @@ -833,7 +821,6 @@ ORDER BY (ServiceName, Timestamp) Указанные выше страны и координаты предоставляют возможности визуализации, выходящие за рамки простой группировки и фильтрации по стране. В качестве примера см. раздел ["Визуализация геоданных"](/observability/grafana#visualizing-geo-data). 
- ### Использование regex-словарей (разбор user agent) {#using-regex-dictionaries-user-agent-parsing} Разбор [строк user agent](https://en.wikipedia.org/wiki/User_agent) — это классическая задача для регулярных выражений и распространённое требование для наборов данных, основанных на логах и трассировках. ClickHouse предоставляет эффективный разбор строк user agent с использованием Regular Expression Tree Dictionaries. @@ -929,7 +916,6 @@ LAYOUT(regexp_tree); После загрузки словарей мы можем задать пример значения заголовка User-Agent и протестировать новые возможности извлечения данных с их помощью: - ```sql WITH 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:127.0) Gecko/20100101 Firefox/127.0' AS user_agent SELECT @@ -1006,7 +992,6 @@ ORDER BY (ServiceName, Timestamp, Status) После перезапуска коллектора и приёма структурированных логов на основе ранее описанных шагов мы можем выполнять запросы к только что извлечённым столбцам Device, Browser и Os. - ```sql SELECT Device, Browser, Os FROM otel_logs_v2 @@ -1024,7 +1009,6 @@ Os: ('Other','0','0','0') Обратите внимание на использование кортежей для этих столбцов user agent. Кортежи рекомендуются для сложных структур, иерархия которых известна заранее. Подстолбцы обеспечивают ту же производительность, что и обычные столбцы (в отличие от ключей `Map`), при этом поддерживают неоднородные типы данных. ::: - ### Дополнительные материалы {#further-reading} Дополнительные примеры и подробности о словарях вы найдете в следующих статьях: @@ -1111,7 +1095,6 @@ FINAL 1 row in set. Elapsed: 0.039 sec. ``` - Мы фактически сократили количество строк здесь с 10 млн (в `otel_logs`) до 113, сохранив результат нашего запроса. 
Важно, что если в таблицу `otel_logs` вставляются новые логи, новые значения будут отправлены в `bytes_per_hour` для соответствующего часа, где они будут автоматически асинхронно объединяться в фоновом режиме — за счёт хранения только одной строки в час `bytes_per_hour` таким образом всегда будет и небольшой, и актуальной. Поскольку объединение строк выполняется асинхронно, при выполнении запроса пользователем может существовать более одной строки на час. Чтобы гарантировать, что все необъединённые строки будут объединены во время выполнения запроса, у нас есть два варианта: @@ -1165,7 +1148,6 @@ LIMIT 5 Выигрыш может быть ещё больше на больших наборах данных с более сложными запросами. Примеры см. [здесь](https://github.com/ClickHouse/clickpy). ::: - #### Более сложный пример {#a-more-complex-example} Приведённый выше пример агрегирует простое количество в час, используя [SummingMergeTree](/engines/table-engines/mergetree-family/summingmergetree). Статистика, выходящая за рамки простых сумм, требует другого движка целевой таблицы: [AggregatingMergeTree](/engines/table-engines/mergetree-family/aggregatingmergetree). @@ -1244,7 +1226,6 @@ ORDER BY Hour DESC Обратите внимание, что здесь мы используем `GROUP BY` вместо `FINAL`. - ### Использование материализованных представлений (инкрементальных) для быстрых выборок {#using-materialized-views-incremental--for-fast-lookups} Пользователям следует учитывать свои шаблоны доступа при выборе ключа сортировки ClickHouse — включать в него столбцы, которые часто используются в условиях фильтрации и агрегации. Это может быть ограничивающим фактором в сценариях наблюдаемости, где у пользователей более разнообразные шаблоны доступа, которые невозможно выразить одним набором столбцов. Лучше всего это иллюстрируется на примере, встроенном в стандартные схемы OTel. 
Рассмотрим схему по умолчанию для трассировок: @@ -1316,7 +1297,6 @@ WHERE TraceId != '' GROUP BY TraceId ``` - Представление по сути гарантирует, что таблица `otel_traces_trace_id_ts` содержит минимальную и максимальную метку времени для каждого трейса. Эта таблица, упорядоченная по `TraceId`, позволяет эффективно извлекать эти метки времени. В свою очередь, эти диапазоны меток времени могут использоваться при выполнении запросов к основной таблице `otel_traces`. Конкретнее, при получении трейса по его идентификатору Grafana использует следующий запрос: ```sql @@ -1350,7 +1330,6 @@ LIMIT 1000 Тот же подход можно применить для похожих паттернов доступа. Аналогичный пример по моделированию данных разбирается [здесь](/materialized-view/incremental-materialized-view#lookup-table). - ### Использование проекций {#using-projections} Проекции ClickHouse позволяют указать несколько предложений `ORDER BY` для таблицы. @@ -1460,7 +1439,6 @@ FORMAT `Null` В приведённом выше примере мы указываем в проекции столбцы, использованные в предыдущем запросе. Это означает, что только эти столбцы будут храниться на диске как часть проекции и будут упорядочены по Status. Если бы мы вместо этого использовали здесь `SELECT *`, сохранялись бы все столбцы. Хотя это позволило бы большему числу запросов (использующих любые подмножества столбцов) воспользоваться проекцией, потребовалось бы дополнительное дисковое пространство. Для измерения занимаемого дискового пространства и степени сжатия см. ["Measuring table size & compression"](#measuring-table-size--compression). - ### Вторичные индексы / индексы пропуска данных {#secondarydata-skipping-indices} Независимо от того, насколько хорошо настроен первичный ключ в ClickHouse, некоторые запросы неизбежно будут требовать полного сканирования таблицы. 
Хотя необходимость таких сканирований можно снизить с помощью материализованных представлений (и проекций для некоторых запросов), они требуют дополнительного обслуживания, а пользователи должны знать об их наличии, чтобы эффективно их использовать. В то время как традиционные реляционные базы данных решают эту задачу с помощью вторичных индексов, в колоночных базах данных, таких как ClickHouse, они неэффективны. Вместо этого ClickHouse использует индексы пропуска данных (skip indexes), которые могут существенно повысить производительность запросов, позволяя базе данных пропускать крупные блоки данных без подходящих значений. @@ -1616,7 +1594,6 @@ WHERE Referer LIKE '%ultra%' Фильтр Блума обычно будет быстрее только в том случае, если он меньше самого столбца. Если он больше, выигрыш в производительности, скорее всего, будет незначительным. Сравните размер фильтра с размером столбца, используя следующие запросы: - ```sql SELECT name, @@ -1654,7 +1631,6 @@ WHERE `table` = 'otel_logs_bloom' Дополнительные сведения о вторичных пропускающих индексах можно найти [здесь](/optimize/skipping-indexes#skip-index-functions). - ### Извлечение из типов Map {#extracting-from-maps} Тип Map широко используется в схемах OTel. Для этого типа требуется, чтобы значения и ключи имели один и тот же тип — этого достаточно для метаданных, таких как метки Kubernetes. Имейте в виду, что при запросе подключа типа Map загружается весь родительский столбец. Если Map содержит много ключей, это может привести к существенному снижению производительности запроса, поскольку с диска нужно прочитать больше данных, чем если бы ключ существовал как отдельный столбец. 
diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md index 1428c759603..3d1668cc3b8 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md @@ -14,15 +14,14 @@ import hyperdx_26 from '@site/static/images/use-cases/observability/hyperdx-26.p Для каждого компонента ClickStack доступны следующие параметры конфигурации: - ## Изменение настроек {#modifying-settings} ### Docker {#docker} -Если вы используете [All in One](/use-cases/observability/clickstack/deployment/all-in-one), [HyperDX Only](/use-cases/observability/clickstack/deployment/hyperdx-only) или [Local Mode](/use-cases/observability/clickstack/deployment/local-mode-only), просто передайте нужное значение через переменную окружения, например: +Если вы используете [All in One](/use-cases/observability/clickstack/deployment/all-in-one), [HyperDX Only](/use-cases/observability/clickstack/deployment/hyperdx-only) или [Local Mode](/use-cases/observability/clickstack/deployment/local-mode-only), просто передайте нужную настройку через переменную окружения, например: ```shell -docker run -e HYPERDX_LOG_LEVEL='debug' -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e HYPERDX_LOG_LEVEL='debug' -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` @@ -43,7 +42,6 @@ services: # ... 
прочие настройки ``` - ### Helm {#helm} #### Настройка параметров (необязательно) {#customizing-values} @@ -97,7 +95,6 @@ ingress: value: abc ``` - ## HyperDX {#hyperdx} ### Настройки источника данных {#datasource-settings} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md index bcc1a60da34..66529e0f431 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md @@ -31,7 +31,6 @@ ClickStack поддерживает визуализацию событий и Визуализации могут создаваться на основе трассировок, метрик, логов или любых пользовательски определённых широких схем событий. - ## Создание визуализаций {#creating-visualizations} Интерфейс **Chart Explorer** в HyperDX позволяет визуализировать метрики, трейсы и логи во времени, что упрощает создание быстрых визуализаций для анализа данных. Этот интерфейс также используется при создании дашбордов. В следующем разделе пошагово разбирается процесс создания визуализации с помощью Chart Explorer. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md index 55ff2ca2cdd..f84908f43a7 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md @@ -23,7 +23,6 @@ import hyperdx_logs from '@site/static/images/use-cases/observability/hyperdx-lo Этот вариант поддерживает аутентификацию, что позволяет сохранять дашборды, оповещения и сохранённые поисковые запросы между сеансами и пользователями. 
- ### Подходит для {#suitable-for} * демонстраций @@ -40,35 +39,39 @@ import hyperdx_logs from '@site/static/images/use-cases/observability/hyperdx-lo Следующая команда запустит коллектор OpenTelemetry (на портах 4317 и 4318) и интерфейс HyperDX (на порту 8080). ```shell -docker run -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` -### Перейдите к интерфейсу HyperDX {#navigate-to-hyperdx-ui} +:::note Обновление имени образа +Образы ClickStack теперь публикуются как `clickhouse/clickstack-*` (ранее `docker.hyperdx.io/hyperdx/*`). +::: + +### Переход к интерфейсу HyperDX {#navigate-to-hyperdx-ui} Перейдите по адресу [http://localhost:8080](http://localhost:8080), чтобы получить доступ к интерфейсу HyperDX. -Создайте пользователя, указав имя пользователя и пароль, который соответствует требованиям. +Создайте пользователя, указав имя и пароль, соответствующие требованиям. -После нажатия `Create` будут созданы источники данных для интегрированного экземпляра ClickHouse. +При нажатии кнопки `Create` будут созданы источники данных для встроенного экземпляра ClickHouse. -Пример использования альтернативного экземпляра ClickHouse смотрите в разделе «[Создание подключения ClickHouse Cloud](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection)». +Пример использования другого экземпляра ClickHouse приведён в разделе ["Создание подключения ClickHouse Cloud"](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection). ### Приём данных {#ingest-data} -Инструкции по приёму данных смотрите в разделе «[Приём данных](/use-cases/observability/clickstack/ingesting-data)». +Информацию о приёме данных см. в разделе ["Приём данных"](/use-cases/observability/clickstack/ingesting-data). 
## Сохранение данных и настроек {#persisting-data-and-settings} -Чтобы сохранять данные и настройки при перезапусках контейнера, пользователи могут изменить приведённую выше команду docker, чтобы смонтировать каталоги `/data/db`, `/var/lib/clickhouse` и `/var/log/clickhouse-server`. Например: +Чтобы сохранять данные и настройки между перезапусками контейнера, пользователи могут изменить приведённую выше команду docker, чтобы смонтировать каталоги по путям `/data/db`, `/var/lib/clickhouse` и `/var/log/clickhouse-server`. Например: ```shell -# убедитесь, что каталоги существуют {#ensure-directories-exist} +# убедитесь, что каталоги существуют mkdir -p .volumes/db .volumes/ch_data .volumes/ch_logs -# измените команду для монтирования путей {#modify-command-to-mount-paths} +# измените команду для монтирования путей docker run \ -p 8080:8080 \ -p 4317:4317 \ @@ -76,7 +79,7 @@ docker run \ -v "$(pwd)/.volumes/db:/data/db" \ -v "$(pwd)/.volumes/ch_data:/var/lib/clickhouse" \ -v "$(pwd)/.volumes/ch_logs:/var/log/clickhouse-server" \ - docker.hyperdx.io/hyperdx/hyperdx-all-in-one + clickhouse/clickstack-all-in-one:latest ``` @@ -89,18 +92,18 @@ docker run \ ## Настройка портов {#customizing-ports-deploy} -Если вам нужно изменить порты приложения (8080) или API (8000), на которых работает HyperDX Local, вам потребуется изменить команду `docker run`, чтобы пробросить нужные порты и задать несколько переменных окружения. +Если вам нужно изменить порты приложения (8080) или API (8000), на которых запущен HyperDX Local, необходимо скорректировать команду `docker run`, чтобы пробросить нужные порты и задать несколько переменных окружения. -Порты OpenTelemetry настраиваются простым изменением флагов проброса портов. Например, можно заменить `-p 4318:4318` на `-p 4999:4318`, чтобы изменить HTTP-порт OpenTelemetry на 4999. +Порты OpenTelemetry можно настроить, просто изменив флаги проброса портов. 
Например, замените `-p 4318:4318` на `-p 4999:4318`, чтобы изменить HTTP-порт OpenTelemetry на 4999. ```shell -docker run -p 8080:8080 -p 4317:4317 -p 4999:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -p 8080:8080 -p 4317:4317 -p 4999:4318 clickhouse/clickstack-all-in-one:latest ``` ## Использование ClickHouse Cloud {#using-clickhouse-cloud} -Этот дистрибутив можно использовать с ClickHouse Cloud. При этом локальный экземпляр ClickHouse всё равно будет развёрнут, но использоваться не будет, а OTel collector можно настроить на работу с экземпляром ClickHouse Cloud с помощью переменных окружения `CLICKHOUSE_ENDPOINT`, `CLICKHOUSE_USER` и `CLICKHOUSE_PASSWORD`. +Этот дистрибутив можно использовать с ClickHouse Cloud. Хотя локальный экземпляр ClickHouse по-прежнему будет развёрнут (и будет игнорироваться), OTel collector можно настроить на использование экземпляра ClickHouse Cloud с помощью переменных окружения `CLICKHOUSE_ENDPOINT`, `CLICKHOUSE_USER` и `CLICKHOUSE_PASSWORD`. Например: @@ -109,22 +112,22 @@ export CLICKHOUSE_ENDPOINT= export CLICKHOUSE_USER= export CLICKHOUSE_PASSWORD= -docker run -e CLICKHOUSE_ENDPOINT=${CLICKHOUSE_ENDPOINT} -e CLICKHOUSE_USER=default -e CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD} -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e CLICKHOUSE_ENDPOINT=${CLICKHOUSE_ENDPOINT} -e CLICKHOUSE_USER=default -e CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD} -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` `CLICKHOUSE_ENDPOINT` должен указывать на HTTPS-эндпоинт ClickHouse Cloud, включая порт `8443`, например: `https://mxl4k3ul6a.us-east-2.aws.clickhouse.com:8443` -После подключения к интерфейсу HyperDX перейдите в [`Team Settings`](http://localhost:8080/team) и создайте подключение к вашему сервису ClickHouse Cloud, а затем настройте необходимые источники. Пример последовательности действий см. 
[здесь](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection). +После подключения к интерфейсу HyperDX перейдите в [`Team Settings`](http://localhost:8080/team) и создайте подключение к вашему сервису ClickHouse Cloud, а затем добавьте необходимые источники. Пример последовательности действий см. [здесь](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection). -## Настройка OTel collector {#configuring-collector} +## Настройка коллектора OTel {#configuring-collector} -Конфигурацию OTel collector при необходимости можно изменить — см. раздел ["Изменение конфигурации OTel collector"](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration). +При необходимости конфигурацию коллектора OTel можно изменить — см. раздел [«Изменение конфигурации»](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration). Например: ```shell -docker run -e OTEL_AGENT_FEATURE_GATE_ARG='--feature-gates=clickhouse.json' -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e OTEL_AGENT_FEATURE_GATE_ARG='--feature-gates=clickhouse.json' -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md index 9af92b671ce..c15d8007dd4 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md @@ -33,7 +33,6 @@ Docker Compose открывает дополнительные порты для Эти порты обеспечивают интеграцию с широким 
набором источников телеметрии и делают коллектор OpenTelemetry готовым к промышленной эксплуатации для различных сценариев ингестии. - ### Подходит для {#suitable-for} * Локального тестирования @@ -117,7 +116,6 @@ HYPERDX_OPAMP_PORT=4320 HYPERDX_OTEL_EXPORTER_CLICKHOUSE_DATABASE=default ``` - ### Настройка коллектора OTel {#configuring-collector} Конфигурацию коллектора OTel можно изменить при необходимости — см. раздел ["Изменение конфигурации"](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration). diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md index 34f91fdbdae..bc7843a2089 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md @@ -34,7 +34,6 @@ helm install my-clickstack clickstack/clickstack \ --set otel.opampServerUrl="http://my-clickstack-clickstack-app.default.svc.cluster.local:4320" ``` - ### Дополнительные соображения по GKE {#other-gke-considerations} ```yaml @@ -53,7 +52,6 @@ clickhouse: - "10.0.0.0/8" # Резервный вариант для других конфигураций ``` - ## Amazon EKS {#amazon-eks} Для развертывания в EKS рассмотрите следующие распространённые конфигурации: @@ -79,7 +77,6 @@ hyperdx: enabled: true ``` - ## Azure AKS {#azure-aks} Для развертывания в AKS: @@ -97,7 +94,6 @@ clickhouse: - "10.0.0.0/8" ``` - ## Контрольный список для продакшн-развертывания в облаке {#production-cloud-deployment-checklist} Перед развертыванием ClickStack в продакшене у любого провайдера облачных услуг: @@ -127,7 +123,6 @@ hyperdx: memory: 4Gi ``` - ### Высокая доступность {#high-availability} ```yaml @@ -148,7 +143,6 @@ hyperdx: topologyKey: 
kubernetes.io/hostname ``` - ### Персистентное хранилище {#persistent-storage} Убедитесь, что для хранения данных настроены персистентные тома. @@ -167,7 +161,6 @@ clickhouse: * **EKS**: `gp3` или `io2` * **AKS**: `managed-premium` или `managed-csi` - ### Примечания по совместимости с браузерами {#browser-compatibility-notes} Для развертываний, работающих только по HTTP (разработка/тестирование), некоторые браузеры могут показывать ошибки криптографического API из‑за требований к защищённому контексту. Для продуктивных развертываний всегда используйте HTTPS с корректными TLS‑сертификатами, настроенными через конфигурацию входного шлюза. diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md index 3cc6acc72e2..376b9c091a0 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md @@ -34,14 +34,12 @@ hyperdx: helm upgrade my-clickstack clickstack/clickstack -f values.yaml ``` - ### Метод 2: Обновление через `helm upgrade` с флагом `--set` {#api-key-set-flag} ```shell helm upgrade my-clickstack clickstack/clickstack --set hyperdx.apiKey="ваш-api-ключ-здесь" ``` - ### Перезапустите поды, чтобы применить изменения {#restart-pods} После обновления ключа API перезапустите поды, чтобы применить новую конфигурацию: @@ -54,7 +52,6 @@ kubectl rollout restart deployment my-clickstack-clickstack-app my-clickstack-cl Чарт автоматически создаёт секрет Kubernetes (`-app-secrets`) с вашим API-ключом. Дополнительная настройка секрета не требуется, если вы не планируете использовать внешний секрет. 
::: - ## Управление секретами {#secret-management} Для работы с конфиденциальными данными, такими как API-ключи или учетные данные для доступа к базе данных, используйте секреты Kubernetes. @@ -83,7 +80,6 @@ data: kubectl apply -f secrets.yaml ``` - ### Создание пользовательского секрета {#creating-a-custom-secret} Создайте вручную пользовательский секрет Kubernetes: @@ -93,7 +89,6 @@ kubectl create secret generic hyperdx-secret \ --from-literal=API_KEY=my-secret-api-key ``` - ### Ссылка на Secret в values.yaml {#referencing-a-secret} ```yaml @@ -105,7 +100,6 @@ hyperdx: key: API_KEY ``` - ## Настройка входного шлюза {#ingress-setup} Чтобы открыть доступ к интерфейсу и API HyperDX по доменному имени, включите конфигурацию входного шлюза в файле `values.yaml`. @@ -124,7 +118,6 @@ hyperdx: Значение `hyperdx.frontendUrl` должно совпадать с именем хоста входного шлюза и включать протокол (например, `https://hyperdx.yourdomain.com`). Это обеспечивает корректную работу всех сгенерированных ссылок, куки и перенаправлений. ::: - ### Включение TLS (HTTPS) {#enabling-tls} Чтобы защитить развертывание с помощью HTTPS: @@ -149,7 +142,6 @@ hyperdx: tlsSecretName: "hyperdx-tls" ``` - ### Пример конфигурации входного шлюза {#example-ingress-configuration} Для наглядности ниже показан сгенерированный ресурс входного шлюза: @@ -181,7 +173,6 @@ spec: secretName: hyperdx-tls ``` - ### Частые проблемы с входным шлюзом {#common-ingress-pitfalls} **Конфигурация пути и переписывания (rewrite):** @@ -207,7 +198,6 @@ spec: kubectl -n ingress-nginx get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath="{.items[0].spec.containers[0].image}" ``` - ## Входной шлюз для OTel collector {#otel-collector-ingress} Если вам необходимо опубликовать конечные точки OTel collector (для трассировок, метрик и логов) через входной шлюз, используйте конфигурацию `additionalIngresses`. 
Это полезно для отправки телеметрических данных из‑вне кластера или при использовании пользовательского домена для collector. @@ -244,7 +234,6 @@ hyperdx: Если вам не нужно открывать OTel collector во внешний доступ, вы можете пропустить эту конфигурацию. Для большинства пользователей достаточно общей настройки входного шлюза. ::: - ## Диагностика проблем с входным шлюзом {#troubleshooting-ingress} **Проверьте ресурс входного шлюза:** @@ -282,7 +271,6 @@ curl -I https://hyperdx.yourdomain.com/_next/static/chunks/main-xxxx.js * После изменений очистите кэш браузера и кэш CDN/прокси, чтобы избежать использования устаревших версий ресурсов - ## Настройка значений {#customizing-values} Параметры можно настроить с помощью флагов `--set`: @@ -322,7 +310,6 @@ hyperdx: helm install my-clickstack clickstack/clickstack -f values.yaml ``` - ## Дальнейшие шаги {#next-steps} - [Варианты развертывания](/docs/use-cases/observability/clickstack/deployment/helm-deployment-options) — внешние системы и минимальные развертывания diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md index 6c8747c4c75..12f3e87169c 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md @@ -56,7 +56,6 @@ hyperdx: helm install my-clickstack clickstack/clickstack -f values-external-clickhouse.yaml ``` - ### Вариант 2: Внешний секрет (рекомендуется для production) {#external-clickhouse-secret} Для production-развертываний, где вы хотите хранить учетные данные отдельно от конфигурации Helm: @@ -174,7 +173,6 @@ hyperdx: Подробный пример подключения к ClickHouse Cloud см. 
в разделе [«Создание подключения к ClickHouse Cloud»](/docs/use-cases/observability/clickstack/getting-started#create-a-cloud-connection). - ## Внешний OTel collector {#external-otel-collector} Если у вас уже есть инфраструктура OTel collector: @@ -194,7 +192,6 @@ helm install my-clickstack clickstack/clickstack -f values-external-otel.yaml См. раздел [Настройка входного шлюза](/docs/use-cases/observability/clickstack/deployment/helm-configuration#otel-collector-ingress) для инструкций по публикации конечных точек OTel collector через входной шлюз. - ## Минимальное развертывание {#minimal-deployment} Для организаций с уже существующей инфраструктурой достаточно развернуть только HyperDX: @@ -233,7 +230,6 @@ hyperdx: helm install my-clickstack clickstack/clickstack -f values-minimal.yaml ``` - ## Следующие шаги {#next-steps} - [Руководство по настройке](/docs/use-cases/observability/clickstack/deployment/helm-configuration) — ключи API, секреты и настройка входного шлюза diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md index 5004bdf87fa..a2e71580960 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md @@ -36,7 +36,6 @@ Helm-чарт для HyperDX можно найти [здесь](https://github.c * Настройку TLS и Входного шлюза * Управление секретами и настройку аутентификации - ### Подходит для {#suitable-for} * Пилотных проектов (proof of concept) @@ -262,7 +261,6 @@ helm install my-clickstack clickstack/clickstack -f values.yaml Для продакшн-развертываний с конфигурацией на основе секретов, внешними экземплярами OTel collector или минимальными конфигурациями см. 
[руководство «Варианты развертывания»](/docs/use-cases/observability/clickstack/deployment/helm-deployment-options). ::: - ## Примечания для продакшена По умолчанию этот chart также устанавливает ClickHouse и OTel collector. Однако для продакшена рекомендуется управлять ClickHouse и OTel collector отдельно. @@ -283,7 +281,6 @@ helm install my-clickstack clickstack/clickstack \ * [Облачные развертывания](/docs/use-cases/observability/clickstack/deployment/helm-cloud) — облачные настройки и чек-лист для продакшна ::: - ## Конфигурация задач {#task-configuration} По умолчанию в чарте настроена одна задача в виде cronjob, отвечающая за проверку необходимости срабатывания алертов. Ниже приведены её параметры конфигурации: @@ -308,7 +305,6 @@ helm upgrade my-clickstack clickstack/clickstack -f values.yaml helm search repo clickstack ``` - ## Удаление ClickStack Чтобы удалить развертывание: @@ -319,23 +315,20 @@ helm uninstall my-clickstack Это удалит все ресурсы, связанные с релизом, однако постоянные данные (если они есть) могут остаться. - ## Устранение неполадок {#troubleshooting} -### Проверка логов +### Проверка логов {#customizing-values} ```shell kubectl logs -l app.kubernetes.io/name=clickstack ``` - -### Устранение неполадок при неудачной установке +### Устранение неполадок при неудачной установке {#using-secrets} ```shell helm install my-clickstack clickstack/clickstack --debug --dry-run ``` - ### Проверка развертывания ```shell @@ -379,7 +372,6 @@ helm install my-clickstack clickstack/clickstack \ --set "otel.env[0].value=--feature-gates=clickhouse.json" ``` - ## См. 
также {#related-documentation} ### Руководства по развертыванию {#deployment-guides} diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md index c143a8dc679..8ac306dddd3 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md @@ -38,7 +38,6 @@ import JSONSupport from '@site/i18n/ru/docusaurus-plugin-content-docs/current/us В этом режиме ингестия данных полностью остаётся на стороне пользователя. Вы можете выполнять приём данных в ClickHouse Cloud с помощью собственного развернутого коллектора OpenTelemetry, прямого приёма из клиентских библиотек, нативных табличных движков ClickHouse (таких как Kafka или S3), ETL-пайплайнов или ClickPipes — управляемого сервиса ингестии ClickHouse Cloud. Такой подход обеспечивает самый простой и высокопроизводительный способ эксплуатации ClickStack. - ### Подходит для {#suitable-for} Этот паттерн развертывания оптимален в следующих сценариях: diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md index ae7aa0922ef..5aa15ef371d 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md @@ -23,7 +23,6 @@ HyperDX может использоваться независимо от ост В этом режиме ингестия данных полностью остаётся на стороне пользователя. 
Вы можете осуществлять приём данных в ClickHouse, используя собственный развёрнутый OpenTelemetry collector, прямую ингестию из клиентских библиотек, родные для ClickHouse движки таблиц (такие как Kafka или S3), ETL‑конвейеры или управляемые сервисы ингестии, такие как ClickPipes. Такой подход обеспечивает максимальную гибкость и подходит командам, которые уже эксплуатируют ClickHouse и хотят добавить HyperDX поверх него для визуализации, поиска и оповещений. - ### Подходит для {#suitable-for} - Существующие пользователи ClickHouse diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md index 2e7fc58b51a..c66c9295967 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md @@ -23,7 +23,6 @@ import JSONSupport from '@site/i18n/ru/docusaurus-plugin-content-docs/current/us **Однако в этой сборке HyperDX аутентификация пользователей отключена** - ### Подходит для {#suitable-for} * Демонстраций @@ -37,31 +36,31 @@ import JSONSupport from '@site/i18n/ru/docusaurus-plugin-content-docs/current/us ### Развертывание с помощью Docker - В локальном режиме интерфейс HyperDX разворачивается на порту 8080. + В локальном режиме интерфейс HyperDX запускается на порту 8080. ```shell - docker run -p 8080:8080 docker.hyperdx.io/hyperdx/hyperdx-local + docker run -p 8080:8080 clickhouse/clickstack-local:latest ``` ### Перейдите в интерфейс HyperDX - Откройте [http://localhost:8080](http://localhost:8080), чтобы получить доступ к интерфейсу HyperDX. + Перейдите по адресу [http://localhost:8080](http://localhost:8080), чтобы получить доступ к интерфейсу HyperDX. 
- **Вам не потребуется создавать пользователя, так как аутентификация в этом режиме развертывания отключена.** + **Вам не будет предложено создать пользователя, так как аутентификация не включена в этом режиме развертывания.** - Подключитесь к своему внешнему кластеру ClickHouse, например ClickHouse Cloud. + Подключитесь к собственному внешнему кластеру ClickHouse, например ClickHouse Cloud. - Создайте источник, оставьте все значения по умолчанию и заполните поле `Table` значением `otel_logs`. Все остальные параметры должны быть определены автоматически, после чего вы сможете нажать `Save New Source`. + Создайте источник, сохраните все значения по умолчанию и заполните поле `Table` значением `otel_logs`. Все остальные настройки должны определиться автоматически, после чего можно нажать `Save New Source`. - + -Для образа, предназначенного только для локального режима, пользователям нужно задать параметр `BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true`, например: +Для образа, предназначенного только для локального режима, пользователям нужно задать только параметр `BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true`, например: ```shell -docker run -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 docker.hyperdx.io/hyperdx/hyperdx-local +docker run -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 clickhouse/clickstack-local:latest ``` diff --git a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md index b67dfdb71d9..af153e126fc 100644 --- a/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md +++ b/i18n/ru/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md @@ -23,7 +23,6 @@ import dashboard_kubernetes from '@site/static/images/use-cases/observability/hy - - ## 性能基准测试 {#performance-benchmarks} 
chDB 在各种场景中都能提供卓越的性能: - - * **[嵌入式引擎的 ClickBench 基准测试](https://benchmark.clickhouse.com/#eyJzeXN0ZW0iOnsiQXRoZW5hIChwYXJ0aXRpb25lZCkiOnRydWUsIkF0aGVuYSAoc2luZ2xlKSI6dHJ1ZSwiQXVyb3JhIGZvciBNeVNRTCI6dHJ1ZSwiQXVyb3JhIGZvciBQb3N0Z3JlU1FMIjp0cnVlLCJCeXRlSG91c2UiOnRydWUsImNoREIiOnRydWUsIkNpdHVzIjp0cnVlLCJjbGlja2hvdXNlLWxvY2FsIChwYXJ0aXRpb25lZCkiOnRydWUsImNsaWNraG91c2UtbG9jYWwgKHNpbmdsZSkiOnRydWUsIkNsaWNrSG91c2UiOnRydWUsIkNsaWNrSG91c2UgKHR1bmVkKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoenN0ZCkiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQiOnRydWUsIkNsaWNrSG91c2UgKHdlYikiOnRydWUsIkNyYXRlREIiOnRydWUsIkRhdGFiZW5kIjp0cnVlLCJEYXRhRnVzaW9uIChzaW5nbGUpIjp0cnVlLCJBcGFjaGUgRG9yaXMiOnRydWUsIkRydWlkIjp0cnVlLCJEdWNrREIgKFBhcnF1ZXQpIjp0cnVlLCJEdWNrREIiOnRydWUsIkVsYXN0aWNzZWFyY2giOnRydWUsIkVsYXN0aWNzZWFyY2ggKHR1bmVkKSI6ZmFsc2UsIkdyZWVucGx1bSI6dHJ1ZSwiSGVhdnlBSSI6dHJ1ZSwiSHlkcmEiOnRydWUsIkluZm9icmlnaHQiOnRydWUsIktpbmV0aWNhIjp0cnVlLCJNYXJpYURCIENvbHVtblN0b3JlIjp0cnVlLCJNYXJpYURCIjpmYWxzZSwiTW9uZXREQiI6dHJ1ZSwiTW9uZ29EQiI6dHJ1ZSwiTXlTUUwgKE15SVNBTSkiOnRydWUsIk15U1FMIjp0cnVlLCJQaW5vdCI6dHJ1ZSwiUG9zdGdyZVNRTCI6dHJ1ZSwiUG9zdGdyZVNRTCAodHVuZWQpIjpmYWxzZSwiUXVlc3REQiAocGFydGl0aW9uZWQpIjp0cnVlLCJRdWVzdERCIjp0cnVlLCJSZWRzaGlmdCI6dHJ1ZSwiU2VsZWN0REIiOnRydWUsIlNpbmdsZVN0b3JlIjp0cnVlLCJTbm93Zmxha2UiOnRydWUsIlNRTGl0ZSI6dHJ1ZSwiU3RhclJvY2tzIjp0cnVlLCJUaW1lc2NhbGVEQiAoY29tcHJlc3Npb24pIjp0cnVlLCJUaW1lc2NhbGVEQiI6dHJ1ZX0sInR5cGUiOnsic3RhdGVsZXNzIjpmYWxzZSwibWFuYWdlZCI6ZmFsc2UsIkphdmEiOmZhbHNlLCJjb2x1bW4tb3JpZW50ZWQiOmZhbHNlLCJDKysiOmZhbHNlLCJNeVNRTCBjb21wYXRpYmxlIjpmYWxzZSwicm93LW9yaWVudGVkIjpmYWxzZSwiQyI6ZmFsc2UsIlBvc3RncmVTUUwgY29tcGF0aWJsZSI6ZmFsc2UsIkNsaWNrSG91c2UgZGVyaXZhdGl2ZSI6ZmFsc2UsImVtYmVkZGVkIjp0cnVlLCJzZXJ2ZXJsZXNzIjpmYWxzZSwiUnVzdCI6ZmFsc2UsInNlYXJjaCI6ZmFsc2UsImRvY3VtZW50IjpmYWxzZSwidGltZS1zZXJpZXMiOmZhbHNlfSwibWFjaGluZSI6eyJzZXJ2ZXJsZXNzIjp0cnVlLCIxNmFjdSI6dHJ1ZSwiTCI6dHJ1ZSwiTSI6dHJ1ZSwiUyI6dHJ1ZSwiWFMiOnRydWUsImM2YS5tZXRhbCwgNTAwZ2IgZ3AyIjp0cnVlLCJjNmEuNHhsYXJnZSwgNTAwZ2IgZ3AyIjp0cnVlLCJjNS40eGxhcmdlLCA1MDBnYiBncDI
iOnRydWUsIjE2IHRocmVhZHMiOnRydWUsIjIwIHRocmVhZHMiOnRydWUsIjI0IHRocmVhZHMiOnRydWUsIjI4IHRocmVhZHMiOnRydWUsIjMwIHRocmVhZHMiOnRydWUsIjQ4IHRocmVhZHMiOnRydWUsIjYwIHRocmVhZHMiOnRydWUsIm01ZC4yNHhsYXJnZSI6dHJ1ZSwiYzVuLjR4bGFyZ2UsIDIwMGdiIGdwMiI6dHJ1ZSwiYzZhLjR4bGFyZ2UsIDE1MDBnYiBncDIiOnRydWUsImRjMi44eGxhcmdlIjp0cnVlLCJyYTMuMTZ4bGFyZ2UiOnRydWUsInJhMy40eGxhcmdlIjp0cnVlLCJyYTMueGxwbHVzIjp0cnVlLCJTMjQiOnRydWUsIlMyIjp0cnVlLCIyWEwiOnRydWUsIjNYTCI6dHJ1ZSwiNFhMIjp0cnVlLCJYTCI6dHJ1ZX0sImNsdXN0ZXJfc2l6ZSI6eyIxIjp0cnVlLCIyIjp0cnVlLCI0Ijp0cnVlLCI4Ijp0cnVlLCIxNiI6dHJ1ZSwiMzIiOnRydWUsIjY0Ijp0cnVlLCIxMjgiOnRydWUsInNlcnZlcmxlc3MiOnRydWUsInVuZGVmaW5lZCI6dHJ1ZX0sIm1ldHJpYyI6ImhvdCIsInF1ZXJpZXMiOlt0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlXX0=)** - 综合性能对比 * **[DataFrame 处理性能](https://colab.research.google.com/drive/1FogLujJ_-ds7RGurDrUnK-U0IW8a8Qd0)** - 与其他 DataFrame 库的性能对比分析 * **[DataFrame 
Benchmark](https://benchmark.clickhouse.com/#eyJzeXN0ZW0iOnsiQWxsb3lEQiI6dHJ1ZSwiQWxsb3lEQiAodHVuZWQpIjp0cnVlLCJBdGhlbmEgKHBhcnRpdGlvbmVkKSI6dHJ1ZSwiQXRoZW5hIChzaW5nbGUpIjp0cnVlLCJBdXJvcmEgZm9yIE15U1FMIjp0cnVlLCJBdXJvcmEgZm9yIFBvc3RncmVTUUwiOnRydWUsIkJ5Q29uaXR5Ijp0cnVlLCJCeXRlSG91c2UiOnRydWUsImNoREIgKERhdGFGcmFtZSkiOnRydWUsImNoREIgKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiY2hEQiI6dHJ1ZSwiQ2l0dXMiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQgKGF3cykiOnRydWUsIkNsaWNrSG91c2UgQ2xvdWQgKGF6dXJlKSI6dHJ1ZSwiQ2xpY2tIb3VzZSBDbG91ZCAoZ2NwKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoZGF0YSBsYWtlLCBwYXJ0aXRpb25lZCkiOnRydWUsIkNsaWNrSG91c2UgKGRhdGEgbGFrZSwgc2luZ2xlKSI6dHJ1ZSwiQ2xpY2tIb3VzZSAoUGFycXVldCwgcGFydGl0aW9uZWQpIjp0cnVlLCJDbGlja0hvdXNlIChQYXJxdWV0LCBzaW5nbGUpIjp0cnVlLCJDbGlja0hvdXNlICh3ZWIpIjp0cnVlLCJDbGlja0hvdXNlIjp0cnVlLCJDbGlja0hvdXNlICh0dW5lZCkiOnRydWUsIkNsaWNrSG91c2UgKHR1bmVkLCBtZW1vcnkpIjp0cnVlLCJDbG91ZGJlcnJ5Ijp0cnVlLCJDcmF0ZURCIjp0cnVlLCJDcnVuY2h5IEJyaWRnZSBmb3IgQW5hbHl0aWNzIChQYXJxdWV0KSI6dHJ1ZSwiRGF0YWJlbmQiOnRydWUsIkRhdGFGdXNpb24gKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiRGF0YUZ1c2lvbiAoUGFycXVldCwgc2luZ2xlKSI6dHJ1ZSwiQXBhY2hlIERvcmlzIjp0cnVlLCJEcnVpZCI6dHJ1ZSwiRHVja0RCIChEYXRhRnJhbWUpIjp0cnVlLCJEdWNrREIgKFBhcnF1ZXQsIHBhcnRpdGlvbmVkKSI6dHJ1ZSwiRHVja0RCIjp0cnVlLCJFbGFzdGljc2VhcmNoIjp0cnVlLCJFbGFzdGljc2VhcmNoICh0dW5lZCkiOmZhbHNlLCJHbGFyZURCIjp0cnVlLCJHcmVlbnBsdW0iOnRydWUsIkhlYXZ5QUkiOnRydWUsIkh5ZHJhIjp0cnVlLCJJbmZvYnJpZ2h0Ijp0cnVlLCJLaW5ldGljYSI6dHJ1ZSwiTWFyaWFEQiBDb2x1bW5TdG9yZSI6dHJ1ZSwiTWFyaWFEQiI6ZmFsc2UsIk1vbmV0REIiOnRydWUsIk1vbmdvREIiOnRydWUsIk1vdGhlcmR1Y2siOnRydWUsIk15U1FMIChNeUlTQU0pIjp0cnVlLCJNeVNRTCI6dHJ1ZSwiT3hsYSI6dHJ1ZSwiUGFuZGFzIChEYXRhRnJhbWUpIjp0cnVlLCJQYXJhZGVEQiAoUGFycXVldCwgcGFydGl0aW9uZWQpIjp0cnVlLCJQYXJhZGVEQiAoUGFycXVldCwgc2luZ2xlKSI6dHJ1ZSwiUGlub3QiOnRydWUsIlBvbGFycyAoRGF0YUZyYW1lKSI6dHJ1ZSwiUG9zdGdyZVNRTCAodHVuZWQpIjpmYWxzZSwiUG9zdGdyZVNRTCI6dHJ1ZSwiUXVlc3REQiAocGFydGl0aW9uZWQpIjp0cnVlLCJRdWVzdERCIjp0cnVlLCJSZWRzaGlmdCI6dHJ1ZSwiU2luZ2xlU3RvcmUiOnRydWUsIlNub3dmbGFrZSI6dHJ1ZSw
iU1FMaXRlIjp0cnVlLCJTdGFyUm9ja3MiOnRydWUsIlRhYmxlc3BhY2UiOnRydWUsIlRlbWJvIE9MQVAgKGNvbHVtbmFyKSI6dHJ1ZSwiVGltZXNjYWxlREIgKGNvbXByZXNzaW9uKSI6dHJ1ZSwiVGltZXNjYWxlREIiOnRydWUsIlVtYnJhIjp0cnVlfSwidHlwZSI6eyJDIjpmYWxzZSwiY29sdW1uLW9yaWVudGVkIjpmYWxzZSwiUG9zdGdyZVNRTCBjb21wYXRpYmxlIjpmYWxzZSwibWFuYWdlZCI6ZmFsc2UsImdjcCI6ZmFsc2UsInN0YXRlbGVzcyI6ZmFsc2UsIkphdmEiOmZhbHNlLCJDKysiOmZhbHNlLCJNeVNRTCBjb21wYXRpYmxlIjpmYWxzZSwicm93LW9yaWVudGVkIjpmYWxzZSwiQ2xpY2tIb3VzZSBkZXJpdmF0aXZlIjpmYWxzZSwiZW1iZWRkZWQiOmZhbHNlLCJzZXJ2ZXJsZXNzIjpmYWxzZSwiZGF0YWZyYW1lIjp0cnVlLCJhd3MiOmZhbHNlLCJhenVyZSI6ZmFsc2UsImFuYWx5dGljYWwiOmZhbHNlLCJSdXN0IjpmYWxzZSwic2VhcmNoIjpmYWxzZSwiZG9jdW1lbnQiOmZhbHNlLCJzb21ld2hhdCBQb3N0Z3JlU1FMIGNvbXBhdGlibGUiOmZhbHNlLCJ0aW1lLXNlcmllcyI6ZmFsc2V9LCJtYWNoaW5lIjp7IjE2IHZDUFUgMTI4R0IiOnRydWUsIjggdkNQVSA2NEdCIjp0cnVlLCJzZXJ2ZXJsZXNzIjp0cnVlLCIxNmFjdSI6dHJ1ZSwiYzZhLjR4bGFyZ2UsIDUwMGdiIGdwMiI6dHJ1ZSwiTCI6dHJ1ZSwiTSI6dHJ1ZSwiUyI6dHJ1ZSwiWFMiOnRydWUsImM2YS5tZXRhbCwgNTAwZ2IgZ3AyIjp0cnVlLCIxOTJHQiI6dHJ1ZSwiMjRHQiI6dHJ1ZSwiMzYwR0IiOnRydWUsIjQ4R0IiOnRydWUsIjcyMEdCIjp0cnVlLCI5NkdCIjp0cnVlLCJkZXYiOnRydWUsIjcwOEdCIjp0cnVlLCJjNW4uNHhsYXJnZSwgNTAwZ2IgZ3AyIjp0cnVlLCJBbmFseXRpY3MtMjU2R0IgKDY0IHZDb3JlcywgMjU2IEdCKSI6dHJ1ZSwiYzUuNHhsYXJnZSwgNTAwZ2IgZ3AyIjp0cnVlLCJjNmEuNHhsYXJnZSwgMTUwMGdiIGdwMiI6dHJ1ZSwiY2xvdWQiOnRydWUsImRjMi44eGxhcmdlIjp0cnVlLCJyYTMuMTZ4bGFyZ2UiOnRydWUsInJhMy40eGxhcmdlIjp0cnVlLCJyYTMueGxwbHVzIjp0cnVlLCJTMiI6dHJ1ZSwiUzI0Ijp0cnVlLCIyWEwiOnRydWUsIjNYTCI6dHJ1ZSwiNFhMIjp0cnVlLCJYTCI6dHJ1ZSwiTDEgLSAxNkNQVSAzMkdCIjp0cnVlLCJjNmEuNHhsYXJnZSwgNTAwZ2IgZ3AzIjp0cnVlfSwiY2x1c3Rlcl9zaXplIjp7IjEiOnRydWUsIjIiOnRydWUsIjQiOnRydWUsIjgiOnRydWUsIjE2Ijp0cnVlLCIzMiI6dHJ1ZSwiNjQiOnRydWUsIjEyOCI6dHJ1ZSwic2VydmVybGVzcyI6dHJ1ZX0sIm1ldHJpYyI6ImhvdCIsInF1ZXJpZXMiOlt0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx
0cnVlLHRydWUsdHJ1ZSx0cnVlLHRydWUsdHJ1ZSx0cnVlXX0=)** - - - - ## 关于 chDB {#about-chdb} - 在 [博客](https://clickhouse.com/blog/chdb-embedded-clickhouse-rocket-engine-on-a-bicycle)上阅读 chDB 项目诞生的完整故事 @@ -89,8 +72,6 @@ chDB 在各种场景中都能提供卓越的性能: - 使用 [codapi 示例](https://antonz.org/trying-chdb/) 在浏览器中体验 chDB - 更多示例请参见 https://github.com/chdb-io/chdb/tree/main/examples - - ## 许可证 {#license} chDB 在 Apache 许可证第 2.0 版下发布。有关更多信息,请参阅 [LICENSE](https://github.com/chdb-io/chdb/blob/main/LICENSE.txt)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/bun.md b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/bun.md index ec5b872b650..07c020beb4b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/bun.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/bun.md @@ -48,7 +48,7 @@ bun run build chDB-bun 支持两种查询模式:用于一次性操作的临时查询,以及用于维护数据库状态的持久会话。 -### 临时查询 +### 临时查询 {#persistent-sessions} 适用于不需要保留状态的简单一次性查询: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/c.md b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/c.md index 6f5eab8084e..cc09515e49a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/c.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/c.md @@ -7,14 +7,10 @@ keywords: ['chdb', 'c', 'cpp', 'embedded', 'clickhouse', 'sql', 'olap', 'api'] doc_type: 'guide' --- - - # 适用于 C 和 C++ 的 chDB {#chdb-for-c-and-c} chDB 提供原生的 C/C++ API,可将 ClickHouse 的功能直接嵌入到您的应用程序中。该 API 既支持简单查询,也支持高级特性,例如持久连接和查询结果的流式处理。 - - ## 安装 {#installation} ### 步骤 1:安装 libchdb {#install-libchdb} @@ -37,13 +33,11 @@ curl -sL https://lib.chdb.io | bash 将你的应用程序与 chDB 一起编译并链接: - ```bash # 使用 C 编译 {#c-compilation} gcc -o myapp myapp.c -lchdb ``` - # 使用 C++ 编译 {#c-compilation} g++ -o myapp myapp.cpp -lchdb @@ -51,7 +45,6 @@ g++ -o myapp myapp.cpp -lchdb ``` ``` - ## C 语言示例 {#c-examples} ### 基本连接和查询 {#basic-connection-queries} @@ -194,7 +187,6 @@ int main() { 
chdb_destroy_query_result(json_result); ``` - // 美化输出格式 chdb_result* pretty_result = chdb_query(*conn, query, "Pretty"); printf("Pretty Result:\n%.*s\n\n", @@ -209,7 +201,6 @@ return 0; ``` ``` - ## C++ 示例 {#cpp-example} ```cpp @@ -293,7 +284,6 @@ int main() { } ``` - ## 错误处理最佳实践 {#error-handling} ```c @@ -341,7 +331,6 @@ cleanup: } ``` - ## GitHub 仓库 {#github-repository} - **主仓库**: [chdb-io/chdb](https://github.com/chdb-io/chdb) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/go.md b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/go.md index f8363cf2064..a6ae01b0fa7 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/go.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/go.md @@ -7,14 +7,10 @@ keywords: ['chdb', 'go', 'golang', 'embedded', 'clickhouse', 'sql', 'olap'] doc_type: 'guide' --- - - # chDB for Go {#chdb-for-go} chDB-go 为 chDB 提供 Go 语言绑定,使你能够在 Go 应用程序中直接运行 ClickHouse 查询,且完全不依赖任何外部组件。 - - ## 安装 {#installation} ### 第 1 步:安装 libchdb {#install-libchdb} @@ -39,26 +35,20 @@ go install github.com/chdb-io/chdb-go@latest go get github.com/chdb-io/chdb-go ``` - ## 用法 {#usage} ### 命令行界面(CLI) {#cli} chDB-go 包含一个用于快速查询的命令行界面(CLI): - - ```bash # 简单查询 {#simple-query} ./chdb-go "SELECT 123" ``` - # 交互式模式 {#interactive-mode} ./chdb-go - - # 启用持久化存储的交互式模式 {#interactive-mode-with-persistent-storage} ./chdb-go --path /tmp/chdb @@ -242,15 +232,12 @@ func main() { } ``` - **流式查询的优势:** - **内存高效** - 处理大型数据集而无需将所有数据一次性加载到内存中 - **实时处理** - 从第一批数据到达时就可以开始处理 - **支持取消** - 可以使用 `Cancel()` 取消长时间运行的查询 - **错误处理** - 使用 `Error()` 在流式处理中检查错误 - - ## API 文档 {#api-documentation} chDB-go 提供高级和低级 API: @@ -258,8 +245,6 @@ chDB-go 提供高级和低级 API: - **[高级 API 文档](https://github.com/chdb-io/chdb-go/blob/main/chdb.md)** - 推荐用于大多数使用场景 - **[低级 API 文档](https://github.com/chdb-io/chdb-go/blob/main/lowApi.md)** - 适用于需要细粒度控制的高级使用场景 - - ## 系统要求 {#requirements} - Go 1.21 或更新版本 diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md index a9929f01bc0..6e82d7efb12 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/nodejs.md @@ -7,21 +7,16 @@ keywords: ['chdb', 'nodejs', 'javascript', 'embedded', 'clickhouse', 'sql', 'ola doc_type: 'guide' --- - - # 适用于 Node.js 的 chDB {#chdb-for-nodejs} chDB-node 为 chDB 提供了 Node.js 绑定,让你能够在 Node.js 应用中直接运行 ClickHouse 查询,而无需任何外部依赖。 - - ## 安装 {#installation} ```bash npm 安装 chdb ``` - ## 用法 {#usage} chDB-node 支持两种查询模式:用于简单操作的独立查询,以及用于维护数据库状态的会话查询。 @@ -146,7 +141,6 @@ try { } ``` - ## 错误处理 {#error-handling} 在使用 chDB 时,请务必妥善处理错误: @@ -192,7 +186,6 @@ function safeSessionQuery() { safeSessionQuery(); ``` - ## GitHub 仓库 {#github-repository} - **GitHub 仓库**: [chdb-io/chdb-node](https://github.com/chdb-io/chdb-node) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/python.md b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/python.md index df3df56071d..4448eafdfd6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/python.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/python.md @@ -18,7 +18,6 @@ doc_type: 'guide' pip install chdb ``` - ## 使用方法 {#usage} ### 命令行界面 {#command-line-interface} @@ -33,7 +32,6 @@ python3 -m chdb "SELECT 1, 'abc'" Pretty python3 -m chdb "SELECT version()" JSON ``` - ### Python 基本用法 {#basic-python-usage} ```python @@ -49,7 +47,6 @@ print(f"读取字节数:{result.bytes_read()}") print(f"执行时间:{result.elapsed()} 秒") ``` - ### 基于连接的 API(推荐使用) {#connection-based-api} 为更好地进行资源管理并提升性能: @@ -84,7 +81,6 @@ cur.close() conn.close() ``` - ## 数据接入方式 {#data-input} ### 基于文件的数据源 {#file-based-data-sources} @@ -118,7 +114,6 @@ result = chdb.query(""" """, 'Pretty') ``` - ### 输出格式示例 {#output-format-examples} ```python @@ -139,7 +134,6 @@ pretty_result = 
chdb.query('SELECT * FROM system.numbers LIMIT 3', 'Pretty') print(pretty_result) ``` - ### DataFrame 操作 {#dataframe-operations} #### 旧版 DataFrame API {#legacy-dataframe-api} @@ -164,7 +158,6 @@ summary = result_df.query('SELECT b, sum(a) FROM __table__ GROUP BY b') print(summary) ``` - #### Python 表引擎(推荐) {#python-table-engine-recommended} ```python @@ -212,7 +205,6 @@ chdb.query(""" """).show() ``` - ### 有状态会话 {#stateful-sessions} 会话在多次操作之间保持查询状态,从而支持复杂的工作流: @@ -267,7 +259,6 @@ print(result) sess.close() # 可选 - 对象删除时自动关闭 ``` - ### 高级会话功能 {#advanced-session-features} ```python @@ -288,7 +279,6 @@ result = sess.query(""" 另请参见:[test_stateful.py](https://github.com/chdb-io/chdb/blob/main/tests/test_stateful.py)。 - ### Python DB-API 2.0 接口 {#python-db-api-20} 面向现有 Python 应用程序的标准数据库接口,以确保兼容性: @@ -337,7 +327,6 @@ cursor.executemany( ) ``` - ### 用户自定义函数(UDF) {#user-defined-functions} 使用自定义 Python 函数扩展 SQL: @@ -378,7 +367,6 @@ result = query(""" print(result) ``` - #### 具有自定义返回类型的高级 UDF {#advanced-udf-custom-return-types} ```python @@ -413,7 +401,6 @@ result = query(""" print(result) ``` - #### UDF 最佳实践 {#udf-best-practices} 1. 
**无状态函数**:UDF 应为无副作用的纯函数 @@ -449,7 +436,6 @@ query(""" """) ``` - ### 流式查询处理 {#streaming-queries} 以固定内存占用处理大规模数据集: @@ -520,7 +506,6 @@ stream.close() sess.close() ``` - ### Python 表引擎 {#python-table-engine} #### 查询 Pandas DataFrame 数据 {#query-pandas-dataframes} @@ -578,7 +563,6 @@ window_result = chdb.query(""" print(window_result) ``` - #### 使用 PyReader 的自定义数据源 {#custom-data-sources-pyreader} 为特定数据源实现自定义数据读取器: @@ -686,7 +670,6 @@ complex_json = chdb.query(""" print(complex_json) ```` - ## 性能和优化 {#performance-optimization} ### 基准测试 {#benchmarks} @@ -772,7 +755,6 @@ stream.close() sess.close() ``` - ## GitHub 仓库 {#github-repository} - **主仓库**:[chdb-io/chdb](https://github.com/chdb-io/chdb) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/rust.md b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/rust.md index 553f272dcd3..333bbb094e4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/rust.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/chdb/install/rust.md @@ -7,14 +7,10 @@ keywords: ['chdb', '嵌入式', 'clickhouse-lite', 'rust', '安装', 'ffi', '绑 doc_type: 'guide' --- - - # 适用于 Rust 的 chDB {#chdb-for-rust} chDB-rust 为 chDB 提供了实验性的 FFI(外部函数接口)绑定,使你可以在 Rust 应用程序中直接运行 ClickHouse 查询,而无需任何外部依赖。 - - ## 安装 {#installation} ### 安装 libchdb {#install-libchdb} @@ -25,7 +21,6 @@ chDB-rust 为 chDB 提供了实验性的 FFI(外部函数接口)绑定,使 curl -sL https://lib.chdb.io | bash ``` - ## 使用方法 {#usage} chDB Rust 提供无状态和有状态两种查询执行模式。 @@ -114,7 +109,6 @@ fn main() -> Result<(), Box> { } ``` - ## 构建与测试 {#building-testing} ### 构建项目 {#build-the-project} @@ -137,7 +131,6 @@ cargo test * `tempdir` (v0.3.7) - 用于测试中的临时目录管理 * `thiserror` (v1) - 错误处理工具库 - ## 错误处理 {#error-handling} chDB Rust 通过 `Error` 枚举提供了完善的错误处理机制: @@ -164,7 +157,6 @@ match execute("SELECT 1", None) { } ``` - ## GitHub 仓库 {#github-repository} 该项目的 GitHub 仓库位于 [chdb-io/chdb-rust](https://github.com/chdb-io/chdb-rust)。 diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md index 7af3e78af2c..363d7e9a053 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/01_cloud_tiers.md @@ -7,8 +7,6 @@ keywords: ['云服务层级', '服务方案', '云定价层级', '云服务级 doc_type: 'reference' --- - - # ClickHouse Cloud 服务层级 {#clickhouse-cloud-tiers} ClickHouse Cloud 提供多个不同的服务层级。 @@ -17,8 +15,6 @@ ClickHouse Cloud 提供多个不同的服务层级。 **云服务层级概览:** - -
@@ -224,8 +220,6 @@ ClickHouse Cloud 提供多个不同的服务层级。 - - ## 基础版 {#basic} - 成本效益高的选项,支持单副本部署。 @@ -236,8 +230,6 @@ ClickHouse Cloud 提供多个不同的服务层级。 可以将服务升级到 Scale 或 Enterprise 层级以实现扩缩容。 ::: - - ## Scale {#scale} 专为需要更高 SLA 保证(2 个及以上副本部署)、可伸缩性和高级安全性的工作负载而设计。 @@ -248,8 +240,6 @@ ClickHouse Cloud 提供多个不同的服务层级。 - [灵活伸缩](/manage/scaling) 选项(纵向扩容/缩容、横向扩容/缩容)。 - [可配置备份](/cloud/manage/backups/configurable-backups)。 - - ## 企业版 {#enterprise} 面向大规模、关键业务部署,满足严苛的安全与合规要求。 @@ -268,8 +258,6 @@ ClickHouse Cloud 提供多个不同的服务层级。 在所有三个层级中,单副本服务均被设计为固定规格(`8 GiB`、`12 GiB`) ::: - - ## 升级到不同的层级 {#upgrading-to-a-different-tier} 您可以随时从 Basic 升级到 Scale,或从 Scale 升级到 Enterprise。降级层级则需要先停用高级功能。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md index 06c4573fce9..124b7dd734a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/02_query-insights.md @@ -14,29 +14,22 @@ import insights_recent from '@site/static/images/cloud/sqlconsole/insights_recen import insights_drilldown from '@site/static/images/cloud/sqlconsole/insights_drilldown.png'; import insights_query_info from '@site/static/images/cloud/sqlconsole/insights_query_info.png'; - # 查询洞察 {#query-insights} **查询洞察(Query Insights)** 功能通过多种可视化和表格,使 ClickHouse 内置的查询日志更易用。ClickHouse 的 `system.query_log` 表是进行查询优化、调试,以及监控整个集群健康状况和性能的关键信息来源。 - - ## 查询概览 {#query-overview} 选择某个服务后,左侧边栏中的 **Monitoring** 导航项会展开,显示一个新的 **Query insights** 子项。点击该选项会打开新的 Query Insights 页面: - - ## 顶层指标 {#top-level-metrics} 顶部的统计卡片展示了在所选时间范围内的一些基础顶层查询指标。在其下方,可以看到三个时间序列图表,分别展示在选定时间窗口内按查询类型(select、insert、other)划分的查询量、延迟和错误率。延迟图表还可以进一步调整,以显示 p50、p90 和 p99 分位延迟: - - ## 最近查询 {#recent-queries} 在顶层指标下方会显示一个表格,其中包含在所选时间窗口内的查询日志记录(按标准化查询哈希和用户分组): @@ -45,8 +38,6 @@ 
import insights_query_info from '@site/static/images/cloud/sqlconsole/insights_q 可以按任意可用字段筛选和排序最近查询。还可以配置该表以显示或隐藏其他字段,例如表名、p90 和 p99 延迟。 - - ## 查询下钻 {#query-drill-down} 在“最近查询”表格中选择某个查询时,会打开一个飞出面板,其中包含该查询特有的指标和信息: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md index 6de5eb35c4b..eb7c0fbcf5a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/03_sql_console_features/04_dashboards.md @@ -20,13 +20,10 @@ import dashboards_9 from '@site/static/images/cloud/dashboards/9_dashboards.png' import dashboards_10 from '@site/static/images/cloud/dashboards/10_dashboards.png'; import dashboards_11 from '@site/static/images/cloud/dashboards/11_dashboards.png'; - # 仪表板 {#dashboards} SQL Console 的仪表板功能允许您收集并共享来自已保存查询的可视化结果。您可以先保存并可视化查询,将查询的可视化结果添加到仪表板中,并使用查询参数让仪表板具备交互功能。 - - ## 核心概念 {#core-concepts} ### 查询共享 {#query-sharing} @@ -39,14 +36,10 @@ SQL Console 的仪表板功能允许您收集并共享来自已保存查询的 你可以在可视化设置中选择“filter”类型,通过 **Global** 全局过滤器侧边栏来切换查询参数输入。你也可以通过在仪表板上链接到其他对象(例如表)来切换查询参数输入。请参阅下文快速入门指南中的“[配置过滤器](/cloud/manage/dashboards#configure-a-filter)”部分。 - - ## 快速开始 {#quick-start} 我们来创建一个仪表板,借助 [query\_log](/operations/system-tables/query_log) 系统表来监控我们的 ClickHouse 服务。 - - ## 快速开始 {#quick-start-1} ### 创建已保存查询 {#create-a-saved-query} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md index 0efcc03c683..2b99af79993 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/automatic_scaling/01_auto_scaling.md @@ -16,7 +16,6 @@ import scaling_configure from '@site/static/images/cloud/manage/scaling-configur import scaling_memory_allocation from '@site/static/images/cloud/manage/scaling-memory-allocation.png'; import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' - # 自动伸缩 {#automatic-scaling} 伸缩是指根据客户端需求调整可用资源的能力。Scale 和 Enterprise 层级(标准 1:4 配置)的服务可以通过以编程方式调用 API,或在 UI 中更改设置来进行水平伸缩,从而调整系统资源。这些服务也可以进行**自动垂直伸缩**,以满足应用程序的需求。 @@ -27,8 +26,6 @@ import ScalePlanFeatureBadge from '@theme/badges/ScalePlanFeatureBadge' Scale 和 Enterprise 层级同时支持单副本和多副本服务,而 Basic 层级仅支持单副本服务。单副本服务的规格是固定的,不支持垂直或水平伸缩。用户可以升级到 Scale 或 Enterprise 层级来对其服务进行伸缩。 ::: - - ## ClickHouse Cloud 中的扩缩容工作原理 {#how-scaling-works-in-clickhouse-cloud} 目前,ClickHouse Cloud 在 Scale 层级服务上支持垂直自动扩缩容和手动水平扩缩容。 @@ -89,8 +86,6 @@ Scale 和 Enterprise 服务支持基于 CPU 和内存使用情况的自动扩缩 不过,仍然可以通过联系支持团队对这些服务进行垂直扩缩容。 ::: - - ## 手动水平扩展 {#manual-horizontal-scaling} @@ -129,8 +124,6 @@ Scale 和 Enterprise 服务支持基于 CPU 和内存使用情况的自动扩缩 - - ## 自动空闲 {#automatic-idling} 在 **Settings** 页面中,你还可以选择是否允许在服务处于非活动状态时自动进入空闲,如上图所示(即服务当前未执行任何用户提交的查询时)。自动空闲可以降低服务成本,因为当服务暂停时,你无需为计算资源付费。 @@ -144,8 +137,6 @@ Scale 和 Enterprise 服务支持基于 CPU 和内存使用情况的自动扩缩 仅当你的使用场景可以接受查询开始响应前的一段延迟时,才应使用自动空闲,因为当服务被暂停时,与服务的连接会超时。自动空闲非常适合不经常使用、且可以容忍一定延迟的服务。不建议在为高频使用的、面向客户的功能提供支撑的服务上启用自动空闲。 ::: - - ## 处理工作负载峰值 {#handling-bursty-workloads} 如果您预期即将出现工作负载峰值,可以使用 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md index f90c3485bc6..c44fc76b99b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/deployment-options.md @@ -6,15 +6,11 @@ keywords: 
['自有云(BYOC,bring your own cloud)', 'byoc', '私有', '政 doc_type: 'reference' --- - - # ClickHouse 部署选项 {#clickhouse-deployment-options} ClickHouse 提供多种部署选项,以满足不同客户的需求,在控制能力、合规性以及运维开销方面提供不同程度的选择。 本文档概述了可用的各类部署类型,帮助用户选择最契合其特定架构偏好、合规要求和资源管理策略的最优解决方案。 - - ## ClickHouse Cloud {#clickhouse-cloud} ClickHouse Cloud 是一项完全托管的云原生服务,在免除自建与自管运维复杂性的前提下,提供 ClickHouse 的强大功能与高性能。 @@ -24,24 +20,18 @@ ClickHouse Cloud 负责基础设施的全部环节,包括资源供应、扩缩 了解更多关于 [ClickHouse Cloud](/getting-started/quick-start/cloud) 的信息。 - - ## 自带云环境(Bring Your Own Cloud) {#byoc} ClickHouse 自带云环境(Bring Your Own Cloud,简称 BYOC)使组织能够在自己的云环境中部署和管理 ClickHouse,同时利用托管服务层。此选项在 ClickHouse Cloud 的全托管体验与完全自管部署的完全控制之间架起桥梁。借助 ClickHouse BYOC,用户可以保留对其数据、基础设施和安全策略的控制权,以满足特定合规和监管要求,同时将补丁更新、监控和扩缩容等运维任务交由 ClickHouse 负责。该模式在提供私有云部署灵活性的同时,兼具托管服务的优势,适用于在安全、治理和数据驻留方面具有严格要求的大规模企业级部署。 进一步了解[自带云环境(Bring Your Own Cloud)](/cloud/reference/byoc/overview)。 - - ## ClickHouse Private {#clickhouse-private} ClickHouse Private 是一种自部署的 ClickHouse 版本,采用与 ClickHouse Cloud 相同的专有技术。此选项提供最大程度的控制权,非常适合具有严格合规、网络与安全要求的组织,以及具备运维专长、能够自行管理基础设施的团队。它受益于在 ClickHouse Cloud 环境中经过充分验证的定期更新与升级、功能丰富的产品路线图,并由我们的专家支持团队全程支援。 进一步了解 [ClickHouse Private](/cloud/infrastructure/clickhouse-private)。 - - ## ClickHouse Government {#clickhouse-government} ClickHouse Government 是一种自部署的 ClickHouse 版本,专为需要隔离且具备合规认证环境的政府机构和公共部门组织的独特且严格需求而设计。此部署选项提供高度安全、合规且隔离的环境,重点通过使用 OpenSSL 实现 FIPS 140-3 合规性,并提供额外的系统加固和漏洞管理。在充分利用 ClickHouse Cloud 强大能力的同时,它集成了专门的功能和配置,以满足政府机构特定的运营和安全要求。借助 ClickHouse Government,机构可以在受控且经认证的基础设施中对敏感数据进行高性能分析,并获得针对公共部门需求量身定制的专家支持。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md index 0ac98f2c31a..7b0d0a7dded 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/replica-aware-routing.md @@ -8,7 +8,6 @@ doc_type: 'guide' import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; - # 副本感知路由 {#replica-aware-routing} @@ -27,8 +26,6 @@ import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; 请注意,原始主机名仍然会使用 `LEAST_CONNECTION` 负载均衡,这是默认的路由算法。 - - ## 副本感知路由的限制 {#limitations-of-replica-aware-routing} ### 副本感知路由不保证隔离 {#replica-aware-routing-does-not-guarantee-isolation} @@ -39,8 +36,6 @@ import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; 客户需要手动添加一条 DNS 记录,才能使新的主机名模式的名称解析生效。如果使用不当,这可能会导致服务器负载不均衡。 - - ## 配置副本感知路由 {#configuring-replica-aware-routing} 如需启用副本感知路由功能,请联系[我们的支持团队](https://clickhouse.com/support/program)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md index 489795db16f..42b421c259c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-catalog.md @@ -7,8 +7,6 @@ description: '描述 ClickHouse Cloud 中的 Shared Catalog 组件和 Shared 数 doc_type: 'reference' --- - - # 共享目录和共享数据库引擎 {#shared-catalog-and-shared-database-engine} **仅在 ClickHouse Cloud(以及官方一方合作云服务)中提供** @@ -24,8 +22,6 @@ Shared Catalog **不会复制表中的数据本身**,而是通过复制 DDL - MySQL - DataLakeCatalog - - ## 架构与元数据存储 {#architecture-and-metadata-storage} Shared Catalog 中的所有元数据和 DDL 查询历史记录都集中存储在 ZooKeeper 中,本地磁盘上不会持久化任何数据。此架构可确保: @@ -34,8 +30,6 @@ Shared Catalog 中的所有元数据和 DDL 查询历史记录都集中存储在 - 计算节点保持无状态 - 副本能够快速、可靠地启动和初始化 - - ## 共享数据库引擎 {#shared-database-engine} **共享数据库引擎(Shared database engine)** 与 Shared Catalog 协同工作,用于管理其中的表使用诸如 `SharedMergeTree` 之类**无状态表引擎(stateless table engines)** 的数据库。这些表引擎不会将持久状态写入磁盘,并且兼容动态计算环境。 @@ -69,8 +63,6 @@ Shared Catalog 中的所有元数据和 DDL 
查询历史记录都集中存储在 - **集中、版本化的元数据状态** Shared Catalog 在 ZooKeeper 中存储单一事实来源(single source of truth)。当副本启动时,它会获取最新状态并应用差异以实现一致性。在查询执行期间,系统可以等待其他副本至少达到所需的元数据版本,以确保正确性。 - - ## 在 ClickHouse Cloud 中的使用 {#usage-in-clickhouse-cloud} 对于终端用户而言,使用 Shared Catalog 和 Shared 数据库引擎无需任何额外配置。数据库的创建方式与以往完全相同: @@ -81,7 +73,6 @@ CREATE DATABASE my_database; ClickHouse Cloud 会自动为数据库分配 Shared 数据库引擎。在此类数据库中创建的、使用无状态引擎的任意表,都将自动具备 Shared Catalog 的复制与协调能力。 - ## 概要 {#summary} Shared Catalog 和 Shared 数据库引擎具备以下特性: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md index 2a33d56ef10..2065467d1b4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/shared-merge-tree.md @@ -11,7 +11,6 @@ import shared_merge_tree from '@site/static/images/cloud/reference/shared-merge- import shared_merge_tree_2 from '@site/static/images/cloud/reference/shared-merge-tree-2.png'; import Image from '@theme/IdealImage'; - # SharedMergeTree 表引擎 {#sharedmergetree-table-engine} SharedMergeTree 表引擎系列是面向云环境、用于替代 ReplicatedMergeTree 引擎的解决方案,并针对在共享存储之上运行进行了优化(例如 Amazon S3、Google Cloud Storage、MinIO、Azure Blob Storage)。每一种具体的 MergeTree 引擎类型都有对应的 SharedMergeTree 变体,例如 ReplacingSharedMergeTree 用来替代 ReplacingReplicatedMergeTree。 @@ -34,8 +33,6 @@ SharedMergeTree 带来的一个重要改进是:相比 ReplicatedMergeTree, 与 ReplicatedMergeTree 不同,SharedMergeTree 不需要副本之间直接通信。相反,所有通信都通过共享存储和 clickhouse-keeper 完成。SharedMergeTree 实现了异步、无主(leaderless)的复制机制,并使用 clickhouse-keeper 进行协调和元数据存储。这意味着在服务扩容和缩容时,无需再在所有副本间复制元数据。由此带来了更快的复制、变更、合并以及扩容操作。SharedMergeTree 允许每个表拥有数百个副本,使得在不使用分片的情况下实现动态伸缩成为可能。在 ClickHouse Cloud 中,会采用分布式查询执行方式,以便为单个查询利用更多的计算资源。 - - ## 内省 {#introspection} 用于对 ReplicatedMergeTree 进行内省的大多数系统表在 SharedMergeTree 中同样存在,`system.replication_queue` 和 
`system.replicated_fetches` 除外,因为 SharedMergeTree 中不会发生数据和元数据的复制。不过,SharedMergeTree 为这两个表提供了相应的替代表。 @@ -48,8 +45,6 @@ SharedMergeTree 带来的一个重要改进是:相比 ReplicatedMergeTree, 此表是 SharedMergeTree 中 `system.replicated_fetches` 的替代表。它包含当前正在进行中的将主键和校验和拉取到内存的操作信息。 - - ## 启用 SharedMergeTree {#enabling-sharedmergetree} `SharedMergeTree` 默认已启用。 @@ -103,7 +98,6 @@ ENGINE = SharedReplacingMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica ORDER BY key ``` - ## 设置 {#settings} 某些设置的行为发生了显著变化: @@ -112,8 +106,6 @@ ORDER BY key - `insert_quorum_parallel` -- 所有对 SharedMergeTree 的插入都是 quorum 插入(写入到共享存储),因此在使用 SharedMergeTree 表引擎时无需配置该设置。 - `select_sequential_consistency` -- 不要求使用 quorum 插入,但会在执行 `SELECT` 查询时给 clickhouse-keeper 带来额外负载 - - ## 一致性 {#consistency} SharedMergeTree 相比 ReplicatedMergeTree 提供更好的轻量级一致性。向 SharedMergeTree 执行插入时,无需设置诸如 `insert_quorum` 或 `insert_quorum_parallel` 之类的参数。插入本身就是 quorum 插入,这意味着元数据会存储在 ClickHouse-Keeper 中,并会被复制到至少达到 quorum 的 ClickHouse-Keeper 节点。集群中的每个副本都会从 ClickHouse-Keeper 异步拉取最新的元数据信息。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md index 9cb1b578636..7b827e38a50 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/04_infrastructure/warehouses.md @@ -15,11 +15,8 @@ import compute_7 from '@site/static/images/cloud/reference/compute-compute-7.png import compute_8 from '@site/static/images/cloud/reference/compute-compute-8.png'; import Image from '@theme/IdealImage'; - # 仓库 {#warehouses} - - ## 什么是计算-计算分离? {#what-is-compute-compute-separation} 计算-计算分离适用于 Scale 和 Enterprise 层级。 @@ -51,8 +48,6 @@ _图 2 - ClickHouse Cloud 中的计算分离_ 你可以创建与现有服务共享同一数据的额外服务,或者从零开始创建一个包含多个共享同一数据的服务的新部署。 - - ## 什么是仓库? 
{#what-is-a-warehouse} 在 ClickHouse Cloud 中,_仓库(warehouse)_ 是共享同一数据的一组服务。 @@ -75,8 +70,6 @@ _图 3 - 仓库示例_ 您可以按照所属的仓库对服务进行排序。 - - ## 访问控制 {#access-controls} ### 数据库凭证 {#database-credentials} @@ -116,8 +109,6 @@ _图 6 - 仓库中的读写服务和只读服务_ 2. 当前,可刷新的物化视图会在仓库中的所有服务上执行,包括只读服务。不过,将来此行为会被更改,仅在读写(RW)服务上执行。 ::: - - ## 扩展 {#scaling} 仓库中的每个服务都可以根据您的工作负载在以下方面进行调整: @@ -126,13 +117,9 @@ _图 6 - 仓库中的读写服务和只读服务_ - 服务是否应自动扩展 - 服务在空闲时是否应被停用(不适用于组中的第一个服务——请参阅 **限制** 部分) - - ## 行为变化 {#changes-in-behavior} 一旦为某个服务启用了 compute-compute(已创建至少一个次级服务),使用 `default` 集群名称调用 `clusterAllReplicas()` 函数时,将只会使用调用该函数的服务中的副本。也就是说,如果有两个服务连接到同一数据集,并且从服务 1 调用了 `clusterAllReplicas(default, system, processes)`,则只会显示运行在服务 1 上的进程。如有需要,仍然可以调用 `clusterAllReplicas('all_groups.default', system, processes)` 来访问所有副本。 - - ## 限制 {#limitations} 1. **主服务必须始终保持运行且不能被休眠(该限制会在 GA 正式发布后的一段时间内移除)。** 在私有预览期间以及 GA 之后的一段时间内,主服务(通常是你希望通过添加其他服务来扩展的现有服务)必须始终保持运行,并且会禁用休眠设置。如果至少存在一个次级服务,你将无法停止或休眠主服务。一旦所有次级服务都被移除,你就可以再次停止或休眠原始服务。 @@ -154,22 +141,17 @@ SETTINGS distributed_ddl_task_timeout=0 7. 
**当前每个 warehouse 最多支持 5 个服务(软限制)。** 如需在单个 warehouse 中配置超过 5 个服务,请联系支持团队。 - ## 定价 {#pricing} 在同一个仓库(主服务和次服务)中,所有服务的计算费用相同。存储费用只计费一次——包含在第一个(原始)服务中。 请参阅 [定价](https://clickhouse.com/pricing) 页面上的价格计算器,它可以根据您的工作负载大小和所选级别帮助估算成本。 - - ## 备份 {#backups} - 由于同一仓库中的所有服务共享同一存储,因此只在主(初始)服务上执行备份。通过这种方式,可以备份该仓库中所有服务的数据。 - 如果你从某个仓库的主服务还原备份,该备份会被还原到一个全新的服务,而不是还原到与现有仓库关联的服务上。还原完成后,你可以立即为这个新服务添加更多服务。 - - ## 使用仓库 {#using-warehouses} ### 创建仓库 {#creating-a-warehouse} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md index c98bb627763..2a7b029e67e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/api-overview.md @@ -8,20 +8,14 @@ doc_type: 'reference' keywords: ['ClickHouse Cloud', 'API 概览', '云 API', 'REST API', '编程式访问'] --- - - # ClickHouse Cloud API {#clickhouse-cloud-api} - - ## 概览 {#overview} ClickHouse Cloud API 是为开发者设计的 REST API,便于在 ClickHouse Cloud 上轻松管理组织和服务。通过我们的 Cloud API,您可以创建和管理服务、创建和管理 API 密钥、在组织中添加或移除成员等。 [了解如何创建您的第一个 API 密钥并开始使用 ClickHouse Cloud API。](/cloud/manage/openapi) - - ## Swagger(OpenAPI)端点和 UI {#swagger-openapi-endpoint-and-ui} ClickHouse Cloud API 基于开源的 [OpenAPI 规范](https://www.openapis.org/) 构建,以实现可预测的客户端调用行为。 @@ -34,14 +28,10 @@ ClickHouse Cloud API 基于开源的 [OpenAPI 规范](https://www.openapis.org/) 这将影响 `POST`、`GET` 和 `PATCH` 服务请求返回的对象。因此,任何调用这些 API 的代码都可能需要进行相应调整以适配这些变更。 ::: - - ## 速率限制 {#rate-limits} 每个组织最多可以创建 100 个 API 密钥。每个 API 密钥在任意 10 秒时间窗口内最多可发送 10 个请求。如需为您的组织提升 API 密钥数量上限或每个 10 秒时间窗口内的请求上限,请联系 support@clickhouse.com。 - - ## Terraform provider {#terraform-provider} 官方 ClickHouse Terraform Provider 允许你使用[基础设施即代码(Infrastructure as Code)](https://www.redhat.com/en/topics/automation/what-is-infrastructure-as-code-iac) @@ -57,16 +47,12 @@ ClickHouse 
Cloud API 基于开源的 [OpenAPI 规范](https://www.openapis.org/) 现在你还可以将 `num_replicas` 字段作为 service 资源的一个属性进行指定。 ::: - - ## Terraform 和 OpenAPI 新定价:副本设置详解 {#terraform-and-openapi-new-pricing---replica-settings-explained} 在 Scale 和 Enterprise 层级中,每个服务在创建时的副本数默认是 3,而在 Basic 层级中默认是 1。 对于 Scale 和 Enterprise 层级,可以在服务创建请求中通过指定 `numReplicas` 字段来进行调整。 对于某个仓库中的第一个服务,`numReplicas` 字段的取值必须在 2 到 20 之间。而在现有仓库中创建的服务,其副本数量可以低至 1。 - - ## 支持 {#support} 我们建议您优先通过[我们的 Slack 频道](https://clickhouse.com/slack)获取快速支持。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md index 1c3b1e50f73..da5f0a2958c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/openapi.md @@ -14,7 +14,6 @@ import image_04 from '@site/static/images/cloud/manage/openapi4.png'; import image_05 from '@site/static/images/cloud/manage/openapi5.png'; import Image from '@theme/IdealImage'; - # 管理 API 密钥 {#managing-api-keys} ClickHouse Cloud 提供了一个基于 OpenAPI 的 API,允许你以编程方式管理你的账号以及服务的各个方面。 @@ -68,7 +67,6 @@ $ curl --user $KEY_ID:$KEY_SECRET https://api.clickhouse.cloud/v1/organizations - ## 端点 {#endpoints} 有关端点的详细信息,请参阅 [API 参考](https://clickhouse.com/docs/cloud/manage/api/swagger)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md index 8ffcadde34b..a63d5cc25f4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/api/postman.md @@ -90,7 +90,6 @@ Postman 应用程序可以在网页浏览器中使用,也可以下载到桌面 - ## 测试 ClickHouse Cloud API 功能 
{#test-the-clickhouse-cloud-api-functionalities} ### 测试 "GET list of available organizations" {#test-get-list-of-available-organizations} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md index 71dc7229c76..4cae2c1d8c6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/05_admin_features/upgrades.md @@ -15,7 +15,6 @@ import enroll_fast_release from '@site/static/images/cloud/manage/enroll_fast_re import scheduled_upgrades from '@site/static/images/cloud/manage/scheduled_upgrades.png'; import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled_upgrade_window.png'; - # 升级 {#upgrades} 使用 ClickHouse Cloud,您无需担心打补丁和升级。我们会定期推出包含修复、新功能以及性能改进的升级版本。有关 ClickHouse 新增内容的完整列表,请参阅我们的 [Cloud 变更日志](/whats-new/cloud)。 @@ -26,8 +25,6 @@ import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled 作为此次变更的一部分,在升级事件期间,历史系统表数据最多会被保留 30 天。此外,对于运行在 AWS 或 GCP 上的服务,所有早于 2024 年 12 月 19 日的系统表数据,以及对于运行在 Azure 上的服务,所有早于 2025 年 1 月 14 日的系统表数据,在迁移到新的组织层级时都不会被保留。 ::: - - ## 版本兼容性 {#version-compatibility} 当你创建服务时,在服务首次预配的时刻,[`compatibility`](/operations/settings/settings#compatibility) 设置会被设为当时 ClickHouse Cloud 所提供的最新 ClickHouse 版本。 @@ -36,16 +33,12 @@ import scheduled_upgrade_window from '@site/static/images/cloud/manage/scheduled 你无法在服务级别管理服务的默认 `compatibility` 设置。如果你希望更改服务默认 `compatibility` 设置所使用的版本,必须[联系技术支持](https://clickhouse.com/support/program)。不过,你可以在用户、角色、配置文件(profile)、查询或会话级别,通过标准的 ClickHouse 设置机制来覆盖 `compatibility` 设置,例如在会话中使用 `SET compatibility = '22.3'`,或在查询中使用 `SETTINGS compatibility = '22.3'`。 - - ## 维护模式 {#maintenance-mode} 在某些情况下,我们可能需要更新您的服务,这可能会要求我们暂时禁用某些功能,例如扩缩容或空闲休眠。在极少数情况下,我们可能需要对出现问题的服务采取措施,使其恢复到健康状态。在此类维护期间,您会在服务页面上看到一条横幅,显示 _"Maintenance in 
progress"_。在这段时间内,您通常仍然可以继续使用该服务进行查询。 在服务处于维护状态的这段时间内,我们不会向您收取费用。_维护模式_ 的出现非常罕见,不应与常规的服务升级相混淆。 - - ## 发布通道(升级计划) {#release-channels-upgrade-schedule} 用户可以通过订阅特定的发布通道来指定其 ClickHouse Cloud 服务的升级计划。共有三个发布通道,用户可以使用 **计划升级(scheduled upgrades)** 功能配置每周的升级日期和时间。 @@ -112,8 +105,6 @@ Basic 等级服务会在快速发布通道之后不久进行升级。 - 切换到更慢的通道不会将你的服务降级,并会让你保持当前版本,直到该通道中有更新的版本可用。例如:常规到慢速、快速到常规或慢速 ::: - - ## 计划升级 {#scheduled-upgrades} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/06_security.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/06_security.md index 0869e631416..49e10160a1b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/06_security.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/06_security.md @@ -7,16 +7,12 @@ doc_type: 'reference' keywords: ['安全', '云安全', '访问控制', '合规性', '数据保护'] --- - - # ClickHouse Cloud 安全性 {#clickhouse-cloud-security} 本文档详细介绍了用于保护 ClickHouse Cloud 组织和服务的安全选项和最佳实践。 ClickHouse 致力于提供安全的分析型数据库解决方案,因此保护数据和服务的完整性是重中之重。 本文所含信息涵盖多种方法,旨在帮助用户增强其 ClickHouse 环境的安全性。 - - ## 云控制台身份认证 {#cloud-console-auth} ### 密码认证 {#password-auth} @@ -49,8 +45,6 @@ ClickHouse Cloud 支持通过 Google 或 Microsoft 进行社交身份认证, 了解更多[API 认证](/cloud/manage/openapi)。 - - ## 数据库身份验证 {#database-auth} ### 数据库密码身份验证 {#db-password-auth} @@ -65,8 +59,6 @@ ClickHouse 数据库用户密码符合 NIST 800-63B 标准进行配置,长度 详细了解[SSH 身份验证](/cloud/security/manage-database-users#database-ssh)。 - - ## 访问控制 {#access-control} ### 控制台基于角色的访问控制(RBAC) {#console-rbac} @@ -81,8 +73,6 @@ ClickHouse 数据库支持通过用户授权实现细粒度的权限管理和基 了解更多关于 [数据库用户授权](/cloud/security/manage-database-users#database-permissions)。 - - ## 网络安全 {#network-security} ### IP 过滤器 {#ip-filters} @@ -97,8 +87,6 @@ ClickHouse 数据库支持通过用户授权实现细粒度的权限管理和基 详细了解 [私有网络连接](/cloud/security/connectivity/private-networking)。 - - ## 加密 {#encryption} ### 存储级加密 {#storage-encryption} @@ -119,8 +107,6 @@ ClickHouse Cloud Enterprise 客户可以使用自己的密钥进行数据库级 
详细了解[客户管理加密密钥](/cloud/security/cmek#customer-managed-encryption-keys-cmek)。 - - ## 审计与日志记录 {#auditing-logging} ### 控制台审计日志 {#console-audit-log} @@ -141,8 +127,6 @@ ClickHouse Cloud Enterprise 客户可以使用自己的密钥进行数据库级 详细了解[BYOC 安全操作手册](/cloud/security/audit-logging/byoc-security-playbook)。 - - ## 合规性 {#compliance} ### 安全与合规报告 {#compliance-reports} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md index 4a03d453600..bf359539c6d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/advanced_dashboard.md @@ -23,7 +23,6 @@ import Image from '@theme/IdealImage'; 高级仪表盘同时适用于 ClickHouse OSS(开源软件)和 Cloud。本文将介绍如何在 Cloud 中使用高级仪表盘。 - ## 访问高级仪表板 {#accessing-the-advanced-dashboard} 可以通过以下路径访问高级仪表板: @@ -33,8 +32,6 @@ import Image from '@theme/IdealImage'; - - ## 访问原生高级仪表盘 {#accessing-the-native-advanced-dashboard} 可以通过以下路径访问原生高级仪表盘: @@ -51,8 +48,6 @@ import Image from '@theme/IdealImage'; - - ## 开箱即用的可视化 {#out-of-box-visualizations} Advanced Dashboard 中的默认图表旨在帮助你实时了解 ClickHouse 系统的运行状况。下面列出了每个图表及其说明,并分为三大类,便于浏览和查找。 @@ -86,8 +81,6 @@ Advanced Dashboard 中的默认图表旨在帮助你实时了解 ClickHouse 系 | OS CPU Usage (Userspace) | 运行用户态代码的 CPU 使用率 | | OS CPU Usage (Kernel) | 运行内核代码的 CPU 使用率 | - - ## ClickHouse Cloud 特有指标 {#clickhouse-cloud-specific} ClickHouse Cloud 使用对象存储(S3 类型)来保存数据。监控该接口有助于发现潜在问题。 @@ -106,8 +99,6 @@ ClickHouse Cloud 使用对象存储(S3 类型)来保存数据。监控该接 | Network receive bytes/sec | 跟踪当前入站网络流量速率 | | Concurrent network connections | 跟踪当前并发网络连接的数量 | - - ## 使用高级仪表板识别问题 {#identifying-issues-with-the-advanced-dashboard} 通过这种对 ClickHouse 服务健康状况的实时视图,可以在问题影响业务之前大大 @@ -204,7 +195,6 @@ read_rows: 150957260 tables: ['default.amazon_reviews_no_pk'] ``` - 第 2 行: ────── type: QueryFinish diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md index d63d197be9a..d21ab7b3649 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/features/07_monitoring/prometheus.md @@ -15,7 +15,6 @@ import prometheus_grafana_metrics_explorer from '@site/static/images/integration import prometheus_datadog from '@site/static/images/integrations/prometheus-datadog.png'; import Image from '@theme/IdealImage'; - # Prometheus 集成 {#prometheus-integration} 该功能支持集成 [Prometheus](https://prometheus.io/) 来监控 ClickHouse Cloud 服务。Prometheus 指标通过 [ClickHouse Cloud API](/cloud/manage/api/api-overview) 端点对外提供访问,用户可以安全连接并将指标导出到 Prometheus 指标采集器中。这些指标可以与仪表盘(例如 Grafana、Datadog)集成,用于可视化。 @@ -59,7 +58,6 @@ export SERVICE_ID= curl --silent --user $KEY_ID:$KEY_SECRET https://api.clickhouse.cloud/v1/organizations/$ORG_ID/services/$SERVICE_ID/prometheus?filtered_metrics=true ``` - ### 示例响应 {#sample-response} ```response @@ -187,7 +185,6 @@ scrape_configs: 请注意,必须将 `honor_labels` 配置参数设置为 `true`,才能正确填充实例标签。此外,上述示例中将 `filtered_metrics` 设置为 `true`,但实际应根据用户偏好进行配置。 - ## 与 Grafana 集成 {#integrating-with-grafana} 用户可以通过两种主要方式与 Grafana 集成: @@ -260,7 +257,6 @@ prometheus.remote_write "metrics_service" { 请注意,必须将 `honor_labels` 配置参数设置为 `true`,才能使 instance 标签被正确填充。 - ### 使用 Alloy 的自托管 Grafana {#grafana-self-managed-with-alloy} Grafana 自托管用户可以在[此处](https://grafana.com/docs/alloy/latest/get-started/install/)找到安装 Alloy agent 的说明。我们假定用户已经将 Alloy 配置为将 Prometheus 指标发送到所需的目标端点。下面的 `prometheus.scrape` 组件会让 Alloy 抓取 ClickHouse Cloud 端点。我们假定 `prometheus.remote_write` 会接收已抓取的指标。如果该目标不存在,请将 `forward_to key` 调整为实际的目标端点。 @@ -293,7 +289,6 @@ prometheus.scrape "clickhouse_cloud" { 请注意,需要将 `honor_labels` 配置参数设置为 `true`,才能正确填充实例(`instance`)标签。 - ## 集成 Datadog {#integrating-with-datadog} 可以使用 
Datadog 的 [Agent](https://docs.datadoghq.com/agent/?tab=Linux) 和 [OpenMetrics 集成](https://docs.datadoghq.com/integrations/openmetrics/) 从 ClickHouse Cloud 端点采集指标。下面是该 Agent 和集成的一个简单示例配置。请注意,您可能只希望选择自己最关心的那部分指标。下面这个「兜底式」示例会导出成千上万种指标与实例的组合,Datadog 会将它们视为自定义指标。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md index abe31bebd37..697bc37d8ac 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/SQL_console/query-endpoints.md @@ -17,7 +17,6 @@ import endpoints_monitoring from '@site/static/images/cloud/sqlconsole/endpoints import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # 设置查询 API 端点 {#setting-up-query-api-endpoints} **Query API Endpoints** 功能允许您在 ClickHouse Cloud 控制台中直接基于任意已保存的 SQL 查询创建一个 API 端点。之后,您可以通过 HTTP 调用这些 API 端点来执行已保存的查询,而无需通过原生驱动连接到 ClickHouse Cloud 服务。 @@ -136,7 +135,6 @@ GET /query-endpoints/{queryEndpointId}/run POST /query-endpoints/{queryEndpointId}/run ``` - ### HTTP 方法 {#http-methods} | Method | Use Case | Parameters | @@ -252,7 +250,6 @@ POST /query-endpoints/{queryEndpointId}/run SELECT database, name AS num_tables FROM system.tables LIMIT 3; ``` - #### 版本 1 {#version-1} @@ -429,7 +426,6 @@ SELECT name, database FROM system.tables WHERE match(name, {tableNameRegex: Stri - ### 在查询变量中使用数组向表中插入数据的请求 {#request-with-array-in-the-query-variables-that-inserts-data-into-a-table} **表的 SQL:** @@ -493,7 +489,6 @@ INSERT INTO default.t_arr VALUES ({arr: Array(Array(Array(UInt32)))}); - ### 将 ClickHouse 设置 `max_threads` 为 8 的请求 {#request-with-clickhouse-settings-max_threads-set-to-8} **查询 API 端点的 SQL:** @@ -540,7 +535,6 @@ SELECT * FROM system.tables; - ### 以流的形式发送请求并解析响应` {#request-and-parse-the-response-as-a-stream} **查询 API 端点的 SQL:** @@ -611,7 +605,6 @@ SELECT name, database 
FROM system.tables; - ### 从文件向表中插入数据流 {#insert-a-stream-from-a-file-into-a-table} 创建文件 `./samples/my_first_table_2024-07-11.csv`,内容如下: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md index 034faa19293..b240512b523 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/backups/01_review-and-restore-backups.md @@ -17,21 +17,16 @@ import backup_usage from '@site/static/images/cloud/manage/backup-usage.png'; import backup_restore from '@site/static/images/cloud/manage/backup-restore.png'; import backup_service_provisioning from '@site/static/images/cloud/manage/backup-service-provisioning.png'; - # 查看和恢复备份 {#review-and-restore-backups} 本指南介绍 ClickHouse Cloud 中备份的工作机制、可用于为你的服务配置备份的选项,以及如何从备份中恢复数据。 - - ## 备份状态列表 {#backup-status-list} 无论是默认的每日计划,还是你选择的[自定义计划](/cloud/manage/backups/configurable-backups),你的服务都会按照设定的计划自动备份。所有可用的备份都可以在服务的 **Backups** 选项卡中查看。在这里,你可以看到备份的状态、耗时以及备份大小。你也可以通过 **Actions** 列来恢复特定的备份。 - - ## 了解备份成本 {#understanding-backup-cost} 根据默认策略,ClickHouse Cloud 要求每天执行一次备份,并保留 24 小时的数据。选择需要保留更多数据的备份计划,或导致更频繁备份的计划,可能会产生额外的备份存储费用。 @@ -50,8 +45,6 @@ import backup_service_provisioning from '@site/static/images/cloud/manage/backup 请记住,随着服务中数据量随时间增长,备份的预估成本也会发生变化。 ::: - - ## 恢复备份 {#restore-a-backup} 备份会被恢复到一个新的 ClickHouse Cloud 服务中,而不是恢复到创建该备份的现有服务上。 @@ -64,8 +57,6 @@ import backup_service_provisioning from '@site/static/images/cloud/manage/backup - - ## 使用已恢复的服务 {#working-with-your-restored-service} 在完成一次备份恢复后,您将会拥有两个类似的服务:需要恢复的**原始服务**,以及从原始服务备份中恢复得到的新的**已恢复服务**。 @@ -147,7 +138,6 @@ FROM remoteSecure('source-hostname', db, table, 'exporter', 'password-here') 在成功将数据插入到原有服务后,请务必在该服务中验证数据。数据验证完成后,还应删除新服务。 - ## 恢复已删除的表 {#undeleting-or-undropping-tables} 通过 [Shared 
Catalog](https://clickhouse.com/docs/cloud/reference/shared-catalog),ClickHouse Cloud 支持使用 `UNDROP` 命令。 @@ -169,13 +159,10 @@ SYNC SETTINGS max_table_size_to_drop=2000000000000 -- 将限制增加至 2TB 旧版套餐:对于使用旧版套餐的客户,默认的每日备份保留 24 小时,其占用的存储空间已包含在存储费用中。 ::: - ## 可配置备份 {#configurable-backups} 如果您希望设置不同于默认备份计划的备份计划,请参阅[可配置备份](/cloud/manage/backups/configurable-backups)。 - - ## 将备份导出到您自己的云账户 {#export-backups-to-your-own-cloud-account} 如果您希望将备份导出到您自己的云账户,请参阅[此页面](/cloud/manage/backups/export-backups-to-own-cloud-account)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md index e0f8f592b00..b0e11487766 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/index.md @@ -9,7 +9,6 @@ doc_type: 'landing-page' import TableOfContents from '@site/i18n/zh/docusaurus-plugin-content-docs/current/best-practices/_snippets/_table_of_contents.md'; - # ClickHouse Cloud 中的最佳实践 {#best-practices-in-clickhouse-cloud} 本节介绍一些最佳实践,帮助您最大限度地发挥 ClickHouse Cloud 的价值。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md index ac7c40248e8..f6e3d36b27c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/best_practices/multitenancy.md @@ -91,7 +91,6 @@ GRANT user_role TO user_1 GRANT user_role TO user_2 ``` - 现在,你可以以 `user_1` 身份连接并运行一个简单的 select 查询。只会返回来自第一个租户的行。 ```sql @@ -108,7 +107,6 @@ FROM events └───────────┴──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## 独立表 {#separate-tables} 
在这种方案中,每个租户的数据都存储在同一数据库内的独立表中,因此不再需要使用特定字段来标识租户。通过使用 [GRANT 语句](/sql-reference/statements/grant) 来实施用户访问控制,确保每个用户只能访问包含其所属租户数据的表。 @@ -201,7 +199,6 @@ FROM default.events_tenant_1 └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## 独立数据库 {#separate-databases} 每个租户的数据都存储在同一 ClickHouse 服务内的独立数据库中。 @@ -286,7 +283,6 @@ GRANT SELECT ON tenant_1.events TO user_1 GRANT SELECT ON tenant_2.events TO user_2 ``` - 现在,你可以以 `user_1` 身份连接,并在相应数据库中的 events 表上执行一个简单的 SELECT 查询。只会返回来自第一个租户的行。 ```sql @@ -303,7 +299,6 @@ FROM tenant_1.events └──────────────────────────────────────┴─────────────┴─────────────────────┴─────────┴─────────────────────────────────────────┘ ``` - ## 计算-计算分离 {#compute-compute-separation} 上述三种方法也可以通过使用 [Warehouses](/cloud/reference/warehouses#what-is-a-warehouse) 进一步隔离。数据存放在共享的对象存储中,但通过[计算-计算分离](/cloud/reference/warehouses#what-is-compute-compute-separation),每个租户都可以根据不同的 CPU/内存比拥有自己的独立计算服务。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md index 73377967efe..6d8f6768fa6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/cloud-compatibility.md @@ -7,14 +7,10 @@ keywords: ['ClickHouse Cloud', '兼容性'] doc_type: 'guide' --- - - # ClickHouse Cloud 兼容性指南 {#clickhouse-cloud-compatibility-guide} 本指南概述在 ClickHouse Cloud 中在功能和运维方面可以预期的行为和特性。虽然 ClickHouse Cloud 构建于开源 ClickHouse 发行版之上,但在架构和实现上可能存在一些差异。你可能会对这篇关于[我们如何构建 ClickHouse Cloud](https://clickhouse.com/blog/building-clickhouse-cloud-from-scratch-in-a-year) 的博客感兴趣,它可作为相关的背景阅读材料。 - - ## ClickHouse Cloud 架构 {#clickhouse-cloud-architecture} ClickHouse Cloud 大幅简化了运维开销,并降低了大规模运行 ClickHouse 的成本。无需预先规划部署规模、为高可用性配置复制、手动对数据进行分片、在工作负载增加时扩容服务器,或在空闲时缩容服务器——这一切都由 ClickHouse Cloud 代为处理。 @@ -26,8 +22,6 @@ 
ClickHouse Cloud 大幅简化了运维开销,并降低了大规模运行 Click - 针对间歇性工作负载的无缝休眠功能默认启用。系统会在一段时间无活动后自动暂停计算资源,并在有新查询到达时透明地重新启动它们,因此无需为闲置资源付费。 - 高级伸缩控制允许设置自动伸缩的最大值以进一步控制成本,或设置自动伸缩的最小值,为具有特殊性能要求的应用程序预留计算资源。 - - ## 能力 {#capabilities} ClickHouse Cloud 提供对开源版 ClickHouse 中一组精心筛选功能的访问。下文列出了当前在 ClickHouse Cloud 中被禁用的部分特性。 @@ -107,8 +101,6 @@ ClickHouse Cloud 支持 HTTPS、原生接口以及 [MySQL wire protocol](/interf [命名集合](/operations/named-collections) 目前在 ClickHouse Cloud 中尚不受支持。 - - ## 默认运行配置与注意事项 {#operational-defaults-and-considerations} 以下是 ClickHouse Cloud 服务的默认设置。在某些情况下,这些设置是固定的,以确保服务正确运行;在其他情况下,则可以进行调整。 @@ -131,8 +123,6 @@ ClickHouse Cloud 针对变化的工作负载进行了调优,因此目前大多 ### 高级安全管理 {#advanced-security-administration} 在创建 ClickHouse 服务的过程中,我们会创建一个默认数据库,以及一个对该数据库拥有广泛权限的默认用户。该初始用户可以创建其他用户,并为这些用户分配对此数据库的权限。除此之外,目前不支持在数据库中启用以下安全功能:使用 Kerberos、LDAP 或 SSL X.509 证书认证的安全机制。 - - ## 路线图 {#roadmap} 我们正在为 ClickHouse Cloud 引入对可执行 UDF 的支持,并评估对许多其他功能的需求。如果您有任何反馈或希望请求某个特定功能,请[在此提交](https://console.clickhouse.cloud/support)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md index 24ef284a9da..d61974b2699 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/data_sources/02_accessing-s3-data-securely.md @@ -14,7 +14,6 @@ import s3_output from '@site/static/images/cloud/security/secures3_output.jpg'; 本文演示 ClickHouse Cloud 客户如何利用基于角色的访问控制与 Amazon Simple Storage Service (S3) 完成身份验证,并安全访问其数据。 - ## 介绍 {#introduction} 在开始配置安全 S3 访问之前,了解其工作原理非常重要。下面概述了 ClickHouse 服务如何通过在客户的 AWS 账户中扮演一个角色来访问私有 S3 存储桶。 @@ -130,7 +129,6 @@ IAM 策略(请将 `{BUCKET_NAME}` 替换为您的存储桶名称): 4 - 在创建完成后复制新的 **IAM Role Arn**。这是访问你的 S3 bucket 所需的 Arn。 - ## 使用 ClickHouseAccess 角色访问你的 S3 bucket {#access-your-s3-bucket-with-the-clickhouseaccess-role} ClickHouse Cloud 
新增了一个功能,允许你在 S3 表函数中指定 `extra_credentials`。下面是一个示例,展示如何使用上面新创建的角色来运行查询。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md index 6942d8eb6dc..114d287cf03 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/infrastructure/01_deployment_options/byoc/03_onboarding/01_aws.md @@ -16,7 +16,6 @@ import byoc_subnet_1 from '@site/static/images/cloud/reference/byoc-subnet-1.png import byoc_subnet_2 from '@site/static/images/cloud/reference/byoc-subnet-2.png'; import byoc_s3_endpoint from '@site/static/images/cloud/reference/byoc-s3-endpoint.png' - ## 接入流程 {#onboarding-process} 客户可以通过联系[我们](https://clickhouse.com/cloud/bring-your-own-cloud)来发起接入流程。客户需要准备一个专用的 AWS 账号,并确认将要使用的 Region。目前,我们仅允许用户在 ClickHouse Cloud 支持的 Region 中启动 BYOC 服务。 @@ -83,7 +82,6 @@ module "clickhouse_onboarding" {
-
@@ -169,8 +167,6 @@ module "clickhouse_onboarding" { 可选:在验证 peering 正常工作之后,您可以请求为 ClickHouse BYOC 删除公共负载均衡器。 - - ## 升级流程 {#upgrade-process} 我们会定期升级软件,包括 ClickHouse 数据库版本、ClickHouse Operator、EKS 以及其他组件。 @@ -181,8 +177,6 @@ module "clickhouse_onboarding" { 维护窗口不适用于安全补丁和漏洞修复。这类升级将作为周期外升级进行处理,并通过及时沟通协调合适的时间,从而将对运行的影响降至最低。 ::: - - ## CloudFormation IAM 角色 {#cloudformation-iam-roles} ### Bootstrap IAM 角色 {#bootstrap-iam-role} @@ -217,8 +211,6 @@ Bootstrap IAM 角色具有以下权限: 最后,**`data-plane-mgmt`** 允许一个 ClickHouse Cloud 控制平面组件对所需的自定义资源(例如 `ClickHouseCluster` 和 Istio Virtual Service/Gateway)进行协调(reconcile)。 - - ## 网络边界 {#network-boundaries} 本节介绍往返于客户 BYOC VPC 的不同网络流量: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md index 56cefcc72cb..78d0e154575 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/production-readiness.md @@ -7,8 +7,6 @@ keywords: ['生产就绪', '企业', 'saml', 'sso', 'terraform', '监控', '备 doc_type: 'guide' --- - - # ClickHouse Cloud 生产就绪指南 {#production-readiness} 适用于已完成快速入门指南且已有活跃服务并在持续接收数据的组织 @@ -23,8 +21,6 @@ doc_type: 'guide' - 验证备份流程并编写灾难恢复流程文档 ::: - - ## 简介 {#introduction} 你已经成功在业务工作负载中运行 ClickHouse Cloud。现在,你需要使你的部署进一步成熟,以满足企业级生产标准——无论是由于合规审计的触发、由未测试查询引发的生产事故,还是因为 IT 部门要求将其集成到公司系统中。 @@ -41,8 +37,6 @@ ClickHouse Cloud 的托管平台负责基础设施运维、自动扩缩容以及 本指南将逐一讲解上述各个方面,帮助你从一个可用的 ClickHouse Cloud 部署平滑过渡到企业级就绪的系统。 - - ## 环境策略 {#environment-strategy} 建立彼此独立的环境,以便在不影响生产工作负载的前提下安全测试变更。大多数生产事故都可以追溯到直接部署到生产系统、但未经过测试的查询或配置更改。 @@ -57,8 +51,6 @@ ClickHouse Cloud 的托管平台负责基础设施运维、自动扩缩容以及 **规模规划**:预发布服务的规格应尽量贴近生产负载特征。在明显更小的基础设施上进行测试,可能无法暴露资源争用或扩展性问题。通过定期数据刷新或生成合成数据,使用贴近生产的代表性数据集。关于如何为预发布环境进行规模规划并适当扩展服务,请参考 [Sizing and hardware recommendations](/guides/sizing-and-hardware-recommendations) 和 [Scaling in ClickHouse Cloud](/manage/scaling) 
文档。这些资源提供了关于内存、CPU 和存储规模规划的实用建议,以及纵向和横向扩展选项的详细信息,帮助你使预发布环境尽可能贴近生产工作负载。 - - ## 私有网络 {#private-networking} ClickHouse Cloud 中的[私有网络](/cloud/security/connectivity/private-networking)功能允许将 ClickHouse 服务直接连接到您的云虚拟网络,确保数据不经过公共互联网传输。对于具有严格安全或合规性要求的组织,或在私有子网中运行应用程序的场景,这一点尤为重要。 @@ -71,8 +63,6 @@ ClickHouse Cloud 通过以下机制支持私有网络: 如果您需要更多技术细节或分步配置说明,可参阅各云服务商的链接文档,其中提供了完整的指南。 - - ## 企业级认证与用户管理 {#enterprise-authentication} 从基于控制台的用户管理迁移到企业级认证集成,是实现生产环境就绪的关键步骤。 @@ -103,8 +93,6 @@ ClickHouse Cloud 目前尚不支持通过身份提供方进行 SCIM 或自动化 进一步了解 [Cloud Access Management](/cloud/security/cloud_access_management) 和 [SAML SSO 设置](/cloud/security/saml-setup)。 - - ## 基础设施即代码与自动化 {#infrastructure-as-code} 通过采用基础设施即代码实践和 API 自动化来管理 ClickHouse Cloud,可以为您的部署配置提供一致性、版本控制和可复现性。 @@ -148,7 +136,6 @@ Terraform 提供程序支持服务开通、IP 访问列表和用户管理。请 API 认证采用与 Terraform 相同的基于 Token 的方式。完整的 API 参考与集成示例请参阅 [ClickHouse Cloud API](/cloud/manage/api/api-overview) 文档。 - ## 监控与运维集成 {#monitoring-integration} 将 ClickHouse Cloud 接入现有监控基础设施,可以确保可观测性并实现对问题的主动发现。 @@ -179,7 +166,6 @@ scrape_configs: 如需了解包含 Prometheus 和 Grafana 详细配置以及高级告警在内的完整设置,请参阅 [ClickHouse Cloud 可观测性指南](/use-cases/observability/cloud-monitoring#prometheus)。 - ## 业务连续性与支持集成 {#business-continuity} 建立备份校验流程并完成支持集成,可确保你的 ClickHouse Cloud 部署在发生故障时能够恢复,并在需要时获得帮助。 @@ -206,8 +192,6 @@ ClickHouse Cloud 提供带有可配置保留期的自动备份。根据合规性 详细了解 [ClickHouse Cloud 备份与恢复](/cloud/manage/backups/overview) 和 [支持服务](/about-us/support)。 - - ## 后续步骤 {#next-steps} 在完成本指南中的集成和相关操作后,请访问 [Cloud 资源导览](/cloud/get-started/cloud/resource-tour),查阅关于[监控](/cloud/get-started/cloud/resource-tour#monitoring)、[安全](/cloud/get-started/cloud/resource-tour#security)以及[成本优化](/cloud/get-started/cloud/resource-tour#cost-optimization)的指南。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md 
index 76eb407802d..974e1966376 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/03_manage-sql-console-role-assignments.md @@ -16,7 +16,6 @@ import step_5 from '@site/static/images/cloud/guides/sql_console/service_level_a import step_6 from '@site/static/images/cloud/guides/sql_console/service_level_access/6_service_settings.png' import step_7 from '@site/static/images/cloud/guides/sql_console/service_level_access/7_service_settings.png' - # 配置 SQL 控制台角色分配 {#configuring-sql-console-role-assignments} > 本指南介绍如何配置 SQL 控制台角色分配,这些分配决定整个控制台的访问权限,以及用户在 Cloud 控制台中可使用的功能。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md index 679ab0a037a..fb96cdff249 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_manage-database-users.md @@ -16,7 +16,6 @@ import user_grant_permissions_options from '@site/static/images/cloud/security/c SQL 控制台用户会为每个会话单独创建,并使用会自动轮换的 X.509 证书进行身份验证。会话终止时,该用户会被删除。在为审计生成访问列表时,请在控制台中进入相应服务的 Settings 选项卡,在记录数据库中现有数据库用户的同时,也一并记录 SQL 控制台访问情况。如果配置了自定义角色,用户的访问权限会列在以该用户用户名结尾的角色中。 - ## SQL 控制台用户和角色 {#sql-console-users-and-roles} 具有 Service Read Only 和 Service Admin 权限的用户可以被分配基本的 SQL 控制台角色。有关更多信息,请参阅 [管理 SQL 控制台角色分配](/cloud/guides/sql-console/manage-sql-console-role-assignments)。本指南演示如何为 SQL 控制台用户创建自定义角色。 @@ -52,8 +51,6 @@ GRANT database_developer TO `sql-console-role:my.user@domain.com`; - - ## 数据库身份验证 {#database-authentication} ### 数据库用户 ID 和密码 
{#database-user-id--password} @@ -79,7 +76,6 @@ CREATE USER userName IDENTIFIED WITH sha256_hash BY 'hash'; 如需包含示例的详细操作说明,请参阅我们知识库中的[如何使用 SSH 密钥连接到 ClickHouse Cloud](/knowledgebase/how-to-connect-to-ch-cloud-using-ssh-keys)。 - ## 数据库权限 {#database-permissions} 在服务和数据库中使用 SQL [GRANT](/sql-reference/statements/grant) 语句配置以下内容。 @@ -160,7 +156,6 @@ GRANT default_role to userID; e. 点击显示具有该服务数据库访问权限用户数量的链接 `There are # users with access to this service.`,以查看用户列表。 - ## Warehouse users {#warehouse-users} Warehouse 用户在同一 Warehouse 内由各服务共享。有关详细信息,请参阅 [Warehouse 访问控制](/cloud/reference/warehouses#access-controls)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md index 9d7096cbc51..dc56fb8dc1c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/01_cloud_access_management/04_saml-sso-setup.md @@ -15,7 +15,6 @@ import samlAzureApp from '@site/static/images/cloud/security/saml-azure-app.png' import samlAzureClaims from '@site/static/images/cloud/security/saml-azure-claims.png'; import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' - # 配置 SAML SSO {#saml-sso-setup} @@ -24,16 +23,12 @@ ClickHouse Cloud 通过安全断言标记语言(SAML)支持单点登录(SS 我们目前支持由服务提供者发起的 SSO、通过独立连接接入的多个组织,以及即时(Just-in-time,JIT)预配。我们尚不支持跨域身份管理系统(SCIM)或属性映射功能。 - - ## 开始之前 {#before-you-begin} 你需要在你的 IdP 中拥有管理员(Admin)权限,并在 ClickHouse Cloud 组织中具有 **Admin** 角色。在你在 IdP 中完成连接配置后,请按照下文步骤中所需的信息联系我们,以完成整个流程。 我们建议在配置 SAML 连接的同时,额外设置一个**指向你组织的直接链接**,以简化登录流程。不同 IdP 的具体配置方式各不相同。请继续阅读,了解如何在你的 IdP 中完成这些操作。 - - ## 如何配置 IdP {#how-to-configure-your-idp} ### 步骤 {#steps} @@ -148,8 +143,6 @@ ClickHouse Cloud 通过安全断言标记语言(SAML)支持单点登录(SS - -
{" "} @@ -254,7 +247,6 @@ ClickHouse Cloud 通过安全断言标记语言(SAML)支持单点登录(SS | App attributes | email | 12. 点击 **Finish**。 - 14. 要启用应用程序,请点击 **OFF** 将所有人的设置更改为 **ON**。也可以通过选择屏幕左侧的选项将访问权限限制为特定组或组织单位。
@@ -339,7 +331,6 @@ Azure (Microsoft) SAML 也可称为 Azure Active Directory (AD) 或 Microsoft En - ## 工作原理 {#how-it-works} ### 使用 SAML SSO 的用户管理 {#user-management-with-saml-sso} @@ -354,8 +345,6 @@ Azure (Microsoft) SAML 也可称为 Azure Active Directory (AD) 或 Microsoft En ClickHouse Cloud 通过为每个组织提供单独的连接来支持多组织 SSO。使用直接链接(`https://console.clickhouse.cloud/?connection={organizationid}`)登录到各自的组织。在登录另一个组织之前,请务必先从当前组织注销。 - - ## 附加信息 {#additional-information} 在身份验证方面,安全性是我们的首要任务。基于这一点,在实现 SSO 时我们做出了一些决策,需要提前告知您。 @@ -364,8 +353,6 @@ ClickHouse Cloud 通过为每个组织提供单独的连接来支持多组织 SS - **我们不会自动关联 SSO 与非 SSO 账户。** 即使用户使用相同的电子邮件地址,您在 ClickHouse 用户列表中也可能会看到他们的多个账户。 - - ## 常见问题排查 {#troubleshooting-common-issues} | 错误 | 原因 | 解决方案 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md index 6d524bc4f0b..3b2675ea204 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/01_setting-ip-filters.md @@ -11,7 +11,6 @@ import Image from '@theme/IdealImage'; import ip_filtering_after_provisioning from '@site/static/images/cloud/security/ip-filtering-after-provisioning.png'; import ip_filter_add_single_ip from '@site/static/images/cloud/security/ip-filter-add-single-ip.png'; - ## 设置 IP 过滤器 {#setting-ip-filters} IP 访问列表通过指定允许连接的源地址来过滤到 ClickHouse 服务或使用 API 密钥的流量。可以为每个服务和每个 API 密钥分别配置这些列表。列表既可以在创建服务或 API 密钥时配置,也可以在之后进行配置。 @@ -20,16 +19,12 @@ IP 访问列表通过指定允许连接的源地址来过滤到 ClickHouse 服 如果在创建 ClickHouse Cloud 服务时跳过 IP 访问列表的创建,那么将不允许任何流量访问该服务。如果 ClickHouse 服务的 IP 访问列表设置为 `Allow from anywhere`,互联网爬虫和扫描器在查找公共 IP 时,可能会周期性地将您的服务从空闲状态切换为活动状态,从而产生少量意料之外的费用。 ::: - - ## 准备 {#prepare} 在开始之前,先收集需要添加到访问列表中的 IP 地址或地址段。请将远程办公人员、值班地点、VPN 等访问来源一并考虑在内。IP 访问列表的用户界面同时支持单个地址和 CIDR 表示法。 无类别域间路由(Classless 
Inter-domain Routing,CIDR)表示法允许你指定比传统 A、B 或 C 类(8、6 或 24)子网掩码长度更小的 IP 地址范围。[ARIN](https://account.arin.net/public/cidrCalculator) 和其他一些组织提供了 CIDR 计算器可供使用,如果你想了解更多关于 CIDR 表示法的信息,请参阅 [Classless Inter-domain Routing (CIDR)](https://www.rfc-editor.org/rfc/rfc4632.html) RFC。 - - ## 创建或修改 IP 访问列表 {#create-or-modify-an-ip-access-list} :::note 仅适用于未通过 PrivateLink 的连接 @@ -90,8 +85,6 @@ IP 访问列表仅适用于来自公共互联网、即 [PrivateLink](/cloud/secu 要应用所做的更改,必须点击 **Save**。 - - ## 验证 {#verification} 创建过滤器后,先在允许的范围内确认可以连接到某个服务,再确认来自该范围之外的连接会被拒绝。可以使用一个简单的 `curl` 命令进行验证: @@ -118,7 +111,6 @@ curl https://.clickhouse.cloud:8443 Ok. ``` - ## 限制 {#limitations} - 目前,IP 访问列表仅支持 IPv4 地址 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md index 16218b2d2bf..5aba198c888 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/02_aws-privatelink.md @@ -18,7 +18,6 @@ import pe_remove_private_endpoint from '@site/static/images/cloud/security/pe-re import aws_private_link_pe_filters from '@site/static/images/cloud/security/aws-privatelink-pe-filters.png'; import aws_private_link_ped_nsname from '@site/static/images/cloud/security/aws-privatelink-pe-dns-name.png'; - # AWS PrivateLink {#aws-privatelink} @@ -69,15 +68,11 @@ ClickHouse Cloud 在以下区域支持 [跨区域 PrivateLink](https://aws.amazo 您可以在[此处](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/)找到 Terraform 示例。 - - ## 重要注意事项 {#considerations} ClickHouse 会尝试对您的服务进行分组,以便在同一 AWS 
区域内复用同一个已发布的[服务端点](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html#endpoint-service-overview)。但是,并不能保证一定会完成这种分组,尤其是在您将服务分散在多个 ClickHouse 组织中的情况下。 如果您已经在 ClickHouse 组织中为其他服务配置了 PrivateLink,那么通常可以跳过大部分步骤,直接进行最后一步:将 ClickHouse “Endpoint ID” 添加到 ClickHouse 服务允许列表中。 - - ## 本流程的前提条件 {#prerequisites} 在开始之前,需要准备: @@ -85,8 +80,6 @@ ClickHouse 会尝试对您的服务进行分组,以便在同一 AWS 区域内 1. AWS 账户。 1. 具有在 ClickHouse 端创建和管理私有端点所需权限的 [ClickHouse API key](/cloud/manage/openapi)。 - - ## 步骤 {#steps} 按照以下步骤,通过 AWS PrivateLink 连接您的 ClickHouse Cloud 服务。 @@ -179,7 +172,6 @@ jq .result #### 选项 2:AWS CloudFormation {#option-2-aws-cloudformation} - 接下来,需要使用在[获取 Endpoint "Service name"](#obtain-endpoint-service-info) 步骤中获得的 `Service name`console 或 `endpointServiceId`API 来创建 VPC Endpoint。 请确保使用正确的子网 ID、安全组和 VPC ID。 @@ -282,7 +274,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" \ -d @pl_config.json | jq ``` - 要从允许列表中移除某个端点 ID: ```bash @@ -344,7 +335,6 @@ jq .result 在此示例中,使用 `privateDnsHostname` 主机名发起的连接将通过 PrivateLink 路由,而使用 `endpointServiceId` 主机名发起的连接将通过 Internet 路由。 - ## 故障排查 {#troubleshooting} ### 在同一区域中使用多个 PrivateLink {#multiple-privatelinks-in-one-region} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md index 88b72301881..817f18bb9f8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/03_gcp-private-service-connect.md @@ -21,7 +21,6 @@ import gcp_pe_remove_private_endpoint from '@site/static/images/cloud/security/g import gcp_privatelink_pe_filters from 
'@site/static/images/cloud/security/gcp-privatelink-pe-filters.png'; import gcp_privatelink_pe_dns from '@site/static/images/cloud/security/gcp-privatelink-pe-dns.png'; - # Private Service Connect {#private-service-connect} @@ -50,16 +49,12 @@ Private Service Connect(PSC)是 Google Cloud 的一项网络功能,允许 1. 将“Endpoint ID”添加到 ClickHouse Cloud 服务。 1. 将“Endpoint ID”添加到 ClickHouse 服务允许列表。 - - ## 注意 {#attention} ClickHouse 会尝试对您的服务进行分组,以便在同一 GCP 区域内复用同一个已发布的 [PSC 端点](https://cloud.google.com/vpc/docs/private-service-connect)。但是,这种分组并不能得到保证,尤其是在您将服务分散到多个 ClickHouse 组织时。 如果您已经在 ClickHouse 组织中为其他服务配置了 PSC,得益于这种分组,通常可以跳过大部分步骤,直接进入最后一步:[将“Endpoint ID”添加到 ClickHouse 服务允许列表](#add-endpoint-id-to-services-allow-list)。 可以在[这里](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/)找到 Terraform 示例。 - - ## 开始之前 {#before-you-get-started} :::note @@ -98,7 +93,6 @@ jq ".result[] | select (.region==\"${REGION:?}\" and .provider==\"${PROVIDER:?}\ * 你可以[创建一个新密钥](/cloud/manage/openapi)或使用现有密钥。 ::: - ## 获取用于 Private Service Connect 的 GCP 服务附件和 DNS 名称 {#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect} ### 选项 1:ClickHouse Cloud 控制台 {#option-1-clickhouse-cloud-console} @@ -125,7 +119,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud 请记录下 `endpointServiceId` 和 `privateDnsHostname`,在接下来的步骤中你将会用到它们。 - ## 创建服务端点 {#create-service-endpoint} :::important @@ -218,7 +211,6 @@ output "psc_connection_id" { 使用在[获取用于 Private Service Connect 的 GCP 服务附件](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect)步骤中获得的 `endpointServiceId`API 或 `Service name`console ::: - ## 为端点设置私有 DNS 名称 {#set-private-dns-name-for-endpoint} :::note @@ -227,8 +219,6 @@ output "psc_connection_id" { 您需要将在[获取用于 Private Service Connect 的 GCP 服务附件和 DNS 名称](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect)步骤中得到的 DNS 名称指向 GCP Private Service Connect 端点的 IP 地址。这样可以确保您的 VPC/网络中的服务和组件能够正确解析该地址。 - - ## 将 Endpoint ID 添加到 
ClickHouse Cloud 组织 {#add-endpoint-id-to-clickhouse-cloud-organization} ### 选项 1:ClickHouse Cloud 控制台 {#option-1-clickhouse-cloud-console-1} @@ -288,7 +278,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}" -d @pl_config_org.json ``` - ## 将 "Endpoint ID" 添加到 ClickHouse 服务允许列表 {#add-endpoint-id-to-services-allow-list} 您需要为每个需要通过 Private Service Connect 访问的实例,将一个 Endpoint ID 添加到其允许列表中。 @@ -343,7 +332,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" -d @pl_config.json | jq ``` - ## 使用 Private Service Connect 访问实例 {#accessing-instance-using-private-service-connect} 每个启用了 Private Link 的服务都有一个公共端点和私有端点。要通过 Private Link 进行连接,您需要使用私有端点,该端点对应于在[获取用于 Private Service Connect 的 GCP 服务附件](#obtain-gcp-service-attachment-and-dns-name-for-private-service-connect)中获得的 `privateDnsHostname`。 @@ -371,7 +359,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud 在此示例中,对主机名 `xxxxxxx.yy-xxxxN.p.gcp.clickhouse.cloud` 的连接会被路由到 Private Service Connect。与此同时,`xxxxxxx.yy-xxxxN.gcp.clickhouse.cloud` 的连接则会通过互联网进行路由。 - ## 故障排查 {#troubleshooting} ### 测试 DNS 设置 {#test-dns-setup} @@ -404,7 +391,6 @@ DNS_NAME - 使用在[获取用于 Private Service Connect 的 GCP service at openssl s_client -connect ${DNS_NAME}:9440 ``` - ```response # highlight-next-line {#highlight-next-line} CONNECTED(00000003) @@ -447,7 +433,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X GET -H "Content-Type: appl 要实现这一点,请配置 GCP VPC 防火墙规则,允许从 ClickHouse Cloud 访问你的内部/私有数据库服务。请查看 [ClickHouse Cloud 各区域的默认出站 IP 地址](/manage/data-sources/cloud-endpoints-api),以及[可用的静态 IP 地址](https://api.clickhouse.cloud/static-ips.json)。 - ## 更多信息 {#more-information} 如需了解更多详细信息,请参阅 
[cloud.google.com/vpc/docs/configure-private-service-connect-services](https://cloud.google.com/vpc/docs/configure-private-service-connect-services)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md index f07e2beb5b6..14000c4475b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/02_connectivity/private_networking/04_azure-privatelink.md @@ -27,7 +27,6 @@ import azure_pe_remove_private_endpoint from '@site/static/images/cloud/security import azure_privatelink_pe_filter from '@site/static/images/cloud/security/azure-privatelink-pe-filter.png'; import azure_privatelink_pe_dns from '@site/static/images/cloud/security/azure-privatelink-pe-dns.png'; - # Azure Private Link {#azure-private-link} @@ -54,16 +53,12 @@ Azure 通过 Private Link 支持跨区域连接。这使您能够在部署了 Cl ClickHouse Cloud Azure Private Link 已从使用 resourceGUID 切换为使用 Resource ID 筛选器。您仍然可以使用 resourceGUID(其具有向后兼容性),但我们建议切换到 Resource ID 筛选器。要迁移,只需使用 Resource ID 创建新的终结点,将其关联到服务,然后移除旧的基于 resourceGUID 的终结点。 ::: - - ## 注意 {#attention} ClickHouse 会尝试对您的服务进行分组,以便在同一 Azure 区域内复用同一个已发布的 [Private Link 服务](https://learn.microsoft.com/en-us/azure/private-link/private-link-service-overview)。但无法保证始终能够实现这种分组,尤其是在您将服务分散到多个 ClickHouse 组织时。 如果您已经在 ClickHouse 组织中的其他服务上配置了 Private Link,那么通常可以利用这一分组跳过大部分步骤,直接进行最后一步:[将 Private Endpoint Resource ID 添加到服务的允许列表](#add-private-endpoint-id-to-services-allow-list)。 您可以在 ClickHouse 的 [Terraform Provider 仓库](https://github.com/ClickHouse/terraform-provider-clickhouse/tree/main/examples/)中找到 Terraform 示例。 - - ## 获取用于 Private Link 的 Azure 连接别名 {#obtain-azure-connection-alias-for-private-link} ### 选项 1:ClickHouse Cloud 控制台 
{#option-1-clickhouse-cloud-console} @@ -109,7 +104,6 @@ curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud 记录下 `endpointServiceId`。您将在下一步中用到它。 - ## 在 Azure 中创建专用终结点 {#create-private-endpoint-in-azure} :::important @@ -216,7 +210,6 @@ resource "azurerm_private_endpoint" "example_clickhouse_cloud" { 专用终结点资源 ID 可在 Azure 门户中查看。打开在上一步中创建的专用终结点,然后单击 **JSON 视图**: - 在 properties 属性中找到 `id` 字段,并复制其值: @@ -229,8 +222,6 @@ resource "azurerm_private_endpoint" "example_clickhouse_cloud" { - - ## 为 Private Link 配置 DNS {#setting-up-dns-for-private-link} 您需要创建一个专用 DNS 区域 (`${location_code}.privatelink.azure.clickhouse.cloud`),并将其关联到您的虚拟网络 (VNet),以便通过 Private Link 访问资源。 @@ -310,7 +301,6 @@ nslookup xxxxxxxxxx.westus3.privatelink.azure.clickhouse.cloud. 地址:10.0.0.4 ``` - ## 将专用终结点资源 ID 添加到你的 ClickHouse Cloud 组织 {#add-the-private-endpoint-id-to-your-clickhouse-cloud-organization} ### 选项 1:ClickHouse Cloud 控制台 {#option-1-clickhouse-cloud-console-1} @@ -379,7 +369,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}" -d @pl_config_org.json ``` - ## 将 Private Endpoint Resource ID 添加到服务的允许列表 {#add-private-endpoint-id-to-services-allow-list} 默认情况下,即使 Private Link 连接已获批准并建立,ClickHouse Cloud 服务也无法通过 Private Link 连接访问。你需要为每个需要通过 Private Link 访问的服务显式添加对应的 Private Endpoint Resource ID。 @@ -443,7 +432,6 @@ EOF curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X PATCH -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" -d @pl_config.json | jq ``` - ## 使用 Private Link 访问 ClickHouse Cloud 服务 {#access-your-clickhouse-cloud-service-using-private-link} 每个启用了 Private Link 的服务都具有一个公共端点和一个私有端点。要通过 Private Link 进行连接,您需要使用私有端点,即从[获取用于 Private Link 的 Azure 连接别名](#obtain-azure-connection-alias-for-private-link)中取得的 `privateDnsHostname`API 或 `DNS name`console。 @@ -486,7 +474,6 @@ curl --silent 
--user "${KEY_ID:?}:${KEY_SECRET:?}" "https://api.clickhouse.cloud 使用 `privateDnsHostname` 通过 Private Link 连接到您的 ClickHouse Cloud 服务。 - ## 故障排除 {#troubleshooting} ### 测试 DNS 配置 {#test-dns-setup} @@ -525,7 +512,6 @@ OpenSSL 应该能够建立连接(在输出中可以看到 CONNECTED)。`errn openssl s_client -connect abcd.westus3.privatelink.azure.clickhouse.cloud:9440 ``` - ```response # highlight-next-line {#highlight-next-line} CONNECTED(00000003) @@ -564,7 +550,6 @@ INSTANCE_ID=<实例 ID> curl --silent --user "${KEY_ID:?}:${KEY_SECRET:?}" -X GET -H "Content-Type: application/json" "https://api.clickhouse.cloud/v1/organizations/${ORG_ID:?}/services/${INSTANCE_ID:?}" | jq .result.privateEndpointIds ``` - ## 更多信息 {#more-information} 如需了解有关 Azure Private Link 的更多信息,请访问 [azure.microsoft.com/en-us/products/private-link](https://azure.microsoft.com/en-us/products/private-link)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md index a8d55015c6f..1cc17aee501 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/04_cmek.md @@ -11,11 +11,8 @@ import Image from '@theme/IdealImage'; import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge' import cmek_performance from '@site/static/images/_snippets/cmek-performance.png'; - # 数据加密 {#data-encryption} - - ## 存储层加密 {#storage-encryption} ClickHouse Cloud 默认启用静态数据加密,使用由云服务提供商管理的 AES-256 密钥。更多信息请参阅: @@ -23,8 +20,6 @@ ClickHouse Cloud 默认启用静态数据加密,使用由云服务提供商管 - [GCP 默认静态数据加密](https://cloud.google.com/docs/security/encryption/default-encryption) - [Azure 存储中静态数据的加密](https://learn.microsoft.com/en-us/azure/storage/common/storage-service-encryption) - - ## 数据库级加密 {#database-encryption} @@ -111,16 +106,12 @@ TDE 必须在创建服务时启用。现有服务在创建后无法再启用加 #### KMS 密钥轮询器 {#kms-key-poller} - - 在使用 CMEK 时,系统每 10 分钟检查一次所提供的 KMS 
密钥是否仍然有效。如果对该 KMS 密钥的访问权限失效,ClickHouse 服务将会停止运行。要恢复服务,请先按照本指南中的步骤恢复对 KMS 密钥的访问,然后重新启动服务。 ### 备份和恢复 {#backup-and-restore} 备份会使用与关联服务相同的密钥进行加密。当你恢复一个已加密的备份时,会创建一个新的加密实例,并使用与原始实例相同的 KMS 密钥。如果需要,你可以在恢复后轮换 KMS 密钥;更多详情请参见 [密钥轮换](#key-rotation)。 - - ## 性能 {#performance} 数据库加密功能使用 ClickHouse 内置的[用于数据加密的虚拟文件系统功能](/operations/storing-data#encrypted-virtual-file-system)来对数据进行加密和保护。该功能使用的算法为 `AES_256_CTR`,预计会根据具体工作负载带来 5–15% 的性能损耗: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md index 44c6a350e8d..1e98cc93d71 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/02_database-audit-log.md @@ -7,8 +7,6 @@ doc_type: 'guide' keywords: ['审计日志', '数据库日志', '合规', '安全', '监控'] --- - - # 数据库审计日志 {#database-audit-log} ClickHouse 默认提供数据库审计日志。本页重点介绍与安全相关的日志。有关系统记录的数据的更多信息,请参阅 [系统表(system tables)](/operations/system-tables/overview) 文档。 @@ -17,8 +15,6 @@ ClickHouse 默认提供数据库审计日志。本页重点介绍与安全相关 相关信息会直接记录到系统表中,默认保留期最长为 30 天。该时长可能更长或更短,并会受到系统合并(merge)频率的影响。用户可以采取额外措施以更长时间存储日志,或将日志导出到安全信息和事件管理(SIEM)系统进行长期存储。详见下文。 ::: - - ## 与安全相关的日志 {#security-relevant-logs} ClickHouse 主要将与安全相关的数据库事件记录在 session 日志和 query 日志中。 @@ -53,13 +49,10 @@ FROM clusterAllReplicas('default', system.query_log) WHERE user=’compromised_account’ ``` - ## 在服务内部保留日志数据 {#reatining-log-data-within-services} 需要更长时间保留或更高日志持久性的客户可以使用物化视图来实现这些目标。有关物化视图的更多信息,包括概念、优势以及实现方式,请参阅我们关于[物化视图](/materialized-views)的视频和文档。 - - ## 导出日志 {#exporting-logs} 可以采用多种与 SIEM 系统兼容的格式,将系统日志写入或导出到存储位置。有关更多信息,请参阅我们的[表函数](/sql-reference/table-functions)文档。最常见的方法包括: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md index 1da27dec0f8..622592f2ac7 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/guides/security/05_audit_logging/03_byoc-security-playbook.md @@ -7,20 +7,14 @@ doc_type: '指南' keywords: ['byoc', 'security', 'playbook', 'best practices', 'compliance'] --- - - # BYOC 安全作战手册 {#byoc-security-playbook} ClickHouse 在“自带云”(Bring Your Own Cloud,BYOC)模式下采用共享责任安全模型。该模型可从我们的信任中心(https://trust.clickhouse.com)下载。以下信息提供给 BYOC 客户,用作识别潜在安全事件的示例。客户应结合自身的安全计划来参考这些信息,以判断是否需要额外的检测和告警。 - - ## 可能已泄露的 ClickHouse 凭证 {#compromised-clickhouse-credentials} 请参阅 [数据库审计日志](/cloud/security/audit-logging/database-audit-log) 文档,了解用于检测基于凭证的攻击和排查恶意活动的查询。 - - ## 应用层拒绝服务攻击 {#application-layer-dos-attack} 发起拒绝服务(DoS)攻击的方法有多种。如果攻击的目标是通过特定 payload 使 ClickHouse 实例崩溃,请将系统恢复到运行状态,或重启系统并限制访问以重新获得控制权。使用以下查询查看 [system.crash_log](/operations/system-tables/crash_log),以获取有关此次攻击的更多信息。 @@ -30,15 +24,12 @@ SELECT * FROM clusterAllReplicas('default',system.crash_log) ``` - ## 遭入侵的 ClickHouse 创建的 AWS 角色 {#compromised-clickhouse-created-aws-roles} ClickHouse 使用预先创建的角色来实现系统功能。本节假设客户在 AWS 上启用了 CloudTrail,并且能够访问 CloudTrail 日志。 如果某个安全事件可能是由于角色被入侵导致的,请在 CloudTrail 和 CloudWatch 中审阅与 ClickHouse IAM 角色及其相关操作有关的活动。有关 IAM 角色列表,请参考作为部署设置一部分提供的 [CloudFormation](/cloud/reference/byoc/onboarding/aws#cloudformation-iam-roles) 堆栈或 Terraform 模块。 - - ## 未经授权访问 EKS 集群 {#unauthorized-access-eks-cluster} ClickHouse BYOC 在 EKS 中运行。本节假设客户在 AWS 中使用 CloudTrail 和 CloudWatch,并且可以访问相应日志。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md index 30fe9dd76c8..a9c8c088745 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md @@ -15,7 +15,6 @@ import rta_3 from '@site/static/images/cloud/onboard/discover/use_cases/3_rta.pn - - ## ClickHouse Cloud 目标 {#clickhouse-cloud-destination} 请参阅 Fivetran 官网上的官方文档: @@ -51,8 +46,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - [ClickHouse 目标概览](https://fivetran.com/docs/destinations/clickhouse) - [ClickHouse 目标设置指南](https://fivetran.com/docs/destinations/clickhouse/setup-guide) - - ## 联系我们 {#contact-us} 如果您有任何问题,或希望提出新功能需求,请提交[支持工单](/about-us/support)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md index e05292332fa..c35ff80ed4e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/etl-tools/nifi-and-clickhouse.md @@ -30,7 +30,6 @@ import nifi14 from '@site/static/images/integrations/data-ingestion/etl-tools/ni import nifi15 from '@site/static/images/integrations/data-ingestion/etl-tools/nifi_15.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 Apache NiFi 连接到 ClickHouse {#connect-apache-nifi-to-clickhouse} @@ -42,27 +41,20 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - ## 准备连接信息 {#1-gather-your-connection-details} - - ## 下载并运行 Apache NiFi {#2-download-and-run-apache-nifi} 对于全新部署,请从 https://nifi.apache.org/download.html 下载二进制文件,并通过运行 `./bin/nifi.sh start` 来启动 - - ## 下载 ClickHouse JDBC 驱动程序 {#3-download-the-clickhouse-jdbc-driver} 1. 访问 GitHub 上的 ClickHouse JDBC 驱动程序发布页面,并找到最新的 JDBC 发行版本 2. 
在该版本的发布内容中,点击 “Show all xx assets”,然后查找文件名中包含关键字 “shaded” 或 “all” 的 JAR 文件,例如 `clickhouse-jdbc-0.5.0-all.jar` 3. 将该 JAR 文件放置在 Apache NiFi 可访问的文件夹中,并记录其绝对路径 - - ## 添加 `DBCPConnectionPool` Controller Service 并配置其属性 {#4-add-dbcpconnectionpool-controller-service-and-configure-its-properties} 1. 要在 Apache NiFi 中配置 Controller Service,点击“齿轮”按钮打开 NiFi Flow Configuration 页面 @@ -107,8 +99,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 使用 `ExecuteSQL` 处理器从表中读取数据 {#5-read-from-a-table-using-the-executesql-processor} 1. 添加一个 `ExecuteSQL` 处理器,并配置相应的上游和下游处理器 @@ -134,8 +124,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 使用 `MergeRecord` 和 `PutDatabaseRecord` 处理器写入表 {#6-write-to-a-table-using-mergerecord-and-putdatabaserecord-processor} 1. 要在单次插入中写入多行数据,首先需要将多条记录合并为单条记录。可以使用 `MergeRecord` 处理器来完成此操作 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md index bf23b97b44b..d85588147c8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/gcs/index.md @@ -13,7 +13,6 @@ import Image from '@theme/IdealImage'; import GCS_examine_bucket_1 from '@site/static/images/integrations/data-ingestion/s3/GCS-examine-bucket-1.png'; import GCS_examine_bucket_2 from '@site/static/images/integrations/data-ingestion/s3/GCS-examine-bucket-2.png'; - # 将 Google Cloud Storage 与 ClickHouse 集成 {#integrate-google-cloud-storage-with-clickhouse} :::note @@ -22,8 +21,6 @@ import GCS_examine_bucket_2 from '@site/static/images/integrations/data-ingestio 我们认识到,对于希望实现存储与计算分离的用户而言,GCS 是一个颇具吸引力的存储解决方案。为此,ClickHouse 支持在 MergeTree 引擎中使用 GCS 作为底层存储。这使用户能够同时利用 GCS 的可扩展性和成本优势,以及 MergeTree 引擎的写入和查询性能。 - - ## 基于 GCS 的 MergeTree {#gcs-backed-mergetree} ### 创建磁盘 {#creating-a-disk} @@ -140,7 
+137,6 @@ import GCS_examine_bucket_2 from '@site/static/images/integrations/data-ingestio 与此磁盘配置相关的设置完整列表可在[此处](/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3)中找到。 - ### 创建表 {#creating-a-table} 假设你已经将磁盘配置为使用具有写权限的存储桶,现在应该可以创建如下示例中的表。为简洁起见,我们只使用 NYC taxi 数据集中的部分列,并将数据直接流式写入由 GCS 作为后端存储的表中: @@ -189,7 +185,6 @@ SELECT passenger_count, avg(tip_amount) AS avg_tip, avg(total_amount) AS avg_amo 有关线程调优的更多信息,请参阅[性能优化](../s3/index.md#s3-optimizing-performance)。 - ## 使用 Google Cloud Storage (GCS) {#gcs-multi-region} :::tip @@ -257,8 +252,6 @@ ClickHouse Keeper 至少需要两个节点才能工作,因此为了实现高 - 将文件复制到相应位置(在每个 Keeper 服务器上为 `/etc/clickhouse-keeper/keeper_config.xml`) - 在每台机器上根据其在 `raft_configuration` 中的条目序号编辑对应的 `server_id` - - ```xml title=/etc/clickhouse-keeper/keeper_config.xml @@ -352,7 +345,6 @@ ClickHouse Keeper 至少需要两个节点才能工作,因此为了实现高 * 根据你的主机名编辑该文件,并确保这些主机名可以从 ClickHouse 服务器节点正确解析 - ```xml title=/etc/clickhouse-server/config.d/remote-servers.xml @@ -452,7 +444,6 @@ sudo systemctl status clickhouse-keeper 通过 `netcat` 向 ClickHouse Keeper 发送命令。例如,`mntr` 会返回 ClickHouse Keeper 集群的状态。如果你在每个 Keeper 节点上执行该命令,你会看到其中一个是 leader,另外两个是 follower: - ```bash echo mntr | nc localhost 9181 ``` @@ -561,7 +552,6 @@ is_broken: 0 cache_path: ``` - 3 行数据,耗时 0.002 秒。 ```` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md index 1e20936a9ce..de8343eda3a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/dataflow.md @@ -10,7 +10,6 @@ keywords: ['Google Dataflow ClickHouse', 'Dataflow ClickHouse integration', 'Apa import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 将 Google Dataflow 与 ClickHouse 集成 
{#integrating-google-dataflow-with-clickhouse} @@ -21,8 +20,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - [Java 运行器](#1-java-runner) - [预定义模板](#2-predefined-templates) - - ## Java runner {#1-java-runner} [Java runner](./java-runner) 允许用户使用集成了 `ClickHouseIO` 的 Apache Beam SDK 来实现自定义 Dataflow 管道。该方法为 pipeline 逻辑提供了高度的灵活性和控制力,使用户能够根据特定需求定制 ETL 流程。 不过,该选项需要具备 Java 编程知识,并且熟悉 Apache Beam 框架。 @@ -32,8 +29,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - 适用于复杂或高级用例。 - 需要编写代码并理解 Beam API。 - - ## 预定义模板 {#2-predefined-templates} ClickHouse 提供了针对特定使用场景设计的[预定义模板](./templates),例如将数据从 BigQuery 导入到 ClickHouse。这些模板开箱即用,可简化集成过程,非常适合偏好无代码解决方案的用户。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md index efd68be8a6e..658561cb35a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/java-runner.md @@ -10,15 +10,12 @@ keywords: ['Dataflow Java Runner', 'Google Dataflow ClickHouse', 'Apache Beam Ja import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Dataflow Java 运行器 {#dataflow-java-runner} Dataflow Java 运行器使你能够在 Google Cloud 的 Dataflow 服务上执行自定义 Apache Beam 管道。此方式提供了最大的灵活性,非常适合高级 ETL 工作流。 - - ## 工作原理 {#how-it-works} 1. 
**Pipeline 实现** diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md index 12e84e30270..e455bf5dd4e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates.md @@ -10,30 +10,23 @@ keywords: ['Google Dataflow', 'GCP', '数据管道', '模板', '批处理'] import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Google Dataflow 模板 {#google-dataflow-templates} Google Dataflow 模板提供了一种便捷方式,使您无需编写自定义代码即可运行预构建、开箱即用的数据管道。这些模板旨在简化常见的数据处理任务,基于 [Apache Beam](https://beam.apache.org/) 构建,并通过 `ClickHouseIO` 等连接器与 ClickHouse 数据库实现无缝集成。通过在 Google Dataflow 上运行这些模板,您可以以最小的投入实现高度可扩展的分布式数据处理。 - - ## 为什么使用 Dataflow 模板? {#why-use-dataflow-templates} - **易用性**:模板通过提供针对特定用例预配置的管道,免去了编写自定义代码的工作。 - **可扩展性**:Dataflow 确保您的管道能够高效扩展,通过分布式处理来应对海量数据。 - **成本效益**:只需为实际消耗的资源付费,并且可以优化管道的执行成本。 - - ## 如何运行 Dataflow 模板 {#how-to-run-dataflow-templates} 截至目前,可以通过 Google Cloud 控制台、CLI 或 Dataflow REST API 使用 ClickHouse 官方模板。 有关详细的分步操作说明,请参阅 [Google Dataflow Run Pipeline From a Template Guide](https://cloud.google.com/dataflow/docs/templates/provided-templates)。 - - ## ClickHouse 模板列表 {#list-of-clickhouse-templates} * [BigQuery 到 ClickHouse](./templates/bigquery-to-clickhouse) * [GCS 到 ClickHouse](https://github.com/ClickHouse/DataflowTemplates/issues/3)(即将推出!) 
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md index 80defca71f1..bb63649d5dd 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/google-dataflow/templates/bigquery-to-clickhouse.md @@ -18,7 +18,6 @@ import dataflow_extended_template_form from '@site/static/images/integrations/da import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # Dataflow BigQuery 到 ClickHouse 模板 {#dataflow-bigquery-to-clickhouse-template} BigQuery 到 ClickHouse 模板是一个批处理管道,用于将 BigQuery 表中的数据摄取到 ClickHouse 表中。 @@ -26,16 +25,12 @@ BigQuery 到 ClickHouse 模板是一个批处理管道,用于将 BigQuery 表 - - ## 管道要求 {#pipeline-requirements} * 源 BigQuery 表必须已存在。 * 目标 ClickHouse 表必须已存在。 * 必须能从 Dataflow 工作器实例访问 ClickHouse 主机。 - - ## 模板参数 {#template-parameters}
@@ -60,14 +55,10 @@ BigQuery 到 ClickHouse 模板是一个批处理管道,用于将 BigQuery 表 | `queryTempDataset` | 设置一个已存在的数据集,用于创建存储查询结果的临时表。例如:`temp_dataset`。 | | | | `KMSEncryptionKey` | 当使用 query 作为数据源从 BigQuery 读取时,使用此 Cloud KMS 密钥对创建的任何临时表进行加密。例如:`projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`。 | | | - - :::note 所有 `ClickHouseIO` 参数的默认值可以在 [`ClickHouseIO` Apache Beam Connector](/integrations/apache-beam#clickhouseiowrite-parameters) 中找到。 ::: - - ## 源表与目标表的模式 {#source-and-target-tables-schema} 为了高效地将 BigQuery 数据集加载到 ClickHouse 中,流水线会执行列推断流程,该流程包含以下阶段: @@ -81,8 +72,6 @@ BigQuery 到 ClickHouse 模板是一个批处理管道,用于将 BigQuery 表 因此,你的 BigQuery 数据集(无论是表还是查询)必须与 ClickHouse 目标表具有完全相同的列名。 ::: - - ## 数据类型映射 {#data-types-mapping} BigQuery 类型会根据 ClickHouse 表的定义进行转换。因此,上表列出了在目标 ClickHouse 表中(针对给定的 BigQuery 表/查询)推荐使用的映射关系: @@ -97,8 +86,6 @@ BigQuery 类型会根据 ClickHouse 表的定义进行转换。因此,上表 | [**Numeric - Integer Types**](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric_types) | [**Integer Types**](../../../sql-reference/data-types/int-uint) | 在 BigQuery 中,所有 Int 类型(`INT`、`SMALLINT`、`INTEGER`、`BIGINT`、`TINYINT`、`BYTEINT`)都是 `INT64` 的别名。建议在 ClickHouse 中为列设置合适的整数宽度,因为模板会根据定义的列类型(`Int8`、`Int16`、`Int32`、`Int64`)来转换列。如果在 ClickHouse 表中使用了无符号 Int 类型(`UInt8`、`UInt16`、`UInt32`、`UInt64`),模板也会对其进行转换。 | | [**Numeric - Float Types**](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric_types) | [**Float Types**](../../../sql-reference/data-types/float) | 支持的 ClickHouse 类型:`Float32` 和 `Float64` | - - ## 运行模板 {#running-the-template} BigQuery 到 ClickHouse 模板可以通过 Google Cloud CLI 执行。 @@ -185,12 +172,8 @@ job: 在 Google Cloud 控制台中导航到 [Dataflow Jobs 选项卡](https://console.cloud.google.com/dataflow/jobs),以监控作业状态。你可以查看作业详情,包括进度和任何错误信息: - - - - ## 疑难解答 {#troubleshooting} ### 内存总量限制超出错误(代码 241){#code-241-dbexception-memory-limit-total-exceeded} @@ -200,8 +183,6 @@ job: * 增加实例资源:将 ClickHouse 服务器升级为具有更多内存的更大实例,以应对数据处理负载。 * 减小批大小:在 Dataflow 
作业配置中调整批大小,以较小的数据块发送到 ClickHouse,从而降低每个批次的内存消耗。这些更改有助于在数据摄取过程中平衡资源使用。 - - ## 模板源代码 {#template-source-code} 该模板的源代码可在 ClickHouse 的 [DataflowTemplates](https://github.com/ClickHouse/DataflowTemplates) 派生仓库(fork)中获取。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md index b74dde6ea29..365a8d31e7f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/confluent-cloud.md @@ -15,7 +15,6 @@ integration: import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import Image from '@theme/IdealImage'; - # 将 Confluent Cloud 与 ClickHouse 集成 {#integrating-confluent-cloud-with-clickhouse}
@@ -30,15 +29,11 @@ import Image from '@theme/IdealImage';
- - ## 前提条件 {#prerequisites} 我们假设您已经熟悉以下内容: * [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md) * Confluent Cloud - - ## ClickHouse 与 Confluent Cloud 的官方 Kafka 连接器 {#the-official-kafka-connector-from-clickhouse-with-confluent-cloud} #### 创建 Topic {#create-a-topic} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md index 002b5c1b28b..72d2dbba98b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/confluent/custom-connector.md @@ -12,7 +12,6 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import AddCustomConnectorPlugin from '@site/static/images/integrations/data-ingestion/kafka/confluent/AddCustomConnectorPlugin.png'; - # 将 Confluent 平台与 ClickHouse 集成 {#integrating-confluent-platform-with-clickhouse}
@@ -27,15 +26,11 @@ import AddCustomConnectorPlugin from '@site/static/images/integrations/data-inge
- - ## 前提条件 {#prerequisites} 假定您已经熟悉: * [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md) * Confluent Platform 和[自定义连接器(Custom Connectors)](https://docs.confluent.io/cloud/current/connectors/bring-your-connector/overview.html)。 - - ## ClickHouse 官方 Kafka 连接器(适用于 Confluent Platform) {#the-official-kafka-connector-from-clickhouse-with-confluent-platform} ### 在 Confluent Platform 上安装 {#installing-on-confluent-platform} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md index 204730c8902..f95cf9cbfec 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/index.md @@ -13,14 +13,10 @@ integration: - category: 'data_ingestion' --- - - # 在 ClickHouse 中集成 Kafka {#integrating-kafka-with-clickhouse} [Apache Kafka](https://kafka.apache.org/) 是一个开源的分布式事件流平台,被成千上万的公司用于高性能数据管道、流式分析、数据集成以及关键业务应用。ClickHouse 提供多种方式来**从** Kafka 及其他兼容 Kafka API 的代理(如 Redpanda、Amazon MSK)读取数据,并**向其写入**数据。 - - ## 可用选项 {#available-options} 为你的用例选择合适的选项取决于多个因素,包括 ClickHouse 部署类型、数据流向以及运维需求。 @@ -91,8 +87,6 @@ Kafka Connect 是一个开源框架,作为集中式数据枢纽,用于在 Ka #### 入门 {#kafka-table-engine-getting-started} - - 要开始使用 Kafka 表引擎,请参阅[参考文档](./kafka-table-engine.md)。 ### 选择选项 {#choosing-an-option} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md index 35441e84d70..37718f161f2 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-clickhouse-connect-sink.md @@ -10,7 +10,6 @@ keywords: 
['ClickHouse Kafka Connect Sink', 'Kafka 连接器 ClickHouse', '官 import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # ClickHouse Kafka Connect Sink {#clickhouse-kafka-connect-sink} :::note @@ -92,7 +91,6 @@ schemas.enable=false 完整的配置选项表如下: - | Property Name | Description | Default Value | |-------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| | `hostname` (Required) | 服务器的主机名或 IP 地址 | N/A | @@ -124,8 +122,6 @@ schemas.enable=false ### 目标表 {#target-tables} - - ClickHouse Connect Sink 从 Kafka 主题读取消息,并将其写入相应的表中。它只会向已存在的表写入数据。请确保在开始向目标表插入数据之前,已经在 ClickHouse 中创建了具有合适 schema 的目标表。 每个主题在 ClickHouse 中都需要一个专用的目标表。目标表名必须与源主题名一致。 @@ -205,7 +201,6 @@ ClickHouse Connect Sink 从 Kafka 主题读取消息,并将其写入相应的 该连接器可以从多个主题(topic)中消费数据 - ```json { "name": "clickhouse-connect", @@ -340,7 +335,6 @@ com.clickhouse:type=ClickHouseKafkaConnector,name=SinkTask{id} * `byte-rate`: 每秒发送字节的平均速率 * `compression-rate`: 实际达到的压缩率 - **分区级指标:** - `records-sent-total`: 发送到该分区的记录总数 - `bytes-sent-total`: 发送到该分区的字节总数 @@ -430,8 +424,6 @@ com.clickhouse:type=ClickHouseKafkaConnector,name=SinkTask{id} - 默认的连接器设置已经满足你的吞吐量需求 - 你的 ClickHouse 集群可以轻松处理当前的写入负载 - - #### 理解数据流 {#understanding-the-data-flow} 在进行调优之前,首先需要理解数据在 connector 中的流转方式: @@ -467,24 +459,17 @@ Connector 从框架的缓冲区轮询消息: 为了在 ClickHouse 上获得最佳性能,应尽量使用较大的批量: - - ```properties # 增加每次轮询的记录数量 {#increase-the-number-of-records-per-poll} consumer.max.poll.records=5000 ``` - # 增大分区拉取大小上限(5 MB) {#increase-the-partition-fetch-size-5-mb} consumer.max.partition.fetch.bytes=5242880 - - # 可选:将最小拉取大小增加到 1 MB,以便等待更多数据 {#optional-increase-minimum-fetch-size-to-wait-for-more-data-1-mb} consumer.fetch.min.bytes=1048576 - - # 
可选:如果对延迟非常敏感,可缩短等待时间 {#optional-reduce-wait-time-if-latency-is-critical} consumer.fetch.max.wait.ms=300 @@ -581,7 +566,6 @@ consumer.fetch.max.wait.ms=300 在使用 `exactlyOnce=true` 进行异步插入时: - ```json { "config": { @@ -685,7 +669,6 @@ SETTINGS **常见性能问题**: - | 症状 | 可能原因 | 解决方案 | | ------------------- | --------- | ------------------------------------------------- | | 消费者延迟较高 | 批次过小 | 增加 `max.poll.records`,启用异步写入 | @@ -783,7 +766,6 @@ SETTINGS * `UnknownHostException` - 在无法解析主机名时抛出。 * `IOException` - 在出现网络问题时抛出。 - #### “所有数据都是空值/0” {#all-my-data-is-blankzeroes} 很可能是你数据中的字段与表中的字段不匹配——这在使用 CDC(以及 Debezium 格式)时尤其常见。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md index 4dccdf2072d..7bff02595ed 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-connect-jdbc.md @@ -10,7 +10,6 @@ keywords: ['kafka', 'kafka connect', 'jdbc', '集成', '数据管道'] import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # JDBC connector {#jdbc-connector} :::note @@ -54,8 +53,6 @@ JDBC Connector 基于 [Confluent Community License](https://www.confluent.io/con 下面这些参数与在 ClickHouse 中使用 JDBC Connector 相关。完整的参数列表参见[这里](https://docs.confluent.io/kafka-connect-jdbc/current/sink-connector/index.html): - - * `_connection.url_` - 应采用 `jdbc:clickhouse://<clickhouse host>:<clickhouse http port>/<target database>` 的形式 * `connection.user` - 对目标数据库具有写权限的用户 * `table.name.format`- 用于插入数据的 ClickHouse 表。该表必须已存在。 @@ -84,8 +81,6 @@ JDBC Connector 基于 [Confluent Community License](https://www.confluent.io/con 请确保该表已创建;如果之前的示例中已存在,则先将其删除。下面展示了一个与精简版 GitHub 数据集兼容的示例。请注意其中不包含当前尚不支持的 Array 或 Map 类型: - - ```sql CREATE TABLE github ( @@ 
-150,7 +145,6 @@ SELECT count() FROM default.github; ### 推荐阅读 {#recommended-further-reading} - * [Kafka Sink 配置参数](https://docs.confluent.io/kafka-connect-jdbc/current/sink-connector/sink_config_options.html#sink-config-options) * [Kafka Connect 深入解析:JDBC Source Connector](https://www.confluent.io/blog/kafka-connect-deep-dive-jdbc-source-connector) * [Kafka Connect JDBC Sink 深入解析:主键处理](https://rmoff.net/2021/03/12/kafka-connect-jdbc-sink-deep-dive-working-with-primary-keys/) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md index 09ee52878c8..76da1c8ed5d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/kafka-table-engine-named-collections.md @@ -6,12 +6,8 @@ slug: /integrations/data-ingestion/kafka/kafka-table-engine-named-collections doc_type: 'guide' --- - - # 通过命名集合集成 ClickHouse 与 Kafka {#integrating-clickhouse-with-kafka-using-named-collections} - - ## 介绍 {#introduction} 在本指南中,我们将介绍如何使用命名集合将 ClickHouse 连接到 Kafka。使用命名集合的配置文件具有以下优势: @@ -21,8 +17,6 @@ doc_type: 'guide' 本指南已在 Apache Kafka 3.4.1 和 ClickHouse 24.5.1 上完成测试。 - - ## 前提假设 {#assumptions} 本文档假定已具备: @@ -30,8 +24,6 @@ doc_type: 'guide' 2. 一个已部署并正在运行的 ClickHouse 集群。 3. 具备 SQL 基础,并熟悉 ClickHouse 和 Kafka 的基本配置。 - - ## 先决条件 {#prerequisites} 确保负责创建该具名集合的用户具备必要的访问权限: @@ -45,7 +37,6 @@ doc_type: 'guide' 有关如何启用访问控制的更多详情,请参阅[用户管理指南](./../../../guides/sre/user-management/index.md)。 - ## 配置 {#configuration} 在 ClickHouse 的 `config.xml` 文件中添加以下配置段: @@ -106,7 +97,6 @@ doc_type: 'guide' 3. `` 内的部分包含额外的 Kafka 配置选项。更多可用选项请参阅 [librdkafka 配置说明](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md)。 4. 
本示例使用 `SASL_SSL` 安全协议和 `PLAIN` 机制。请根据实际的 Kafka 集群配置调整这些设置。 - ## 创建表和数据库 {#creating-tables-and-databases} 在你的 ClickHouse 集群上创建所需的数据库和表。如果你以单节点方式运行 ClickHouse,请省略 SQL 命令中的集群(cluster)部分,并使用除 `ReplicatedMergeTree` 之外的其他任意引擎。 @@ -193,7 +183,6 @@ SELECT FROM second_kafka_table; ``` - ## 验证设置 {#verifying-the-setup} 现在你应该可以在 Kafka 集群上看到相应的 consumer group(消费者组): diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md index 22c8763ac2e..a29f5883d4e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/kafka/msk/index.md @@ -30,14 +30,12 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr > 注意:视频中展示的策略较为宽松,仅用于快速上手。请参阅下文基于最小权限原则的 IAM 指南。 - - ## 前提条件 {#prerequisites} -我们假定: -* 你已经熟悉 [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md)、Amazon MSK 和 MSK Connectors。我们推荐阅读 Amazon MSK 的[入门指南](https://docs.aws.amazon.com/msk/latest/developerguide/getting-started.html)和 [MSK Connect 指南](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect.html)。 -* MSK broker 已配置为可通过公网访问。请参阅《开发者指南》中 [Public Access](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html) 章节。 +我们假定: +* 你已经熟悉 [ClickHouse Connector Sink](../kafka-clickhouse-connect-sink.md)。 +* 你已经熟悉 Amazon MSK 和 MSK Connectors。我们推荐阅读 Amazon MSK 的[入门指南](https://docs.aws.amazon.com/msk/latest/developerguide/getting-started.html)和 [MSK Connect 指南](https://docs.aws.amazon.com/msk/latest/developerguide/msk-connect.html)。 ## ClickHouse 官方 Kafka 连接器(适用于 Amazon MSK) {#the-official-kafka-connector-from-clickhouse-with-amazon-msk} @@ -184,4 +182,4 @@ consumer.max.partition.fetch.bytes=1048576 1. **验证连通性(快速检查清单):** 1. 在 connector 运行环境中,解析 MSK bootstrap DNS,并通过 TLS 连接到 broker 端口。 1. 
在端口 9440 与 ClickHouse 建立 TLS 连接(或使用 8443 进行 HTTPS)。 - 1. 如果使用 AWS 服务(Glue/Secrets Manager),允许对这些端点的出口访问。 + 1. 如果使用 AWS 服务(Glue/Secrets Manager),允许对这些端点的出口访问。 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md index 1c7c5b8d175..43add58bfae 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-ingestion/s3/performance.md @@ -25,7 +25,6 @@ import HardwareSize from '@site/static/images/integrations/data-ingestion/s3/har 在通过调优线程数和块大小来提升插入性能之前,我们建议用户先了解 S3 插入的工作机制。如果你已经熟悉插入机制,或者只想快速获得一些调优建议,可以直接跳转到下面的[示例](/integrations/s3/performance#example-dataset)部分。 - ## 插入机制(单节点) {#insert-mechanics-single-node} 在硬件配置之外,还有两个主要因素会影响 ClickHouse 单节点数据插入机制的性能和资源使用:**插入块大小** 和 **插入并行度**。 @@ -81,7 +80,6 @@ ClickHouse 会持续[合并 part](https://clickhouse.com/blog/asynchronous-data- ② 将内存中已加载的块合并成一个更大的块。 ``` - ③ 将合并后的数据块写入磁盘中的一个新的 part。 回到 ① @@ -118,13 +116,10 @@ ClickHouse 服务器可以并行处理和插入数据。插入并行性级别会 对于 s3 函数和表,单个文件是否并行下载由 [max_download_threads](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_download_threads) 和 [max_download_buffer_size](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_download_buffer_size) 的取值决定。只有当文件大小大于 `2 * max_download_buffer_size` 时,文件才会被并行下载。默认情况下,`max_download_buffer_size` 设置为 10MiB。在某些情况下,可以放心地将该缓冲区大小增大到 50 MB(`max_download_buffer_size=52428800`),以确保每个文件由单个线程下载。这样可以减少每个线程发起 S3 调用所花费的时间,从而降低 S3 等待时间。此外,对于过小而不适合并行读取的文件,为了提高吞吐量,ClickHouse 会通过异步预读此类文件来自动预取数据。 - ## 性能衡量 {#measuring-performance} 在以下两种场景下,都需要对使用 S3 表函数的查询进行性能优化:一是数据不搬移、直接对其运行查询的场景,即仅使用 ClickHouse 计算资源、数据保持在 S3 中并保留原始格式的临时(即席)查询;二是将来自 S3 的数据插入到 ClickHouse MergeTree 表引擎中的场景。除非特别说明,以下建议适用于这两种场景。 - - ## 硬件规模的影响 
{#impact-of-hardware-size} @@ -137,14 +132,10 @@ ClickHouse 服务器可以并行处理和插入数据。插入并行性级别会 从而影响整体的摄取吞吐量。 - - ## 区域本地性 {#region-locality} 请确保你的 bucket 与 ClickHouse 实例位于同一地域(region)。这个简单的优化可以显著提升吞吐性能,尤其是在你将 ClickHouse 实例部署在 AWS 基础设施上时。 - - ## 格式 {#formats} ClickHouse 可以使用 `s3` 函数和 `S3` 引擎,以[受支持的格式](/interfaces/formats#formats-overview)读取存储在 S3 存储桶中的文件。如果是直接读取原始文件,这些格式各有一些明显优势: @@ -155,8 +146,6 @@ ClickHouse 可以使用 `s3` 函数和 `S3` 引擎,以[受支持的格式](/in * 每种压缩格式都有其优缺点,通常在压缩率与速度之间权衡,并分别偏向压缩或解压缩方向的性能。如果对 CSV 或 TSV 等原始文件进行压缩,lz4 提供最快的解压缩性能,但牺牲了压缩率。Gzip 通常能获得更好的压缩率,但读取速度会略慢。Xz 在这方面更进一步,通常提供最佳压缩率,但压缩和解压缩性能最慢。如果是导出数据,Gz 和 lz4 的压缩速度相近。需要结合你的网络连接速度进行权衡。任何来自更快压缩或解压缩的收益,都可能轻易被到 S3 存储桶的较慢网络连接所抵消。 * 对于 Native 或 Parquet 等格式,通常不值得再引入额外的压缩开销。数据大小的节省往往非常有限,因为这些格式本身已经非常紧凑。花在压缩和解压缩上的时间很难抵消网络传输时间——尤其是考虑到 S3 在全球范围内可用且通常具有较高的网络带宽。 - - ## 示例数据集 {#example-dataset} 为了进一步说明潜在的优化空间,我们将使用 [Stack Overflow 数据集中的 posts 表](/data-modeling/schema-design#stack-overflow-dataset),同时优化该数据集的查询和插入性能。 @@ -202,7 +191,6 @@ FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow 在读取查询结果时,初次执行的查询往往看起来比重复执行同一查询更慢。这通常是由于 S3 自身的缓存机制以及 [ClickHouse Schema Inference Cache](/operations/system-tables/schema_inference_cache) 所致。后者会存储针对文件推断出的 schema,从而在后续访问中跳过推断步骤,降低查询时间。 ::: - ## 在读取中使用线程 {#using-threads-for-reads} 在不受网络带宽或本地 I/O 限制的前提下,S3 上的读取性能会随核心数量线性扩展。增加线程数量也会带来额外的内存开销,用户需要了解这一点。可以通过修改以下设置来潜在地提升读取吞吐性能: @@ -249,7 +237,6 @@ SETTINGS max_threads = 64 Peak memory usage: 639.99 MiB. 
``` - ## 为插入操作调优线程数与块大小 {#tuning-threads-and-block-size-for-inserts} 为了获得最大的摄取性能,你必须基于以下三点进行选择:(1) 插入块大小;(2) 合适的插入并行度;(3) 可用 CPU 内核数和 RAM 容量。总结如下: @@ -287,7 +274,6 @@ FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/stackoverflow 如上所示,通过调整这些设置,插入性能提升了 `33%` 以上。我们将其留给读者自行尝试,看看能否进一步提升单节点性能。 - ## 基于资源和节点的扩展 {#scaling-with-resources-and-nodes} 基于资源和节点的扩展同样适用于读取查询和插入查询。 @@ -365,7 +351,6 @@ FROM s3Cluster('default', 'https://datasets-documentation.s3.eu-west-3.amazonaws 返回 0 行。用时:171.202 秒。已处理 5982 万行,24.03 GB(每秒 34.941 万行,140.37 MB/秒) ``` - 读者会注意到,文件读取带来了查询性能的提升,但没有改善写入性能。默认情况下,尽管读取是通过 `s3Cluster` 分布式执行的,插入操作仍然是在发起请求的节点上进行的。这意味着,虽然每个节点都会参与读取,但生成的行会被路由回发起节点再进行分发。在高吞吐场景下,这可能成为瓶颈。为了解决这个问题,需要为 `s3cluster` 函数设置参数 `parallel_distributed_insert_select`。 将其设置为 `parallel_distributed_insert_select=2`,可以确保 `SELECT` 和 `INSERT` 会在每个分片上执行,并针对每个节点上分布式引擎的底层表进行读写。 @@ -382,7 +367,6 @@ Peak memory usage: 11.75 GiB. 如预期,这会使插入性能降低到原来的三分之一。 - ## 进一步调优 {#further-tuning} ### 禁用去重 {#disable-de-duplication} @@ -416,7 +400,6 @@ SETTINGS parallel_distributed_insert_select = 2, min_insert_block_size_rows = 0, 0 rows in set. Elapsed: 49.688 sec. Processed 59.82 million rows, 24.03 GB (1.20 million rows/s., 483.66 MB/s.) 
``` - ## 其他注意事项 {#misc-notes} * 在内存受限的场景下,如需向 S3 插入数据,考虑调低 `max_insert_delayed_streams_for_parallel_write`。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md index e805df5acd8..ae8419039fe 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/astrato-and-clickhouse.md @@ -25,15 +25,12 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 Astrato 连接到 ClickHouse {#connecting-astrato-to-clickhouse} Astrato 使用 Pushdown SQL 直接查询 ClickHouse Cloud 或本地部署的 ClickHouse。这意味着你可以在 ClickHouse 行业领先的性能加持下访问所需的全部数据。 - - ## 所需连接信息 {#connection-data-required} 在设置数据连接时,您需要准备以下信息: @@ -44,8 +41,6 @@ Astrato 使用 Pushdown SQL 直接查询 ClickHouse Cloud 或本地部署的 Cli - - ## 在 Astrato 中创建到 ClickHouse 的数据连接 {#creating-the-data-connection-to-clickhouse} - 在侧边栏中选择 **Data**,然后选择 **Data Connection** 选项卡 @@ -75,8 +70,6 @@ Astrato 使用 Pushdown SQL 直接查询 ClickHouse Cloud 或本地部署的 Cli 如果创建了重复的数据源,会在数据源名称中添加时间戳。 ::: - - ## 创建语义模型 / 数据视图 {#creating-a-semantic-model--data-view} 在我们的 Data View 编辑器中,您可以看到 ClickHouse 中的所有表(Tables)和模式(Schemas),请选择部分对象开始配置。 @@ -93,8 +86,6 @@ Astrato 使用 Pushdown SQL 直接查询 ClickHouse Cloud 或本地部署的 Cli - - ## 创建仪表板 {#creating-a-dashboard} 只需几个步骤,您就可以在 Astrato 中构建第一个图表。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md 
index d319f32c5b6..eed41fc9006 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/chartbrew-and-clickhouse.md @@ -22,15 +22,12 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import Image from '@theme/IdealImage'; - # 将 Chartbrew 连接到 ClickHouse {#connecting-chartbrew-to-clickhouse} [Chartbrew](https://chartbrew.com) 是一个数据可视化平台,允许用户创建仪表盘并实时监控数据。它支持包括 ClickHouse 在内的多种数据源,并提供无代码界面用于构建图表和报表。 - - ## 目标 {#goal} 在本指南中,您将把 Chartbrew 连接到 ClickHouse,运行一条 SQL 查询,并创建一个可视化图表。完成后,您的仪表盘可能看起来类似于这样: @@ -41,14 +38,10 @@ import Image from '@theme/IdealImage'; 如果您还没有可用的数据集,可以添加一个示例数据集。本指南使用 [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md) 数据集。 ::: - - ## 1. 收集连接信息 {#1-gather-your-connection-details} - - ## 2. 将 Chartbrew 连接到 ClickHouse {#2-connect-chartbrew-to-clickhouse} 1. 登录 [Chartbrew](https://chartbrew.com/login),然后转到 **Connections** 选项卡。 @@ -72,8 +65,6 @@ import Image from '@theme/IdealImage'; - - ## 3. 创建数据集并运行 SQL 查询 {#3-create-a-dataset-and-run-a-sql-query} 1. 点击 **Create dataset** 按钮,或导航到 **Datasets** 选项卡来创建一个数据集。 @@ -100,7 +91,6 @@ ORDER BY year; 成功获取数据后,点击 **Configure dataset** 来设置可视化参数。 - ## 4. 创建可视化 {#4-create-a-visualization} 1. 为可视化定义一个度量(数值)和一个维度(类别值)。 @@ -114,8 +104,6 @@ ORDER BY year; - - ## 5. 
自动化数据更新 {#5-automate-data-updates} 为了让仪表板始终保持最新状态,你可以设置自动数据更新: @@ -126,8 +114,6 @@ ORDER BY year; - - ## 了解更多 {#learn-more} 如需了解更多详细信息,请参阅这篇关于 [Chartbrew 和 ClickHouse](https://chartbrew.com/blog/visualizing-clickhouse-data-with-chartbrew-a-step-by-step-guide/) 的博客文章。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md index af6572a9e09..316f611fcd1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/databrain-and-clickhouse.md @@ -18,7 +18,6 @@ import databrain_06 from '@site/static/images/integrations/data-visualization/da import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 Databrain 连接到 ClickHouse {#connecting-databrain-to-clickhouse} @@ -31,16 +30,12 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 本指南将逐步介绍如何将 Databrain 连接到你的 ClickHouse 实例。 - - ## 前置条件 {#pre-requisites} - 一个 ClickHouse 数据库,可以部署在自有基础设施上,或托管于 [ClickHouse Cloud](https://clickhouse.com/)。 - 一个 [Databrain 账号](https://app.usedatabrain.com/users/sign-up)。 - 一个 Databrain 工作区,用于连接数据源。 - - ## 将 Databrain 连接到 ClickHouse 的步骤 {#steps-to-connect-databrain-to-clickhouse} ### 1. 
收集连接详细信息 {#1-gather-your-connection-details} @@ -102,7 +97,6 @@ GRANT SELECT ON your_database.* TO your_databrain_user; 将 `your_databrain_user` 和 `your_database` 替换为您实际使用的用户名和数据库名称。 - ## 将 Databrain 与 ClickHouse 配合使用 {#using-databrain-with-clickhouse} ### 探索你的数据 {#explore-your-data} @@ -152,8 +146,6 @@ GRANT SELECT ON your_database.* TO your_databrain_user; - **Embedded Analytics**:将仪表板和指标直接嵌入到你的应用程序中 - **Semantic Layer**:创建可复用的数据模型和业务逻辑 - - ## 故障排查 {#troubleshooting} ### 连接失败 {#connection-fails} @@ -175,8 +167,6 @@ GRANT SELECT ON your_database.* TO your_databrain_user; 3. **使用合适的数据类型**:确保 ClickHouse 模式(schema)使用了最优的数据类型 4. **索引优化**:利用 ClickHouse 的主键和跳过索引(skipping index) - - ## 进一步了解 {#learn-more} 如需深入了解 Databrain 的功能以及如何构建强大的分析能力: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md index adc15b8b208..9134b0ea775 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/deepnote.md @@ -19,7 +19,6 @@ import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # 将 ClickHouse 连接到 Deepnote {#connect-clickhouse-to-deepnote} @@ -28,15 +27,11 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr 本指南假定您已经拥有 Deepnote 账户,并且已有一个正在运行的 ClickHouse 实例。 - - ## 交互式示例 {#interactive-example} 如果您希望在 Deepnote 数据笔记本中探索从 ClickHouse 查询数据的交互式示例,请点击下方按钮,启动一个已连接到 [ClickHouse Playground](../../../getting-started/playground.md) 的模板项目。 [](https://deepnote.com/launch?template=ClickHouse%20and%20Deepnote) - - ## 连接到 ClickHouse 
{#connect-to-clickhouse} 1. 在 Deepnote 中,打开 “Integrations” 概览并点击 ClickHouse 卡片。 @@ -52,8 +47,6 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr 3. 恭喜!您已在 Deepnote 中成功集成 ClickHouse。 - - ## 使用 ClickHouse 集成 {#using-clickhouse-integration} 1. 首先,在笔记本右侧连接 ClickHouse 集成。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md index ced234e73ea..d0d9dd769d9 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/dot-and-clickhouse.md @@ -12,7 +12,6 @@ import dot_01 from '@site/static/images/integrations/data-visualization/dot_01.p import dot_02 from '@site/static/images/integrations/data-visualization/dot_02.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Dot {#dot} @@ -20,16 +19,12 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; [Dot](https://www.getdot.ai/) 是你的 **AI 数据分析师**。 它可以直接连接到 ClickHouse,让你能够使用自然语言询问数据相关问题、探索数据、验证假设,并解答「为什么」类的问题——而且这一切都可以直接在 Slack、Microsoft Teams、ChatGPT 或原生 Web UI 中完成。 - - ## 前提条件 {#pre-requisites} - 一个 ClickHouse 数据库,可以是自托管的,也可以是在 [ClickHouse Cloud](https://clickhouse.com/cloud) 上 - 一个 [Dot](https://www.getdot.ai/) 账户 - 一个 [Hashboard](https://www.hashboard.com/) 账户和项目 - - ## 将 Dot 连接到 ClickHouse {#connecting-dot-to-clickhouse} @@ -48,8 +43,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; Dot 使用 **query-pushdown**:由 ClickHouse 负责大规模的繁重计算,而 Dot 确保结果准确可信。 - - ## 亮点 {#highlights} Dot 通过对话让数据触手可及: @@ -60,8 +53,6 @@ Dot 通过对话让数据触手可及: - **结果可靠可追溯**:Dot 会根据你的模式(schema)和定义验证查询,最大限度减少错误。 - **高扩展性**:基于查询下推(query pushdown)构建,将 Dot 的智能与 
ClickHouse 的极速性能相结合。 - - ## 安全与治理 {#security} Dot 已达到企业级就绪标准: @@ -72,8 +63,6 @@ Dot 已达到企业级就绪标准: - **治理与校验**:训练/验证空间有助于减少模型幻觉 - **合规性**:通过 SOC 2 Type I 认证 - - ## 其他资源 {#additional-resources} - Dot 网站:[https://www.getdot.ai/](https://www.getdot.ai/) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md index 144b4d3520f..637fb556488 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/draxlr-and-clickhouse.md @@ -21,20 +21,15 @@ import draxlr_06 from '@site/static/images/integrations/data-visualization/draxl import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 Draxlr 连接到 ClickHouse {#connecting-draxlr-to-clickhouse} Draxlr 提供直观的界面用于连接到您的 ClickHouse 数据库,使您的团队能够在几分钟内探索、可视化并发布洞见。本文将引导您完成建立连接的各个步骤。 - - ## 1. 获取您的 ClickHouse 凭证 {#1-get-your-clickhouse-credentials} - - ## 2. 将 Draxlr 连接到 ClickHouse {#2--connect-draxlr-to-clickhouse} 1. 点击导航栏中的 **Connect a Database** 按钮。 @@ -51,8 +46,6 @@ Draxlr 提供直观的界面用于连接到您的 ClickHouse 数据库,使您 6. 点击 **Next** 按钮,并等待连接建立。连接成功后你会看到数据表页面。 - - ## 4. 探索你的数据 {#4-explore-your-data} 1. 点击列表中的任意一个表。 @@ -67,8 +60,6 @@ Draxlr 提供直观的界面用于连接到您的 ClickHouse 数据库,使您 - - ## 4. 使用 SQL 查询 {#4-using-sql-queries} 1. 点击导航栏中的 Explore 按钮。 @@ -79,8 +70,6 @@ Draxlr 提供直观的界面用于连接到您的 ClickHouse 数据库,使您 3. 点击 **Execute Query** 按钮即可查看结果。 - - ## 4. 保存查询 {#4-saving-you-query} 1. 执行查询后,单击 **Save Query** 按钮。 @@ -93,8 +82,6 @@ Draxlr 提供直观的界面用于连接到您的 ClickHouse 数据库,使您 4. 单击 **Save** 按钮以保存该查询。 - - ## 5. 构建仪表板 {#5-building-dashboards} 1. 
点击导航栏上的 **Dashboards** 按钮。 @@ -107,7 +94,5 @@ Draxlr 提供直观的界面用于连接到您的 ClickHouse 数据库,使您 4. 从已保存查询列表中选择一个查询并选择可视化类型,然后点击 **Add Dashboard Item** 按钮。 - - ## 了解更多 {#learn-more} 若要进一步了解 Draxlr,您可以访问 [Draxlr 文档](https://draxlr.notion.site/draxlr/Draxlr-Docs-d228b23383f64d00a70836ff9643a928) 网站。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md index cd1901625c0..8dc1151c016 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/embeddable-and-clickhouse.md @@ -10,7 +10,6 @@ doc_type: 'guide' import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 Embeddable 连接到 ClickHouse {#connecting-embeddable-to-clickhouse} @@ -21,13 +20,9 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 内置的行级安全机制确保每个用户只会看到其被允许查看的精确数据。而两级、完全可配置的缓存机制则意味着你可以在大规模场景下提供快速的实时分析能力。 - - ## 1. 收集连接参数 {#1-gather-your-connection-details} - - ## 2. 
创建 ClickHouse 连接类型 {#2-create-a-clickhouse-connection-type} 您可以使用 Embeddable API 添加数据库连接。该连接用于连接到您的 ClickHouse 服务。您可以使用以下 API 调用来添加连接: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md index 69a70a33e89..e9d27ee7dac 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/explo-and-clickhouse.md @@ -31,15 +31,12 @@ import explo_15 from '@site/static/images/integrations/data-visualization/explo_ import explo_16 from '@site/static/images/integrations/data-visualization/explo_16.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 Explo 连接到 ClickHouse {#connecting-explo-to-clickhouse} 适用于任何平台的面向客户分析功能。为精美可视化而设计,为极致简洁而打造。 - - ## 目标 {#goal} 在本指南中,您将把 ClickHouse 中的数据连接到 Explo,并对结果进行可视化展示。生成的图表如下所示: @@ -51,13 +48,9 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 如果您目前还没有可用的数据集,可以添加一个示例数据集。本指南使用的是 [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md) 数据集,您可以选择使用该数据集。在同一文档类别下还有其他几个可供参考的数据集。 ::: - - ## 1. 收集连接参数 {#1-gather-your-connection-details} - - ## 2. 将 Explo 连接到 ClickHouse {#2--connect-explo-to-clickhouse} 1. 注册 Explo 账户。 @@ -91,8 +84,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 54.211.43.19, 52.55.98.121, 3.214.169.94, 54.156.141.148 ` - - ## 3. 创建 Dashboard {#3-create-a-dashboard} 1. 在左侧导航栏中切换到 **Dashboard** 选项卡。 @@ -107,8 +98,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 4. 运行 SQL 查询 {#4-run-a-sql-query} 1. 在右侧边栏中,在你的 schema 标题下找到表名。然后在数据集编辑器中输入以下命令: @@ -123,8 +112,6 @@ LIMIT 100 - - ## 5. 构建图表 {#5-build-a-chart} 1. 
从左侧将柱状图图标拖动到画布上。 @@ -147,8 +134,6 @@ LIMIT 100 - - ## 了解更多 {#learn-more} 请查阅 Explo 文档,了解更多关于 Explo 以及如何构建仪表板的信息。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md index 03215d9cdc0..e2b9b152f70 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/fabi-and-clickhouse.md @@ -15,7 +15,6 @@ import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # 将 ClickHouse 连接到 Fabi.ai {#connecting-clickhouse-to-fabiai} @@ -24,14 +23,10 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr - - ## 收集连接信息 {#gather-your-connection-details} - - ## 创建你的 Fabi.ai 账户并连接 ClickHouse {#connect-to-clickhouse} 登录或创建你的 Fabi.ai 账户:https://app.fabi.ai/ @@ -46,16 +41,12 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr 3. 
恭喜!你已经将 ClickHouse 成功集成到 Fabi.ai 中。 - - ## 查询 ClickHouse。 {#querying-clickhouse} 将 Fabi.ai 连接到 ClickHouse 之后,打开任意一个 [Smartbook](https://docs.fabi.ai/analysis_and_reporting/smartbooks) 并创建一个 SQL 单元格。如果你的 Fabi.ai 实例只连接了一个数据源,SQL 单元格会自动将 ClickHouse 设为默认数据源;否则,你可以在数据源下拉菜单中选择要查询的数据源。 - - ## 更多资源 {#additional-resources} [Fabi.ai](https://www.fabi.ai) 文档:https://docs.fabi.ai/introduction diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md index 6c87dd1243a..8b5b8d2d362 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/hashboard-and-clickhouse.md @@ -13,7 +13,6 @@ import hashboard_01 from '@site/static/images/integrations/data-visualization/ha import Image from '@theme/IdealImage'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 ClickHouse 连接到 Hashboard {#connecting-clickhouse-to-hashboard} @@ -26,15 +25,11 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 本指南将引导你完成将 Hashboard 与 ClickHouse 实例连接的步骤。你也可以在 Hashboard 的 [ClickHouse 集成文档](https://docs.hashboard.com/docs/database-connections/clickhouse) 中找到相同信息。 - - ## 前提条件 {#pre-requisites} - 一个 ClickHouse 数据库,可以部署在你自己的基础设施上,或托管在 [ClickHouse Cloud](https://clickhouse.com/) 上。 - 一个 [Hashboard 账户](https://hashboard.com/getAccess) 以及一个项目。 - - ## 将 Hashboard 连接到 ClickHouse 的步骤 {#steps-to-connect-hashboard-to-clickhouse} ### 1. 
收集连接信息 {#1-gather-your-connection-details} @@ -53,8 +48,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 你的 ClickHouse 数据库现在已经连接到 Hashboard。接下来,你可以开始构建 [Data Models](https://docs.hashboard.com/docs/data-modeling/add-data-model)、[Explorations](https://docs.hashboard.com/docs/visualizing-data/explorations)、[Metrics](https://docs.hashboard.com/docs/metrics) 和 [Dashboards](https://docs.hashboard.com/docs/dashboards)。有关这些功能的更多详细信息,请参阅对应的 Hashboard 文档。 - - ## 了解更多 {#learn-more} 如需了解更多高级功能和故障排除方法,请访问 [Hashboard 文档](https://docs.hashboard.com/)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md index 6470dd4e926..45b759537d6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/luzmo-and-clickhouse.md @@ -18,13 +18,10 @@ import luzmo_02 from '@site/static/images/integrations/data-visualization/luzmo_ import luzmo_03 from '@site/static/images/integrations/data-visualization/luzmo_03.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # Luzmo 与 ClickHouse 集成 {#integrating-luzmo-with-clickhouse} - - ## 1. 设置 ClickHouse 连接 {#1-setup-a-clickhouse-connection} 要建立与 ClickHouse 的连接,先进入 **Connections 页面**,选择 **New Connection**,然后在 New Connection 弹窗中选择 ClickHouse。 @@ -42,8 +39,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 请参考我们开发者文档中的示例,了解如何通过我们的 API [创建 ClickHouse 连接](https://developer.luzmo.com/api/createAccount?exampleSection=AccountCreateClickhouseRequestBody)。 - - ## 2. 
添加数据集 {#2-add-datasets} 在您连接好 ClickHouse 之后,可以按照[这里](https://academy.luzmo.com/article/ldx3iltg)的说明添加数据集。您可以从 ClickHouse 中选择一个或多个可用的数据集,并在 Luzmo 中将它们[关联](https://academy.luzmo.com/article/gkrx48x5),以确保它们可以在同一个仪表板中联合使用。同时,请务必查看这篇关于[为分析准备数据](https://academy.luzmo.com/article/u492qov0)的文章。 @@ -54,8 +49,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 使用说明 {#usage-notes} 1. Luzmo ClickHouse 连接器通过 HTTP API 接口(通常监听 8123 端口)进行连接。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md index 3c09e21124d..b6b5753b462 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/mitzu-and-clickhouse.md @@ -25,7 +25,6 @@ import mitzu_10 from '@site/static/images/integrations/data-visualization/mitzu_ import mitzu_11 from '@site/static/images/integrations/data-visualization/mitzu_11.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 Mitzu 连接到 ClickHouse {#connecting-mitzu-to-clickhouse} @@ -34,8 +33,6 @@ Mitzu 是一款零代码、原生运行于数据仓库之上的产品分析应 然而,与这些平台不同的是,Mitzu 不会复制公司的产品使用数据。相反,它会在公司现有的数据仓库或数据湖之上直接生成原生 SQL 查询。 - - ## 目标 {#goal} 在本指南中,我们将介绍以下内容: @@ -50,38 +47,28 @@ Mitzu 是一款零代码、原生运行于数据仓库之上的产品分析应 本指南仅对如何使用 Mitzu 进行简要概览。你可以在 [Mitzu 文档](https://docs.mitzu.io/) 中找到更详细的信息。 - - ## 1. 收集连接信息 {#1-gather-your-connection-details} - - ## 2. 登录或注册 Mitzu {#2-sign-in-or-sign-up-to-mitzu} 首先,前往 [https://app.mitzu.io](https://app.mitzu.io) 注册账号。 - - ## 3. 配置你的工作区 {#3-configure-your-workspace} 创建组织之后,按照左侧导航栏中的 `Set up your workspace` 入门指南完成设置。然后,点击 `Connect Mitzu with your data warehouse` 链接。 - - ## 4. 
将 Mitzu 连接到 ClickHouse {#4-connect-mitzu-to-clickhouse} 首先,选择 ClickHouse 作为连接类型并设置连接详细信息。然后,点击 `Test connection & Save` 按钮以保存设置。 - - ## 5. 配置事件表 {#5-configure-event-tables} 连接保存后,选择 `Event tables` 选项卡并点击 `Add table` 按钮。在弹出的窗口中,选择你的数据库以及要添加到 Mitzu 的表。 @@ -103,8 +90,6 @@ Mitzu 是一款零代码、原生运行于数据仓库之上的产品分析应
当所有表配置完成后,点击 `Save & update event catalog` 按钮,Mitzu 将根据上述定义的表自动发现所有事件及其属性。根据数据集的大小,此步骤可能需要几分钟时间。 - - ## 4. 运行分群查询 {#4-run-segmentation-queries} 在 Mitzu 中进行用户分群与在 Amplitude、Mixpanel 或 PostHog 中一样简单。 @@ -120,8 +105,6 @@ Explore 页面左侧是事件选择区域,顶部区域用于配置时间范围 你可以选择任意事件属性或用户属性进行细分(参见下文了解如何集成用户属性)。 ::: - - ## 5. 运行漏斗查询 {#5-run-funnel-queries} 为一个漏斗最多选择 9 个步骤。选择用户必须在其中完成该漏斗的时间窗口。 @@ -135,8 +118,6 @@ Explore 页面左侧是事件选择区域,顶部区域用于配置时间范围 选择 `Funnel trends`,以查看随时间变化的漏斗趋势。 ::: - - ## 6. 运行留存查询 {#6-run-retention-queries} 最多选择 2 个步骤用于计算留存率。为滚动分析选择留存时间窗口。 @@ -150,8 +131,6 @@ Explore 页面左侧是事件选择区域,顶部区域用于配置时间范围 选择 `Weekly cohort retention` 来可视化留存率随时间的变化。 ::: - - ## 7. 运行旅程查询 {#7-run-journey-queries} 为漏斗最多选择 9 个步骤。设置一个时间窗口,用于限定用户完成整个旅程的时间范围。Mitzu 旅程图会为你提供可视化图表,展示用户在所选事件之间经过的每一条路径。 @@ -164,15 +143,11 @@ Explore 页面左侧是事件选择区域,顶部区域用于配置时间范围
- - ## 8. 运行营收查询 {#8-run-revenue-queries} 如果已完成营收配置,Mitzu 可以根据你的付款事件计算总 MRR 和订阅数量。 - - ## 9. 原生 SQL {#9-sql-native} Mitzu 对 SQL 提供原生支持,这意味着它会根据你在 Explore 页面上选择的配置生成原生 SQL 代码。 @@ -185,16 +160,12 @@ Mitzu 对 SQL 提供原生支持,这意味着它会根据你在 Explore 页面 如果你在使用 Mitzu UI 时遇到限制,可以复制 SQL 代码,在 BI 工具中继续你的工作。 ::: - - ## Mitzu 支持 {#mitzu-support} 如果你在使用过程中遇到问题,欢迎通过 [support@mitzu.io](email://support@mitzu.io) 联系我们。 你也可以加入我们的 Slack 社区:[点击这里](https://join.slack.com/t/mitzu-io/shared_invite/zt-1h1ykr93a-_VtVu0XshfspFjOg6sczKg) - - ## 了解更多 {#learn-more} 访问 [mitzu.io](https://mitzu.io) 了解更多关于 Mitzu 的信息 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md index 3710bc3e963..4a7db4a6634 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/rocketbi-and-clickhouse.md @@ -30,7 +30,6 @@ import rocketbi_17 from '@site/static/images/integrations/data-visualization/roc import rocketbi_18 from '@site/static/images/integrations/data-visualization/rocketbi_18.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 目标:使用 Rocket.BI 构建你的第一个仪表盘 {#goal-build-your-first-dashboard-with-rocketbi} @@ -43,8 +42,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 你可以通过[此链接访问该仪表盘。](https://demo.rocket.bi/dashboard/sales-dashboard-7?token=7eecf750-cbde-4c53-8fa8-8b905fec667e) - - ## 安装 {#install} 使用我们预先构建的 Docker 镜像启动 RocketBI。 @@ -64,7 +61,6 @@ wget https://raw.githubusercontent.com/datainsider-co/rocket-bi/main/docker/.cli 如果你想从源码构建或进行高级配置,可以在这里查看 [Rocket.BI Readme](https://github.com/datainsider-co/rocket-bi/blob/main/README.md)。 - ## 让我们来构建仪表板 
{#lets-build-the-dashboard} 在 Dashboard 中,你可以找到你的报表,点击 **+New** 开始可视化。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md index bb43de8a7c9..9469a96900b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/community_integrations/zingdata-and-clickhouse.md @@ -22,15 +22,12 @@ import zing_08 from '@site/static/images/integrations/data-visualization/zing_08 import zing_09 from '@site/static/images/integrations/data-visualization/zing_09.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 Zing Data 连接到 ClickHouse {#connect-zing-data-to-clickhouse} Zing Data 是一个数据探索与可视化平台。Zing Data 通过 ClickHouse 提供的 JS 驱动连接到 ClickHouse。 - - ## 如何连接 {#how-to-connect} 1. 收集你的连接信息。 @@ -62,8 +59,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; 6. 一旦添加了 ClickHouse 数据源,它将在你的 Zing 组织中对所有成员可见,位于 **Data Sources** / **Sources** 选项卡下。 - - ## 在 Zing Data 中创建图表和仪表板 {#creating-charts-and-dashboards-in-zing-data} 1. 在添加好 ClickHouse 数据源之后,在 Web 端点击 **Zing App**,或在移动端点击该数据源以开始创建图表。 @@ -93,8 +88,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained';
- - ## 相关内容 {#related-content} - [文档](https://docs.getzingdata.com/docs/) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md index 0ac7c1fa5c4..99a22d620ac 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/config.md @@ -19,7 +19,6 @@ import alias_table_config_example from '@site/static/images/integrations/data-vi import alias_table_select_example from '@site/static/images/integrations/data-visualization/grafana/alias_table_select_example.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 在 Grafana 中配置 ClickHouse 数据源 {#configuring-clickhouse-data-source-in-grafana} @@ -62,7 +61,6 @@ secureJsonData: 请注意,从 UI 中保存配置时会添加一个 `version` 属性。该属性表示配置保存时所使用的插件版本。 - ### HTTP 协议 {#http-protocol} 如果选择通过 HTTP 协议连接,将会显示更多设置。 @@ -79,7 +77,6 @@ jsonData: path: additional/path/example ``` - #### 自定义 HTTP 头 {#custom-http-headers} 可以为发送到服务器的请求添加自定义 HTTP 头。 @@ -106,7 +103,6 @@ secureJsonData: secureHttpHeaders.X-Example-Secure-Header: secure header value ``` - ## 其他配置 {#additional-settings} 以下附加配置为可选项。 @@ -125,7 +121,6 @@ jsonData: validateSql: false # 设置为 true 时,将在 SQL 编辑器中验证 SQL 语句。 ``` - ### OpenTelemetry {#opentelemetry} OpenTelemetry (OTel) 已与该插件深度集成。 @@ -164,7 +159,6 @@ jsonData: messageColumn: # 日志的消息/内容。 ``` - ### Traces {#traces} 为了加快[构建追踪查询](./query-builder.md#traces)的速度,可以为追踪查询设置默认数据库/数据表以及列。这样会在查询构建器中预加载一条可直接运行的追踪搜索查询,从而使在 Explore 页面中浏览可观测性数据更加高效。 @@ -201,7 +195,6 @@ jsonData: serviceTagsColumn: # 服务标签列。应为 map 类型。 ``` - ### 列别名 {#column-aliases} 列别名是一种便捷方式,可以使用不同的名称和类型来查询数据。 @@ -232,7 +225,6 @@ CREATE TABLE alias_example ( 更多信息请参阅 [ALIAS](/sql-reference/statements/create/table#alias) 列类型的文档。 - #### 列别名表 {#column-alias-tables} 默认情况下,Grafana 
会根据 `DESC table` 命令的返回结果提供列建议。 @@ -275,7 +267,6 @@ INSERT INTO example_table_aliases (`alias`, `select`, `type`) VALUES 这两种类型的别名都可以用于执行复杂的类型转换或 JSON 字段提取。 - ## 所有 YAML 选项 {#all-yaml-options} 以下是该插件提供的全部 YAML 配置选项。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md index fed44f550e2..dbdcfb9c460 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/index.md @@ -22,7 +22,6 @@ import valid_ds from '@site/static/images/integrations/data-visualization/grafan import Image from '@theme/IdealImage'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 用于 Grafana 的 ClickHouse 数据源插件 {#clickhouse-data-source-plugin-for-grafana} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md index 77668ea181b..73ca9f4f937 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/grafana/query-builder.md @@ -20,7 +20,6 @@ import trace_id_in_logs from '@site/static/images/integrations/data-visualizatio import demo_data_links from '@site/static/images/integrations/data-visualization/grafana/demo_data_links.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 查询构建器 {#query-builder} @@ -37,8 +36,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - [追踪](#traces):针对搜索和查看追踪数据进行了优化。在配置了[默认值](./config.md#traces)的探索视图中效果最佳。 - [SQL 编辑器](#sql-editor):当你需要对查询进行完全控制时,可以使用 SQL 编辑器。在此模式下,可以执行任意 SQL 查询。 - - ## 查询类型 
{#query-types} *Query Type* 设置会更改查询构建器的布局,以匹配正在构建的查询类型。 @@ -110,8 +107,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 如果数据集允许,请尝试通过将其设置为 `0` 来移除 `LIMIT` 子句。 ::: - - | Field | Description | |----|----| | Builder Mode | 简单查询会排除 Aggregates 和 Group By,而聚合查询会包含这些选项。 | @@ -164,8 +159,6 @@ Trace 查询类型支持 [data links](#data-links)。 此查询类型会在 Trace Search 模式下使用表格视图渲染数据,在 Trace ID 模式下使用 trace 面板渲染数据。 - - ## SQL 编辑器 {#sql-editor} 对于过于复杂而无法通过查询构建器完成的查询,你可以使用 SQL 编辑器。 @@ -180,8 +173,6 @@ Trace 查询类型支持 [data links](#data-links)。 - - ## 数据链接 {#data-links} Grafana 的 [data links](https://grafana.com/docs/grafana/latest/panels-visualizations/configure-data-links) @@ -220,8 +211,6 @@ Grafana 的 [data links](https://grafana.com/docs/grafana/latest/panels-visualiz - - ## 宏 {#macros} 宏是一种在查询中添加动态 SQL 的简单方式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md index b7f2996d447..799f318fac9 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/index.md @@ -8,8 +8,6 @@ description: '了解如何在 ClickHouse 中可视化数据' doc_type: 'guide' --- - - # 在 ClickHouse 中可视化数据 {#visualizing-data-in-clickhouse}
@@ -49,8 +47,6 @@ doc_type: 'guide' - [Tableau](./tableau/tableau-and-clickhouse.md) - [Zing Data](./community_integrations/zingdata-and-clickhouse.md) - - ## ClickHouse Cloud 与数据可视化工具的兼容性 {#clickhouse-cloud-compatibility-with-data-visualization-tools} | 工具 | 支持方式 | 已测试 | 有文档 | 备注 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md index 0e2df3c44af..309ac4399c5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/lightdash-and-clickhouse.md @@ -21,7 +21,6 @@ import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/curr import Image from '@theme/IdealImage'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Lightdash {#lightdash} @@ -32,8 +31,6 @@ Lightdash 是一个为现代数据团队构建的 **AI 优先 BI 平台**,将 这一合作将 **ClickHouse 的极速性能** 与 **Lightdash 的开发者体验**相结合,使得借助 AI 进行数据探索、可视化和洞察自动化变得前所未有地轻松。 - - ## 使用 Lightdash 和 ClickHouse 构建交互式仪表板 {#build-an-interactive-dashboard} 本指南将介绍如何使用 **Lightdash** 连接 **ClickHouse** 来探索 dbt 模型并构建交互式仪表板。 @@ -128,7 +125,6 @@ Lightdash 是一个为现代数据团队构建的 **AI 优先 BI 平台**,将 **探索**页面由五个主要区域组成: - 1. **维度和指标** — 所选表中的所有可用字段 2. **过滤器** — 限制查询返回的数据范围 3. 
**图表** — 将查询结果可视化 @@ -196,7 +192,6 @@ Lightdash 中的 **AI 代理**让数据探索真正实现自助服务。 - ## 了解更多 {#learn-more} 要进一步了解如何将 dbt 项目连接到 Lightdash,请访问 [Lightdash 文档 → ClickHouse 配置](https://docs.lightdash.com/get-started/setup-lightdash/connect-project#clickhouse?utm_source=clickhouse&utm_medium=partner&utm_campaign=integration_docs)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md index 56065848579..5e59e8e1429 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-and-clickhouse.md @@ -18,20 +18,15 @@ import looker_03 from '@site/static/images/integrations/data-visualization/looke import looker_04 from '@site/static/images/integrations/data-visualization/looker_04.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Looker {#looker} Looker 可以通过官方的 ClickHouse 数据源连接到 ClickHouse Cloud 或自托管的 ClickHouse 部署。 - - ## 1. 收集连接信息 {#1-gather-your-connection-details} - - ## 2. 创建 ClickHouse 数据源 {#2-create-a-clickhouse-data-source} 导航到 Admin -> Database -> Connections,然后单击右上角的 “Add Connection” 按钮。 @@ -56,8 +51,6 @@ Looker 可以通过官方的 ClickHouse 数据源连接到 ClickHouse Cloud 或 现在,您应该可以将 ClickHouse 数据源关联到 Looker 项目中。 - - ## 3. 已知限制 {#3-known-limitations} 1. 
以下数据类型默认按字符串类型处理: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md index 192cd0dc4ab..c7b4dec6212 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/looker-studio-and-clickhouse.md @@ -23,25 +23,18 @@ import looker_studio_enable_mysql from '@site/static/images/integrations/data-vi import looker_studio_mysql_cloud from '@site/static/images/integrations/data-visualization/looker_studio_mysql_cloud.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Looker Studio {#looker-studio} Looker Studio 可以通过 MySQL 接口,使用 Google 官方提供的 MySQL 数据源连接到 ClickHouse。 - - ## ClickHouse Cloud 配置 {#clickhouse-cloud-setup} - - ## 本地部署 ClickHouse 服务器的设置 {#on-premise-clickhouse-server-setup} - - ## 将 Looker Studio 连接到 ClickHouse {#connecting-looker-studio-to-clickhouse} 首先,使用 Google 账号登录 https://lookerstudio.google.com,并创建一个新的 Data Source(数据源): @@ -77,8 +70,6 @@ Looker Studio 可以通过 MySQL 接口,使用 Google 官方提供的 MySQL 现在,你可以开始探索数据或创建新的报表了! 
- - ## 在 ClickHouse Cloud 中使用 Looker Studio {#using-looker-studio-with-clickhouse-cloud} 使用 ClickHouse Cloud 时,需要先启用 MySQL 接口。可以在连接对话框的 “MySQL” 选项卡中完成此操作。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md index 3171897b4ad..9776be08625 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/metabase-and-clickhouse.md @@ -24,15 +24,12 @@ import metabase_07 from '@site/static/images/integrations/data-visualization/met import metabase_08 from '@site/static/images/integrations/data-visualization/metabase_08.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # 将 Metabase 连接到 ClickHouse {#connecting-metabase-to-clickhouse} Metabase 是一个易于使用的开源 UI 工具,可用于对你的数据进行查询和分析。Metabase 是一个 Java 应用程序,只需下载 JAR 文件并使用 `java -jar metabase.jar` 运行即可。Metabase 通过 JDBC 驱动程序连接到 ClickHouse,你需要下载该驱动并将其放入 `plugins` 目录中: - - ## 目标 {#goal} 在本指南中,您将使用 Metabase 针对 ClickHouse 数据提出一些问题,并将答案进行可视化展示。其中一个结果如下所示: @@ -44,13 +41,9 @@ Metabase 是一个易于使用的开源 UI 工具,可用于对你的数据进 如果您目前没有可用的数据集,可以添加一个示例数据集。本指南使用 [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md) 数据集,您可以选择使用该数据集。同一文档类别下还有其他几个示例可供参考。 ::: - - ## 1. 收集连接详细信息 {#1-gather-your-connection-details} - - ## 2. 下载用于 Metabase 的 ClickHouse 插件 {#2--download-the-clickhouse-plugin-for-metabase} 1. 如果还没有 `plugins` 文件夹,请在保存 `metabase.jar` 的目录下创建一个名为 `plugins` 的子文件夹。 @@ -63,8 +56,6 @@ Metabase 是一个易于使用的开源 UI 工具,可用于对你的数据进 5. 通过 http://hostname:3000 访问 Metabase。首次启动时,你会看到一个欢迎界面,并需要依次回答一系列问题。如果在此过程中提示你选择数据库,请选择 "**I'll add my data later**": - - ## 3. 将 Metabase 连接到 ClickHouse {#3--connect-metabase-to-clickhouse} 1. 点击右上角的齿轮图标并选择 **Admin Settings**,进入 Metabase 管理页面。 @@ -83,8 +74,6 @@ Metabase 是一个易于使用的开源 UI 工具,可用于对你的数据进 6. 
点击 **Save** 按钮,Metabase 将扫描你的数据库以检测其中的表。 - - ## 4. 运行 SQL 查询 {#4-run-a-sql-query} 1. 点击右上角的 **Exit admin** 按钮退出 **Admin settings**。 @@ -97,8 +86,6 @@ Metabase 是一个易于使用的开源 UI 工具,可用于对你的数据进 - - ## 5. 创建问题 {#5-ask-a-question} 1. 点击 **+ New** 并选择 **Question**。请注意,您可以从选择数据库和数据表开始构建一个问题。例如,下面的问题是针对 `default` 数据库中名为 `uk_price_paid` 的表提问的。以下是一个简单的问题,用于计算大曼彻斯特郡各城镇的平均价格: @@ -113,8 +100,6 @@ Metabase 是一个易于使用的开源 UI 工具,可用于对你的数据进 - - ## 了解更多 {#learn-more} 通过查阅Metabase 文档,了解更多关于 Metabase 及如何构建仪表盘的信息。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md index ad322017b14..2c87ad46a34 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/omni-and-clickhouse.md @@ -13,21 +13,16 @@ import omni_01 from '@site/static/images/integrations/data-visualization/omni_01 import omni_02 from '@site/static/images/integrations/data-visualization/omni_02.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # Omni {#omni} Omni 可以通过官方 ClickHouse 数据源连接到 ClickHouse Cloud 或本地自建部署的 ClickHouse。 - - ## 1. 收集连接信息 {#1-gather-your-connection-details} - - ## 2. 
创建 ClickHouse 数据源 {#2-create-a-clickhouse-data-source} 进入 Admin -> Connections,然后点击右上角的“Add Connection”按钮。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md index cad10cdfe0b..89c4ca343c3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/powerbi-and-clickhouse.md @@ -29,7 +29,6 @@ import powerbi_dsn_credentials from '@site/static/images/integrations/data-visua import powerbi_16 from '@site/static/images/integrations/data-visualization/powerbi_16.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # Power BI {#power-bi} @@ -50,8 +49,6 @@ Power BI 要求先在 Desktop 版本中创建仪表板,然后将其发布到 P * [从 ClickHouse 查询数据并在 Power BI Desktop 中进行可视化](#query-and-visualise-data) * [为 Power BI Service 设置本地数据网关](#power-bi-service) - - ## 先决条件 {#prerequisites} ### 安装 Power BI {#power-bi-installation} @@ -69,8 +66,6 @@ Power BI 要求先在 Desktop 版本中创建仪表板,然后将其发布到 P * Password - 该用户的密码 * Database - 要连接的实例上的数据库名称 - - ## Power BI 桌面版 {#power-bi-desktop} 要在 Power BI Desktop 中开始查询数据,你需要完成以下步骤: @@ -158,16 +153,12 @@ Power BI 要求先在 Desktop 版本中创建仪表板,然后将其发布到 P 导入完成后,你的 ClickHouse 数据将在 Power BI 中像平常一样可供访问和使用。
- - ## Power BI 服务 {#power-bi-service} 若要使用 Microsoft Power BI 服务,您需要创建一个[本地数据网关](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-onprem)。 关于如何配置自定义连接器的更多信息,请参阅 Microsoft 关于[在本地数据网关中使用自定义数据连接器](https://learn.microsoft.com/en-us/power-bi/connect-data/service-gateway-custom-connectors)的文档。 - - ## ODBC 驱动程序(仅导入) {#odbc-driver-import-only} 我们推荐使用采用 DirectQuery 的 ClickHouse Connector。 @@ -236,8 +227,6 @@ Power BI 要求先在 Desktop 版本中创建仪表板,然后将其发布到 P 导入完成后,你的 ClickHouse 数据就可以像往常一样在 Power BI 中进行访问了。 - - ## 已知限制 {#known-limitations} ### UInt64 {#uint64} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md index 4f160ed651b..a60a75cb33e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/quicksight-and-clickhouse.md @@ -21,15 +21,12 @@ import quicksight_06 from '@site/static/images/integrations/data-visualization/q import quicksight_07 from '@site/static/images/integrations/data-visualization/quicksight_07.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # QuickSight {#quicksight} QuickSight 可以通过官方 MySQL 数据源,并使用 Direct Query 模式,经由 MySQL 接口连接到本地部署的 ClickHouse 集群(23.11+)。 - - ## 本地部署 ClickHouse 服务器的设置 {#on-premise-clickhouse-server-setup} 请参阅[官方文档](/interfaces/mysql),了解如何设置启用了 MySQL 接口的 ClickHouse 服务器。 @@ -122,7 +119,6 @@ mysql> show databases; Read 4 rows, 603.00 B in 0.00156 sec., 2564 rows/sec., 377.48 KiB/sec. ``` - ## 将 QuickSight 连接到 ClickHouse {#connecting-quicksight-to-clickhouse} 首先,访问 [https://quicksight.aws.amazon.com](https://quicksight.aws.amazon.com),进入 Datasets,然后单击 "New dataset"(新建数据集): @@ -163,8 +159,6 @@ Read 4 rows, 603.00 B in 0.00156 sec., 2564 rows/sec., 377.48 KiB/sec. 
现在,您可以继续发布数据集并创建新的可视化了! - - ## 已知限制 {#known-limitations} - SPICE 导入功能不能正常工作;请改用 Direct Query 模式。请参见 [#58553](https://github.com/ClickHouse/ClickHouse/issues/58553)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md index 1bca8d20e43..daeaecee37c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/splunk-and-clickhouse.md @@ -21,7 +21,6 @@ import splunk_9 from '@site/static/images/integrations/splunk/splunk-9.png'; import splunk_10 from '@site/static/images/integrations/splunk/splunk-10.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 将 Splunk 连接到 ClickHouse {#connecting-splunk-to-clickhouse} @@ -36,8 +35,6 @@ Splunk 是一款广泛应用于安全和可观测性的产品,同时也是一 此集成的理想用例是:当使用 ClickHouse 来处理诸如 NetFlow、Avro 或 Protobuf 二进制数据、DNS、VPC 流日志,以及其他 OTel 日志等大规模数据源时,可将这些数据在 Splunk 中与团队共享,用于搜索和构建仪表盘。通过这种方式,数据不会被摄取到 Splunk 的索引层,而是类似于其他可视化集成(如 [Metabase](https://www.metabase.com/) 或 [Superset](https://superset.apache.org/))一样,直接从 ClickHouse 中进行查询。 - - ## 目标​ {#goal} 在本指南中,我们将使用 ClickHouse JDBC 驱动程序将 ClickHouse 连接到 Splunk。我们会安装本地版本的 Splunk Enterprise,但不会对任何数据进行索引。相反,我们只通过 DB Connect 查询引擎使用搜索功能。 @@ -50,8 +47,6 @@ Splunk 是一款广泛应用于安全和可观测性的产品,同时也是一 本指南使用了 [New York City Taxi 数据集](/getting-started/example-datasets/nyc-taxi)。你还可以在[我们的文档](http://localhost:3000/docs/getting-started/example-datasets)中找到许多其他可用的数据集。 ::: - - ## 前提条件 {#prerequisites} 在开始之前,您需要: @@ -61,8 +56,6 @@ Splunk 是一款广泛应用于安全和可观测性的产品,同时也是一 - 对运行 Splunk Enterprise 的操作系统实例具有管理员权限或 SSH 访问权限 - ClickHouse 连接信息(如果您使用 ClickHouse Cloud,请参阅[此处](/integrations/metabase#1-gather-your-connection-details)) - - ## 在 Splunk Enterprise 上安装并配置 DB Connect {#install-and-configure-db-connect-on-splunk-enterprise} 
必须先在 Splunk Enterprise 实例上安装 Java Runtime Environment。若使用 Docker,可运行命令 `microdnf install java-11-openjdk`。 @@ -81,8 +74,6 @@ Splunk 是一款广泛应用于安全和可观测性的产品,同时也是一 - - ## 为 ClickHouse 配置 JDBC {#configure-jdbc-for-clickhouse} 将 [ClickHouse JDBC 驱动程序](https://github.com/ClickHouse/clickhouse-java) 下载到 DB Connect Drivers 文件夹,例如: @@ -111,7 +102,6 @@ ui_default_catalog = $database$ - ## 将 Splunk 搜索连接到 ClickHouse {#connect-splunk-search-to-clickhouse} 导航到 DB Connect App Configuration -> Databases -> Identities,在其中为你的 ClickHouse 创建一个 Identity。 @@ -132,8 +122,6 @@ ui_default_catalog = $database$ 如果出现错误,请确保已经将 Splunk 实例的 IP 地址添加到 ClickHouse Cloud 的 IP 访问列表中。更多信息参见[文档](/cloud/security/setting-ip-filters)。 ::: - - ## 运行 SQL 查询 {#run-a-sql-query} 现在我们将运行一个 SQL 查询,以验证一切工作正常。 @@ -148,8 +136,6 @@ ui_default_catalog = $database$ 如果查询成功,你应该会看到结果。 - - ## 创建仪表板 {#create-a-dashboard} 现在来创建一个仪表板,结合使用 SQL 和功能强大的 Splunk Processing Language(SPL)。 @@ -194,7 +180,6 @@ ORDER BY year, count(*) DESC; " connection="chc" - ## 时间序列数据 {#time-series-data} Splunk 提供了数百个内置函数,供仪表板用于时间序列数据的可视化和展示。此示例将结合 SQL 与 SPL,创建一个可在 Splunk 中处理时间序列数据的查询。 @@ -209,7 +194,6 @@ FROM "demo"."conn" WHERE time >= now() - interval 1 HOURS" connection="chc" | sort - duration: ``` - ## 了解更多 {#learn-more} 如果您想进一步了解 Splunk DB Connect 以及如何构建仪表板,请访问 [Splunk 文档](https://docs.splunk.com/Documentation)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md index 3ede785bf6a..0829e006bb4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/superset-and-clickhouse.md @@ -28,15 +28,12 @@ import superset_11 from '@site/static/images/integrations/data-visualization/sup import superset_12 from 
'@site/static/images/integrations/data-visualization/superset_12.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 将 Superset 连接到 ClickHouse {#connect-superset-to-clickhouse} Apache Superset 是一个用 Python 编写的开源数据探索和可视化平台。Superset 使用由 ClickHouse 提供的 Python 驱动程序连接到 ClickHouse。让我们看看它是如何工作的…… - - ## 目标 {#goal} 在本指南中,你将使用 ClickHouse 数据库中的数据,在 Superset 中构建一个仪表板。仪表板将如下所示: @@ -48,13 +45,9 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 如果你还没有可用的数据集,可以添加一个示例数据集。本指南使用 [UK Price Paid](/getting-started/example-datasets/uk-price-paid.md) 数据集,你可以选用它。在同一文档分类下还有多个其他示例数据集可供查看。 ::: - - ## 1. 收集连接详细信息 {#1-gather-your-connection-details} - - ## 2. 安装驱动程序 {#2-install-the-driver} 1. Superset 使用 `clickhouse-connect` 驱动程序连接到 ClickHouse。有关 `clickhouse-connect` 的详细信息,请参阅 https://pypi.org/project/clickhouse-connect/,可以通过以下命令进行安装: @@ -65,8 +58,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 2. 启动(或重新启动)Superset。 - - ## 3. 将 Superset 连接到 ClickHouse {#3-connect-superset-to-clickhouse} 1. 在 Superset 中,从顶部菜单中选择 **Data**,然后在下拉菜单中选择 **Databases**。点击 **+ Database** 按钮添加一个新数据库: @@ -89,8 +80,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 4. 依次点击 **CONNECT** 和 **FINISH** 按钮完成向导设置,此时你应该可以在数据库列表中看到你的数据库。 - - ## 4. 添加数据集 {#4-add-a-dataset} 1. 要在 Superset 中与 ClickHouse 数据交互,您需要先定义一个**数据集(_dataset_)**。在 Superset 顶部菜单中选择 **Data**,然后在下拉菜单中选择 **Datasets**。 @@ -102,8 +91,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; 3. 点击对话窗口底部的 **ADD** 按钮,您的表就会出现在数据集列表中。现在您已经可以构建仪表板(dashboard)并分析 ClickHouse 数据了! - - ## 5. 
在 Superset 中创建图表和仪表板 {#5--creating-charts-and-a-dashboard-in-superset} 如果您已经熟悉 Superset,那么接下来的内容会让您觉得非常熟悉。如果您是第一次使用 Superset,那么……它和很多其他优秀的可视化工具类似——上手很快,但各种细节和使用技巧需要在您持续使用工具的过程中逐步掌握。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md index 002bc810fc4..813b676e26f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-and-clickhouse.md @@ -29,7 +29,6 @@ import tableau_workbook6 from '@site/static/images/integrations/data-visualizati import tableau_workbook7 from '@site/static/images/integrations/data-visualization/tableau_workbook7.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 将 Tableau 连接到 ClickHouse {#connecting-tableau-to-clickhouse} @@ -43,8 +42,6 @@ ClickHouse 提供了官方 Tableau 连接器,该连接器已发布在 - - ## 使用前的准备工作 {#setup-required-prior-usage} 1. 收集连接详情 @@ -64,8 +61,6 @@ ClickHouse 提供了官方 Tableau 连接器,该连接器已发布在 - Windows: `C:\Program Files\Tableau\Drivers` 5. 在 Tableau 中配置 ClickHouse 数据源,并开始构建数据可视化报表! - - ## 在 Tableau 中配置 ClickHouse 数据源 {#configure-a-clickhouse-data-source-in-tableau} 现在你已经安装并设置好了 `clickhouse-jdbc` 驱动,接下来将介绍如何在 Tableau 中定义一个数据源,用于连接 ClickHouse 中的 **TPCD** 数据库。 @@ -124,8 +119,6 @@ ClickHouse 提供了官方 Tableau 连接器,该连接器已发布在 现在你已经可以在 Tableau 中开始构建可视化报表了! 
- - ## 在 Tableau 中构建可视化 {#building-visualizations-in-tableau} 现在我们已经在 Tableau 中配置好了 ClickHouse 数据源,接下来就可以对这些数据进行可视化…… @@ -185,8 +178,6 @@ ClickHouse 提供了官方 Tableau 连接器,该连接器已发布在 恭喜!您已经成功将 Tableau 连接到了 ClickHouse,并为分析和可视化您的 ClickHouse 数据 打开了无限可能。 - - ## 手动安装连接器 {#install-the-connector-manually} 如果你使用的是未默认包含该连接器的旧版本 Tableau Desktop,可以按照以下步骤手动安装: @@ -197,19 +188,13 @@ ClickHouse 提供了官方 Tableau 连接器,该连接器已发布在 * Windows: `C:\Users\[Windows User]\Documents\My Tableau Repository\Connectors` 3. 重启 Tableau Desktop,如果安装成功,你会在 `New Data Source` 部分看到该连接器。 - - ## 连接和分析技巧 {#connection-and-analysis-tips} 如需获取有关优化 Tableau-ClickHouse 集成的更多指导, 请访问[连接技巧](/integrations/tableau/connection-tips)和[分析技巧](/integrations/tableau/analysis-tips)。 - - ## 测试 {#tests} 该连接器正在使用 [TDVT 框架](https://tableau.github.io/connector-plugin-sdk/docs/tdvt) 进行测试,目前测试覆盖率为 97%。 - - ## 摘要 {#summary} 可以使用通用的 ClickHouse ODBC/JDBC 驱动将 Tableau 连接到 ClickHouse。不过,本连接器可以简化连接配置过程。如果在使用该连接器时遇到任何问题,欢迎前往 GitHub 反馈。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md index 4d5a180c287..5140c238653 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-connection-tips.md @@ -11,13 +11,10 @@ doc_type: 'guide' import Image from '@theme/IdealImage'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 连接建议 {#connection-tips} - - ## 初始 SQL 选项卡 {#initial-sql-tab} 如果在“高级”选项卡中勾选了 *Set Session ID* 复选框(默认勾选),就可以使用以下方式自由设置会话级[设置](/operations/settings/settings/): @@ -26,7 +23,6 @@ import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; SET my_setting=value; ``` - ## 高级选项卡 {#advanced-tab} 在 99% 的情况下你不需要使用高级选项卡,只有在剩下的 1% 情况下才可能需要使用以下设置: @@ 
-38,8 +34,6 @@ SET my_setting=value; ``` 关于类型映射的更多信息请参阅相应章节。 - - * **JDBC Driver URL Parameters**。您可以在此字段中传递其余的[驱动参数](https://github.com/ClickHouse/clickhouse-jdbc#configuration),例如 `jdbcCompliance`。请注意,参数值必须以 URL 编码格式传递;如果同时通过此字段以及 Advanced 选项卡前两个字段传递了 `custom_http_params` 或 `typeMappings`,则以 Advanced 选项卡中前两个字段的值为准。 * **Set Session ID** 复选框。用于在 Initial SQL 选项卡中设置会话级别的设置,会生成一个带时间戳和伪随机数的 `session_id`,格式为 `"tableau-jdbc-connector-*{timestamp}*-*{number}*"`。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md index 52dd94e75d2..118985188c8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/data-visualization/tableau/tableau-online-and-clickhouse.md @@ -21,23 +21,16 @@ import tableau_desktop_03 from '@site/static/images/integrations/data-visualizat import tableau_desktop_04 from '@site/static/images/integrations/data-visualization/tableau_desktop_04.png'; import tableau_desktop_05 from '@site/static/images/integrations/data-visualization/tableau_desktop_05.png'; - # Tableau Online {#tableau-online} Tableau Online 可以通过官方 MySQL 数据源,使用 MySQL 接口连接到 ClickHouse Cloud 或本地部署的 ClickHouse 环境。 - - ## ClickHouse Cloud 配置 {#clickhouse-cloud-setup} - - ## 本地部署 ClickHouse 服务器安装与配置 {#on-premise-clickhouse-server-setup} - - ## 将 Tableau Online 连接到 ClickHouse(本地部署且不使用 SSL) {#connecting-tableau-online-to-clickhouse-on-premise-without-ssl} 登录您的 Tableau Cloud 站点并添加一个新的 Published Data Source(已发布数据源)。 @@ -64,8 +57,6 @@ Tableau Online 会自动扫描数据库并提供可用表的列表。将所需 注意:如果您想将 Tableau Online 与 Tableau Desktop 配合使用,并在二者之间共享 ClickHouse 数据集,请确保在 Tableau Desktop 中同样使用默认的 MySQL 连接器,并按照在 Data Source 下拉框中选择 MySQL 
时显示的安装指南进行配置,该指南位于[此处](https://www.tableau.com/support/drivers)。如果您使用的是 M1 Mac,请查看[此故障排除帖](https://community.tableau.com/s/question/0D58b0000Ar6OhvCQE/unable-to-install-mysql-driver-for-m1-mac) 以获取驱动安装的替代方案。 - - ## 将 Tableau Online 连接到 ClickHouse(云端或本地部署,使用 SSL) {#connecting-tableau-online-to-clickhouse-cloud-or-on-premise-setup-with-ssl} 由于无法在 Tableau Online 的 MySQL 连接设置向导中提供 SSL 证书, @@ -108,8 +99,6 @@ ClickHouse Cloud 的 SSL 证书由 [Let's Encrypt](https://letsencrypt.org/certi 最后,点击 "Publish",你嵌入了凭据的数据源会在 Tableau Online 中自动打开。 - - ## 已知限制(ClickHouse 23.11) {#known-limitations-clickhouse-2311} 所有已知限制均已在 ClickHouse `23.11` 中修复。如果您遇到任何其他不兼容问题,请随时[联系我们](https://clickhouse.com/company/contact)或创建一个[新 issue](https://github.com/ClickHouse/ClickHouse/issues)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md index f82a7347111..def77d945c6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/csharp.md @@ -55,7 +55,6 @@ Install-Package ClickHouse.Driver *** - ## 快速入门 {#quick-start} ```csharp @@ -83,7 +82,6 @@ using (var connection = new ClickHouseConnection("Host=my.clickhouse")) *** - ## 使用方法 {#usage} ### 连接字符串参数 {#connection-string} @@ -154,7 +152,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### 插入数据 {#inserting-data} 使用参数化查询插入数据: @@ -178,7 +175,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### 批量插入 {#bulk-insert} 使用 `ClickHouseBulkCopy` 时需要: @@ -221,7 +217,6 @@ Console.WriteLine($"Rows written: {bulkCopy.RowsWritten}"); *** - ### 执行 SELECT 查询 {#performing-select-queries} 执行 SELECT 查询并处理其结果: @@ -249,7 +244,6 @@ using (var connection = new ClickHouseConnection(connectionString)) *** - ### 原始数据流 {#raw-streaming} ```csharp @@ -263,7 +257,6 @@ var 
json = reader.ReadToEnd(); *** - ### 嵌套列支持 {#nested-columns} ClickHouse 嵌套类型(`Nested(...)`)可以按照数组语义进行读写。 @@ -289,7 +282,6 @@ await bulkCopy.WriteToServerAsync(new[] { row1, row2 }); *** - ### AggregateFunction 列 {#aggregatefunction-columns} 类型为 `AggregateFunction(...)` 的列不能直接进行查询或插入操作。 @@ -308,7 +300,6 @@ SELECT uniqMerge(c) FROM t; *** - ### SQL 参数 {#sql-parameters} 要在查询中传递参数,必须使用 ClickHouse 的参数格式,形式如下: @@ -339,7 +330,6 @@ INSERT INTO table VALUES ({val1:Int32}, {val2:Array(UInt8)}) *** - ## 支持的数据类型 {#supported-data-types} `ClickHouse.Driver` 支持以下 ClickHouse 数据类型及其对应的 .NET 类型映射: @@ -455,7 +445,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - #### 使用 appsettings.json {#logging-appsettings-config} 可以使用标准的 .NET 配置来配置日志级别: @@ -486,7 +475,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - #### 使用内存配置 {#logging-inmemory-config} 你也可以在代码中按类别配置日志的详细程度: @@ -523,7 +511,6 @@ await using var connection = new ClickHouseConnection(settings); await connection.OpenAsync(); ``` - ### 分类与发射源 {#logging-categories} 驱动程序使用专用日志分类,便于按组件精细调整日志级别: @@ -558,7 +545,6 @@ await connection.OpenAsync(); * 连接打开/关闭事件 * 会话 ID 跟踪 - ### 调试模式:网络跟踪与诊断 {#logging-debugmode} 为帮助诊断网络问题,驱动程序库提供了一个辅助工具,可启用对 .NET 网络内部机制的底层跟踪。要启用它,必须传入一个 LoggerFactory,并将日志级别设置为 Trace,同时将 EnableDebugMode 设置为 true(或者通过 `ClickHouse.Driver.Diagnostic.TraceHelper` 类手动启用)。警告:这会生成极其冗长的日志,并影响性能。不建议在生产环境中启用调试模式。 @@ -580,7 +566,6 @@ var settings = new ClickHouseClientSettings() *** - ### ORM & Dapper 支持 {#orm-support} `ClickHouse.Driver` 支持 Dapper(有一定限制)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md index fc337d36d4f..b390de18576 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/go/index.md @@ -13,7 +13,6 @@ integration: import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_native.md'; - # ClickHouse Go {#clickhouse-go} ## 一个简单示例 {#a-simple-example} @@ -32,7 +31,6 @@ cd clickhouse-golang-example go mod init clickhouse-golang-example ``` - ### 复制一些示例代码 {#copy-in-some-sample-code} 将此代码复制到 `clickhouse-golang-example` 目录中,保存为 `main.go`。 @@ -113,14 +111,12 @@ func connect() (driver.Conn, error) { } ``` - ### 运行 go mod tidy {#run-go-mod-tidy} ```bash go mod tidy ``` - ### 设置连接信息 {#set-your-connection-details} 此前您已经获取了连接信息。现在在 `main.go` 的 `connect()` 函数中进行设置: @@ -141,7 +137,6 @@ func connect() (driver.Conn, error) { }, ``` - ### 运行示例 {#run-the-example} ```bash @@ -156,7 +151,6 @@ go run . 2023/03/06 14:18:33 name: hourly_data, uuid: a4e36bd4-1e82-45b3-be77-74a0fe65c52b ``` - ### 进一步了解 {#learn-more} 本类别其余文档将详细介绍 ClickHouse Go 客户端。 @@ -248,7 +242,6 @@ go run main.go ``` - ### 版本管理与兼容性 {#versioning--compatibility} 该客户端的发布独立于 ClickHouse。2.x 是当前开发中的主版本分支。所有 2.x 版本之间都应保持相互兼容。 @@ -297,7 +290,6 @@ fmt.Println(v) **在后续所有示例中,除非特别说明,否则都假定 ClickHouse 的 `conn` 变量已创建并可用。** - #### 连接设置 {#connection-settings} 在建立连接时,可以使用一个 Options 结构体来控制客户端行为。可用的设置如下: @@ -355,7 +347,6 @@ if err != nil { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/connect_settings.go) - #### 连接池 {#connection-pooling} 客户端维护一个连接池,并根据需要在查询之间复用这些连接。任意时刻最多会使用 `MaxOpenConns` 个连接,连接池的最大容量由 `MaxIdleConns` 控制。客户端在每次执行查询时都会从池中获取一个连接,在查询完成后将其归还到池中以便复用。单个连接会在整个批处理的生命周期内被占用,并在调用 `Send()` 后释放。 @@ -433,7 +424,6 @@ v, err := conn.ServerVersion() 如果需要额外的 TLS 参数,应用代码应在 `tls.Config` 结构体中设置相应字段。这可以包括指定密码套件、强制使用特定 TLS 版本(如 1.2 或 1.3)、添加内部 CA 证书链、在 ClickHouse 服务器要求时添加客户端证书(及其私钥),以及大多数用于更高级安全配置的其他选项。 - ### 认证 {#authentication} 在连接配置中通过指定 Auth 结构体来设置用户名和密码。 @@ -456,7 +446,6 @@ v, err := conn.ServerVersion() 
[完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/auth.go) - ### 连接到多个节点 {#connecting-to-multiple-nodes} 可以通过 `Addr` 结构指定多个地址。 @@ -510,7 +499,6 @@ if err != nil { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/1c0d81d0b1388dbb9e09209e535667df212f4ae4/examples/clickhouse_api/multi_host.go#L50-L67) - ### 执行 {#execution} 可以通过 `Exec` 方法执行任意语句。这对于 DDL 和简单语句非常有用,但不应将其用于大批量插入或循环执行查询。 @@ -533,7 +521,6 @@ conn.Exec(context.Background(), "INSERT INTO example VALUES (1, 'test-1')") 注意可以将 Context 传递给查询。这可以用于传入特定的查询级别设置——参见[使用 Context](#using-context)。 - ### 批量插入 {#batch-insert} 为了插入大量行,客户端提供了批处理语义。需要先准备一个批处理对象,然后可以向其中追加多行数据。最后通过 `Send()` 方法发送该批处理。在执行 `Send` 之前,批处理会保存在内存中。 @@ -626,7 +613,6 @@ return batch.Send() 若要查看每种列类型所支持的 Go 类型的完整说明,请参阅 [类型转换](#type-conversions)。 - ### 查询行 {#querying-rows} 用户可以使用 `QueryRow` 方法查询单行,或通过 `Query` 获取用于遍历结果集的游标。前者接收一个用于存放结果数据的目标变量,而后者则需要对每一行调用 `Scan`。 @@ -677,7 +663,6 @@ return rows.Err() 最后,请注意可以向 `Query` 和 `QueryRow` 方法传入 `Context`。这可用于配置查询级别的设置——更多详情请参阅 [使用 Context](#using-context)。 - ### 异步插入 {#async-insert} 支持通过 Async 方法进行异步插入。它允许用户指定客户端是应等待服务器完成插入操作,还是在服务器接收数据后立即返回响应。这实际上控制了参数 [wait_for_async_insert](/operations/settings/settings#wait_for_async_insert)。 @@ -717,7 +702,6 @@ for i := 0; i < 100; i++ { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/async.go) - ### 列式插入 {#columnar-insert} 可以按列格式执行插入操作。如果数据本身已经按这种列式结构组织,则无需再转换为行格式,从而带来性能优势。 @@ -759,7 +743,6 @@ return batch.Send() [完整示例代码](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/columnar_insert.go) - ### 使用结构体 {#using-structs} 对用户而言,Golang 的结构体为 ClickHouse 中的一行数据提供了逻辑表示。为此,原生接口提供了多种便捷函数来实现这一点。 @@ -786,7 +769,6 @@ for _, v := range result { [完整示例代码](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/select_struct.go) - #### 扫描结构体 {#scan-struct} `ScanStruct` 允许将查询结果中的单行 `Row` 映射到一个结构体中。 @@ -803,7 +785,6 @@ if err := conn.QueryRow(context.Background(), 
"SELECT Col1, COUNT() AS count FRO [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/scan_struct.go) - #### 追加 struct {#append-struct} `AppendStruct` 允许将一个 struct 追加到已有的[批次](#batch-insert)中,并将其视为一整行。要求该 struct 的字段在名称和类型上都与表的列一一对应。虽然所有列都必须有对应的 struct 字段,但某些 struct 字段可能没有对应的列表达形式。这些字段将会被直接忽略。 @@ -831,7 +812,6 @@ for i := 0; i < 1_000; i++ { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/append_struct.go) - ### 类型转换 {#type-conversions} 该客户端在接受用于插入和响应编组(marshaling)的变量类型方面,力求尽可能灵活。大多数情况下,ClickHouse 列类型都存在等价的 Golang 类型,例如,[UInt64](/sql-reference/data-types/int-uint/) 对应 [uint64](https://pkg.go.dev/builtin#uint64)。这些逻辑映射应始终得到支持。用户可能希望使用某些变量类型,只要先对变量或接收的数据进行转换,就可以用于插入列或接收响应。客户端旨在透明地支持这些转换,从而使用户无需在插入前为精确对齐而显式转换数据,并在查询时提供灵活的编组能力。此类透明转换不允许出现精度损失。例如,`uint32` 不能用于从 `UInt64` 列接收数据。反之,只要满足格式要求,字符串就可以插入到 `DateTime64` 列中。 @@ -900,7 +880,6 @@ rows.Close() [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/array.go) - #### Map {#map} `Map` 应作为 Go 语言的 `map` 插入,其键和值必须符合[前面](#type-conversions)定义的类型规则。 @@ -946,7 +925,6 @@ rows.Close() [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/map.go) - #### Tuples {#tuples} Tuple 表示由任意数量的列组成的一组数据。列可以显式命名,也可以只指定类型,例如: @@ -1008,7 +986,6 @@ fmt.Printf("row: col1=%v, col2=%v, col3=%v\n", col1, col2, col3) 注意:支持带类型的切片和映射,前提是命名元组中提供的子列类型都相同。 - #### 嵌套(Nested) {#nested} 嵌套字段等价于一个具名 Tuple 的数组。其用法取决于用户是否将 [flatten_nested](/operations/settings/settings#flatten_nested) 设置为 1 或 0。 @@ -1117,7 +1094,6 @@ rows.Close() 如果 `flatten_nested` 使用默认值 1,嵌套列会被扁平化为多个独立数组。这要求在插入和查询时使用嵌套切片。尽管任意层级的嵌套在实践中可能可行,但这并未得到官方支持。 - ```go conn, err := GetNativeConnection(nil, nil, nil) if err != nil { @@ -1187,7 +1163,6 @@ if err := batch.Send(); err != nil { 由于接口更为简洁且对嵌套提供了官方支持,我们推荐使用 `flatten_nested=0`。 - #### Geo 类型 {#geo-types} 该客户端支持 Geo 类型 Point、Ring、Polygon 和 MultiPolygon。这些字段在 Go 语言中使用包 
[github.com/paulmach/orb](https://github.com/paulmach/orb) 来表示。 @@ -1271,7 +1246,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&point, &ring, &polygo [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/geo.go) - #### UUID {#uuid} UUID 类型由 [github.com/google/uuid](https://github.com/google/uuid) 包提供支持。用户也可以将 UUID 作为字符串发送并进行编组(marshal),或使用任意实现了 `sql.Scanner` 或 `Stringify` 的类型。 @@ -1317,7 +1291,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2); err != [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/uuid.go) - #### Decimal {#decimal} `Decimal` 类型由 [github.com/shopspring/decimal](https://github.com/shopspring/decimal) 包支持。 @@ -1371,7 +1344,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v\n", col1, col2, col3, co [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/decimal.go) - #### Nullable {#nullable} Go 中的 Nil 值表示 ClickHouse 的 NULL。只有当字段被声明为 Nullable 时才能使用该值。在插入时,对于同一列的普通版本和 Nullable 版本都可以传入 Nil。对于前者(非 Nullable 列),将持久化该类型的默认值,例如 string 类型会存储为空字符串;对于后者(Nullable 版本),将在 ClickHouse 中存储 NULL 值。 @@ -1426,7 +1398,6 @@ if err = conn.QueryRow(ctx, "SELECT * FROM example").Scan(&col1, &col2, &col3, & 客户端还支持 `sql.Null*` 类型,例如 `sql.NullInt64`。这些类型与其对应的 ClickHouse 类型兼容。 - #### 大整数 - Int128、Int256、UInt128、UInt256 {#big-ints---int128-int256-uint128-uint256} 大于 64 位的数值类型使用 Go 语言原生的 [big](https://pkg.go.dev/math/big) 包来表示。 @@ -1497,7 +1468,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v, col6=%v, col7=%v\n", co [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/big_int.go) - ### 压缩 {#compression} 对压缩算法的支持取决于所使用的底层协议。对于原生协议,客户端支持 `LZ4` 和 `ZSTD` 压缩。压缩仅在块级别执行。可以通过在连接中包含 `Compression` 配置来启用压缩。 @@ -1547,7 +1517,6 @@ if err := batch.Send(); err != nil { 如果通过 HTTP 使用标准接口,还可以使用其他压缩方式。更多信息请参见 [database/sql API - Compression](#compression)。 - ### 参数绑定 {#parameter-binding} 该客户端在 `Exec`、`Query` 
和 `QueryRow` 方法中支持参数绑定。如下面的示例所示,支持使用命名参数、编号参数以及位置参数。以下是这些用法的示例。 @@ -1576,7 +1545,6 @@ fmt.Printf("命名绑定计数: %d\n", count) [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/bind.go) - #### 特殊情况 {#special-cases} 默认情况下,当切片作为查询参数传入时,会被展开为以逗号分隔的值列表。如果用户需要将一组值以方括号 `[ ]` 包裹的形式注入,则应使用 `ArraySet`。 @@ -1616,7 +1584,6 @@ fmt.Printf("命名日期计数: %d\n", count) [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/bind_special.go) - ### 使用 context {#using-context} Go 的 context 提供了一种在 API 边界之间传递截止时间、取消信号及其他请求作用域值的机制。连接上的所有方法都将 context 作为其第一个参数。虽然前面的示例使用的是 `context.Background()`,但用户可以利用这一能力来传递设置和截止时间,并取消查询。 @@ -1717,7 +1684,6 @@ for i := 1; i <= 6; i++ { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/context.go) - ### 进度 / Profile / 日志信息 {#progressprofilelog-information} 在查询中可以请求 Progress、Profile 和 Log 信息。Progress 信息会报告在 ClickHouse 中已读取和处理的行数和字节数等统计数据。相比之下,Profile 信息会提供返回给客户端的数据摘要,包括未压缩字节数、行数和数据块数量等总计信息。最后,Log 信息会提供线程相关统计信息,例如内存使用情况和数据处理速度。 @@ -1749,7 +1715,6 @@ rows.Close() [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/progress.go) - ### 动态扫描 {#dynamic-scanning} 在某些情况下,用户需要读取一些表,但事先并不知道这些表的模式(schema)或返回字段的类型。这在执行临时数据分析或编写通用工具时非常常见。为此,可以在查询结果中获取列类型信息。可以将这些信息与 Go 的反射机制结合使用,在运行时创建类型正确的变量实例,并将其传递给 Scan。 @@ -1788,7 +1753,6 @@ for rows.Next() { [完整示例代码](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/dynamic_scan_types.go) - ### 外部表 {#external-tables} [External tables](/engines/table-engines/special/external-data/) 允许客户端在执行 SELECT 查询时向 ClickHouse 发送数据。该数据会被放入一个临时表中,并可在查询本身中用于计算。 @@ -1855,7 +1819,6 @@ fmt.Printf("external_table_1 UNION external_table_2: %d\n", count) [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/clickhouse_api/external_data.go) - ### OpenTelemetry {#open-telemetry} ClickHouse 允许在原生协议中传递[跟踪上下文](/operations/opentelemetry/)。客户端支持通过函数 `clickhouse.withSpan` 创建一个 Span,并通过 Context 
传递,从而实现这一点。 @@ -1878,7 +1841,6 @@ fmt.Printf("count: %d\n", count) 关于如何使用链路追踪的完整说明,请参见 [OpenTelemetry 支持](/operations/opentelemetry/)。 - ## Database/SQL API {#databasesql-api} `database/sql` 或“标准”API 允许用户在应用代码应与底层数据库解耦、只依赖统一标准接口的场景下使用该客户端。这样做的代价是增加了额外的抽象层和间接层,并引入了一些不一定与 ClickHouse 完全契合的基础原语。但在需要通过工具连接多个数据库的场景中,这些成本通常是可以接受的。 @@ -1927,7 +1889,6 @@ func ConnectDSN() error { **在后续所有示例中,除非特别说明,我们都假定已创建并可以使用名为 `conn` 的 ClickHouse 连接变量。** - #### 连接设置 {#connection-settings-1} 可以在 DSN 字符串中传递以下参数: @@ -1966,7 +1927,6 @@ func ConnectSettings() error { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect_settings.go) - #### 连接池 {#connection-pooling-1} 用户可以按照[连接到多个节点](#connecting-to-multiple-nodes)中的说明,控制所提供节点地址列表的使用方式。不过,按照设计,连接管理和连接池功能由 `sql.DB` 负责处理。 @@ -2008,7 +1968,6 @@ func ConnectDSNHTTP() error { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/connect_http.go) - #### 连接到多个节点 {#connecting-to-multiple-nodes-1} 如果使用 `OpenDB`,请使用与 ClickHouse API 相同的选项配置方式连接到多个主机,并可选地指定 `ConnOpenStrategy`。 @@ -2056,7 +2015,6 @@ func MultiStdHostDSN() error { [完整示例代码](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/multi_host.go) - ### 使用 TLS {#using-tls-1} 如果使用 DSN 连接字符串,可以通过参数 `secure=true` 启用 SSL。`OpenDB` 方法采用与 [TLS 原生 API](#using-tls) 相同的方式,依赖于提供一个非 nil 的 TLS 结构体。虽然 DSN 连接字符串支持参数 `skip_verify` 以跳过 SSL 校验,但对于更高级的 TLS 配置,必须使用 `OpenDB` 方法——因为它允许传入相应的配置。 @@ -2110,7 +2068,6 @@ func ConnectDSNSSL() error { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/ssl.go) - ### 身份验证 {#authentication-1} 如果使用 `OpenDB`,可以通过常规选项传入身份验证信息。对于基于 DSN 的连接,可以在连接字符串中提供用户名和密码——既可以作为参数附加在其后,也可以作为编码在地址中的凭证。 @@ -2151,7 +2108,6 @@ func ConnectDSNAuth() error { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/auth.go) - ### 执行 {#execution-1} 获取连接后,用户可以使用 Exec 方法执行 `sql` 语句。 @@ -2174,7 +2130,6 @@ _, err = conn.Exec("INSERT INTO example VALUES (1, 'test-1')") 此方法不支持接收 context 参数——默认情况下,它使用后台 context 
执行。如果有此需求,用户可以使用 `ExecContext`——参见[使用 Context](#using-context)。 - ### 批量插入 {#batch-insert-1} 可以通过使用 `Being` 方法创建一个 `sql.Tx` 来实现批量语义。随后,使用携带 `INSERT` 语句的 `Prepare` 方法获取一个批处理对象。该方法返回一个 `sql.Stmt`,可以通过 `Exec` 方法向其中追加多行数据。批处理会在内存中累积,直到对原始的 `sql.Tx` 调用 `Commit` 为止。 @@ -2209,7 +2164,6 @@ return scope.Commit() [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/batch.go) - ### 查询行 {#querying-rows-1} 可以使用 `QueryRow` 方法来查询单行记录。它会返回一个 *sql.Row,你可以在其上调用 Scan,并传入变量的指针,用于接收并填充对应的列值。`QueryRowContext` 变体允许传入非 background 的 context —— 参见 [使用 Context](#using-context)。 @@ -2256,7 +2210,6 @@ for rows.Next() { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/query_rows.go) - ### 异步插入 {#async-insert-1} 可以通过调用 `ExecContext` 方法执行插入操作来实现异步插入。应按如下所示传入启用异步模式的 context。这样,用户可以指定客户端是应当等待服务器完成插入操作,还是在数据接收后立即返回响应。这实际上由参数 [wait_for_async_insert](/operations/settings/settings#wait_for_async_insert) 控制。 @@ -2288,7 +2241,6 @@ ctx := clickhouse.Context(context.Background(), clickhouse.WithStdAsync(false)) [完整示例代码](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/async.go) - ### 列式插入 {#columnar-insert-1} 不支持通过标准接口进行。 @@ -2352,7 +2304,6 @@ fmt.Printf("col1=%v, col2=%v, col3=%v, col4=%v, col5=%v", col1, col2, col3, col4 插入行为与 ClickHouse API 保持一致。 - ### 压缩 {#compression-1} 标准 API 支持与原生 [ClickHouse API](#compression) 相同的压缩算法,即在块级别支持 `lz4` 和 `zstd` 压缩。除此之外,对于 HTTP 连接,还支持 gzip、deflate 和 br 压缩。如果启用了上述任意一种,插入时的块以及查询响应都会进行压缩。其他请求(例如 ping 或查询请求)将保持未压缩状态。这与 `lz4` 和 `zstd` 选项的行为一致。 @@ -2390,7 +2341,6 @@ conn, err := sql.Open("clickhouse", fmt.Sprintf("http://%s:%d?username=%s&passwo * `br` - `0`(最佳速度)到 `11`(最佳压缩率) * `zstd`、`lz4` - 被忽略 - ### 参数绑定 {#parameter-binding-1} 标准 API 支持与 [ClickHouse API](#parameter-binding) 相同的参数绑定功能,允许将参数传递给 `Exec`、`Query` 和 `QueryRow` 方法(以及它们对应的 [Context](#using-context) 版本)。支持位置参数、命名参数和编号参数。 @@ -2421,7 +2371,6 @@ fmt.Printf("命名绑定计数: %d\n", count) 请注意,[特殊情况](#special-cases)仍然适用。 - ### 使用 context {#using-context-1} 标准 API 与 
[ClickHouse API](#using-context) 一样,支持通过 context 传递截止时间、取消信号以及其他与请求范围关联的值。不同于 ClickHouse API,这里是通过使用带有 `Context` 后缀的方法变体来实现的。也就是说,像 `Exec` 这类默认使用后台 context 的方法,会提供一个变体 `ExecContext`,它将 context 作为第一个参数传入。这样就可以在应用流程的任意阶段传递 context。例如,用户可以在通过 `ConnContext` 建立连接时传入 context,或者在通过 `QueryRowContext` 请求查询行时传入 context。下文给出了所有可用方法的示例。 @@ -2509,7 +2458,6 @@ for rows.Next() { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/context.go) - ### 会话 {#sessions} 原生连接本身就包含一个会话,而通过 HTTP 的连接则要求用户创建一个会话 ID,用于在设置中传递上下文。这样可以使用诸如临时表等依赖会话的特性。 @@ -2571,7 +2519,6 @@ for rows.Next() { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/session.go) - ### 动态扫描 {#dynamic-scanning-1} 与 [ClickHouse API](#dynamic-scanning) 类似,这里也可以获取列的类型信息,便于用户在运行时创建类型正确的变量实例并将其传递给 Scan。这样即使事先不知道列的类型,也可以读取这些列。 @@ -2611,7 +2558,6 @@ for rows.Next() { [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/dynamic_scan_types.go) - ### 外部表 {#external-tables-1} [外部表](/engines/table-engines/special/external-data/) 允许客户端在执行 `SELECT` 查询时向 ClickHouse 发送数据。这些数据会被放入一个临时表中,并可在查询本身中用于计算。 @@ -2678,7 +2624,6 @@ fmt.Printf("external_table_1 UNION external_table_2: %d\n", count) [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/external_data.go) - ### OpenTelemetry {#open-telemetry-1} ClickHouse 允许在原生协议中传递 [trace context](/operations/opentelemetry/)。客户端可以通过函数 `clickhouse.withSpan` 创建一个 Span,并通过 Context 进行传递来实现这一点。当使用 HTTP 作为传输协议时,不支持该功能。 @@ -2699,7 +2644,6 @@ fmt.Printf("count: %d\n", count) [完整示例](https://github.com/ClickHouse/clickhouse-go/blob/main/examples/std/open_telemetry.go) - ## 性能建议 {#performance-tips} * 在可能的情况下使用 ClickHouse API,尤其是针对基本类型(primitive types)。这可以避免大量的反射和间接调用。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx index 2ed2638415a..3a94ef52938 
100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/client/_snippets/_v0_8.mdx @@ -128,50 +128,50 @@ Client client = new Client.Builder() | `setKeepAliveTimeout(long timeout, ChronoUnit unit)` | * `timeout` - 以某个时间单位表示的超时时长。
- `unit` - `timeout` 的时间单位 | 设置 HTTP 连接的 Keep-Alive 超时时间。可通过将该超时时间设置为零 `0` 来禁用 Keep-Alive。

默认值:-
枚举:`ClientConfigProperties.HTTP_KEEP_ALIVE_TIMEOUT`
键:`http_keep_alive_timeout` | | `setConnectionReuseStrategy(ConnectionReuseStrategy strategy)` | - `strategy` - 枚举常量 `com.clickhouse.client.api.ConnectionReuseStrategy` | 选择连接池应使用的策略:如果希望连接在返回到池中后立即被复用,则使用 `LIFO`;如果希望按照连接变为可用的顺序来使用(返回的连接不会被立即再次使用),则使用 `FIFO`。

默认值:`FIFO`
枚举:`ClientConfigProperties.CONNECTION_REUSE_STRATEGY`
键:`connection_reuse_strategy` | | `setSocketTimeout(long timeout, ChronoUnit unit`)` | *`timeout`- 以某个时间单位表示的超时时间。
-`unit`-`timeout`的时间单位。 | 设置用于读写操作的 socket 超时时间

默认值:`0`
枚举值:`ClientConfigProperties.SOCKET_OPERATION_TIMEOUT`
键:`socket_timeout` | - |`setSocketRcvbuf(long size)` | -`size` - 大小(字节) | 设置 TCP socket 接收缓冲区。该缓冲区不在 JVM 内存中分配。

默认值:`8196`
枚举:`ClientConfigProperties.SOCKET_RCVBUF_OPT`
键:`socket_rcvbuf` | - |`setSocketSndbuf(long size)` | *`size` - 大小(以字节为单位) | 设置 TCP socket 接收缓冲区。该缓冲区不占用 JVM 内存。

默认值:`8196`
枚举:`ClientConfigProperties.SOCKET_SNDBUF_OPT`
键:`socket_sndbuf` | - |`setSocketKeepAlive(boolean value)` | -`value`- 标志位,表示是否启用该选项。 | 为客户端创建的每个 TCP 套接字设置`SO_KEEPALIVE` 选项。TCP Keepalive 启用一种机制,用于检查连接的存活状态,并有助于检测被突然终止的连接。

默认值:-
枚举:`ClientConfigProperties.SOCKET_KEEPALIVE_OPT`
键:`socket_keepalive` | - |`setSocketTcpNodelay(boolean value)` | *`value`- 指示是否启用该选项的标志。 | 为客户端创建的每个 TCP 套接字设置`SO_NODELAY`选项。该 TCP 选项会使套接字尽可能快地发送数据。

默认值: -
枚举:`ClientConfigProperties.SOCKET_TCP_NO_DELAY_OPT`
键:`socket_tcp_nodelay` | - |`setSocketLinger(int secondsToWait)` | -`secondsToWait` - 要等待的秒数。 | 为客户端创建的每个 TCP 套接字设置 Linger(延迟关闭)时间。

默认值:-
枚举:`ClientConfigProperties.SOCKET_LINGER_OPT`
键:`socket_linger` | - |`compressServerResponse(boolean enabled)` | *`enabled` - 用于指示是否应启用该选项的标志 | 设置服务器是否压缩其响应内容。

默认值:`true`
枚举:`ClientConfigProperties.COMPRESS_SERVER_RESPONSE`
键:`compress` | - |`compressClientRequest(boolean enabled)` | -`enabled` - 标志,用于指示是否启用该选项 | 设置客户端是否应对其请求进行压缩。

默认值:`false`
枚举:`ClientConfigProperties.COMPRESS_CLIENT_REQUEST`
键:`decompress` | - |`useHttpCompression(boolean enabled)` | *`enabled`- 表示是否启用该选项的标志 | 设置在启用相应选项时客户端/服务器通信是否使用 HTTP 压缩 | - |`appCompressedData(boolean enabled)` | -`enabled` - 指示是否启用该选项的标志位 | 通知客户端,压缩将由应用程序负责处理。

默认值:`false`
枚举值:`ClientConfigProperties.APP_COMPRESSED_DATA`
键:`app_compressed_data` | - |`setLZ4UncompressedBufferSize(int size)` | *`size` - 大小(字节) | 设置用于接收数据流未压缩部分的缓冲区大小。若缓冲区大小设置过小,将会创建一个新的缓冲区,并在日志中记录相应的警告。

默认值:`65536`
枚举常量:`ClientConfigProperties.COMPRESSION_LZ4_UNCOMPRESSED_BUF_SIZE`
配置键:`compression.lz4.uncompressed_buffer_size` | - |`disableNativeCompression` | -`disable` - 用于指示是否应禁用该选项的标志 | 禁用原生压缩。如果设置为 true,则会禁用原生压缩。

默认值:`false`
枚举:`ClientConfigProperties.DISABLE_NATIVE_COMPRESSION`
键:`disable_native_compression` | - |`setDefaultDatabase(String database)` | *`database` - 数据库的名称 | 设置默认数据库。

默认值:`default`
枚举值:`ClientConfigProperties.DATABASE`
键:`database` | - |`addProxy(ProxyType type, String host, int port)` | -`type`- 代理类型。
-`host`- 代理主机名或 IP 地址。
-`port` - 代理端口。 | 设置用于与服务器通信时使用的代理。如果代理要求身份验证,则必须配置代理。

默认值:-
枚举:`ClientConfigProperties.PROXY_TYPE`
键:`proxy_type`

默认值:-
枚举:`ClientConfigProperties.PROXY_HOST`
键:`proxy_host`

默认值:-
枚举:`ClientConfigProperties.PROXY_PORT`
键:`proxy_port`| - |`setProxyCredentials(String user, String pass)` | *`user`- 代理用户名
-`pass` - 密码 | 设置用于代理认证的用户凭据。

默认值:-
枚举:`ClientConfigProperties.PROXY_USER`
键:`proxy_user`

默认值:-
枚举:`ClientConfigProperties.PROXY_PASSWORD`
键:`proxy_password` | - |`setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | -`timeout`- 按某个时间单位表示的超时时长。
-`timeUnit`-`timeout` 的时间单位 | 用于设置查询的最⼤执行超时时间

默认值:`0`
枚举:`ClientConfigProperties.MAX_EXECUTION_TIME`
键:`max_execution_time` | - |`setHttpCookiesEnabled(boolean enabled)` |`enabled`- 指示是否启用该选项的标志 | 设置是否在后续请求中保存并回传 HTTP cookie。 | - |`setSSLTrustStore(String path)` |`path` - 本地(客户端)系统中的文件路径 | 设置客户端是否应使用 SSL 信任库来验证服务器主机。

默认值:-
枚举:`ClientConfigProperties.SSL_TRUST_STORE`
键:`trust_store` | - |`setSSLTrustStorePassword(String password)` |`password`- 秘密值 | 设置用于解锁由`setSSLTrustStore(String path)`指定的 SSL 信任库的密码

默认: -
枚举:`ClientConfigProperties.SSL_KEY_STORE_PASSWORD`
键:`key_store_password` | - |`setSSLTrustStoreType(String type)` |`type`- 信任库类型名称 | 设置由`setSSLTrustStore(String path)` 指定的信任库类型。

默认值:-
枚举值:`ClientConfigProperties.SSL_KEYSTORE_TYPE`
键:`key_store_type` | - |`setRootCertificate(String path)` |`path` - 本地(客户端)系统上的文件路径 | 设置客户端是否应使用指定的根(CA)证书对服务器主机进行验证。

默认值:-
枚举:`ClientConfigProperties.CA_CERTIFICATE`
键名:`sslrootcert` | - |`setClientCertificate(String path)` |`path` - 本地(客户端)系统中的文件路径 | 设置在发起 SSL 连接以及进行 SSL 认证时使用的客户端证书路径。

默认值:-
枚举值:`ClientConfigProperties.SSL_CERTIFICATE`
键名:`sslcert` | - |`setClientKey(String path)` |`path` - 本地(客户端)系统中的文件路径 | 设置用于与服务器进行 SSL 加密通信的客户端私钥。

默认值:-
枚举:`ClientConfigProperties.SSL_KEY`
键:`ssl_key` | - |`useServerTimeZone(boolean useServerTimeZone)` |`useServerTimeZone`- 表示是否启用该选项的标志 | 指定在解码 DateTime 和 Date 列值时,客户端是否应使用服务器时区。启用后,应通过`setServerTimeZone(String timeZone)` 设置服务器时区。

默认值:`true`
枚举:`ClientConfigProperties.USE_SERVER_TIMEZONE`
键:`use_server_time_zone` | - |`useTimeZone(String timeZone)` |`timeZone`- Java 中有效时区 ID 的字符串(参见`java.time.ZoneId`) | 设置在解码 DateTime 和 Date 列值时,是否应使用指定的时区。会覆盖服务器的时区设置。

默认值:-
枚举:`ClientConfigProperties.USE_TIMEZONE`
键:`use_time_zone` | - |`setServerTimeZone(String timeZone)` |`timeZone`- Java 中有效时区 ID 的字符串值(参见`java.time.ZoneId`) | 设置服务器端时区。默认使用 UTC 时区。

默认值:`UTC`
枚举:`ClientConfigProperties.SERVER_TIMEZONE`
键:`server_time_zone` | - |`useAsyncRequests(boolean async)` |`async` - 指示是否应启用该选项的标志。 | 设置是否在单独线程中执行客户端请求。默认禁用,因为应用程序更清楚如何组织多线程任务,而且在单独线程中运行任务并不会提升性能。

默认值:`false`
枚举:`ClientConfigProperties.ASYNC_OPERATIONS`
键:`async` | - |`setSharedOperationExecutor(ExecutorService executorService)` |`executorService` - 执行器服务的实例。 | 设置用于操作任务的 executor 服务。

默认值:`none`
枚举:`none`
键:`none` | - |`setClientNetworkBufferSize(int size)` | *`size`- 以字节为单位的大小 | 设置应用程序内存空间中缓冲区的大小,用于在套接字与应用程序之间来回复制数据。更大的缓冲区可以减少对 TCP 栈的系统调用次数,但会增加每个连接占用的内存。由于连接的生命周期通常较短,该缓冲区也会被垃圾回收(GC)。还需注意,分配大块连续内存可能会带来问题。

默认值:`300000`
枚举:`ClientConfigProperties.CLIENT_NETWORK_BUFFER_SIZE`
键:`client_network_buffer_size` | - |`retryOnFailures(ClientFaultCause ...causes)` | -`causes`-`com.clickhouse.client.api.ClientFaultCause` 枚举中的常量 | 设置可恢复/可重试的错误类型。

默认值:`NoHttpResponse,ConnectTimeout,ConnectionRequestTimeout`
枚举值:`ClientConfigProperties.CLIENT_RETRY_ON_FAILURE`
键:`client_retry_on_failures` | - |`setMaxRetries(int maxRetries)` | *`maxRetries`- 重试次数 | 为针对`retryOnFailures(ClientFaultCause ...causes)` 定义的失败类型设置最大重试次数

默认值:`3`
枚举值:`ClientConfigProperties.RETRY_ON_FAILURE`
键名:`retry` | - |`allowBinaryReaderToReuseBuffers(boolean reuse)` | -`reuse`- 指示是否应启用该选项的标志位 | 大多數資料集都包含以小位元組序列編碼的數值資料。預設情況下,reader 會分配所需的緩衝區,將資料讀入其中,然後轉換為目標的 Number 類別。由於會分配並釋放大量小物件,這可能會造成顯著的 GC 壓力。啟用此選項後,reader 會使用預先分配的緩衝區來進行數值轉換。這是安全的,因為每個 reader 都有自己的一組緩衝區,且各個 reader 只會被單一執行緒使用。 | - |`httpHeader(String key, String value)` | *`key`- HTTP 头部键名。
-`value` - 该头部的字符串值。 | 为单个 HTTP 头部设置值。之前的值将被覆盖。

默认值:`none`
枚举:`none`
键:`none` | - |`httpHeader(String key, Collection values)` | -`key`- HTTP 头部键名。
-`values` - 字符串值列表。 | 为单个 HTTP 头部设置值。之前的值会被覆盖。

默认值:`none`
枚举:`none`
键:`none` | - |`httpHeaders(Map headers)` | *`header`- 包含 HTTP 头及其对应值的映射表。 | 同时设置多个 HTTP 头部的值。

默认值:`none`
枚举:`none`
键:`none` | - |`serverSetting(String name, String value)` | -`name`- 查询级别设置的名称。
-`value` - 此设置的字符串值。 | 设置随每个查询一起传递给服务器的设置。单个操作的设置可以覆盖它。[设置列表](/operations/settings/query-level)

默认值:`none`
枚举值:`none`
键:`none` | - |`serverSetting(String name, Collection values)` | *`name`- 查询级别设置的名称。
-`values`- 该设置的字符串值。 | 配置随每个查询一同传递给服务器的设置。单个操作的设置可以覆盖它。参见 [设置列表](/operations/settings/query-level)。此方法适用于为可接受多个值的设置赋值,例如 [roles](/interfaces/http#setting-role-with-query-parameters)

默认值:`none`
枚举:`none`
键:`none` | - |`columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)`| -`strategy`- 用于列与字段匹配的策略实现 | 设置在注册 DTO 时用于匹配 DTO 类字段和数据库列的自定义策略。

默认值:`none`
枚举:`none`
键:`none` | - |`useHTTPBasicAuth(boolean useBasicAuth)` | *`useBasicAuth` - 用于指示是否应启用该选项的标志 | 设置是否使用基本 HTTP 身份验证进行用户密码认证。默认启用。使用此类型的身份验证可以解决密码中包含特殊字符而无法通过 HTTP 头部传输所导致的问题。

默认值:`true`
枚举:`ClientConfigProperties.HTTP_USE_BASIC_AUTH`
键:`http_use_basic_auth` | - |`setClientName(String clientName)` | -`clientName`- 表示应用程序名称的字符串 | 设置关于调用方应用程序的附加信息。该字符串将作为客户端名称传递给服务器。对于 HTTP 协议,将作为`User-Agent` 头部字段传递。

默认值:-
枚举:`ClientConfigProperties.CLIENT_NAME`
键:`client_name` | - |`useBearerTokenAuth(String bearerToken)` | *`bearerToken` - 编码后的 Bearer 令牌 | 指定是否使用 Bearer 认证以及要使用的 token。token 将按原样发送,因此在传递给此方法之前应先进行编码处理。

默认值:-
枚举:`ClientConfigProperties.BEARERTOKEN_AUTH`
键:`bearer_token` | - |`registerClientMetrics(Object registry, String name)` | -`registry`- Micrometer registry 实例
-`name`- 指标组名称 | 在 Micrometer([https://micrometer.io/](https://micrometer.io/))的注册表实例中注册传感器。 | - |`setServerVersion(String version)` | *`version` - 服务器版本的字符串形式 | 设置服务器版本以避免版本检测。

默认值:-
枚举值:`ClientConfigProperties.SERVER_VERSION`
键名:`server_version` | - |`typeHintMapping(Map typeHintMapping)` | -`typeHintMapping` - 类型提示映射 | 为 ClickHouse 类型设置类型提示映射。例如,可以让多维数组以 Java 容器类型呈现,而不是作为独立的 Array 对象。

默认值:-
枚举:`ClientConfigProperties.TYPE_HINT_MAPPING`
键:`type_hint_mapping` | - |`sslSocketSNI(String sni)` | *`sni` - 服务器名称的字符串形式 | 在 SSL/TLS 连接中设置用于 SNI(Server Name Indication,服务器名称指示)的服务器名称。

默认值:-
枚举值:`ClientConfigProperties.SSL_SOCKET_SNI`
键名:`ssl_socket_sni` | + |`setSocketRcvbuf(long size)` | -`size` - 大小(字节) | 设置 TCP socket 接收缓冲区。该缓冲区不在 JVM 内存中分配。

默认值:`8196`
枚举:`ClientConfigProperties.SOCKET_RCVBUF_OPT`
键:`socket_rcvbuf` | + |`setSocketSndbuf(long size)` | *`size` - 大小(以字节为单位) | 设置 TCP socket 接收缓冲区。该缓冲区不占用 JVM 内存。

默认值:`8196`
枚举:`ClientConfigProperties.SOCKET_SNDBUF_OPT`
键:`socket_sndbuf` | + |`setSocketKeepAlive(boolean value)` | -`value`- 标志位,表示是否启用该选项。 | 为客户端创建的每个 TCP 套接字设置`SO_KEEPALIVE` 选项。TCP Keepalive 启用一种机制,用于检查连接的存活状态,并有助于检测被突然终止的连接。

默认值:-
枚举:`ClientConfigProperties.SOCKET_KEEPALIVE_OPT`
键:`socket_keepalive` | + |`setSocketTcpNodelay(boolean value)` | *`value`- 指示是否启用该选项的标志。 | 为客户端创建的每个 TCP 套接字设置`SO_NODELAY`选项。该 TCP 选项会使套接字尽可能快地发送数据。

默认值: -
枚举:`ClientConfigProperties.SOCKET_TCP_NO_DELAY_OPT`
键:`socket_tcp_nodelay` | + |`setSocketLinger(int secondsToWait)` | -`secondsToWait` - 要等待的秒数。 | 为客户端创建的每个 TCP 套接字设置 Linger(延迟关闭)时间。

默认值:-
枚举:`ClientConfigProperties.SOCKET_LINGER_OPT`
键:`socket_linger` | + |`compressServerResponse(boolean enabled)` | *`enabled` - 用于指示是否应启用该选项的标志 | 设置服务器是否压缩其响应内容。

默认值:`true`
枚举:`ClientConfigProperties.COMPRESS_SERVER_RESPONSE`
键:`compress` | + |`compressClientRequest(boolean enabled)` | -`enabled` - 标志,用于指示是否启用该选项 | 设置客户端是否应对其请求进行压缩。

默认值:`false`
枚举:`ClientConfigProperties.COMPRESS_CLIENT_REQUEST`
键:`decompress` | + |`useHttpCompression(boolean enabled)` | *`enabled`- 表示是否启用该选项的标志 | 设置在启用相应选项时客户端/服务器通信是否使用 HTTP 压缩 | + |`appCompressedData(boolean enabled)` | -`enabled` - 指示是否启用该选项的标志位 | 通知客户端,压缩将由应用程序负责处理。

默认值:`false`
枚举值:`ClientConfigProperties.APP_COMPRESSED_DATA`
键:`app_compressed_data` | + |`setLZ4UncompressedBufferSize(int size)` | *`size` - 大小(字节) | 设置用于接收数据流未压缩部分的缓冲区大小。若缓冲区大小设置过小,将会创建一个新的缓冲区,并在日志中记录相应的警告。

默认值:`65536`
枚举常量:`ClientConfigProperties.COMPRESSION_LZ4_UNCOMPRESSED_BUF_SIZE`
配置键:`compression.lz4.uncompressed_buffer_size` | + |`disableNativeCompression` | -`disable` - 用于指示是否应禁用该选项的标志 | 禁用原生压缩。如果设置为 true,则会禁用原生压缩。

默认值:`false`
枚举:`ClientConfigProperties.DISABLE_NATIVE_COMPRESSION`
键:`disable_native_compression` | + |`setDefaultDatabase(String database)` | *`database` - 数据库的名称 | 设置默认数据库。

默认值:`default`
枚举值:`ClientConfigProperties.DATABASE`
键:`database` | + |`addProxy(ProxyType type, String host, int port)` | -`type`- 代理类型。
-`host`- 代理主机名或 IP 地址。
-`port` - 代理端口。 | 设置用于与服务器通信时使用的代理。如果代理要求身份验证,则必须配置代理。

默认值:-
枚举:`ClientConfigProperties.PROXY_TYPE`
键:`proxy_type`

默认值:-
枚举:`ClientConfigProperties.PROXY_HOST`
键:`proxy_host`

默认值:-
枚举:`ClientConfigProperties.PROXY_PORT`
键:`proxy_port`| + |`setProxyCredentials(String user, String pass)` | *`user`- 代理用户名
-`pass` - 密码 | 设置用于代理认证的用户凭据。

默认值:-
枚举:`ClientConfigProperties.PROXY_USER`
键:`proxy_user`

默认值:-
枚举:`ClientConfigProperties.PROXY_PASSWORD`
键:`proxy_password` | + |`setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | -`timeout`- 按某个时间单位表示的超时时长。
-`timeUnit`-`timeout` 的时间单位 | 用于设置查询的最大执行超时时间

默认值:`0`
枚举:`ClientConfigProperties.MAX_EXECUTION_TIME`
键:`max_execution_time` | + |`setHttpCookiesEnabled(boolean enabled)` |`enabled`- 指示是否启用该选项的标志 | 设置是否在后续请求中保存并回传 HTTP cookie。 | + |`setSSLTrustStore(String path)` |`path` - 本地(客户端)系统中的文件路径 | 设置客户端是否应使用 SSL 信任库来验证服务器主机。

默认值:-
枚举:`ClientConfigProperties.SSL_TRUST_STORE`
键:`trust_store` | + |`setSSLTrustStorePassword(String password)` |`password`- 秘密值 | 设置用于解锁由`setSSLTrustStore(String path)`指定的 SSL 信任库的密码

默认: -
枚举:`ClientConfigProperties.SSL_KEY_STORE_PASSWORD`
键:`key_store_password` | + |`setSSLTrustStoreType(String type)` |`type`- 信任库类型名称 | 设置由`setSSLTrustStore(String path)` 指定的信任库类型。

默认值:-
枚举值:`ClientConfigProperties.SSL_KEYSTORE_TYPE`
键:`key_store_type` | + |`setRootCertificate(String path)` |`path` - 本地(客户端)系统上的文件路径 | 设置客户端是否应使用指定的根(CA)证书对服务器主机进行验证。

默认值:-
枚举:`ClientConfigProperties.CA_CERTIFICATE`
键名:`sslrootcert` | + |`setClientCertificate(String path)` |`path` - 本地(客户端)系统中的文件路径 | 设置在发起 SSL 连接以及进行 SSL 认证时使用的客户端证书路径。

默认值:-
枚举值:`ClientConfigProperties.SSL_CERTIFICATE`
键名:`sslcert` | + |`setClientKey(String path)` |`path` - 本地(客户端)系统中的文件路径 | 设置用于与服务器进行 SSL 加密通信的客户端私钥。

默认值:-
枚举:`ClientConfigProperties.SSL_KEY`
键:`ssl_key` | + |`useServerTimeZone(boolean useServerTimeZone)` |`useServerTimeZone`- 表示是否启用该选项的标志 | 指定在解码 DateTime 和 Date 列值时,客户端是否应使用服务器时区。启用后,应通过`setServerTimeZone(String timeZone)` 设置服务器时区。

默认值:`true`
枚举:`ClientConfigProperties.USE_SERVER_TIMEZONE`
键:`use_server_time_zone` | + |`useTimeZone(String timeZone)` |`timeZone`- Java 中有效时区 ID 的字符串(参见`java.time.ZoneId`) | 设置在解码 DateTime 和 Date 列值时,是否应使用指定的时区。会覆盖服务器的时区设置。

默认值:-
枚举:`ClientConfigProperties.USE_TIMEZONE`
键:`use_time_zone` | + |`setServerTimeZone(String timeZone)` |`timeZone`- Java 中有效时区 ID 的字符串值(参见`java.time.ZoneId`) | 设置服务器端时区。默认使用 UTC 时区。

默认值:`UTC`
枚举:`ClientConfigProperties.SERVER_TIMEZONE`
键:`server_time_zone` | + |`useAsyncRequests(boolean async)` |`async` - 指示是否应启用该选项的标志。 | 设置是否在单独线程中执行客户端请求。默认禁用,因为应用程序更清楚如何组织多线程任务,而且在单独线程中运行任务并不会提升性能。

默认值:`false`
枚举:`ClientConfigProperties.ASYNC_OPERATIONS`
键:`async` | + |`setSharedOperationExecutor(ExecutorService executorService)` |`executorService` - 执行器服务的实例。 | 设置用于操作任务的 executor 服务。

默认值:`none`
枚举:`none`
键:`none` | + |`setClientNetworkBufferSize(int size)` | *`size`- 以字节为单位的大小 | 设置应用程序内存空间中缓冲区的大小,用于在套接字与应用程序之间来回复制数据。更大的缓冲区可以减少对 TCP 栈的系统调用次数,但会增加每个连接占用的内存。由于连接的生命周期通常较短,该缓冲区也会被垃圾回收(GC)。还需注意,分配大块连续内存可能会带来问题。

默认值:`300000`
枚举:`ClientConfigProperties.CLIENT_NETWORK_BUFFER_SIZE`
键:`client_network_buffer_size` | + |`retryOnFailures(ClientFaultCause ...causes)` | -`causes`-`com.clickhouse.client.api.ClientFaultCause` 枚举中的常量 | 设置可恢复/可重试的错误类型。

默认值:`NoHttpResponse,ConnectTimeout,ConnectionRequestTimeout`
枚举值:`ClientConfigProperties.CLIENT_RETRY_ON_FAILURE`
键:`client_retry_on_failures` | + |`setMaxRetries(int maxRetries)` | *`maxRetries`- 重试次数 | 为针对`retryOnFailures(ClientFaultCause ...causes)` 定义的失败类型设置最大重试次数

默认值:`3`
枚举值:`ClientConfigProperties.RETRY_ON_FAILURE`
键名:`retry` | + |`allowBinaryReaderToReuseBuffers(boolean reuse)` | -`reuse`- 指示是否应启用该选项的标志位 | 大多数数据集都包含以小字节序列编码的数值数据。默认情况下,reader 会分配所需的缓冲区,将数据读入其中,然后转换为目标的 Number 类。由于会分配并释放大量小对象,这可能会造成显著的 GC 压力。启用此选项后,reader 会使用预先分配的缓冲区来进行数值转换。这是安全的,因为每个 reader 都有自己的一组缓冲区,且各个 reader 只会被单一线程使用。 | + |`httpHeader(String key, String value)` | *`key`- HTTP 头部键名。
-`value` - 该头部的字符串值。 | 为单个 HTTP 头部设置值。之前的值将被覆盖。

默认值:`none`
枚举:`none`
键:`none` | + |`httpHeader(String key, Collection values)` | -`key`- HTTP 头部键名。
-`values` - 字符串值列表。 | 为单个 HTTP 头部设置值。之前的值会被覆盖。

默认值:`none`
枚举:`none`
键:`none` | + |`httpHeaders(Map headers)` | *`headers`- 包含 HTTP 头及其对应值的映射表。 | 同时设置多个 HTTP 头部的值。

默认值:`none`
枚举:`none`
键:`none` | + |`serverSetting(String name, String value)` | -`name`- 查询级别设置的名称。
-`value` - 此设置的字符串值。 | 设置随每个查询一起传递给服务器的设置。单个操作的设置可以覆盖它。[设置列表](/operations/settings/query-level)

默认值:`none`
枚举值:`none`
键:`none` | + |`serverSetting(String name, Collection values)` | *`name`- 查询级别设置的名称。
-`values`- 该设置的字符串值。 | 配置随每个查询一同传递给服务器的设置。单个操作的设置可以覆盖它。参见 [设置列表](/operations/settings/query-level)。此方法适用于为可接受多个值的设置赋值,例如 [roles](/interfaces/http#setting-role-with-query-parameters)

默认值:`none`
枚举:`none`
键:`none` | + |`columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)`| -`strategy`- 用于列与字段匹配的策略实现 | 设置在注册 DTO 时用于匹配 DTO 类字段和数据库列的自定义策略。

默认值:`none`
枚举:`none`
键:`none` | + |`useHTTPBasicAuth(boolean useBasicAuth)` | *`useBasicAuth` - 用于指示是否应启用该选项的标志 | 设置是否使用基本 HTTP 身份验证进行用户密码认证。默认启用。使用此类型的身份验证可以解决密码中包含特殊字符而无法通过 HTTP 头部传输所导致的问题。

默认值:`true`
枚举:`ClientConfigProperties.HTTP_USE_BASIC_AUTH`
键:`http_use_basic_auth` | + |`setClientName(String clientName)` | -`clientName`- 表示应用程序名称的字符串 | 设置关于调用方应用程序的附加信息。该字符串将作为客户端名称传递给服务器。对于 HTTP 协议,将作为`User-Agent` 头部字段传递。

默认值:-
枚举:`ClientConfigProperties.CLIENT_NAME`
键:`client_name` | + |`useBearerTokenAuth(String bearerToken)` | *`bearerToken` - 编码后的 Bearer 令牌 | 指定是否使用 Bearer 认证以及要使用的 token。token 将按原样发送,因此在传递给此方法之前应先进行编码处理。

默认值:-
枚举:`ClientConfigProperties.BEARERTOKEN_AUTH`
键:`bearer_token` | + |`registerClientMetrics(Object registry, String name)` | -`registry`- Micrometer registry 实例
-`name`- 指标组名称 | 在 Micrometer([https://micrometer.io/](https://micrometer.io/))的注册表实例中注册传感器。 | + |`setServerVersion(String version)` | *`version` - 服务器版本的字符串形式 | 设置服务器版本以避免版本检测。

默认值:-
枚举值:`ClientConfigProperties.SERVER_VERSION`
键名:`server_version` | + |`typeHintMapping(Map typeHintMapping)` | -`typeHintMapping` - 类型提示映射 | 为 ClickHouse 类型设置类型提示映射。例如,可以让多维数组以 Java 容器类型呈现,而不是作为独立的 Array 对象。

默认值:-
枚举:`ClientConfigProperties.TYPE_HINT_MAPPING`
键:`type_hint_mapping` | + |`sslSocketSNI(String sni)` | *`sni` - 服务器名称的字符串形式 | 在 SSL/TLS 连接中设置用于 SNI(Server Name Indication,服务器名称指示)的服务器名称。

默认值:-
枚举值:`ClientConfigProperties.SSL_SOCKET_SNI`
键名:`ssl_socket_sni` | ### 服务器设置 \{#server-settings\} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md index 1efe2efb310..f8c89ab776e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/index.md @@ -10,7 +10,6 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import CodeBlock from '@theme/CodeBlock'; - # Java 客户端概览 {#java-clients-overview} - [Client 0.8+](./client/client.mdx) @@ -152,7 +151,6 @@ JDBC 驱动继承其底层客户端实现所具备的相同功能。其他 JDBC ``` - #### 配置日志记录 {#configuring-logging} 具体配置方式取决于你所使用的日志框架。例如,如果你使用的是 `Logback`,可以在名为 `logback.xml` 的文件中进行配置: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md index d3289620b9b..d58ed95fd23 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/java/r2dbc.md @@ -12,7 +12,6 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import CodeBlock from '@theme/CodeBlock'; - # R2DBC 驱动 {#r2dbc-driver} ## R2DBC 驱动程序 {#r2dbc-driver} @@ -42,7 +41,6 @@ import CodeBlock from '@theme/CodeBlock'; ``` - ### 连接 ClickHouse {#connect-to-clickhouse} ```java showLineNumbers @@ -53,7 +51,6 @@ ConnectionFactory connectionFactory = ConnectionFactories .flatMapMany(connection -> connection ``` - ### 查询 {#query} ```java showLineNumbers @@ -71,7 +68,6 @@ connection .subscribe(); ``` - ### 插入 {#insert} ```java showLineNumbers diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md index 17b1a43b4d3..4646a7d8168 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/js.md @@ -15,7 +15,6 @@ integration: import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # ClickHouse JS {#clickhouse-js} 用于连接 ClickHouse 的官方 JS 客户端。 @@ -66,7 +65,6 @@ Web 版安装: npm i @clickhouse/client-web ``` - ## 与 ClickHouse 的兼容性 {#compatibility-with-clickhouse} | Client version | ClickHouse | @@ -111,7 +109,6 @@ const client = createClient({ 客户端实例可以在创建时进行[预配置](./js.md#configuration)。 - #### 配置 {#configuration} 在创建客户端实例时,可以调整以下连接设置: @@ -190,7 +187,6 @@ createClient({ }) ``` - ### 连接 {#connecting} #### 收集连接信息 {#gather-your-connection-details} @@ -217,7 +213,6 @@ const client = createClient({ 客户端代码仓库包含多个使用环境变量的示例,例如[在 ClickHouse Cloud 中创建表](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/create_table_cloud.ts)、[使用异步插入](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/async_insert.ts)等。 - #### 连接池(仅限 Node.js) {#connection-pool-nodejs-only} 为避免为每个请求重新建立连接所带来的开销,客户端会创建一个到 ClickHouse 的连接池以复用连接,并利用 Keep-Alive 机制。默认情况下 Keep-Alive 是启用的,连接池大小为 `10`,但你可以通过 `max_open_connections` [配置项](./js.md#configuration) 来修改它。 @@ -257,7 +252,6 @@ interface BaseQueryParams { } ``` - ### 查询方法 {#query-method} 此方法用于大多数会返回响应的语句,例如 `SELECT`,或用于发送诸如 `CREATE TABLE` 的 DDL 语句,并且应当使用 `await` 等待其完成。返回的结果集通常由应用程序进行消费和处理。 @@ -285,7 +279,6 @@ interface ClickHouseClient { 不要在 `query` 中指定 FORMAT 子句,请改用 `format` 参数。 ::: - #### 结果集与行抽象 {#result-set-and-row-abstractions} `ResultSet` 为你的应用程序提供了若干便于进行数据处理的辅助方法。 @@ -370,7 +363,6 @@ await new Promise((resolve, reject) => { **示例:**(仅限 Node.js)通过经典的 `on('data')` 方式,以 `CSV` 格式流式读取查询结果。此方式可与 `for await 
const` 语法互换使用。 [源代码](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/select_streaming_text_line_by_line.ts) - ```ts const resultSet = await client.query({ query: 'SELECT number FROM system.numbers_mt LIMIT 5', @@ -429,7 +421,6 @@ while (true) { } ``` - ### Insert 方法 {#insert-method} 这是插入数据的主要方法。 @@ -451,7 +442,6 @@ interface ClickHouseClient { 如果 insert 语句已发送到服务器,则 `executed` 标志将为 `true`。 - #### Node.js 中的 insert 方法与流式处理 {#insert-method-and-streaming-in-nodejs} 它既可以与 `Stream.Readable` 一起使用,也可以与普通的 `Array` 一起使用,具体取决于传递给 `insert` 方法的[数据格式](./js.md#supported-data-formats)。另请参阅本节中关于[文件流式处理](./js.md#streaming-files-nodejs-only)的内容。 @@ -554,7 +544,6 @@ await client.insert({ 有关更多详细信息,请参阅[源代码](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_exclude_columns.ts)。 - **示例**:向一个不同于客户端实例所配置数据库的其他数据库中插入数据。[源代码](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_into_different_db.ts)。 ```ts @@ -565,7 +554,6 @@ await client.insert({ }) ``` - #### Web 版本的限制 {#web-version-limitations} 目前,`@clickhouse/client-web` 中的插入操作仅支持 `Array` 和 `JSON*` 格式。 @@ -593,7 +581,6 @@ interface InsertParams extends BaseQueryParams { 此内容将来可能会有所变动。另请参阅:[所有客户端方法的基础参数](./js.md#base-parameters-for-all-client-methods)。 - ### 命令方法 {#command-method} 可用于没有任何输出的语句、`FORMAT` 子句不适用的情况,或当你对响应结果完全不感兴趣时。此类语句的示例包括 `CREATE TABLE` 或 `ALTER TABLE`。 @@ -664,7 +651,6 @@ await client.command({ 使用 `abort_signal` 取消请求并不能保证服务器未执行该语句。 ::: - ### Exec 方法 {#exec-method} 如果有某个自定义查询不适用于 `query`/`insert`,并且你关心其返回结果,可以使用 `exec` 作为 `command` 的替代方案。 @@ -704,7 +690,6 @@ export interface QueryResult { } ``` - ### Ping {#ping} 用于检查连接状态的 `ping` 方法会在服务器可达时返回 `true`。 @@ -762,7 +747,6 @@ const result = await client.ping({ select: true, /* query_id、abort_signal、ht `ping` 方法可以使用大多数标准的 `query` 方法参数——参见 `PingParamsWithSelectQuery` 类型定义。 - ### 关闭(仅限 Node.js) {#close-nodejs-only} 关闭所有已打开的连接并释放资源。在 Web 版本中不执行任何操作。 @@ -771,7 +755,6 @@ const result = await client.ping({ select: true, /* 
query_id、abort_signal、ht await client.close() ``` - ## 流式处理文件(仅限 Node.js) {#streaming-files-nodejs-only} 在客户端仓库中,提供了多种使用常见数据格式(NDJSON、CSV、Parquet)的文件流式处理示例。 @@ -900,7 +883,6 @@ await client.insert({ 但是,如果你使用的是 `DateTime` 或 `DateTime64` 列,则可以同时使用字符串和 JS Date 对象。在将 `date_time_input_format` 设置为 `best_effort` 时,可以将 JS Date 对象原样传递给 `insert`。有关更多详情,请参阅此[示例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_js_dates.ts)。 - ### Decimal* 类型注意事项 {#decimal-types-caveats} 可以使用 `JSON*` 系列格式插入 Decimal 类型的数据。假设我们有如下定义的表: @@ -951,7 +933,6 @@ await client.query({ 更多详细信息请参见[此示例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/insert_decimals.ts)。 - ### 整数类型:Int64、Int128、Int256、UInt64、UInt128、UInt256 {#integral-types-int64-int128-int256-uint64-uint128-uint256} 虽然服务器可以将其作为数字接收,但在 `JSON*` 系列输出格式中会以字符串形式返回,以避免整数溢出,因为这些类型的最大值大于 `Number.MAX_SAFE_INTEGER`。 @@ -979,7 +960,6 @@ const resultSet = await client.query({ expect(await resultSet.json()).toEqual([ { number: 0 } ]) ``` - ## ClickHouse 设置 {#clickhouse-settings} 客户端可以通过 [settings](/operations/settings/settings/) 机制调整 ClickHouse 的行为。 @@ -1006,7 +986,6 @@ client.query({ 请确保代表其发起查询的用户具备足够的权限来修改这些设置。 ::: - ## 高级主题 {#advanced-topics} ### 带参数的查询 {#queries-with-parameters} @@ -1041,7 +1020,6 @@ await client.query({ 有关更多详情,请参阅 [https://clickhouse.com/docs/interfaces/cli#cli-queries-with-parameters-syntax](https://clickhouse.com/docs/interfaces/cli#cli-queries-with-parameters-syntax)。 - ### 压缩 {#compression} 注意:目前 Web 版本尚不支持请求压缩。响应压缩可正常使用。Node.js 版本同时支持请求和响应压缩。 @@ -1062,7 +1040,6 @@ createClient({ * `response: true` 表示 ClickHouse 服务器将返回压缩后的响应体。默认值:`response: false` * `request: true` 表示对客户端请求体启用压缩。默认值:`request: false` - ### 日志(仅限 Node.js) {#logging-nodejs-only} :::important @@ -1120,7 +1097,6 @@ const client = createClient({ 可以在[此处](https://github.com/ClickHouse/clickhouse-js/blob/main/packages/client-common/src/logger.ts)找到默认的 Logger 实现。 - ### TLS 证书(仅限 Node.js) {#tls-certificates-nodejs-only} Node.js 
客户端可选支持基本(仅证书颁发机构) @@ -1156,7 +1132,6 @@ const client = createClient({ 请在代码仓库中查看 [基本](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/basic_tls.ts) 和 [双向](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/node/mutual_tls.ts) TLS 的完整示例。 - ### Keep-alive 配置(仅适用于 Node.js) {#keep-alive-configuration-nodejs-only} 客户端默认在底层 HTTP 代理中启用了 Keep-Alive,这意味着已建立的套接字会被复用于后续请求,并且会发送 `Connection: keep-alive` 头。空闲套接字默认会在连接池中保留 2500 毫秒(参见[有关调整此选项的说明](./js.md#adjusting-idle_socket_ttl))。 @@ -1188,7 +1163,6 @@ curl -v --data-binary "SELECT 1" 在这种情况下,`keep_alive_timeout` 为 10 秒,你可以尝试将 `keep_alive.idle_socket_ttl` 增加到 9000 甚至 9500 毫秒,以便让空闲 socket 比默认情况下多保持打开一会儿。密切关注可能出现的 "Socket hang-up" 错误,这将表明服务器在客户端之前关闭了连接;如有必要,逐步降低该值,直到错误不再出现为止。 - #### 故障排查 {#troubleshooting} 如果即使使用了最新版本的客户端仍然遇到 `socket hang up` 错误,可以通过以下方式来解决这个问题: @@ -1249,7 +1223,6 @@ const client = createClient({ 请参阅此[示例](https://github.com/ClickHouse/clickhouse-js/blob/main/examples/read_only_user.ts),其中更详细地展示了 `readonly=1` 用户的各项限制。 - ### 带路径名的代理 {#proxy-with-a-pathname} 如果你的 ClickHouse 实例部署在代理之后,并且其 URL 中包含路径名,例如 [http://proxy:8123/clickhouse_server](http://proxy:8123/clickhouse_server),请将 `clickhouse_server` 设置为 `pathname` 配置选项(可以带或不带前导斜杠);否则,如果在 `url` 中直接包含该路径,它将被视为 `database` 选项。支持多级路径,例如 `/my_proxy/db`。 @@ -1261,7 +1234,6 @@ const client = createClient({ }) ``` - ### 带身份验证的反向代理 {#reverse-proxy-with-authentication} 如果在 ClickHouse 部署前面有一个带身份验证的反向代理,可以使用 `http_headers` 设置来提供所需的请求头: @@ -1274,7 +1246,6 @@ const client = createClient({ }) ``` - ### 自定义 HTTP/HTTPS agent(实验性功能,仅适用于 Node.js) {#custom-httphttps-agent-experimental-nodejs-only} :::warning @@ -1356,7 +1327,6 @@ const client = createClient({ 在同时使用证书 *和* 自定义 *HTTPS* Agent 时,很可能需要通过 `set_basic_auth_header` 设置(在 1.2.0 中引入)来禁用默认的授权头,因为它会与 TLS 头产生冲突。所有 TLS 头都应由用户手动提供。 - ## 已知限制(Node.js/web) {#known-limitations-nodejsweb} - 结果集没有数据映射器,因此只使用语言的基础类型。计划在未来提供某些数据类型的映射器,并支持 [RowBinary 格式](https://github.com/ClickHouse/clickhouse-js/issues/216)。 diff 
--git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md index 9253e6356b7..1d64c47693b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/moose-olap.md @@ -10,7 +10,6 @@ doc_type: 'guide' import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 使用 Moose OLAP 在 ClickHouse 上进行开发 {#developing-on-clickhouse-with-moose-olap} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md index 28dc34f9850..a2bceade0e2 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/additional-options.md @@ -45,7 +45,6 @@ common.get_setting('invalid_setting_action') | http_buffer_size | 10MB | | 用于 HTTP 流式查询的“内存中”缓冲区大小(以字节为单位)。 | | preserve_pandas_datetime_resolution | False | True, False | 当为 True 且使用 pandas 2.x 时,会保留 datetime64/timedelta64 数据类型的分辨率(例如 's'、'ms'、'us'、'ns')。如果为 False(或在 pandas <2.x 上),则为兼容性起见会强制转换为纳秒('ns')分辨率。 | - ## 压缩 {#compression} ClickHouse Connect 支持对查询结果和插入数据使用 lz4、zstd、brotli 和 gzip 压缩。请始终牢记,启用压缩通常意味着在网络带宽/传输速度与 CPU 使用率(客户端和服务器端)之间进行权衡。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md index 31c64181796..beebd15c30d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-inserting.md @@ -31,7 +31,6 @@ assert qr[0][0] == 4 `InsertContext` 包含在插入过程中会被更新的可变状态,因此并不是线程安全的。 - ### 写入格式 {#write-formats} 当前仅对少量类型实现了写入格式支持。在大多数情况下,ClickHouse Connect 会尝试通过检查首个(非空)数据值的类型,自动推断列的正确写入格式。举例来说,如果要向 `DateTime` 列插入数据,并且该列的第一个插入值是一个 Python 整数,ClickHouse Connect 会在假定该值实际表示 Unix epoch 秒数的前提下,直接插入该整数值。 @@ -97,7 +96,6 @@ df = pd.DataFrame({ client.insert_df("users", df) ``` - #### PyArrow 表插入 {#pyarrow-table-insert} ```python @@ -115,7 +113,6 @@ arrow_table = pa.table({ client.insert_arrow("users", arrow_table) ``` - #### 基于 Arrow 的 DataFrame 插入(pandas 2.x) {#arrow-backed-dataframe-insert-pandas-2} ```python @@ -134,7 +131,6 @@ df = pd.DataFrame({ client.insert_df_arrow("users", df) ``` - ### 时区 {#time-zones} 当将 Python 的 `datetime.datetime` 对象插入到 ClickHouse 的 `DateTime` 或 `DateTime64` 列中时,ClickHouse Connect 会自动处理时区信息。由于 ClickHouse 在内部将所有 DateTime 值存储为不带时区信息的 Unix 时间戳(自 Unix 纪元起的秒数或其小数部分),因此在插入时,时区转换会在客户端自动完成。 @@ -176,7 +172,6 @@ print(*results.result_rows, sep="\n") 使用 pytz 时,必须通过 `localize()` 方法为一个“朴素”(naive)的 datetime 对象附加时区信息。直接向 datetime 构造函数传入 `tzinfo=` 会导致使用错误的历史偏移量。对于 UTC,`tzinfo=pytz.UTC` 可以正常工作。更多信息请参阅 [pytz 文档](https://pythonhosted.org/pytz/#localized-times-and-date-arithmetic)。 ::: - #### 不含时区信息的 datetime 对象 {#timezone-naive-datetime-objects} 如果你插入一个不含时区信息的 Python `datetime.datetime` 对象(即没有 `tzinfo`),`.timestamp()` 方法会将其按系统本地时区进行解释。为避免歧义,建议: @@ -202,7 +197,6 @@ epoch_timestamp = int(naive_time.replace(tzinfo=pytz.UTC).timestamp()) client.insert('events', [[epoch_timestamp]], column_names=['event_time']) ``` - #### 带有时区元数据的 DateTime 列 {#datetime-columns-with-timezone-metadata} ClickHouse 列可以定义时区元数据(例如 `DateTime('America/Denver')` 或 `DateTime64(3, 'Asia/Tokyo')`)。这些元数据本身不会影响数据的存储方式(仍然以 UTC 时间戳存储),但会控制从 ClickHouse 查询数据时所使用的时区。 @@ -232,7 +226,6 @@ print(*results.result_rows, sep="\n") # (datetime.datetime(2023, 6, 15, 7, 30, tzinfo=),) 
{#datetimedatetime2023-6-15-7-30-tzinfodsttzinfo-americalos_angeles-pdt-1-day-170000-dst} ``` - ## 文件插入 {#file-inserts} `clickhouse_connect.driver.tools` 包中包含 `insert_file` 方法,可将数据直接从文件系统插入到现有的 ClickHouse 表中。数据解析由 ClickHouse 服务器负责。`insert_file` 接受以下参数: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md index f920721d920..bbe82584376 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-querying.md @@ -31,7 +31,6 @@ assert result.result_set[1][0] == 'first_value2' 请注意,`QueryContext` 不是线程安全的,但在多线程环境中可以通过调用 `QueryContext.updated_copy` 方法来获取其副本。 - ## 流式查询 {#streaming-queries} ClickHouse Connect 客户端提供多种以流(实现为 Python 生成器)形式检索数据的方法: @@ -76,7 +75,6 @@ with client.query_row_block_stream('SELECT pickup, dropoff, pickup_longitude, pi 你可以使用 `StreamContext` 的 `source` 属性来访问其父级 `QueryResult` 对象,其中包含列名和类型。 - ### 流类型 {#stream-types} `query_column_block_stream` 方法会将块(block)作为一系列按列存储的数据返回,并使用原生的 Python 数据类型。结合上面的 `taxi_trips` 查询示例,返回的数据将是一个列表,其中每个元素又是另一个列表(或元组),包含对应列的所有数据。因此,`block[0]` 将是一个只包含字符串的元组。列式格式最常用于对某一列的全部值执行聚合操作,例如对总车费求和。 @@ -101,7 +99,6 @@ with df_stream: 最后,`query_arrow_stream` 方法会返回一个 ClickHouse `ArrowStream` 格式的结果,其类型为包装在 `StreamContext` 中的 `pyarrow.ipc.RecordBatchStreamReader`。流的每次迭代都会返回一个 PyArrow RecordBlock。 - ### 流式传输示例 {#streaming-examples} #### 流式传输行 {#stream-rows} @@ -122,7 +119,6 @@ with client.query_rows_stream("SELECT number, number * 2 as doubled FROM system. # .... 
``` - #### 流式行数据块 {#stream-row-blocks} ```python @@ -139,7 +135,6 @@ with client.query_row_block_stream("SELECT number, number * 2 FROM system.number # 收到包含 34591 行的数据块 ``` - #### 以流式方式传输 Pandas DataFrame {#stream-pandas-dataframes} ```python @@ -166,7 +161,6 @@ with client.query_df_stream("SELECT number, toString(number) AS str FROM system. # 2 65411 65411 ``` - #### 流式传输 Arrow 批处理 {#stream-arrow-batches} ```python @@ -184,7 +178,6 @@ with client.query_arrow_stream("SELECT * FROM large_table") as stream: # 已接收包含 34591 行的 Arrow 批次 ``` - ## NumPy、Pandas 和 Arrow 查询 {#numpy-pandas-and-arrow-queries} ClickHouse Connect 提供了用于处理 NumPy、Pandas 和 Arrow 数据结构的专用查询方法。通过这些方法,可以直接以这些常用数据格式获取查询结果,而无需手动进行格式转换。 @@ -214,7 +207,6 @@ print(np_array) # [4 8]] {#4-8} ``` - ### Pandas 查询 {#pandas-queries} `query_df` 方法会将查询结果作为 Pandas DataFrame 返回,而不是 ClickHouse Connect 的 `QueryResult`。 @@ -239,7 +231,6 @@ print(df) # 4 4 8 {#4-4-8} ``` - ### PyArrow 查询 {#pyarrow-queries} `query_arrow` 方法会以 PyArrow Table 的形式返回查询结果。它直接使用 ClickHouse 的 `Arrow` 格式,因此只接受与主 `query` 方法相同的三个参数:`query`、`parameters` 和 `settings`。另外还有一个附加参数 `use_strings`,用于决定 Arrow Table 在渲染 ClickHouse 的 String 类型时,是作为字符串(当为 `True`)还是作为字节(当为 `False`)。 @@ -266,7 +257,6 @@ print(arrow_table) # str: [["0","1","2"]] {#str-012} ``` - ### 基于 Arrow 的 DataFrame {#arrow-backed-dataframes} ClickHouse Connect 通过 `query_df_arrow` 和 `query_df_arrow_stream` 方法,支持从 Arrow 查询结果快速且高效地创建 DataFrame,并节省内存。这些方法是对 Arrow 查询方法的轻量封装,并在可能的情况下执行零拷贝转换为 DataFrame: @@ -316,7 +306,6 @@ with client.query_df_arrow_stream( # 接收到 批次,包含 34591 行,数据类型: [UInt64, String] ``` - #### 注意事项和说明 {#notes-and-caveats} - Arrow 类型映射:当以 Arrow 格式返回数据时,ClickHouse 会将类型映射到最接近且受支持的 Arrow 类型。某些 ClickHouse 类型没有原生的 Arrow 等价类型,会作为原始字节返回在 Arrow 字段中(通常为 `BINARY` 或 `FIXED_SIZE_BINARY`)。 @@ -366,7 +355,6 @@ print([int.from_bytes(n, byteorder="little") for n in df["int_128_col"].to_list( 关键要点是:应用程序代码必须根据所选 DataFrame 库的能力以及可接受的性能权衡来处理这些转换。当 DataFrame 原生转换不可用时,仍然可以采用纯 Python 的方式来实现。 - ## 读取格式 
{#read-formats} 读取格式用于控制客户端 `query`、`query_np` 和 `query_df` 方法返回值的数据类型。(`raw_query` 和 `query_arrow` 不会修改来自 ClickHouse 的原始数据,因此不适用格式控制。)例如,如果将 UUID 的读取格式从默认的 `native` 格式更改为可选的 `string` 格式,那么对 `UUID` 列的 ClickHouse 查询结果将以字符串形式返回(使用标准的 8-4-4-4-12 RFC 1422 格式),而不是 Python UUID 对象。 @@ -401,7 +389,6 @@ client.query('SELECT user_id, user_uuid, device_uuid from users', query_formats= client.query('SELECT device_id, dev_address, gw_address from devices', column_formats={'dev_address':'string'}) ``` - ### 读取格式选项(Python 类型) {#read-format-options-python-types} | ClickHouse Type | Native Python Type | Read Formats | Comments | @@ -462,7 +449,6 @@ result = client.query('SELECT name, avg(rating) FROM directors INNER JOIN movies 可以使用 `add_file` 方法向初始的 `ExternalData` 对象添加额外的外部数据文件,该方法接受与构造函数相同的参数。对于 HTTP,所有外部数据都会作为 `multipart/form-data` 文件上传的一部分进行传输。 - ## 时区 {#time-zones} 有多种机制可将时区应用到 ClickHouse 的 DateTime 和 DateTime64 值上。在内部,ClickHouse 服务器始终将任何 DateTime 或 `DateTime64` 对象存储为一个不含时区信息的数值,表示自 Unix 纪元(1970-01-01 00:00:00 UTC)以来的秒数。对于 `DateTime64` 值,根据精度不同,其表示形式可以是自纪元以来的毫秒、微秒或纳秒。因此,任何时区信息的应用始终发生在客户端。请注意,这会引入一定的额外计算开销,所以在对性能敏感的应用中,建议将 DateTime 类型视为纪元时间戳,仅在用户展示和转换时才使用时区(例如,Pandas Timestamps 始终是一个表示纪元纳秒的 64 位整数,以提升性能)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md index 9a684e7d5e1..4068de43b86 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/advanced-usage.md @@ -77,7 +77,6 @@ if __name__ == '__main__': 同样,你也可以将数据保存为 [TabSeparated](/interfaces/formats/TabSeparated) 以及其他格式。有关所有可用格式选项的概览,请参阅 [输入和输出数据的格式](/interfaces/formats)。 - ## 多线程、多进程和异步/事件驱动用例 {#multithreaded-multiprocess-and-asyncevent-driven-use-cases} ClickHouse Connect 
在多线程、多进程以及事件循环驱动/异步应用中表现良好。所有查询和插入处理都在单个线程中执行,因此操作通常是线程安全的。(在底层对部分操作进行并行处理是未来可能的增强方向,以克服单线程带来的性能损失,但即便在那种情况下也会保持线程安全。) @@ -116,7 +115,6 @@ asyncio.run(main()) 另请参阅:[run_async 示例](https://github.com/ClickHouse/clickhouse-connect/blob/main/examples/run_async.py)。 - ## 管理 ClickHouse 会话 ID {#managing-clickhouse-session-ids} 每个 ClickHouse 查询都会在一个 ClickHouse “会话”的上下文中执行。会话目前用于两个目的: @@ -142,7 +140,6 @@ client = clickhouse_connect.get_client(host='somehost.com', user='dbuser', passw 在这种情况下,ClickHouse Connect 不会发送 `session_id`;服务器不会将各个请求视为同一会话的一部分。临时表和会话级别的设置不会在请求之间保留。 - ## 自定义 HTTP 连接池 {#customizing-the-http-connection-pool} ClickHouse Connect 使用 `urllib3` 连接池来处理与服务器的底层 HTTP 连接。默认情况下,所有客户端实例共享同一个连接池,这对于大多数使用场景已经足够。这个默认连接池会针对应用程序使用的每个 ClickHouse 服务器最多维护 8 个 HTTP Keep-Alive 连接。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md index eaf51c57ed4..23e423515a6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/driver-api.md @@ -118,7 +118,6 @@ print(client.database) # 输出:'github' {#output-github} ``` - ## 客户端生命周期和最佳实践 {#client-lifecycle-and-best-practices} 创建一个 ClickHouse Connect 客户端是一个成本较高的操作,涉及建立连接、获取服务器元数据以及初始化设置。请遵循以下最佳实践以获得最优性能: @@ -158,7 +157,6 @@ for i in range(1000): client.close() ``` - ### 多线程应用 {#multi-threaded-applications} :::warning @@ -216,7 +214,6 @@ def worker(thread_id): client.close() ``` - ### 正确清理 {#proper-cleanup} 在关闭时务必关闭客户端。请注意,只有当客户端拥有其连接池管理器时(例如使用自定义 TLS/代理选项创建时),`client.close()` 才会销毁客户端实例并关闭连接池中的 HTTP 连接。对于默认的共享连接池,请使用 `client.close_connections()` 主动清理套接字;否则,连接会通过空闲超时以及在进程退出时自动回收。 @@ -236,7 +233,6 @@ with clickhouse_connect.get_client(host='my-host', username='default', password= result = client.query('SELECT 1') ``` - ### 何时使用多个客户端 
{#when-to-use-multiple-clients} 在以下场景下,适合使用多个客户端: @@ -283,7 +279,6 @@ WHERE date >= '2022-10-01 15:20:05' 服务器端绑定(由 ClickHouse 服务器提供)仅支持 `SELECT` 查询。当前不适用于 `ALTER`、`DELETE`、`INSERT` 或其他类型的查询。未来可能会有变化;参见 [https://github.com/ClickHouse/ClickHouse/issues/42092](https://github.com/ClickHouse/ClickHouse/issues/42092)。 ::: - #### 客户端绑定 {#client-side-binding} ClickHouse Connect 也支持客户端参数绑定,这在生成模板化 SQL 查询时可以提供更大的灵活性。对于客户端绑定,`parameters` 参数应为字典或序列。客户端绑定使用 Python [“printf” 风格](https://docs.python.org/3/library/stdtypes.html#old-string-formatting) 的字符串格式化来进行参数替换。 @@ -348,7 +343,6 @@ WHERE metric >= 35200.44 ::: - ### `settings` 参数 {#settings-argument-1} 所有主要的 ClickHouse Connect Client `insert` 和 `select` 方法都接受一个可选的 `settings` 关键字参数,用于为所执行的 SQL 语句传递 ClickHouse 服务器的[用户设置](/operations/settings/settings.md)。`settings` 参数应为一个字典,其中每个条目为一个 ClickHouse 设置名称及其对应的值。请注意,这些值在作为查询参数发送到服务器时会被转换为字符串。 @@ -364,7 +358,6 @@ settings = {'merge_tree_min_rows_for_concurrent_read': 65535, client.query("SELECT event_type, sum(timeout) FROM event_errors WHERE event_time > '2022-08-01'", settings=settings) ``` - ## Client `command` 方法 {#client-command-method} 使用 `Client.command` 方法向 ClickHouse 服务器发送 SQL 查询,这些查询通常不返回数据,或者只返回单个原始类型值或数组值,而不是完整数据集。该方法接受以下参数: @@ -408,7 +401,6 @@ print(result) client.command("DROP TABLE test_command") ``` - #### 返回单个值的简单查询 {#simple-queries-returning-single-values} ```python @@ -427,7 +419,6 @@ print(version) # 输出:"25.8.2.29" {#output-258229} ``` - #### 带参数的命令 {#commands-with-parameters} ```python @@ -449,7 +440,6 @@ result = client.command( ) ``` - #### 包含设置的命令 {#commands-with-settings} ```python @@ -464,7 +454,6 @@ result = client.command( ) ``` - ## Client `query` 方法 {#client-query-method} `Client.query` 方法是从 ClickHouse 服务器检索单个“批次”数据集的主要方式。它通过 HTTP 使用 ClickHouse 原生格式高效传输大型数据集(最大约一百万行)。该方法具有以下参数: @@ -512,7 +501,6 @@ print([col_type.name for col_type in result.column_types]) # 输出: ['String', 'String'] {#output-string-string} ``` - #### 获取查询结果 {#accessing-query-results} 
```python @@ -547,7 +535,6 @@ print(result.first_row) # 输出: (0, "0") {#output-0-0} ``` - #### 使用客户端参数查询 {#query-with-client-side-parameters} ```python @@ -566,7 +553,6 @@ parameters = ("system", 5) result = client.query(query, parameters=parameters) ``` - #### 使用服务端参数查询 {#query-with-server-side-parameters} ```python @@ -581,7 +567,6 @@ parameters = {"db": "system", "tbl": "query_log"} result = client.query(query, parameters=parameters) ``` - #### 带有设置的查询 {#query-with-settings} ```python @@ -599,7 +584,6 @@ result = client.query( ) ``` - ### `QueryResult` 对象 {#the-queryresult-object} 基础的 `query` 方法返回一个 `QueryResult` 对象,包含以下公共属性: @@ -675,7 +659,6 @@ data = [ client.insert("users", data, column_names=["id", "name", "age"]) ``` - #### 面向列的插入 {#column-oriented-insert} ```python @@ -693,7 +676,6 @@ data = [ client.insert("users", data, column_names=["id", "name", "age"], column_oriented=True) ``` - #### 显式指定列类型的插入 {#insert-with-explicit-column-types} ```python @@ -716,7 +698,6 @@ client.insert( ) ``` - #### 插入到指定数据库 {#insert-into-specific-database} ```python @@ -738,7 +719,6 @@ client.insert( ) ``` - ## 文件插入 {#file-inserts} 要将文件中的数据直接插入到 ClickHouse 表中,请参阅[高级插入(文件插入)](advanced-inserting.md#file-inserts)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md index a9e5c94aead..45faaa5caf3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/index.md @@ -16,7 +16,6 @@ import CodeBlock from '@theme/CodeBlock'; import ConnectionDetails from '@site/i18n/zh/docusaurus-plugin-content-docs/current/_snippets/_gather_your_details_http.mdx'; - # 介绍 {#introduction} ClickHouse Connect 是一个核心数据库驱动,为各类 Python 应用程序提供互操作能力。 @@ -88,7 +87,6 @@ import clickhouse_connect client = 
clickhouse_connect.get_client(host='localhost', username='default', password='password') ``` - #### 使用 ClickHouse Connect 客户端实例连接到 ClickHouse Cloud 服务: {#use-a-clickhouse-connect-client-instance-to-connect-to-a-clickhouse-cloud-service} :::tip @@ -101,7 +99,6 @@ import clickhouse_connect client = clickhouse_connect.get_client(host='HOSTNAME.clickhouse.cloud', port=8443, username='default', password='your password') ``` - ### 与数据库交互 {#interact-with-your-database} 要执行 ClickHouse SQL 命令,请使用客户端的 `command` 方法: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md index 7a71b1d4c9a..ddd43123410 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/python/sqlalchemy.md @@ -34,7 +34,6 @@ with engine.begin() as conn: 在下文的 [Connection arguments and Settings](driver-api.md#connection-arguments) 一节可以查看支持参数的完整列表。这些参数也可以通过 SQLAlchemy DSN 进行配置。 - ## 核心查询 {#sqlalchemy-core-queries} 该方言支持 SQLAlchemy Core 的 `SELECT` 查询,包括联接、过滤、排序、限制/偏移以及 `DISTINCT`。 @@ -68,7 +67,6 @@ with engine.begin() as conn: conn.execute(delete(users).where(users.c.name.like("%temp%"))) ``` - ## DDL 和反射 {#sqlalchemy-ddl-reflection} 你可以使用提供的 DDL 辅助工具以及类型/引擎构造器来创建数据库和表。支持对表进行反射(包括列类型和引擎)。 @@ -103,7 +101,6 @@ with engine.begin() as conn: 如果服务器上存在这些属性,则反射出的列会包含方言特定的属性,例如 `clickhousedb_default_type`、`clickhousedb_codec_expression` 和 `clickhousedb_ttl_expression`。 - ## 插入(Core 和基础 ORM) {#sqlalchemy-inserts} 插入既可以通过 SQLAlchemy Core 实现,也可以为方便起见使用简单的 ORM 模型来完成。 @@ -132,7 +129,6 @@ with Session(engine) as session: session.commit() ``` - ## 范围和限制 {#scope-and-limitations} - 核心重点:支持 SQLAlchemy Core 功能,例如带有 `JOIN`(`INNER`、`LEFT OUTER`、`FULL OUTER`、`CROSS`)的 `SELECT`,以及 `WHERE`、`ORDER BY`、`LIMIT`/`OFFSET` 和 `DISTINCT`。 diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md index 75f9dc755ff..aa68b369f9d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/language-clients/rust.md @@ -37,7 +37,6 @@ clickhouse = { version = "0.12.2", features = ["test-util"] } 另请参阅:[crates.io 页面](https://crates.io/crates/clickhouse)。 - ## Cargo 特性 {#cargo-features} * `lz4`(默认启用)— 启用 `Compression::Lz4` 和 `Compression::Lz4Hc(_)` 变体。启用后,除 `WATCH` 以外的所有查询默认使用 `Compression::Lz4`。 @@ -90,7 +89,6 @@ let client = Client::default() .with_database("test"); ``` - ### HTTPS 或 ClickHouse Cloud 连接 {#https-or-clickhouse-cloud-connection} HTTPS 可以配合 `rustls-tls` 或 `native-tls` Cargo 特性使用。 @@ -116,7 +114,6 @@ let client = Client::default() * 客户端仓库中的 [ClickHouse Cloud HTTPS 示例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/clickhouse_cloud.rs)。该示例同样适用于自托管(本地部署)环境中的 HTTPS 连接。 - ### 选择行 {#selecting-rows} ```rust @@ -152,7 +149,6 @@ while let Some(row) = cursor.next().await? { .. 
} 在查询行数据时谨慎使用 `wait_end_of_query`,因为它可能会导致服务端更高的内存消耗,并且很可能会降低整体性能。 ::: - ### 插入数据行 {#inserting-rows} ```rust @@ -175,7 +171,6 @@ insert.end().await?; * 行将以流式方式逐步发送,以分散网络负载。 * 仅当所有行都位于同一分区且其数量小于 [`max_insert_block_size`](https://clickhouse.tech/docs/operations/settings/settings/#settings-max_insert_block_size) 时,ClickHouse 才会以原子方式插入该批次。 - ### 异步插入(服务端批量) {#async-insert-server-side-batching} 你可以使用 [ClickHouse 异步插入](/optimize/asynchronous-inserts) 来避免在客户端对传入数据进行批量处理。只需在 `insert` 方法中提供 `async_insert` 选项(或者直接在 `Client` 实例上统一配置,使其对所有 `insert` 调用生效)即可。 @@ -191,7 +186,6 @@ let client = Client::default() * 客户端仓库中的 [异步插入示例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/async_insert.rs)。 - ### Inserter 特性(客户端批量写入) {#inserter-feature-client-side-batching} 需要启用 `inserter` cargo 特性。 @@ -233,7 +227,6 @@ inserter.end().await?; ::: - ### 执行 DDL {#executing-ddls} 对于单节点部署,只需按如下方式执行 DDL 语句即可: @@ -252,7 +245,6 @@ client .await?; ``` - ### ClickHouse 设置 {#clickhouse-settings} 可以使用 `with_option` 方法来应用多种 [ClickHouse 设置](/operations/settings/settings)。例如: @@ -269,7 +261,6 @@ let numbers = client 除了 `query` 之外,它也可以以类似方式用于 `insert` 和 `inserter` 方法;此外,还可以在 `Client` 实例上调用同一方法,为所有查询设置全局配置。 - ### Query ID {#query-id} 使用 `.with_option`,可以设置 `query_id` 选项,以在 ClickHouse 查询日志中标识查询。 @@ -290,7 +281,6 @@ let numbers = client 另请参阅:client 仓库中的 [query_id 示例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/query_id.rs)。 - ### Session ID {#session-id} 与 `query_id` 类似,你可以通过设置 `session_id` 在同一个会话中执行语句。`session_id` 可以在客户端级别进行全局设置,也可以在每次 `query`、`insert` 或 `inserter` 调用时单独设置。 @@ -307,7 +297,6 @@ let client = Client::default() 另请参阅 client 仓库中的 [session_id 示例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/session_id.rs)。 - ### 自定义 HTTP 头部 {#custom-http-headers} 如果你使用代理认证或需要传递自定义请求头,可以按如下方式进行: @@ -320,7 +309,6 @@ let client = Client::default() 另请参见客户端仓库中的 [自定义 HTTP 头示例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/custom_http_headers.rs)。 - ### 自定义 
HTTP 客户端 {#custom-http-client} 这对于微调底层 HTTP 连接池的设置很有用。 @@ -349,7 +337,6 @@ let client = Client::with_http_client(hyper_client).with_url("http://localhost:8 另请参阅客户端仓库中的 [自定义 HTTP 客户端示例](https://github.com/ClickHouse/clickhouse-rs/blob/main/examples/custom_http_client.rs)。 - ## 数据类型 {#data-types} :::info @@ -456,7 +443,6 @@ struct MyRow { } ``` - * `DateTime` 可与 `u32` 或其对应的 newtype 封装类型互相映射,用于表示自 UNIX 纪元以来经过的秒数。另一个受支持的类型是 [`time::OffsetDateTime`](https://docs.rs/time/latest/time/struct.OffsetDateTime.html),通过 `serde::time::datetime` 提供支持,这需要启用 `time` 特性。 ```rust @@ -535,7 +521,6 @@ struct MyRow { * `Variant`、`Dynamic` 和(新的)`JSON` 数据类型目前尚不支持。 - ## 模拟 {#mocking} 该 crate 提供了用于模拟 ClickHouse 服务器并测试 DDL、`SELECT`、`INSERT` 和 `WATCH` 查询的工具。可以通过启用 `test-util` 功能特性来使用此功能。**仅**将其作为开发依赖(dev-dependency)使用。 @@ -580,7 +565,6 @@ struct EventLog { } ``` - ## 已知限制 {#known-limitations} * 尚不支持 `Variant`、`Dynamic` 和(新的)`JSON` 数据类型。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md index 5a6e4021594..524cb9db532 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/datagrip.md @@ -19,24 +19,17 @@ import datagrip_6 from '@site/static/images/integrations/sql-clients/datagrip-6. import datagrip_7 from '@site/static/images/integrations/sql-clients/datagrip-7.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 在 DataGrip 中连接 ClickHouse {#connecting-datagrip-to-clickhouse} - - ## 启动或下载 DataGrip {#start-or-download-datagrip} DataGrip 可从 https://www.jetbrains.com/datagrip/ 获取 - - ## 1. 收集连接信息 {#1-gather-your-connection-details} - - ## 2. 加载 ClickHouse 驱动程序 {#2-load-the-clickhouse-driver} 1. 
启动 DataGrip,在 **Data Sources and Drivers** 对话框的 **Data Sources** 选项卡中,点击 **+** 图标 @@ -58,8 +51,6 @@ DataGrip 可从 https://www.jetbrains.com/datagrip/ 获取 - - ## 3. 连接到 ClickHouse {#3-connect-to-clickhouse} - 指定数据库连接详细信息,然后单击 **Test Connection**。 @@ -79,8 +70,6 @@ ClickHouse Cloud 要求所有连接都使用 SSL 加密。如果没有 `?ssl=tru - - ## 深入了解 {#learn-more} 如需了解有关 DataGrip 的更多信息,请参阅 DataGrip 文档。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md index 6d224cfc903..2c8c4321a57 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/dbeaver.md @@ -21,7 +21,6 @@ import dbeaver_sql_editor from '@site/static/images/integrations/sql-clients/dbe import dbeaver_query_log_select from '@site/static/images/integrations/sql-clients/dbeaver-query-log-select.png'; import ClickHouseSupportedBadge from '@theme/badges/ClickHouseSupported'; - # 将 DBeaver 连接到 ClickHouse {#connect-dbeaver-to-clickhouse} @@ -32,8 +31,6 @@ DBeaver 提供多个版本。在本指南中,我们使用的是 [DBeaver Commu 请使用 23.1.0 或更高版本的 DBeaver,以获得对 ClickHouse 中 `Nullable` 列的更佳支持。 ::: - - ## 1. 收集您的 ClickHouse 连接信息 {#1-gather-your-clickhouse-details} DBeaver 通过基于 HTTP(S) 的 JDBC 连接到 ClickHouse,您需要准备: @@ -43,14 +40,10 @@ DBeaver 通过基于 HTTP(S) 的 JDBC 连接到 ClickHouse,您需要准备: - 用户名 - 密码 - - ## 2. 下载 DBeaver {#2-download-dbeaver} 您可以从 https://dbeaver.io/download/ 下载 DBeaver - - ## 3. 添加数据库 {#3-add-a-database} - 通过 **Database > New Database Connection** 菜单,或 **Database Navigator** 中的 **New Database Connection** 图标,打开 **Connect to a database** 对话框: @@ -79,8 +72,6 @@ DBeaver 通过基于 HTTP(S) 的 JDBC 连接到 ClickHouse,您需要准备: - - ## 4. 
查询 ClickHouse {#4-query-clickhouse} 打开查询编辑器并执行查询。 @@ -93,8 +84,6 @@ DBeaver 通过基于 HTTP(S) 的 JDBC 连接到 ClickHouse,您需要准备: - - ## 后续步骤 {#next-steps} 请参阅 [DBeaver wiki](https://github.com/dbeaver/dbeaver/wiki) 了解 DBeaver 的功能,并查阅 [ClickHouse 文档](https://clickhouse.com/docs) 了解 ClickHouse 的功能。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md index 1c970dfd340..5bb1fa66b78 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/marimo.md @@ -18,7 +18,6 @@ import dropdown_cell_chart from '@site/static/images/integrations/sql-clients/ma import run_app_view from '@site/static/images/integrations/sql-clients/marimo/run-app-view.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 marimo 与 ClickHouse 配合使用 {#using-marimo-with-clickhouse} @@ -27,8 +26,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 1. 安装支持 SQL 的 marimo {#install-marimo-sql} ```shell @@ -38,7 +35,6 @@ marimo edit clickhouse_demo.py 这会在本机上打开一个指向 localhost 的浏览器窗口。 - ## 2. 连接到 ClickHouse。 {#connect-to-clickhouse} 在 marimo 编辑器左侧进入数据源面板,点击 “Add database”。 @@ -53,8 +49,6 @@ marimo edit clickhouse_demo.py - - ## 3. 
运行 SQL {#run-sql} 在设置好连接之后,你可以创建一个新的 SQL 单元格,并选择 ClickHouse 引擎。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md index 64aa8dad4c8..8417ff6b9c8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/sql-console.md @@ -47,7 +47,6 @@ import adjust_axis_scale from '@site/static/images/cloud/sqlconsole/adjust-axis- import give_a_query_a_name from '@site/static/images/cloud/sqlconsole/give-a-query-a-name.png' import save_the_query from '@site/static/images/cloud/sqlconsole/save-the-query.png' - # SQL 控制台 {#sql-console} SQL 控制台是在 ClickHouse Cloud 中探索和查询数据库的最快、最简便方式。您可以使用 SQL 控制台: @@ -57,8 +56,6 @@ SQL 控制台是在 ClickHouse Cloud 中探索和查询数据库的最快、最 - 只需几次点击即可执行查询并将结果数据可视化 - 与团队成员共享查询,从而更高效地协作 - - ## 浏览数据表 {#exploring-tables} ### 查看数据表列表和表结构信息 {#viewing-table-list-and-schema-info} @@ -83,8 +80,6 @@ SQL 控制台是在 ClickHouse Cloud 中探索和查询数据库的最快、最 - - ## 筛选和排序表格 {#filtering-and-sorting-tables} ### 排序表格 {#sorting-a-table} @@ -125,8 +120,6 @@ SQL 控制台可以一键将当前的排序和筛选条件转换为查询。只 你可以通过阅读 (link) 查询文档来进一步了解如何在 SQL 控制台中编写查询。 - - ## 创建和运行查询 {#creating-and-running-a-query} ### 创建查询 {#creating-a-query} @@ -182,8 +175,6 @@ SQL 控制台可以一键将当前的排序和筛选条件转换为查询。只 - - ## 使用 GenAI 管理查询 {#using-genai-to-manage-queries} 此功能允许用户以自然语言问题的形式编写查询,由查询控制台根据可用数据表的上下文生成 SQL 查询。GenAI 还可以帮助用户调试查询。 @@ -294,8 +285,6 @@ SQL 控制台可以一键将当前的排序和筛选条件转换为查询。只 1. 
点击 _+_ 图标创建一个新查询,并粘贴以下代码: - - ```sql -- 按年份显示 uk_price_paid 表中所有交易的总价格和总交易数。 SELECT year(date), sum(pricee) as total_price, Count(*) as total_transactions @@ -310,7 +299,6 @@ SQL 控制台可以一键将当前的排序和筛选条件转换为查询。只 请注意,GenAI 是一项实验性功能。在对任何数据集运行由 GenAI 生成的查询时,请务必谨慎。 - ## 高级查询功能 {#advanced-querying-features} ### 搜索查询结果 {#searching-query-results} @@ -339,8 +327,6 @@ SQL 控制台可以一键将当前的排序和筛选条件转换为查询。只 - - ## 可视化查询数据 {#visualizing-query-data} 某些数据以图表形式展示会更易于理解。你可以在 SQL 控制台中直接基于查询结果数据快速创建可视化,只需几次点击。作为示例,我们将使用一个查询来计算纽约市出租车行程的每周统计信息: @@ -401,7 +387,6 @@ SQL 控制台支持十种图表类型,可以在图表配置面板中的图表 - ## 共享查询 {#sharing-queries} SQL 控制台支持你与团队共享查询。查询一旦共享,团队的所有成员都可以查看和编辑该查询。共享查询是与团队协作的有效方式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md index 72975ab4b55..637a33eba61 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/sql-clients/tablum.md @@ -17,29 +17,22 @@ import tablum_ch_2 from '@site/static/images/integrations/sql-clients/tablum-ch- import tablum_ch_3 from '@site/static/images/integrations/sql-clients/tablum-ch-3.png'; import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 将 TABLUM.IO 连接到 ClickHouse {#connecting-tablumio-to-clickhouse} - - ## 打开 TABLUM.IO 启动页面 {#open-the-tablumio-startup-page} :::note 你可以通过 Docker 在 Linux 服务器上安装 TABLUM.IO 的自托管版本。 ::: - - ## 1. 注册或登录服务 {#1-sign-up-or-sign-in-to-the-service} 首先,使用您的邮箱在 TABLUM.IO 注册,或者通过 Google 或 Facebook 账号快速登录。 - - ## 2. 添加 ClickHouse 连接器 {#2-add-a-clickhouse-connector} 准备好 ClickHouse 连接信息,进入 **Connector** 选项卡,填写 host URL、port、username、password、database name 以及连接器名称。完成这些字段后,点击 **Test connection** 按钮验证配置信息,然后点击 **Save connector for me** 以便持久保存该连接器。 @@ -54,16 +47,12 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - - ## 3. 
选择连接器 {#3-select-the-connector} 进入 **Dataset** 选项卡。在下拉列表中选择刚创建的 ClickHouse 连接器。在右侧面板中,您将看到可用的表和架构列表。 - - ## 4. 输入 SQL 查询并运行 {#4-input-a-sql-query-and-run-it} 在 SQL 控制台中输入查询,然后点击 **Run Query**。结果会以电子表格形式显示。 @@ -81,8 +70,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; * 将查询结果共享为新的 ClickHouse 数据库。 ::: - - ## 了解更多 {#learn-more} 请访问 https://tablum.io 获取更多关于 TABLUM.IO 的信息。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md index 3b1e03b8662..08f8f2ee80c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/easypanel/index.md @@ -9,7 +9,6 @@ doc_type: '指南' import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; - # 在 Easypanel 上部署 ClickHouse {#deploying-clickhouse-on-easypanel} @@ -18,8 +17,6 @@ import CommunityMaintainedBadge from '@theme/badges/CommunityMaintained'; [![Deploy to Easypanel](https://easypanel.io/img/deploy-on-easypanel-40.svg)](https://easypanel.io/docs/templates/clickhouse) - - ## 操作步骤 {#instructions} 1. 
在你的云平台上创建一台运行 Ubuntu 的虚拟机。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md index 691d542e51a..c560a5f58f0 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/retool/index.md @@ -19,18 +19,13 @@ import retool_04 from '@site/static/images/integrations/tools/data-integration/r import retool_05 from '@site/static/images/integrations/tools/data-integration/retool/retool_05.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # 在 Retool 中连接 ClickHouse {#connecting-retool-to-clickhouse} - - ## 1. 收集连接信息 {#1-gather-your-connection-details} - - ## 2. 创建 ClickHouse 资源 {#2-create-a-clickhouse-resource} 登录你的 Retool 账户,并导航到 _Resources_ 标签页。选择 "Create New" -> "Resource": diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md index 71ed8cdf09a..22cb0938ffb 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations/tools/data-integration/splunk/index.md @@ -22,7 +22,6 @@ import splunk_011 from '@site/static/images/integrations/tools/data-integration/ import splunk_012 from '@site/static/images/integrations/tools/data-integration/splunk/splunk_012.png'; import PartnerBadge from '@theme/badges/PartnerBadge'; - # 将 ClickHouse Cloud 审计日志存储到 Splunk 中 {#storing-clickhouse-cloud-audit-logs-into-splunk} @@ -33,12 +32,8 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; 此附加组件仅包含模块化输入,不提供任何额外的 UI。 - - # 安装 {#installation} - - ## 适用于 Splunk Enterprise {#for-splunk-enterprise} 从 
[Splunkbase](https://splunkbase.splunk.com/app/7709) 下载 ClickHouse Cloud Audit Add-on for Splunk。 @@ -55,8 +50,6 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; 如果一切正常,此时应能看到已安装的 ClickHouse Audit logs 应用。否则,请检查 Splunkd 日志以定位错误。 - - # 模块化输入配置 {#modular-input-configuration} 要配置模块化输入,首先需要从您的 ClickHouse Cloud 部署中获取以下信息: @@ -64,8 +57,6 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; - 组织 ID - 管理员 [API Key](/cloud/manage/openapi) - - ## 从 ClickHouse Cloud 获取信息 {#getting-information-from-clickhouse-cloud} 登录 [ClickHouse Cloud 控制台](https://console.clickhouse.cloud/)。 @@ -86,8 +77,6 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; - - ## 在 Splunk 中配置数据输入 {#configure-data-input-in-splunk} 回到 Splunk,依次进入 Settings -> Data inputs。 @@ -108,8 +97,6 @@ import PartnerBadge from '@theme/badges/PartnerBadge'; 数据输入已配置完成,现在可以开始浏览审计日志。 - - # 用法 {#usage} 模块化输入会将数据存储在 Splunk 中。要查看这些数据,可以在 Splunk 中使用通用搜索视图。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md index b53a89f3f23..0b594e2dca8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/arrowflight.md @@ -7,16 +7,12 @@ title: 'Arrow Flight 接口' doc_type: 'reference' --- - - # Apache Arrow Flight 接口 {#apache-arrow-flight-interface} ClickHouse 支持集成 [Apache Arrow Flight](https://arrow.apache.org/docs/format/Flight.html) 协议——一个为在 gRPC 之上使用 Arrow IPC 格式进行高效列式数据传输而设计的高性能 RPC 框架。 通过此接口,Flight SQL 客户端可以查询 ClickHouse,并以 Arrow 格式获取结果,为分析型工作负载提供高吞吐量和低延迟。 - - ## 功能 {#features} * 通过 Arrow Flight SQL 协议执行 SQL 查询 @@ -24,8 +20,6 @@ ClickHouse 支持集成 [Apache Arrow Flight](https://arrow.apache.org/docs/form * 与支持 Arrow Flight 的 BI 工具和自定义数据应用程序集成 * 通过 gRPC 实现轻量且高效的通信 - - ## 限制 {#limitations} Arrow Flight 接口目前为实验性功能,仍在积极开发中。已知限制包括: @@ -36,8 +30,6 @@ Arrow Flight 接口目前为实验性功能,仍在积极开发中。已知限 如果您遇到兼容性问题或希望参与贡献,请在 ClickHouse 仓库中[创建一个 
issue](https://github.com/ClickHouse/ClickHouse/issues)。 - - ## 运行 Arrow Flight 服务器 {#running-server} 要在自托管的 ClickHouse 实例中启用 Arrow Flight 服务器,请在服务器的配置文件中加入如下配置: @@ -54,7 +46,6 @@ Arrow Flight 接口目前为实验性功能,仍在积极开发中。已知限 {} Application: Arrow Flight 兼容协议:0.0.0.0:9005 ``` - ## 通过 Arrow Flight SQL 连接 ClickHouse {#connecting-to-clickhouse} 可以使用任何支持 Arrow Flight SQL 的客户端。例如,使用 `pyarrow`: @@ -70,7 +61,6 @@ for batch in reader: print(batch.to_pandas()) ``` - ## 兼容性 {#compatibility} Arrow Flight 接口与支持 Arrow Flight SQL 的工具兼容,包括使用以下技术构建的自定义应用程序: @@ -81,8 +71,6 @@ Arrow Flight 接口与支持 Arrow Flight SQL 的工具兼容,包括使用以 如果您的工具提供原生 ClickHouse 连接器(例如 JDBC、ODBC),除非在性能或格式兼容性方面有明确需要使用 Arrow Flight,否则应优先选择原生连接器。 - - ## 查询取消 {#query-cancellation} 可以通过在客户端关闭 gRPC 连接来取消长时间运行的查询。计划在未来提供对更高级取消功能的支持。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/cli.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/cli.md index a61b8111f6c..bca4fbd6013 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/cli.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/cli.md @@ -20,7 +20,6 @@ ClickHouse 提供了一个原生命令行客户端,用于直接对 ClickHouse 该客户端通过进度条以及已读取行数、已处理字节数和查询执行时间,为查询执行提供实时反馈。 它既支持[命令行选项](#command-line-options),也支持[配置文件](#configuration_files)。 - ## 安装 {#install} 若要下载 ClickHouse,请运行: @@ -39,7 +38,6 @@ sudo ./clickhouse install 不同版本的客户端和服务器之间是兼容的,但某些功能在较旧的客户端中可能不可用。我们建议客户端和服务器使用相同的版本。 - ## 运行 {#run} :::note @@ -71,7 +69,6 @@ ClickHouse 客户端版本 24.12.2.29(官方构建)。 有关命令行选项的完整列表,请参阅[命令行选项](#command-line-options)。 - ### 连接到 ClickHouse Cloud {#connecting-cloud} ClickHouse Cloud 服务的详细信息可在 ClickHouse Cloud 控制台中查看。选择要连接的服务并点击 **Connect**(连接): @@ -123,7 +120,6 @@ ClickHouse Cloud 服务的详细信息可在 ClickHouse Cloud 控制台中查看 为了专注于查询语法,其余示例省略了连接详细信息(`--host`、`--port` 等)。在实际使用这些命令时,请记得补充这些参数。 ::: - ## 交互模式 {#interactive-mode} ### 使用交互式模式 {#using-interactive-mode} @@ -168,7 +164,6 @@ ClickHouse Client 基于 `replxx`(类似于 `readline`),因此支持常见 * `q`、`Q` 或 `:q` * `logout` 或 `logout;` - ### 
查询处理信息 {#processing-info} 在处理查询时,客户端会显示: @@ -247,7 +242,6 @@ $ echo "Hello\nGoodbye" | clickhouse-client --query "INSERT INTO messages FORMAT 当指定 `--query` 时,所有输入内容都会在一个换行符之后被追加到请求中。 - ### 向远程 ClickHouse 服务插入 CSV 文件 {#cloud-example} 本示例将示例数据集 CSV 文件 `cell_towers.csv` 插入到 `default` 数据库中已存在的 `cell_towers` 表中: @@ -261,7 +255,6 @@ clickhouse-client --host HOSTNAME.clickhouse.cloud \ < cell_towers.csv ``` - ### 从命令行插入数据的示例 {#more-examples} 可以通过多种方式在命令行中插入数据。 @@ -290,7 +283,6 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA 在批量模式下,默认的数据[格式](formats.md)为 `TabSeparated`。 您可以在查询的 `FORMAT` 子句中设置格式,如上例所示。 - ## 带参数的查询 {#cli-queries-with-parameters} 你可以在查询中指定参数,并通过命令行选项向其传递参数值。 @@ -333,7 +325,6 @@ Query id: 0358a729-7bbe-4191-bb48-29b063c548a7 结果集包含 1 行。耗时:0.006 秒。 ``` - ### 查询语法 {#cli-queries-with-parameters-syntax} 在查询中,将你希望通过命令行参数传入的值用大括号括起来,格式如下: @@ -347,7 +338,6 @@ Query id: 0358a729-7bbe-4191-bb48-29b063c548a7 | `name` | 占位符标识符。对应的命令行选项为 `--param_ = value`。 | | `data type` | 参数的[数据类型](../sql-reference/data-types/index.md)。

例如,类似 `(integer, ('string', integer))` 的数据结构可以使用 `Tuple(UInt8, Tuple(String, UInt8))` 数据类型(也可以采用其他[整数](../sql-reference/data-types/int-uint.md)类型)。

也可以将表名、数据库名和列名作为参数传递,在这种情况下,则需要将其数据类型指定为 `Identifier`。 | - ### 示例 {#cli-queries-with-parameters-examples} ```bash @@ -358,7 +348,6 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe --query "SELECT {col:Identifier} as {alias:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 10" ``` - ## 基于 AI 的 SQL 生成 {#ai-sql-generation} ClickHouse 客户端内置了 AI 助手,可以根据自然语言描述生成 SQL 查询。此功能可帮助用户在不具备深厚 SQL 知识的情况下编写复杂查询。 @@ -379,7 +368,6 @@ AI 将会: 2. 基于发现的表和列生成合适的 SQL 查询 3. 立即执行生成的查询 - ### 示例 {#ai-sql-generation-example} ```bash @@ -413,7 +401,6 @@ GROUP BY c.name ORDER BY order_count DESC ``` - ### 配置 {#ai-sql-generation-configuration} 要使用 AI 生成 SQL,需要在 ClickHouse Client 配置文件中配置一个 AI 提供方。你可以使用 OpenAI、Anthropic,或任意与 OpenAI 兼容的 API 服务。 @@ -438,7 +425,6 @@ export ANTHROPIC_API_KEY=your-anthropic-key clickhouse-client ``` - #### 配置文件 {#ai-sql-generation-configuration-file} 若要对 AI 设置进行更精细的控制,请在以下位置的 ClickHouse 客户端配置文件中进行配置: @@ -543,7 +529,6 @@ ai: model: gpt-3.5-turbo ``` - ### 参数 {#ai-sql-generation-parameters}
@@ -650,7 +635,6 @@ clickhouse:[//[user[:password]@][hosts_and_ports]][/database][?query_parameters] | `database` | 数据库名称。 | `default` | | `query_parameters` | 键值对列表:`param1=value1[,¶m2=value2], ...`。对于某些参数,可以不指定值。参数名称和值区分大小写。 | - | - ### 注意事项 {#connection-string-notes} 如果在连接字符串中已经指定了用户名、密码或数据库,则不能再通过 `--user`、`--password` 或 `--database` 指定(反之亦然)。 @@ -685,7 +669,6 @@ ClickHouse 客户端会按顺序(从左到右)尝试连接这些主机。 * `database` * `query parameters` - ### 示例 {#connection_string_examples} 连接到 `localhost` 的 9000 端口并执行查询 `SELECT 1`。 @@ -766,7 +749,6 @@ clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000 clickhouse-client clickhouse://192.168.1.15,192.168.1.25 ``` - ## 查询 ID 格式 {#query-id-format} 在交互模式下,ClickHouse 客户端会为每个查询显示其查询 ID。默认情况下,ID 的格式如下: @@ -794,7 +776,6 @@ clickhouse-client clickhouse://192.168.1.15,192.168.1.25 speedscope:http://speedscope-host/#profileURL=qp%3Fid%3Dc8ecc783-e753-4b38-97f1-42cddfb98b7d ``` - ## 配置文件 {#configuration_files} ClickHouse 客户端会按以下顺序查找,并使用第一个存在的配置文件: @@ -895,7 +876,6 @@ $ clickhouse-client --max_threads 1 有关所有设置的列表,请参阅[设置](../operations/settings/settings.md)。 - ### 格式选项 {#command-line-options-formatting} | 选项 | 说明 | 默认值 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md index eb2c6f678ab..9670c7d03b5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Arrow/Arrow.md @@ -72,7 +72,6 @@ ClickHouse 表列的数据类型不必与对应的 Arrow 数据字段完全一 $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow" ``` - ### 选择数据 {#selecting-data} 可以使用以下命令,从 ClickHouse 表中选择数据,并将其保存为 Arrow 格式的文件: @@ -81,7 +80,6 @@ $ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filename.arrow} ``` - ## 格式设置 
{#format-settings} | 设置 | 描述 | 默认值 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md index e8273480aff..6c9b7a88199 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/Avro.md @@ -15,7 +15,6 @@ import DataTypeMapping from './_snippets/data-types-matching.md' | -- | -- | -- | | ✔ | ✔ | | - ## 描述 {#description} [Apache Avro](https://avro.apache.org/) 是一种面向行的序列化格式,使用二进制编码,实现高效的数据处理。`Avro` 格式支持读写 [Avro 数据文件](https://avro.apache.org/docs/++version++/specification/#object-container-files)。此格式要求消息是自描述的,并在其中内嵌 schema。如果您将 Avro 与 schema registry 结合使用,请参阅 [`AvroConfluent`](./AvroConfluent.md) 格式。 @@ -54,7 +53,6 @@ ClickHouse 表列的数据类型可以与插入的 Avro 数据中对应字段的 在导入数据时,如果在模式中找不到某个字段,并且已启用设置 [`input_format_avro_allow_missing_fields`](/operations/settings/settings-formats.md/#input_format_avro_allow_missing_fields),则会使用默认值,而不是抛出错误。 - ### 写入 Avro 数据 {#writing-avro-data} 要将 ClickHouse 表中的数据写入 Avro 文件: @@ -70,7 +68,6 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro Avro 文件的输出压缩方式和同步间隔可以分别通过 [`output_format_avro_codec`](/operations/settings/settings-formats.md/#output_format_avro_codec) 和 [`output_format_avro_sync_interval`](/operations/settings/settings-formats.md/#output_format_avro_sync_interval) 设置进行配置。 - ### 推断 Avro 模式 {#inferring-the-avro-schema} 使用 ClickHouse 的 [`DESCRIBE`](/sql-reference/statements/describe-table) 函数,可以快速查看 Avro 文件的推断格式,如下例所示。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md index a08ee563204..7665f678183 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Avro/AvroConfluent.md @@ -15,7 +15,6 @@ import DataTypesMatching from './_snippets/data-types-matching.md' | -- | -- | -- | | ✔ | ✗ | | - ## 描述 {#description} [Apache Avro](https://avro.apache.org/) 是一种面向行的序列化格式,使用二进制编码以实现高效的数据处理。`AvroConfluent` 格式支持解码使用 [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html)(或 API 兼容服务)序列化的、单对象且使用 Avro 编码的 Kafka 消息。 @@ -61,7 +60,6 @@ format_avro_schema_registry_url = 'http://schema-registry-url'; SELECT * FROM topic1_stream; ``` - #### 使用基本身份验证 {#using-basic-authentication} 如果 schema registry 需要基本身份验证(例如使用 Confluent Cloud 时),可以在 `format_avro_schema_registry_url` 设置中提供经过 URL 编码的凭证。 @@ -81,7 +79,6 @@ kafka_format = 'AvroConfluent', format_avro_schema_registry_url = 'https://:@schema-registry-url'; ``` - ## 故障排查 {#troubleshooting} 要监控摄取进度并调试 Kafka 消费者的错误,可以查询 [`system.kafka_consumers` 系统表](../../../operations/system-tables/kafka_consumers.md)。如果您的部署有多个副本(例如 ClickHouse Cloud),则必须使用 [`clusterAllReplicas`](../../../sql-reference/table-functions/cluster.md) 表函数。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md index 4f04e2ef274..98fee482008 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/BSONEachRow.md @@ -114,7 +114,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.bson' FORMAT BSONEachRow; ``` - ### 读取数据 {#reading-data} 以 `BSONEachRow` 格式读取数据: @@ -129,7 +128,6 @@ FORMAT BSONEachRow BSON 是一种二进制格式,无法在终端以人类可读形式显示。使用 `INTO OUTFILE` 将数据输出为 BSON 文件。 ::: - ## 格式设置 {#format-settings} | 设置 | 描述 | 默认值 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md index 
408f2268c09..f2d508dfc7f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSV.md @@ -46,7 +46,6 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR 如果失败并且输入值是数字,则会尝试将该数字与 ENUM ID 匹配。 如果输入数据只包含 ENUM ID,建议启用设置 [input_format_csv_enum_as_number](/operations/settings/settings-formats.md/#input_format_csv_enum_as_number) 以优化 `ENUM` 解析。 - ## 示例用法 {#example-usage} ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md index bdb8ff80347..228c102eaf8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNames.md @@ -70,7 +70,6 @@ ORDER BY (date, home_team); INSERT INTO football FROM INFILE 'football.csv' FORMAT CSVWithNames; ``` - ### 读取数据 {#reading-data} 使用 `CSVWithNames` 格式读取数据: @@ -104,7 +103,6 @@ FORMAT CSVWithNames "2022-05-07",2021,"Walsall","Swindon Town",0,3 ``` - ## 格式设置 {#format-settings} :::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md index 0a89c95f2df..38966850a41 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CSV/CSVWithNamesAndTypes.md @@ -71,7 +71,6 @@ ORDER BY (date, home_team); INSERT INTO football FROM INFILE 'football_types.csv' FORMAT CSVWithNamesAndTypes; ``` - ### 读取数据 {#reading-data} 使用 `CSVWithNamesAndTypes` 格式来读取数据: @@ -106,7 +105,6 @@ FORMAT CSVWithNamesAndTypes "2022-05-07",2021,"Walsall","Swindon Town",0,3 ``` - ## 格式设置 
{#format-settings} :::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md index 29e85b44a93..4280b87e1eb 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CapnProto.md @@ -17,7 +17,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; | -- | -- | -- | | ✔ | ✔ | | - ## 描述 {#description} `CapnProto` 格式是一种二进制消息格式,类似 [`Protocol Buffers`](https://developers.google.com/protocol-buffers/) 格式和 [Thrift](https://en.wikipedia.org/wiki/Apache_Thrift),但不同于 [JSON](./JSON/JSON.md) 或 [MessagePack](https://msgpack.org/)。 @@ -81,7 +80,6 @@ struct Message { $ clickhouse-client --query = "SELECT * FROM test.hits FORMAT CapnProto SETTINGS format_schema = 'schema:Message'" ``` - ### 使用自动生成的 schema {#using-autogenerated-capn-proto-schema} 如果你的数据没有外部定义的 `CapnProto` schema,你仍然可以使用自动生成的 schema 以 `CapnProto` 格式输入/输出数据。 @@ -102,7 +100,6 @@ SETTINGS format_capn_proto_use_autogenerated_schema=1 $ cat hits.bin | clickhouse-client --query "INSERT INTO test.hits SETTINGS format_capn_proto_use_autogenerated_schema=1 FORMAT CapnProto" ``` - ## 格式设置 {#format-settings} 设置 [`format_capn_proto_use_autogenerated_schema`](../../operations/settings/settings-formats.md/#format_capn_proto_use_autogenerated_schema) 默认启用,仅在未设置 [`format_schema`](/interfaces/formats#formatschema) 时生效。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md index 7918570174a..ff4705f86be 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparated.md @@ 
-56,7 +56,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparated; ``` - ### 读取数据 {#reading-data} 配置自定义分隔符: @@ -83,7 +82,6 @@ FORMAT CustomSeparated row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## 格式设置 {#format-settings} 其他设置: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md index 5bd32a4d72c..1d118fe60b1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpaces.md @@ -38,5 +38,4 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpaces; ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md index 9c93e28b8b4..49d0d38297f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNames.md @@ -38,5 +38,4 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpacesWithNames; ``` - ## 格式配置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md index 508bc4d51db..f6978c5dfe1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedIgnoreSpacesWithNamesAndTypes.md @@ -38,5 +38,4 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedIgnoreSpacesWithNamesAndTypes; ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md index 1075e6aa9d3..1999ade6ae4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNames.md @@ -43,7 +43,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedWithNames; ``` - ### 读取数据 {#reading-data} 配置自定义分隔符: @@ -70,7 +69,6 @@ FORMAT CustomSeparatedWithNames row('date';'season';'home_team';'away_team';'home_team_goals';'away_team_goals'),row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## 格式设置 {#format-settings} :::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md index 0ad6c5e5bc5..778845902fa 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/CustomSeparated/CustomSeparatedWithNamesAndTypes.md @@ -43,7 +43,6 @@ SET format_custom_escaping_rule = 'Quoted'; INSERT INTO football FROM INFILE 'football.txt' FORMAT CustomSeparatedWithNamesAndTypes; ``` - ### 读取数据 {#reading-data} 配置自定义分隔符: @@ -70,7 +69,6 @@ FORMAT CustomSeparatedWithNamesAndTypes row('date';'season';'home_team';'away_team';'home_team_goals';'away_team_goals'),row('Date';'Int16';'LowCardinality(String)';'LowCardinality(String)';'Int8';'Int8'),row('2022-04-30';2021;'Sutton United';'Bradford City';1;4),row('2022-04-30';2021;'Swindon Town';'Barrow';2;1),row('2022-04-30';2021;'Tranmere Rovers';'Oldham Athletic';2;0),row('2022-05-02';2021;'Port Vale';'Newport County';1;2),row('2022-05-02';2021;'Salford City';'Mansfield Town';2;2),row('2022-05-07';2021;'Barrow';'Northampton Town';1;3),row('2022-05-07';2021;'Bradford City';'Carlisle United';2;0),row('2022-05-07';2021;'Bristol Rovers';'Scunthorpe United';7;0),row('2022-05-07';2021;'Exeter City';'Port Vale';0;1),row('2022-05-07';2021;'Harrogate Town A.F.C.';'Sutton United';0;2),row('2022-05-07';2021;'Hartlepool United';'Colchester United';0;2),row('2022-05-07';2021;'Leyton Orient';'Tranmere Rovers';0;1),row('2022-05-07';2021;'Mansfield Town';'Forest Green Rovers';2;2),row('2022-05-07';2021;'Newport County';'Rochdale';0;2),row('2022-05-07';2021;'Oldham Athletic';'Crawley Town';3;3),row('2022-05-07';2021;'Stevenage Borough';'Salford City';4;2),row('2022-05-07';2021;'Walsall';'Swindon Town';0;3) ``` - ## 格式设置 {#format-settings} :::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md index 2fa4daa3aef..c6bcf903a9a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/DWARF.md @@ -83,5 +83,4 @@ LIMIT 3 峰值内存使用:271.92 MiB。 
``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md index 271946999f6..9f19a04af5e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Form.md @@ -40,5 +40,4 @@ rt.start: navigation rt.bmr: 390,11,10 ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md index 2f4eae3a785..8f5e26761bf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Hash.md @@ -62,5 +62,4 @@ df2ec2f0669b000edff6adee264e7d68 返回 1 行。用时:0.154 秒。 ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md index c4b4f553c76..3105c2b99fc 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSON.md @@ -99,7 +99,6 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA } ``` - ## 格式设置 {#format-settings} 对于 JSON 输入格式,如果将设置项 [`input_format_json_validate_types_from_metadata`](/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) 设为 `1`, diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md index 8f20428bc04..9508e69cde0 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsObject.md @@ -31,7 +31,6 @@ SELECT * FROM json_as_object FORMAT JSONEachRow; {"json":{"any json stucture":"1"}} ``` - ### JSON 对象数组 {#an-array-of-json-objects} ```sql title="Query" @@ -45,7 +44,6 @@ SELECT * FROM json_square_brackets FORMAT JSONEachRow; {"field":{"id":"2","name":"name2"}} ``` - ### 具有默认值的列 {#columns-with-default-values} ```sql title="Query" @@ -62,5 +60,4 @@ SELECT time, json FROM json_as_object FORMAT JSONEachRow {"time":"2024-09-16 12:18:08","json":{"foo":{"bar":{"x":"y"},"baz":"1"}}} ``` - ## 格式设定 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md index 81fd2640fdb..1966450374d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONAsString.md @@ -46,7 +46,6 @@ SELECT * FROM json_as_string; └───────────────────────────────────┘ ``` - ### JSON 对象数组 {#an-array-of-json-objects} ```sql title="Query" @@ -63,5 +62,4 @@ SELECT * FROM json_square_brackets; └────────────────────────────┘ ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md index 04e7424f95d..ca8b6e1f538 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumns.md @@ -48,7 +48,6 @@ JSONColumns* 格式的输出首先输出 ClickHouse 字段名,随后给出该 INSERT INTO football FROM INFILE 'football.json' FORMAT JSONColumns; ``` 
- ### 读取数据 {#reading-data} 使用 `JSONColumns` 格式来读取数据: @@ -72,7 +71,6 @@ FORMAT JSONColumns } ``` - ## 格式设置 {#format-settings} 在导入过程中,如果将 [`input_format_skip_unknown_fields`](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) 设置为 `1`,则名称未知的列会被跳过。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md index 6473a277058..f863ba4c4de 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONColumnsWithMetadata.md @@ -66,5 +66,4 @@ doc_type: 'reference' 对于 `JSONColumnsWithMetadata` 输入格式,如果将 [`input_format_json_validate_types_from_metadata`](/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) 设置为 `1`,则会将输入数据中元数据中的类型与表中对应列的类型进行比较。 - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md index 3e5c48ded13..9bbdde7811d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompact.md @@ -81,7 +81,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompact; ``` - ### 读取数据 {#reading-data} 以 `JSONCompact` 格式读取数据: @@ -156,5 +155,4 @@ FORMAT JSONCompact } ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md index 474439024ec..5b7ff7bfb38 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactColumns.md @@ -44,7 +44,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactColumns; ``` - ### 读取数据 {#reading-data} 以 `JSONCompactColumns` 格式读取数据: @@ -70,5 +69,4 @@ FORMAT JSONCompactColumns 在该数据块中不存在的列会被填充为默认值(此处可以使用 [`input_format_defaults_for_omitted_fields`](/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) 设置) - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md index bf7aa239952..c16e2d656d1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRow.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactEachRow; ``` - ### 读取数据 {#reading-data} 使用 `JSONCompactEachRow` 格式来读取数据: @@ -82,5 +81,4 @@ FORMAT JSONCompactEachRow ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md index 752c07792cf..35337ab1214 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNames.md @@ -50,7 +50,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT 
JSONCompactEachRowWithNames; ``` - ### 读取数据 {#reading-data} 使用 `JSONCompactEachRowWithNames` 格式来读取数据: @@ -84,7 +83,6 @@ FORMAT JSONCompactEachRowWithNames ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## 格式设置 {#format-settings} :::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md index 9358bbaeff2..074a8bbef7b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithNamesAndTypes.md @@ -51,7 +51,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactEachRowWithNamesAndTypes; ``` - ### 读取数据 {#reading-data} 使用 `JSONCompactEachRowWithNamesAndTypes` 格式来读取数据: @@ -86,7 +85,6 @@ FORMAT JSONCompactEachRowWithNamesAndTypes ["2022-05-07", 2021, "Walsall", "Swindon Town", 0, 3] ``` - ## 格式设置 {#format-settings} :::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md index c0223a66b92..0b4240b057c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactEachRowWithProgress.md @@ -46,5 +46,4 @@ FORMAT JSONCompactEachRowWithProgress {"rows_before_limit_at_least":5} ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md index 9ae9808f14d..1d88539ef40 100644 
--- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStrings.md @@ -93,5 +93,4 @@ FORMAT JSONCompactStrings } ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md index fc0295544bb..13cbe090dd7 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRow.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactStringsEachRow; ``` - ### 读取数据 {#reading-data} 使用 `JSONCompactStringsEachRow` 格式读取数据: @@ -82,5 +81,4 @@ FORMAT JSONCompactStringsEachRow ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md index aa1f53f303f..da6f4e9ab48 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNames.md @@ -50,7 +50,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactStringsEachRowWithNames; ``` - ### 读取数据 {#reading-data} 以 `JSONCompactStringsEachRowWithNames` 格式读取数据: @@ -84,7 +83,6 @@ FORMAT JSONCompactStringsEachRowWithNames ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## 格式设置 {#format-settings} 
:::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md index 81a23f850c9..7cf86916075 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithNamesAndTypes.md @@ -48,7 +48,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONCompactStringsEachRowWithNamesAndTypes; ``` - ### 读取数据 {#reading-data} 使用 `JSONCompactStringsEachRowWithNamesAndTypes` 格式来读取数据: @@ -83,7 +82,6 @@ FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["2022-05-07", "2021", "Walsall", "Swindon Town", "0", "3"] ``` - ## 格式设置 {#format-settings} :::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md index 1a2024c994d..cba03d790cc 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONCompactStringsEachRowWithProgress.md @@ -47,5 +47,4 @@ FORMAT JSONCompactStringsEachRowWithProgress {"rows_before_limit_at_least":5} ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md index 0295e1511f6..43e0077fd04 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRow.md @@ -47,7 +47,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONEachRow; ``` - ### 读取数据 {#reading-data} 使用 `JSONEachRow` 格式来读取数据: @@ -82,5 +81,4 @@ FORMAT JSONEachRow 若将设置 [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) 设为 1,则会跳过导入名称未知的数据列。 - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md index 3ffbc0cf0ba..2db26298bb3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONEachRowWithProgress.md @@ -26,5 +26,4 @@ doc_type: 'reference' {"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md index 5cbe513eb4c..13c3292a1eb 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONLines.md @@ -49,7 +49,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONLines; ``` - ### 读取数据 {#reading-data} 以 `JSONLines` 格式读取数据: @@ -84,5 +83,4 @@ FORMAT JSONLines 如果将 [input_format_skip_unknown_fields](/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) 设置为 1,则导入数据时会跳过名称未知的列。 - ## 格式设置 {#format-settings} \ No newline at end of file diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md index 30049a00104..f537b642546 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONObjectEachRow.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 描述 {#description} 在这种格式中,所有数据都表示为单个 JSON 对象,其中每一行对应该对象的一个独立字段,类似于 [`JSONEachRow`](./JSONEachRow.md) 格式。 - - ## 示例用法 {#example-usage} ### 基本示例 {#basic-example} @@ -133,7 +129,6 @@ CREATE TABLE IF NOT EXISTS example_table 以 `UserActivity` 表为例: - ```response ┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ │ 4324182021466249494 │ 5 │ 146 │ -1 │ @@ -213,11 +208,8 @@ SELECT * FROM json_each_row_nested └───────────────┴────────┘ ``` - ## 格式设置 {#format-settings} - - | 配置 | 说明 | 默认 | 注意事项 | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | | [`input_format_import_nested_json`](/operations/settings/settings-formats.md/#input_format_import_nested_json) | 将嵌套 JSON 数据映射为嵌套表(适用于 JSONEachRow 格式)。 | `false` | | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md index 9126d28c42e..3aca2cd6de9 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStrings.md @@ -200,7 +200,6 @@ doc_type: 'reference' INSERT INTO football FROM INFILE 'football.json' FORMAT JSONStrings; ``` - ### 读取数据 {#reading-data} 使用 `JSONStrings` 格式来读取数据: @@ -213,7 +212,6 @@ FORMAT JSONStrings 输出为 JSON 格式: - ```json { "meta": diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md index a1d26c09dbb..e0584d77619 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRow.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 描述 {#description} 与 [`JSONEachRow`](./JSONEachRow.md) 的唯一区别在于,数据字段会以字符串形式输出,而不是输出为带类型的 JSON 值。 - - ## 示例用法 {#example-usage} ### 插入数据 {#inserting-data} @@ -65,7 +61,6 @@ FORMAT JSONStringsEachRow 输出将为 JSON 格式: - ```json {"date":"2022-04-30","season":"2021","home_team":"Sutton United","away_team":"Bradford City","home_team_goals":"1","away_team_goals":"4"} {"date":"2022-04-30","season":"2021","home_team":"Swindon Town","away_team":"Barrow","home_team_goals":"2","away_team_goals":"1"} @@ -86,5 +81,4 @@ FORMAT JSONStringsEachRow {"date":"2022-05-07","season":"2021","home_team":"Walsall","away_team":"Swindon Town","home_team_goals":"0","away_team_goals":"3"} ``` - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md index c895d31e5f1..3d5beaa482f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/JSONStringsEachRowWithProgress.md @@ -6,14 +6,10 @@ title: 'JSONStringsEachRowWithProgress' doc_type: 'reference' --- - - ## 描述 {#description} 与 `JSONEachRow`/`JSONStringsEachRow` 不同,ClickHouse 还会以 JSON 值的形式返回进度信息。 - - ## 使用示例 {#example-usage} ```json @@ -23,5 +19,4 @@ doc_type: 'reference' {"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} ``` - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md index 73a766115e5..9cd06f874d9 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/JSON/PrettyJSONEachRow.md @@ -13,21 +13,15 @@ doc_type: 'guide' |-------|--------|-----------------------------------| | ✗ | ✔ | `PrettyJSONLines`, `PrettyNDJSON` | - - ## 描述 {#description} 与 [JSONEachRow](./JSONEachRow.md) 的唯一区别在于,JSON 采用带换行符和四个空格缩进的美化格式。 - - ## 使用示例 {#example-usage} ### 插入数据 {#inserting-data} 使用包含以下数据的 JSON 文件,并将其命名为 `football.json`: - - ```json { "date": "2022-04-30", @@ -185,7 +179,6 @@ FORMAT PrettyJSONEachRow 输出结果将为 JSON 格式: - ```json { "date": "2022-04-30", @@ -327,6 +320,4 @@ FORMAT PrettyJSONEachRow - - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md index a3265b31253..1ab7144cffb 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsString.md @@ -13,16 +13,12 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | 
✔ | | - - ## 描述 {#description} `LineAsString` 格式将输入数据的每一行都视为一个单独的字符串值。 此格式只能用于解析仅包含一个 [String](/sql-reference/data-types/string.md) 类型字段的表。 其余列必须设置为 [`DEFAULT`](/sql-reference/statements/create/table.md/#default)、[`MATERIALIZED`](/sql-reference/statements/create/view#materialized-view) 或省略。 - - ## 使用示例 {#example-usage} ```sql title="Query" @@ -38,5 +34,4 @@ SELECT * FROM line_as_string; └───────────────────────────────────────────────────┘ ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md index 5ccf0840814..e70d518db1c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNames.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 描述 {#description} `LineAsStringWithNames` 格式与 [`LineAsString`](./LineAsString.md) 格式类似,但会额外输出包含列名的表头行。 - - ## 使用示例 {#example-usage} ```sql title="Query" @@ -42,5 +38,4 @@ Jane 25 Peter 35 ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md index 5722541644b..5d31b688c26 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/LineAsString/LineAsStringWithNamesAndTypes.md @@ -13,15 +13,11 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 描述 {#description} `LineAsStringWithNames` 格式类似于 
[`LineAsString`](./LineAsString.md) 格式, 但会打印两行表头:第一行是列名,第二行是类型。 - - ## 使用示例 {#example-usage} ```sql @@ -44,5 +40,4 @@ Jane 25 Peter 35 ``` - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md index d3046fbcf24..da9fcf8f468 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Markdown.md @@ -11,16 +11,12 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | `MD` | - - ## 描述 {#description} 你可以使用 [Markdown](https://en.wikipedia.org/wiki/Markdown) 格式导出结果,生成可以直接粘贴到 `.md` 文件中的输出: Markdown 表格会自动生成,并且可以在支持 Markdown 的平台(例如 GitHub)上使用。此格式仅用于输出。 - - ## 示例用法 {#example-usage} ```sql @@ -41,5 +37,4 @@ FORMAT Markdown | 4 | 8 | ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md index 434c4901b6f..e4c147794f7 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/MsgPack.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 描述 {#description} ClickHouse 支持读写 [MessagePack](https://msgpack.org/) 数据文件。 - - ## 数据类型对应关系 {#data-types-matching} | MessagePack 数据类型(`INSERT`) | ClickHouse 数据类型 | MessagePack 数据类型(`SELECT`) | @@ -46,8 +42,6 @@ ClickHouse 支持读写 [MessagePack](https://msgpack.org/) 数据文件。 | `int 64` | [`Decimal64`](/sql-reference/data-types/decimal.md) | `int 64` | | `bin 8` | [`Decimal128`/`Decimal256`](/sql-reference/data-types/decimal.md) | `bin 8 ` | - - ## 示例用法 {#example-usage} 写入“.msgpk”文件: @@ -58,7 +52,6 @@ $ clickhouse-client --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 2 $ 
clickhouse-client --query="SELECT * FROM msgpack FORMAT MsgPack" > tmp_msgpack.msgpk; ``` - ## 格式设置 {#format-settings} | 设置 | 描述 | 默认值 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md index 82c99c80270..030a4df2afd 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/MySQLDump.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|---------|-------| | ✔ | ✗ | | - - ## 描述 {#description} ClickHouse 支持读取 MySQL 的 [转储文件(dump)](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html)。 @@ -26,8 +24,6 @@ ClickHouse 支持读取 MySQL 的 [转储文件(dump)](https://dev.mysql.com 此格式支持表结构推断:如果转储文件中包含该指定表的 `CREATE` 语句,则从中推断表结构;否则从 `INSERT` 语句中的数据推断表结构。 ::: - - ## 示例用法 {#example-usage} 假设有如下 SQL 转储文件: @@ -84,7 +80,6 @@ SETTINGS input_format_mysql_dump_table_name = 'test2' └───┘ ``` - ## 格式设置 {#format-settings} 可以使用 [`input_format_mysql_dump_table_name`](/operations/settings/settings-formats.md/#input_format_mysql_dump_table_name) 设置来指定要读取数据的表名。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md index 5f6a48db91d..7fa49fabc5f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Npy.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 描述 {#description} `Npy` 格式用于将 `.npy` 文件中的 NumPy 数组加载到 ClickHouse 中。 @@ -23,8 +21,6 @@ NumPy 文件格式是一种用于高效存储数值数据数组的二进制格 下表列出了受支持的 Npy 数据类型及其在 ClickHouse 中对应的类型: - - ## 数据类型对应关系 {#data_types-matching} | Npy 数据类型(`INSERT`) | ClickHouse 数据类型 | Npy 数据类型(`SELECT`) | @@ -42,8 +38,6 @@ NumPy 文件格式是一种用于高效存储数值数据数组的二进制格 | `S`, `U` | [String](/sql-reference/data-types/string.md) | `S` | | | 
[FixedString](/sql-reference/data-types/fixedstring.md) | `S` | - - ## 示例用法 {#example-usage} ### 使用 Python 将数组保存为 .npy 格式 {#saving-an-array-in-npy-format-using-python} @@ -76,5 +70,4 @@ FROM file('example_array.npy', Npy) $ clickhouse-client --query="SELECT {column} FROM {some_table} FORMAT Npy" > {filename.npy} ``` - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md index ff1f9c6cf9f..f798ae66d7c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Null.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 描述 {#description} 在 `Null` 格式下,不会输出任何内容。 @@ -25,8 +23,6 @@ doc_type: 'reference' `Null` 格式可用于性能测试。 ::: - - ## 示例用法 {#example-usage} ### 读取数据 {#reading-data} @@ -69,5 +65,4 @@ FORMAT Null 结果集包含 0 行。耗时:0.154 秒。 ``` - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md index 417e3bf2895..c16318d525e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/ORC.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 描述 {#description} [Apache ORC](https://orc.apache.org/) 是一种列式存储格式,在 [Hadoop](https://hadoop.apache.org/) 生态系统中被广泛使用。 - - ## 数据类型匹配 {#data-types-matching-orc} 下表比较了在 `INSERT` 和 `SELECT` 查询中支持的 ORC 数据类型及其对应的 ClickHouse [数据类型](/sql-reference/data-types/index.md)。 @@ -50,8 +46,6 @@ doc_type: 'reference' - 数组可以嵌套,并且其参数可以是 `Nullable` 类型。`Tuple` 和 `Map` 类型也可以嵌套。 - ClickHouse 表列的数据类型不必与对应的 ORC 数据字段完全一致。在插入数据时,ClickHouse 会根据上表解析数据类型,然后将数据[转换](/sql-reference/functions/type-conversion-functions#cast)为为 ClickHouse 表列设置的数据类型。 
- - ## 示例用法 {#example-usage} ### 插入数据 {#inserting-data} @@ -101,7 +95,6 @@ FORMAT ORC ORC 是一种二进制格式,无法在终端中以人类可读的形式查看。请使用 `INTO OUTFILE` 来输出 ORC 文件。 ::: - ## 格式设置 {#format-settings} | 设置 | 描述 | 默认值 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/One.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/One.md index ce335b6ff81..63f4e276300 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/One.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/One.md @@ -13,15 +13,11 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## 描述 {#description} `One` 格式是一种特殊的输入格式,它不会从文件中读取任何数据,而是只返回一行数据,该行包含一列,类型为 [`UInt8`](../../sql-reference/data-types/int-uint.md)、名称为 `dummy`、值为 `0`(类似于 `system.one` 表)。 可以配合虚拟列 `_file/_path` 使用,在不读取实际数据的情况下列出所有文件。 - - ## 使用示例 {#example-usage} 示例: @@ -45,5 +41,4 @@ SELECT _file FROM file('path/to/files/data*', One); └──────────────┘ ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md index 1632ca2cd0d..aa063656f5a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/Parquet.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 描述 {#description} [Apache Parquet](https://parquet.apache.org/) 是 Hadoop 生态系统中广泛使用的列式存储格式。ClickHouse 支持对该格式进行读写操作。 - - ## 数据类型匹配 {#data-types-matching-parquet} 下表展示了 Parquet 数据类型与 ClickHouse [数据类型](/sql-reference/data-types/index.md)之间的对应关系。 @@ -66,15 +62,12 @@ doc_type: 'reference' ClickHouse 表列的数据类型可以与插入的 Parquet 数据中对应字段的数据类型不同。插入数据时,ClickHouse 会根据上表解释数据类型,然后将数据[转换](/sql-reference/functions/type-conversion-functions#cast)为 ClickHouse 表列所设置的数据类型。例如,可以将一个 `UINT_32` 
Parquet 列读取到一个 [IPv4](/sql-reference/data-types/ipv4.md) ClickHouse 列中。 - - 对于某些 Parquet 类型,没有与之紧密匹配的 ClickHouse 类型。我们按如下方式读取它们: + * `TIME`(一天中的时间)会被读取为时间戳。例如,`10:23:13.000` 会变成 `1970-01-01 10:23:13.000`。 * 具有 `isAdjustedToUTC=false` 的 `TIMESTAMP`/`TIME` 表示本地挂钟时间(本地时区下的年、月、日、时、分、秒和子秒字段,而不考虑具体本地时区是哪一个),等同于 SQL 中的 `TIMESTAMP WITHOUT TIME ZONE`。ClickHouse 会将其当作 UTC 时间戳来读取。例如,`2025-09-29 18:42:13.000`(表示本地挂钟的读数)会变成 `2025-09-29 18:42:13.000`(`DateTime64(3, 'UTC')`,表示某个时间点)。如果将其转换为 String,它会显示正确的年、月、日、时、分、秒和子秒,然后可以将其解释为某个本地时区中的时间,而不是 UTC。违背直觉的是,将类型从 `DateTime64(3, 'UTC')` 改为 `DateTime64(3)` 并不会有帮助,因为这两种类型都表示时间点而不是挂钟读数,但 `DateTime64(3)` 会错误地使用本地时区来格式化。 * `INTERVAL` 当前会被读取为 `FixedString(12)`,其内容是 Parquet 文件中编码的时间间隔的原始二进制表示。 - - ## 示例用法 {#example-usage} ### 插入数据 {#inserting-data} @@ -109,6 +102,7 @@ ClickHouse 表列的数据类型可以与插入的 Parquet 数据中对应字段 INSERT INTO football FROM INFILE 'football.parquet' FORMAT Parquet; ``` + ### 读取数据 {#reading-data} 以 `Parquet` 格式读取数据: @@ -129,33 +123,30 @@ Parquet 是一种二进制格式,无法在终端中以人类可读的形式显 ## 格式设置 {#format-settings} - - -| 设置 | 描述 | 默认 | +| 设置 | 描述 | 默认值 | | ------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `input_format_parquet_case_insensitive_column_matching` | 在匹配 Parquet 列与 ClickHouse 列时不区分大小写。 | `0` | -| `input_format_parquet_preserve_order` | 在读取 Parquet 文件时避免对行重新排序,这通常会大大降低读取性能。 | `0` | +| `input_format_parquet_preserve_order` | 在读取 Parquet 文件时避免对行重新排序,因为这通常会明显降低读取性能。 | `0` | | `input_format_parquet_filter_push_down` | 在读取 Parquet 文件时,可以根据 WHERE/PREWHERE 表达式以及 Parquet 元数据中的最小值/最大值统计信息跳过整个行组。 | `1` | -| `input_format_parquet_bloom_filter_push_down` | 在读取 
Parquet 文件时,可根据 WHERE 条件和 Parquet 元数据中的布隆过滤器跳过整个行组。 | `0` | -| `input_format_parquet_use_native_reader` | 在读取 Parquet 文件时,使用原生读取器而不是 Arrow 读取器。 | `0` | -| `input_format_parquet_allow_missing_columns` | 在读取 Parquet 输入格式时允许列缺失 | `1` | -| `input_format_parquet_local_file_min_bytes_for_seek` | 在 Parquet 输入格式中,本地读取文件时,为执行 seek 而不是执行带 ignore 选项的读取所需的最小字节数 | `8192` | -| `input_format_parquet_enable_row_group_prefetch` | 在解析 Parquet 时启用行组预取。当前仅支持在单线程解析时进行预取。 | `1` | -| `input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference` | 在为 Parquet 格式进行模式推断时跳过类型不受支持的列 | `0` | -| `input_format_parquet_max_block_size` | Parquet 读取器的最大块大小。 | `65409` | -| `input_format_parquet_prefer_block_bytes` | Parquet 读取器输出的平均数据块大小(字节) | `16744704` | -| `input_format_parquet_enable_json_parsing` | 在读取 Parquet 文件时,将 JSON 列解析为 ClickHouse 的 JSON Column 类型。 | `1` | +| `input_format_parquet_bloom_filter_push_down` | 在读取 Parquet 文件时,可以根据 WHERE 表达式以及 Parquet 元数据中的布隆过滤器跳过整个行组。 | `0` | +| `input_format_parquet_allow_missing_columns` | 在读取 Parquet 输入格式时允许存在缺失列 | `1` | +| `input_format_parquet_local_file_min_bytes_for_seek` | 在使用 Parquet 输入格式本地读取文件时,为选择执行 seek 而不是执行带 ignore 选项的读取所需的最小字节数 | `8192` | +| `input_format_parquet_enable_row_group_prefetch` | 在解析 Parquet 时启用行组预取。当前仅支持单线程解析时进行预取。 | `1` | +| `input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference` | 在为 Parquet 格式进行模式推断时跳过不受支持类型的列 | `0` | +| `input_format_parquet_max_block_size` | Parquet 读取器的最大数据块大小。 | `65409` | +| `input_format_parquet_prefer_block_bytes` | Parquet 读取器输出的数据块平均大小(字节) | `16744704` | +| `input_format_parquet_enable_json_parsing` | 在读取 Parquet 文件时,将 JSON 列解析为 ClickHouse 的 JSON Column。 | `1` | | `output_format_parquet_row_group_size` | 目标行组大小(以行数计)。 | `1000000` | -| `output_format_parquet_row_group_size_bytes` | 目标行组大小(压缩前),单位为字节。 | `536870912` | -| `output_format_parquet_string_as_string` | 对 String 列使用 Parquet 的 String 类型,而不是 Binary 类型。 | `1` | -| 
`output_format_parquet_fixed_string_as_fixed_byte_array` | 对于 FixedString 列,请使用 Parquet 的 FIXED_LEN_BYTE_ARRAY 类型而不是 Binary。 | `1` | -| `output_format_parquet_version` | 输出时使用的 Parquet 格式版本。支持的版本:1.0、2.4、2.6 和 2.latest(默认) | `2.latest` | +| `output_format_parquet_row_group_size_bytes` | 压缩前的目标行组大小(字节)。 | `536870912` | +| `output_format_parquet_string_as_string` | 对于 String 列,请使用 Parquet 的 String 类型而不是 Binary 类型。 | `1` | +| `output_format_parquet_fixed_string_as_fixed_byte_array` | 对于 FixedString 列,请使用 Parquet 的 FIXED_LEN_BYTE_ARRAY 类型,而不是 Binary 类型。 | `1` | +| `output_format_parquet_version` | Parquet 输出格式的版本。支持的版本:1.0、2.4、2.6 和 2.latest(默认) | `2.latest` | | `output_format_parquet_compression_method` | Parquet 输出格式的压缩方式。支持的编解码器:snappy、lz4、brotli、zstd、gzip、none(不压缩) | `zstd` | -| `output_format_parquet_compliant_nested_types` | 在 Parquet 文件模式中,列表元素的名称应使用 'element' 而不是 'item'。这是 Arrow 库实现中的历史遗留问题。一般情况下可以提高兼容性,但某些旧版本的 Arrow 可能不兼容。 | `1` | -| `output_format_parquet_use_custom_encoder` | 使用更快速的 Parquet 编码器实现。 | `1` | -| `output_format_parquet_parallel_encoding` | 使用多线程进行 Parquet 编码。需要启用 output_format_parquet_use_custom_encoder。 | `1` | -| `output_format_parquet_data_page_size` | 压缩前的目标页大小(字节)。 | `1048576` | -| `output_format_parquet_batch_size` | 每隔这么多行检查一次页大小。如果某些列中单个值的平均大小超过数 KB,建议适当减小该值。 | `1024` | -| `output_format_parquet_write_page_index` | 新增在 Parquet 文件中写入页索引的能力。 | `1` | -| `input_format_parquet_import_nested` | 已废弃的设置,不会产生任何作用。 | `0` | -| `input_format_parquet_local_time_as_utc` | true | 确定在 `isAdjustedToUTC=false` 的情况下,模式推断对 Parquet 时间戳使用的数据类型。若为 true:DateTime64(..., 'UTC'),若为 false:DateTime64(...)。这两种行为都不完全正确,因为 ClickHouse 没有用于本地墙钟时间的数据类型。看似有些反直觉,但 true 可能是错误更小的选项,因为将带有 'UTC' 的时间戳格式化为 String 时,会得到正确本地时间的表示。 | +| `output_format_parquet_compliant_nested_types` | 在 Parquet 文件模式中,对于列表元素,应使用名称 'element' 而不是 'item'。这是 Arrow 库实现中的历史遗留问题。通常可以提高兼容性,但某些旧版本的 Arrow 可能是个例外。 | `1` | +| `output_format_parquet_use_custom_encoder` | 使用更快的 Parquet 编码器实现。 | `1` | +| 
`output_format_parquet_parallel_encoding` | 在多个线程中执行 Parquet 编码。前提是已启用 output_format_parquet_use_custom_encoder。 | `1` | +| `output_format_parquet_data_page_size` | 目标页大小(压缩前),单位为字节。 | `1048576` | +| `output_format_parquet_batch_size` | 每隔指定行数检查一次页大小。如果某些列中单个值的平均大小超过数 KB,建议适当减小该参数。 | `1024` | +| `output_format_parquet_write_page_index` | 新增允许在 Parquet 文件中写入页索引的功能。 | `1` | +| `input_format_parquet_import_nested` | 已废弃的设置,不起任何作用。 | `0` | +| `input_format_parquet_local_time_as_utc` | true | 确定在 `isAdjustedToUTC=false` 的情况下,模式推断对 Parquet 时间戳使用的数据类型。若为 true:DateTime64(..., 'UTC'),若为 false:DateTime64(...)。这两种行为都不完全正确,因为 ClickHouse 没有用于本地墙钟时间的数据类型。看似有些反直觉,但 true 可能是错误更小的选项,因为将带有 'UTC' 的时间戳格式化为 String 时,会得到正确本地时间的表示。 | \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md index 73b7099a2a5..7223cc848ef 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Parquet/ParquetMetadata.md @@ -6,8 +6,6 @@ title: 'ParquetMetadata' doc_type: 'reference' --- - - ## 描述 {#description} 用于读取 Parquet 文件元数据(https://parquet.apache.org/docs/file-format/metadata/)的特殊格式。它始终只输出一行,结构/内容如下: @@ -47,8 +45,6 @@ doc_type: 'reference' - `min` - 列块的最小值 - `max` - 列块的最大值 - - ## 使用示例 {#example-usage} 示例: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md index ef7c895a9a1..e84bc5d764d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/Pretty.md @@ -15,7 +15,6 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; | -- | -- | -- | | ✗ 
| ✔ | | - ## 描述 {#description} `Pretty` 格式以 Unicode 绘制的字符表格形式输出数据, @@ -26,8 +25,6 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; [NULL](/sql-reference/syntax.md) 会被输出为 `ᴺᵁᴸᴸ`。 - - ## 使用示例 {#example-usage} 示例(以 [`PrettyCompact`](./PrettyCompact.md) 格式为例): @@ -97,7 +94,6 @@ FORMAT PrettyCompact └────────────┴─────────┘ ``` - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md index c1dadbf2f5d..2d04b6fdcde 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Pretty/PrettyNoEscapes.md @@ -15,14 +15,11 @@ import PrettyFormatSettings from './_snippets/common-pretty-format-settings.md'; | -- | -- | -- | | ✗ | ✔ | | - ## 描述 {#description} 与 [Pretty](/interfaces/formats/Pretty) 的区别在于它不使用 [ANSI 转义序列](http://en.wikipedia.org/wiki/ANSI_escape_code)。 这对于在浏览器中显示该格式以及使用 `watch` 命令行工具是必需的。 - - ## 使用示例 {#example-usage} 示例: @@ -35,7 +32,6 @@ $ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events F 可以使用 [HTTP 接口](../../../interfaces/http.md) 在浏览器中显示该格式。 ::: - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md index 1c0d0c7a18a..e0277d80b6f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/Protobuf.md @@ -94,7 +94,6 @@ Enum(以及 Enum8 或 Enum16)必须包含 oneof 的所有可能标签外加 ClickHouse 以 `length-delimited` 格式输入和输出 protobuf 消息。 
这意味着在每条消息之前,都应将其长度写为[可变宽度整数(varint)](https://developers.google.com/protocol-buffers/docs/encoding#varints)。 - ## 示例用法 {#example-usage} ### 读取和写入数据 {#basic-examples} @@ -118,7 +117,6 @@ message MessageType { }; ``` -
生成二进制文件 @@ -246,7 +244,6 @@ ENGINE = MergeTree() ORDER BY tuple() ``` - 在命令行中将数据插入表中: ```bash @@ -261,8 +258,7 @@ SELECT * FROM test.protobuf_messages INTO OUTFILE 'protobuf_message_from_clickho 有了你的 Protobuf 模式定义,你现在可以对 ClickHouse 写入到文件 `protobuf_message_from_clickhouse.bin` 的数据进行反序列化了。 - -### 使用 ClickHouse Cloud 读取和写入数据 +### 使用 ClickHouse Cloud 读取和写入数据 {#basic-examples-cloud} 在 ClickHouse Cloud 中,您无法上传 Protobuf schema 文件。不过,您可以使用 `format_protobuf_schema` 设置项在查询中指定该 schema。下面的示例演示如何从本地机器读取序列化数据,并将其插入 ClickHouse Cloud 中的一张表。 @@ -289,8 +285,7 @@ ORDER BY tuple() * 'string':`format_schema` 为 schema 的字面内容。 * 'query':`format_schema` 为用于获取 schema 的查询语句。 - -### `format_schema_source='string'` +### `format_schema_source='string'` {#format-schema-source-string} 要将数据插入 ClickHouse Cloud,并将 schema 以字符串形式指定时,请运行: @@ -310,8 +305,7 @@ Javier Rodriguez 20001015 ['(555) 891-2046','(555) 738-5129'] Mei Ling 19980616 ['(555) 956-1834','(555) 403-7682'] ``` - -### `format_schema_source='query'` +### `format_schema_source='query'` {#format-schema-source-query} 你还可以将 Protobuf schema 存储在一张表中。 @@ -347,8 +341,7 @@ Javier Rodriguez 20001015 ['(555) 891-2046','(555) 738-5129'] Mei Ling 19980616 ['(555) 956-1834','(555) 403-7682'] ``` - -### 使用自动生成的 schema +### 使用自动生成的 schema {#using-autogenerated-protobuf-schema} 如果你的数据没有外部的 Protobuf schema,仍然可以使用自动生成的 schema 以 Protobuf 格式输出/输入数据。为此,请使用 `format_protobuf_use_autogenerated_schema` 设置。 @@ -377,7 +370,6 @@ SELECT * FROM test.hits format Protobuf SETTINGS format_protobuf_use_autogenerat 在这种情况下,会将自动生成的 Protobuf schema 保存在文件 `path/to/schema/schema.capnp` 中。 - ### 删除 Protobuf 缓存 {#basic-examples-cloud} 要重新加载从 [`format_schema_path`](/operations/server-configuration-parameters/settings.md/#format_schema_path) 加载的 Protobuf 架构,请使用 [`SYSTEM DROP ... 
FORMAT CACHE`](/sql-reference/statements/system.md/#system-drop-schema-format) 语句。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md index 38d600f4ea8..b48205cc587 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Protobuf/ProtobufList.md @@ -17,13 +17,10 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; | -- | -- | -- | | ✔ | ✔ | | - ## 描述 {#description} `ProtobufList` 格式与 [`Protobuf`](./Protobuf.md) 格式类似,但每一行表示为一系列子消息,这些子消息包含在一个名称固定为 "Envelope" 的消息中。 - - ## 示例用法 {#example-usage} 例如: @@ -51,5 +48,4 @@ message Envelope { }; ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md index 92f30f2f18a..1911e23dee4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/RawBLOB.md @@ -6,8 +6,6 @@ title: 'RawBLOB' doc_type: 'reference' --- - - ## 描述 {#description} `RawBLOB` 格式会将所有输入数据读取为单个值。它只能用于解析仅包含一个 [`String`](/sql-reference/data-types/string.md) 类型或类似类型字段的表。 @@ -45,7 +43,6 @@ doc_type: 'reference' 代码:108. 
DB::Exception:无可插入的数据 ``` - ## 使用示例 {#example-usage} ```bash title="Query" @@ -58,5 +55,4 @@ $ clickhouse-client --query "SELECT * FROM {some_table} FORMAT RawBLOB" | md5sum f9725a22f9191e064120d718e26862a9 - ``` - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md index 969658bd8d7..a01a53b1d00 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Regexp.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## 描述 {#description} `Regex` 格式会根据提供的正则表达式,对导入数据的每一行进行解析。 @@ -29,8 +27,6 @@ doc_type: 'reference' 如果正则表达式未能匹配某一行,并且 [format_regexp_skip_unmatched](/operations/settings/settings-formats.md/#format_regexp_escaping_rule) 被设置为 1,则该行会被静默跳过。否则会抛出异常。 - - ## 示例用法 {#example-usage} 假设有文件 `data.tsv`: @@ -67,7 +63,6 @@ SELECT * FROM imp_regex_table; └────┴─────────┴────────┴────────────┘ ``` - ## 格式设置 {#format-settings} 在使用 `Regexp` 格式时,可以使用以下设置: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md index de737c37a22..c5b8294a0fd 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/RowBinary/RowBinaryWithDefaults.md @@ -15,13 +15,10 @@ import RowBinaryFormatSettings from './_snippets/common-row-binary-format-settin | -- | -- | -- | | ✔ | ✗ | | - ## 描述 {#description} 与 [`RowBinary`](./RowBinary.md) 格式类似,但在每一列前多了一个字节,用于表示是否使用默认值。 - - ## 使用示例 {#example-usage} 示例: @@ -39,7 +36,6 @@ SELECT * FROM FORMAT('RowBinaryWithDefaults', 'x UInt32 default 42, y UInt32', x * 对于列 `x`,只有一个字节 `01`,它表示应使用默认值,在此字节之后不再提供其他数据。 * 
对于列 `y`,数据以字节 `00` 开头,它表示该列包含实际值,应从后续数据 `01000000` 中读取。 - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md index 74330dce043..ea06cda01ce 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/SQLInsert.md @@ -13,14 +13,10 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 描述 {#description} 以一系列 `INSERT INTO table (columns...) VALUES (...), (...) ...;` 语句的形式输出数据。 - - ## 使用示例 {#example-usage} 示例: @@ -39,7 +35,6 @@ INSERT INTO table (x, y, z) VALUES (8, 9, '你好'), (9, 10, '你好'); 可以使用 [MySQLDump](../formats/MySQLDump.md) 输入格式来读取此格式输出的数据。 - ## 格式设置 {#format-settings} | 设置 | 描述 | 默认值 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md index 3e902084de2..71d64e998ec 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TSKV.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✔ | | - - ## 描述 {#description} 类似于 [`TabSeparated`](./TabSeparated.md) 格式,但以 `name=value` 格式输出值。 @@ -57,7 +55,6 @@ x=1 y=\N [NULL](/sql-reference/syntax.md) 会被格式化为 `\N`。 - ## 示例用法 {#example-usage} ### 插入数据 {#inserting-data} @@ -102,7 +99,6 @@ FORMAT TSKV 输出将采用制表符分隔格式,并包含两行表头:第一行为列名,第二行为列类型: - ```tsv date=2022-04-30 season=2021 home_team=萨顿联队 away_team=布拉德福德城 home_team_goals=1 away_team_goals=4 date=2022-04-30 season=2021 home_team=斯温登镇 away_team=巴罗 home_team_goals=2 away_team_goals=1 @@ -123,5 +119,4 @@ date=2022-05-07 season=2021 home_team=斯蒂夫尼奇自治市 away_team date=2022-05-07 season=2021 home_team=沃尔索尔 away_team=斯温登镇 home_team_goals=0 
away_team_goals=3 ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md index bbe2bbc576f..067a43fa843 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparated.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|--------| | ✔ | ✔ | `TSV` | - - ## 描述 {#description} 在 TabSeparated 格式中,数据按行写入。每一行包含由制表符分隔的值。每个值后面都跟着一个制表符,除了该行的最后一个值,它后面跟的是换行符。在所有场景下都假定使用 Unix 风格换行符。最后一行的末尾也必须包含一个换行符。各个值以文本格式写入,不带引号,且特殊字符会被转义。 @@ -42,7 +40,6 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD 2014-03-23 1406958 ``` - ## 数据格式化 {#tabseparated-data-formatting} 整数以十进制形式书写。数字开头可以包含额外的“+”字符(解析时会被忽略,格式化输出时也不会被输出)。非负数不能包含负号。在读取时,允许将空字符串解析为零,或者(对于有符号类型)将仅包含一个减号的字符串解析为零。超出对应数据类型范围的数字可能会被解析为其他数值,而不会产生错误信息。 @@ -108,7 +105,6 @@ SELECT * FROM nestedt FORMAT TSV 1 [1] ['a'] ``` - ## 使用示例 {#example-usage} ### 插入数据 {#inserting-data} @@ -173,7 +169,6 @@ FORMAT TabSeparated 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 格式设置 {#format-settings} | Setting | Description | Default | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md index 0b2246d103b..d1798791e73 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRaw.md @@ -13,8 +13,6 @@ doc_type: 'reference' |------|------|-------------------| | ✔ | ✔ | `TSVRaw`, `Raw` | - - ## 描述 {#description} 本格式与 [`TabSeparated`](/interfaces/formats/TabSeparated) 
格式的不同之处在于,写入行时不会进行转义。 @@ -25,8 +23,6 @@ doc_type: 'reference' 关于 `TabSeparatedRaw` 格式与 `RawBlob` 格式的比较,请参见:[原始格式比较](../RawBLOB.md/#raw-formats-comparison) - - ## 示例用法 {#example-usage} ### 插入数据 {#inserting-data} @@ -91,5 +87,4 @@ FORMAT TabSeparatedRaw 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 格式配置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md index cc0b7f54fc8..dbc720844c3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNames.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-----------------------------------| | ✔ | ✔ | `TSVRawWithNames`, `RawWithNames` | - - ## 描述 {#description} 与 [`TabSeparatedWithNames`](./TabSeparatedWithNames.md) 格式不同, @@ -24,8 +22,6 @@ doc_type: 'reference' 使用此格式进行解析时,每个字段中不允许包含制表符或换行符。 ::: - - ## 示例用法 {#example-usage} ### 插入数据 {#inserting-data} @@ -92,5 +88,4 @@ date season home_team away_team home_team_goals away_team_goals 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md index 08555fd09ab..c65cb2aac5e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedRawWithNamesAndTypes.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|---------------------------------------------------| | 
✔ | ✔ | `TSVRawWithNamesAndNames`, `RawWithNamesAndNames` | - - ## 描述 {#description} 与 [`TabSeparatedWithNamesAndTypes`](./TabSeparatedWithNamesAndTypes.md) 格式不同, @@ -24,8 +22,6 @@ doc_type: 'reference' 使用此格式进行解析时,每个字段中不允许包含制表符或换行符。 ::: - - ## 使用示例 {#example-usage} ### 插入数据 {#inserting-data} @@ -72,7 +68,6 @@ FORMAT TabSeparatedRawWithNamesAndTypes 输出将采用制表符分隔的格式,并包含两行表头,分别用于列名和列类型: - ```tsv date season home_team away_team home_team_goals away_team_goals Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 @@ -95,5 +90,4 @@ Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md index 170c72f6537..cf64d6d1f4b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNames.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|--------------------------------| | ✔ | ✔ | `TSVWithNames`, `RawWithNames` | - - ## 描述 {#description} 与 [`TabSeparated`](./TabSeparated.md) 格式的区别在于,第一行写入了列名。 @@ -27,8 +25,6 @@ doc_type: 'reference' 否则,将跳过第一行。 ::: - - ## 示例用法 {#example-usage} ### 插入数据 {#inserting-data} @@ -95,5 +91,4 @@ date season home_team away_team home_team_goals away_team_goals 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md index 48d54050d91..b6991119a16 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/TabSeparated/TabSeparatedWithNamesAndTypes.md @@ -10,8 +10,6 @@ doc_type: 'reference' |-------|--------|------------------------------------------------| | ✔ | ✔ | `TSVWithNamesAndTypes`, `RawWithNamesAndTypes` | - - ## 描述 {#description} 与 [`TabSeparated`](./TabSeparated.md) 格式的区别在于:第一行写入列名,第二行写入列类型。 @@ -24,8 +22,6 @@ doc_type: 'reference' 则会将输入数据中的类型与表中对应列的类型进行比较。否则,第二行将被跳过。 ::: - - ## 使用示例 {#example-usage} ### 插入数据 {#inserting-data} @@ -72,7 +68,6 @@ FORMAT TabSeparatedWithNamesAndTypes 输出将采用制表符分隔的格式,并包含两行表头,分别表示列名和列类型: - ```tsv date season home_team away_team home_team_goals away_team_goals Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 @@ -95,5 +90,4 @@ Date Int16 LowCardinality(String) LowCardinality(String) Int8 Int8 2022-05-07 2021 Walsall Swindon Town 0 3 ``` - ## 格式设置 {#format-settings} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md index cf553b2810f..616a771cff7 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Template/Template.md @@ -13,8 +13,6 @@ doc_type: 'guide' |-------|--------|-------| | ✔ | ✔ | | - - ## 描述 {#description} 在需要比其他标准格式更高的自定义能力时, @@ -32,8 +30,6 @@ doc_type: 'guide' | `format_template_resultset_format` | 指定[内联](#inline_specification)的结果集格式字符串。 | | 某些其他格式的设置(例如使用 `JSON` 转义时的 `output_format_json_quote_64bit_integers`) | | - - ## 设置和转义规则 {#settings-and-escaping-rules} ### format_template_row {#format_template_row} @@ -114,7 +110,6 @@ doc_type: 'guide' 如果 `format_template_resultset` 设置为空字符串,则默认使用 `${data}`。 ::: - 对于 INSERT 
查询,如果存在前缀或后缀(见示例),该格式允许省略某些列或字段。 ### 内联指定 {#inline_specification} @@ -132,8 +127,6 @@ doc_type: 'guide' - 使用 `format_template_resultset_format` 时,对应 [`format_template_resultset`](#format_template_resultset)。 ::: - - ## 示例用法 {#example-usage} 让我们来看两个关于如何使用 `Template` 格式的示例,首先是用于查询数据,其次是用于插入数据。 @@ -217,7 +210,6 @@ FORMAT Template 厌倦了手动编写和排版 Markdown 表格?在本示例中,我们将介绍如何使用 `Template` 格式和内联规格设置来完成一个简单任务——从 `system.formats` 表中 `SELECT` 出若干 ClickHouse 格式的名称,并将它们格式化为 Markdown 表格。通过使用 `Template` 格式以及 `format_template_row_format` 和 `format_template_resultset_format` 设置,即可轻松实现这一点。 - 在之前的示例中,我们将结果集和行格式字符串放在单独的文件中,并分别通过设置 `format_template_resultset` 和 `format_template_row` 来指定这些文件的路径。这里我们会直接内联定义这些内容,因为我们的模板非常简单,只包含少量的 `|` 和 `-` 用于构造 Markdown 表格。我们将使用设置 `format_template_resultset_format` 来指定结果集模板字符串。为了生成表头,我们在 `${data}` 之前添加了 `|ClickHouse Formats|\n|---|\n`。我们使用设置 `format_template_row_format` 为每一行指定模板字符串 ``|`{0:XML}`|``。`Template` 格式会将按给定格式生成的行插入到占位符 `${data}` 中。在这个示例中我们只有一列,但如果你想添加更多列,可以在行模板字符串中添加 `{1:XML}`、`{2:XML}` 等,并根据需要选择合适的转义规则。在本示例中我们使用的是转义规则 `XML`。 ```sql title="Query" diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md index 8791fa9b103..287e39f3532 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Template/TemplateIgnoreSpaces.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✔ | ✗ | | - - ## 描述 {#description} 与 [`Template`] 类似,但会跳过输入流中分隔符与值之间的空白字符。 @@ -27,8 +25,6 @@ doc_type: 'reference' 此格式仅支持输入。 ::: - - ## 示例用法 {#example-usage} 以下请求可用于根据其 [JSON](/interfaces/formats/JSON) 格式的输出示例插入数据: @@ -50,5 +46,4 @@ FORMAT TemplateIgnoreSpaces {${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}} ``` - ## 格式设置 {#format-settings} \ No newline at end of 
file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md index bfc9b14ec54..be6993bbddb 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/Vertical.md @@ -13,16 +13,12 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 描述 {#description} 将每个值与其列名一起输出在单独的一行上。如果每行包含大量列,这种格式便于打印单行或少量行的数据。 请注意,[`NULL`](/sql-reference/syntax.md) 会输出为 `ᴺᵁᴸᴸ`,以便更容易区分字符串值 `NULL` 和空值。JSON 列会以美化后的格式输出,并且 `NULL` 会输出为 `null`,因为它是一个有效的 JSON 值,并且与 `"null"` 容易区分。 - - ## 使用示例 {#example-usage} 示例: @@ -53,5 +49,4 @@ test: string with 'quotes' and with some special 此格式仅适合用于输出查询结果,不适合用于解析(检索要插入到表中的数据)。 - ## 格式设置 {#format-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md index 30d16752843..fa5d5da6184 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/formats/XML.md @@ -13,8 +13,6 @@ doc_type: 'reference' |-------|--------|-------| | ✗ | ✔ | | - - ## 描述 {#description} `XML` 格式仅适用于输出,不适用于解析。 @@ -26,8 +24,6 @@ doc_type: 'reference' 数组会输出为 `HelloWorld...`,元组会输出为 `HelloWorld...`。 - - ## 使用示例 {#example-usage} 示例: @@ -94,9 +90,6 @@ doc_type: 'reference' ``` - ## 格式设置 {#format-settings} - - ## XML {#xml} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/grpc.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/grpc.md index a0c16ddf83f..a33d36d5d55 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/grpc.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/grpc.md @@ -7,12 +7,8 @@ title: 'gRPC 接口' doc_type: 'reference' --- - - # gRPC 接口 {#grpc-interface} - - 
## 简介 {#grpc-interface-introduction} ClickHouse 支持 [gRPC](https://grpc.io/) 接口。gRPC 是一个开源的远程过程调用系统,使用 HTTP/2 和 [Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers)。ClickHouse 中 gRPC 的实现支持: @@ -28,8 +24,6 @@ ClickHouse 支持 [gRPC](https://grpc.io/) 接口。gRPC 是一个开源的远 接口规范定义在 [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto) 中。 - - ## gRPC 配置 {#grpc-interface-configuration} 要使用 gRPC 接口,请在[主服务器配置文件](../operations/configuration-files.md)中设置 `grpc_port`。其他配置选项请参考下例: @@ -66,7 +60,6 @@ ClickHouse 支持 [gRPC](https://grpc.io/) 接口。gRPC 是一个开源的远 ``` - ## 内置客户端 {#grpc-client} 你可以使用 gRPC 支持的任意编程语言,基于提供的[规范](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto)编写客户端。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/http.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/http.md index 834673aa07e..63e65bbbc3e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/http.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/http.md @@ -10,19 +10,14 @@ doc_type: 'reference' import PlayUI from '@site/static/images/play.png'; import Image from '@theme/IdealImage'; - # HTTP 接口 {#http-interface} - - ## 前置条件 {#prerequisites} 要完成本文中的示例,你需要: - 一个处于运行状态的 ClickHouse 服务器实例 - 已安装 `curl`。在 Ubuntu 或 Debian 上,运行 `sudo apt install curl`,或参阅此[文档](https://curl.se/download.html)获取安装说明。 - - ## 概览 {#overview} HTTP 接口以 REST API 的形式提供服务,让你可以在任何平台、使用任何编程语言来使用 ClickHouse。HTTP 接口相比原生接口功能更有限,但对各类编程语言有更好的支持。 @@ -43,7 +38,6 @@ Ok. 另请参阅:[HTTP 响应码注意事项](#http_response_codes_caveats)。 - ## Web 用户界面 {#web-ui} ClickHouse 提供了一个 Web 用户界面,可通过以下地址访问: @@ -70,7 +64,6 @@ $ curl 'http://localhost:8123/replicas_status' Ok. 
``` - ## 通过 HTTP/HTTPS 查询 {#querying} 要通过 HTTP/HTTPS 执行查询,有三种方式: @@ -164,7 +157,6 @@ ECT 1 wget -nv -O- 'http://localhost:8123/?query=SELECT 1, 2, 3 FORMAT JSON' ``` - ```response title="Response" { "meta": @@ -222,7 +214,6 @@ $ curl -X POST -F 'query=select {p1:UInt8} + {p2:UInt8}' -F "param_p1=3" -F "par 7 ``` - ## 通过 HTTP/HTTPS 执行 INSERT 查询 {#insert-queries} 在执行 `INSERT` 查询时,需要使用用于传输数据的 `POST` 方法。在这种情况下,可以在 URL 参数中写入查询的开头部分,并使用 POST 传递要插入的数据。要插入的数据可以是例如来自 MySQL 的制表符分隔导出数据。通过这种方式,`INSERT` 查询可以替代 MySQL 中的 `LOAD DATA LOCAL INFILE`。 @@ -289,7 +280,6 @@ $ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- 对于成功但不返回数据表的请求,将返回空响应体。 - ## 压缩 {#compression} 压缩可用于在传输大量数据时减少网络流量,也可用于创建直接以压缩形式保存的转储文件。 @@ -321,8 +311,6 @@ $ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- 某些 HTTP 客户端可能会默认解压来自服务器的数据(例如使用 `gzip` 和 `deflate` 时),因此即使正确配置了压缩设置,仍有可能收到已解压的数据。 ::: - - ## 示例 {#examples-compression} 要向服务器发送压缩后的数据: @@ -354,7 +342,6 @@ curl -sS "http://localhost:8123/?enable_http_compression=1" \ 2 ``` - ## 默认数据库 {#default-database} 你可以使用 `database` URL 参数或 `X-ClickHouse-Database` 请求头来指定默认数据库。 @@ -375,7 +362,6 @@ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?databa 默认情况下,服务器设置中登记的数据库会被用作默认数据库。开箱即用时,该数据库名为 `default`。另外,你也可以通过在表名前加上“数据库名.” 的方式来显式指定要使用的数据库。 - ## 认证 {#authentication} 可以通过以下三种方式之一指定用户名和密码: @@ -436,7 +422,6 @@ $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:812 * [设置](/operations/settings/settings) * [SET](/sql-reference/statements/set) - ## 在 HTTP 协议中使用 ClickHouse 会话 {#using-clickhouse-sessions-in-the-http-protocol} 你也可以在 HTTP 协议中使用 ClickHouse 会话。为此,需要在请求中添加 `session_id` `GET` 参数。你可以使用任意字符串作为会话 ID。 @@ -478,7 +463,6 @@ X-ClickHouse-Progress: {"read_rows":"1000000","read_bytes":"8000000","total_rows HTTP 接口允许传递外部数据(外部临时表)用于查询。更多信息请参见[“用于查询处理的外部数据”](/engines/table-engines/special/external-data)。 - ## 响应缓冲 {#response-buffering} 可以在服务端启用响应缓冲。为此可使用以下 URL 参数: @@ -505,7 +489,6 @@ curl -sS 
'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wa 使用缓冲可以避免出现这样一种情况:在已经向客户端发送响应状态码和 HTTP 头之后,查询处理才发生错误。在这种情况下,错误消息会被写入响应正文的末尾,而在客户端只能在解析阶段才能检测到该错误。 ::: - ## 使用查询参数设置角色 {#setting-role-with-query-parameters} 该功能在 ClickHouse 24.4 中引入。 @@ -539,7 +522,6 @@ curl -sS "http://localhost:8123?role=my_role&role=my_other_role" --data-binary " 在这种情况下,`?role=my_role&role=my_other_role` 与在执行该语句之前运行 `SET ROLE my_role, my_other_role` 的效果类似。 - ## HTTP 响应状态码注意事项 {#http_response_codes_caveats} 由于 HTTP 协议的限制,HTTP 200 响应状态码并不能保证查询一定成功。 @@ -625,7 +607,6 @@ $ curl -v -Ss "http://localhost:8123/?max_block_size=1&query=select+sleepEachRow 0,0 ``` - **异常** rumfyutuqkncbgau Code: 395. DB::Exception: 传递给 'throwIf' 函数的值为非零:在执行 'FUNCTION throwIf(equals(__table1.number, 2_UInt8) :: 1) -> throwIf(equals(__table1.number, 2_UInt8)) UInt8 : 0' 时。(FUNCTION_THROW_IF_VALUE_IS_NON_ZERO) (version 25.11.1.1) @@ -635,7 +616,6 @@ Code: 395. DB::Exception: 传递给 'throwIf' 函数的值为非零: ``` ``` - ## 参数化查询 {#cli-queries-with-parameters} 可以创建参数化查询,并通过相应 HTTP 请求中的参数为其传递值。欲了解更多信息,请参阅[CLI 参数化查询](../interfaces/cli.md#cli-queries-with-parameters)。 @@ -675,7 +655,6 @@ curl -sS "http://localhost:8123?param_arg1=abc%5C%09123" -d "SELECT splitByChar( ['abc','123'] ``` - ## 预定义的 HTTP 接口 {#predefined_http_interface} ClickHouse 通过 HTTP 接口支持特定查询。例如,可以通过以下方式向表中写入数据: @@ -707,7 +686,6 @@ ClickHouse 还支持预定义 HTTP 接口(Predefined HTTP Interface),可 现在可以直接通过该 URL 请求 Prometheus 格式的数据: - ```bash $ curl -v 'http://localhost:8123/predefined_query' * Trying ::1... 
@@ -734,25 +712,18 @@ $ curl -v 'http://localhost:8123/predefined_query' "Query" 1 ``` - # HELP "Merge" "后台正在执行的合并数量" {#help-merge-number-of-executing-background-merges} # TYPE "Merge" counter {#type-merge-counter} "Merge" 0 - - # HELP "PartMutation" "Mutation 操作次数(ALTER DELETE/UPDATE)" {#help-partmutation-number-of-mutations-alter-deleteupdate} # TYPE "PartMutation" counter {#type-partmutation-counter} "PartMutation" 0 - - # HELP "ReplicatedFetch" "正在从副本拉取的数据分片数量" {#help-replicatedfetch-number-of-data-parts-being-fetched-from-replica} # TYPE "ReplicatedFetch" counter {#type-replicatedfetch-counter} "ReplicatedFetch" 0 - - # HELP "ReplicatedSend" "正在发送到副本的数据分片数量" {#help-replicatedsend-number-of-data-parts-being-sent-to-replicas} # TYPE "ReplicatedSend" counter {#type-replicatedsend-counter} @@ -825,7 +796,6 @@ $ curl -v 'http://localhost:8123/predefined_query' 例如: ``` - ```yaml @@ -917,7 +887,6 @@ max_final_threads 2 可以使用 `http_response_headers` 来设置内容类型,而无需使用 `content_type`。 - ```yaml @@ -1005,7 +974,6 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler' 要在发送给客户端的文件中查找内容: - ```yaml @@ -1102,7 +1070,6 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler' ``` - ## HTTP 响应头 {#http-response-headers} ClickHouse 允许配置自定义的 HTTP 响应头,这些响应头可以应用于任何可配置的处理程序。可以通过 `http_response_headers` 设置这些响应头,该设置接受表示响应头名称及其值的键值对。此功能对于实现自定义安全响应头、CORS 策略,或在 ClickHouse HTTP 接口中统一满足其他 HTTP 响应头需求特别有用。 @@ -1139,7 +1106,6 @@ ClickHouse 允许配置自定义的 HTTP 响应头,这些响应头可以应用 ``` - ## 在 HTTP 流式传输期间出现异常时返回合法的 JSON/XML 响应 {#valid-output-on-exception-http-streaming} 当通过 HTTP 执行查询时,即便部分数据已经发送,仍然可能会抛出异常。通常情况下,异常会以纯文本的形式发送给客户端。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/mysql.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/mysql.md index e5c9234b283..58784c53f14 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/mysql.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/mysql.md @@ -13,7 
+13,6 @@ import mysql1 from '@site/static/images/interfaces/mysql1.png'; import mysql2 from '@site/static/images/interfaces/mysql2.png'; import mysql3 from '@site/static/images/interfaces/mysql3.png'; - # MySQL 接口 {#mysql-interface} ClickHouse 支持 MySQL 线协议(wire protocol)。这使得某些没有原生 ClickHouse 连接器的客户端可以改用 MySQL 协议进行连接,并且已经与以下 BI 工具完成验证: @@ -36,8 +35,6 @@ ClickHouse 支持 MySQL 线协议(wire protocol)。这使得某些没有原 这一行为无法关闭,并且在极少数边缘场景下,可能会导致发送到 ClickHouse 常规查询接口与 MySQL 查询接口的查询产生不同行为。 :::: - - ## 在 ClickHouse Cloud 上启用 MySQL 接口 {#enabling-the-mysql-interface-on-clickhouse-cloud} 1. 创建好 ClickHouse Cloud 服务后,点击 `Connect` 按钮。 @@ -62,8 +59,6 @@ ClickHouse 支持 MySQL 线协议(wire protocol)。这使得某些没有原 - - ## 在 ClickHouse Cloud 中创建多个 MySQL 用户 {#creating-multiple-mysql-users-in-clickhouse-cloud} 默认情况下,系统内置了一个 `mysql4` 用户,它使用与 `default` 用户相同的密码。`` 部分是你的 ClickHouse Cloud 主机名的第一个片段。要与那些实现了安全连接、但在 TLS 握手中**不**提供 [SNI 信息](https://www.cloudflare.com/learning/ssl/what-is-sni) 的工具配合使用,就必须采用这种格式;否则在用户名中没有这个额外提示的情况下,无法完成内部路由(MySQL 控制台客户端就是此类工具之一)。 @@ -116,7 +111,6 @@ ERROR 2013 (HY000): 在'读取授权数据包'时与 MySQL 服务器失去连接 在这种情况下,请确保用户名符合 `mysql4_` 格式,如[上文](#creating-multiple-mysql-users-in-clickhouse-cloud)所述。 - ## 在自管 ClickHouse 上启用 MySQL 接口 {#enabling-the-mysql-interface-on-self-managed-clickhouse} 将 [mysql_port](../operations/server-configuration-parameters/settings.md#mysql_port) 设置添加到服务器的配置文件中。例如,你可以在 `config.d/` [文件夹](../operations/configuration-files) 中新建一个 XML 文件来定义该端口: @@ -133,7 +127,6 @@ ERROR 2013 (HY000): 在'读取授权数据包'时与 MySQL 服务器失去连接 {} Application: 正在监听 MySQL 兼容协议:127.0.0.1:9004 ``` - ## 将 MySQL 连接到 ClickHouse {#connect-mysql-to-clickhouse} 以下命令演示了如何使用 MySQL 客户端 `mysql` 连接到 ClickHouse: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md index 4a53a95c008..0b5212ab1c8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/schema-inference.md @@ -9,14 +9,10 @@ doc_type: 'reference' ClickHouse 可以在几乎所有受支持的 [输入格式](formats.md) 中自动确定输入数据的结构。 本文档将介绍在何种情况下会使用 schema 推断、它在不同输入格式中的工作方式,以及可以用来控制它的设置。 - - ## 用法 {#usage} 当 ClickHouse 需要读取特定格式的数据但其结构未知时,会使用模式推断。 - - ## 表函数 [file](../sql-reference/table-functions/file.md)、[s3](../sql-reference/table-functions/s3.md)、[url](../sql-reference/table-functions/url.md)、[hdfs](../sql-reference/table-functions/hdfs.md)、[azureBlobStorage](../sql-reference/table-functions/azureBlobStorage.md)。 {#table-functions-file-s3-url-hdfs-azureblobstorage} 这些表函数支持一个可选参数 `structure`,用于指定输入数据的结构。如果未指定该参数或将其设置为 `auto`,则会自动从数据中推断结构。 @@ -64,7 +60,6 @@ DESCRIBE file('hobbies.jsonl') └─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## 表引擎 [File](../engines/table-engines/special/file.md)、[S3](../engines/table-engines/integrations/s3.md)、[URL](../engines/table-engines/special/url.md)、[HDFS](../engines/table-engines/integrations/hdfs.md)、[azureBlobStorage](../engines/table-engines/integrations/azureBlobStorage.md) {#table-engines-file-s3-url-hdfs-azureblobstorage} 如果在 `CREATE TABLE` 查询中未指定列列表,表结构将会根据数据自动推断。 @@ -107,7 +102,6 @@ DESCRIBE TABLE hobbies └─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## clickhouse-local {#clickhouse-local} `clickhouse-local` 提供一个可选参数 `-S/--structure`,用于指定输入数据的结构。如果未指定该参数或将其设置为 `auto`,则会从数据中自动推断结构。 @@ -138,7 +132,6 @@ clickhouse-local --file='hobbies.jsonl' --table='hobbies' --query='SELECT * FROM 4 47 Brayan ['movies','skydiving'] ``` - ## 使用插入表的结构 {#using-structure-from-insertion-table} 当使用 `file/s3/url/hdfs` 表函数向表中插入数据时, @@ -247,7 +240,6 @@ INSERT INTO hobbies4 SELECT id, empty(hobbies) ? 
NULL : hobbies[1] FROM file(hob 在这种情况下,由于在 `SELECT` 查询中对列 `hobbies` 进行了某些操作后再将其插入表中,ClickHouse 无法复用插入目标表的结构,而是会使用 schema 推断。 - ## Schema inference cache {#schema-inference-cache} 对于大多数输入格式,schema 推断会读取一部分数据来确定其结构,这个过程可能需要一定时间。 @@ -270,8 +262,6 @@ INSERT INTO hobbies4 SELECT id, empty(hobbies) ? NULL : hobbies[1] FROM file(hob 我们来尝试对来自 S3 的示例数据集 `github-2022.ndjson.gz` 进行结构推断,并观察 schema 推断缓存是如何工作的: - - ```sql DESCRIBE TABLE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/github/github-2022.ndjson.gz') ``` @@ -415,7 +405,6 @@ SELECT count() FROM system.schema_inference_cache WHERE storage='S3' └─────────┘ ``` - ## 文本格式 {#text-formats} 对于文本格式,ClickHouse 逐行读取数据,根据格式提取列值, @@ -487,7 +476,6 @@ DESC format(JSONEachRow, '{"arr" : [null, 42, null]}') └──────┴────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - 如果一个数组包含不同类型的值,并且启用了设置 `input_format_json_infer_array_of_dynamic_from_array_of_different_types`(该设置默认启用),则该数组的类型为 `Array(Dynamic)`: ```sql @@ -554,7 +542,6 @@ Maps: 在 JSON 中,我们可以将值类型相同的对象读取为 Map 类型。 注意:仅当设置 `input_format_json_read_objects_as_strings` 和 `input_format_json_try_infer_named_tuples_from_objects` 被禁用时,此功能才会生效。 - ```sql SET input_format_json_read_objects_as_strings = 0, input_format_json_try_infer_named_tuples_from_objects = 0; DESC format(JSONEachRow, '{"map" : {"key1" : 42, "key2" : 24, "key3" : 4}}') @@ -641,7 +628,6 @@ DESC format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello"}}, {"obj" : {"a" : 4 结果: - ```response ┌─name─┬─type───────────────────────────────────────────────────────────────────────────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ │ obj │ Tuple(a Nullable(Int64), b Nullable(String), c Array(Nullable(Int64)), d Tuple(e Nullable(Int64))) │ │ │ │ │ │ @@ -716,7 +702,6 @@ SELECT * FROM format(JSONEachRow, '{"obj" : {"a" : 42}}, {"obj" : {"a" : {"b" : 注意:仅当设置 `input_format_json_try_infer_named_tuples_from_objects` 
被禁用时,启用此设置才会生效。 - ```sql SET input_format_json_read_objects_as_strings = 1, input_format_json_try_infer_named_tuples_from_objects = 0; DESC format(JSONEachRow, $$ @@ -818,7 +803,6 @@ SELECT arr, toTypeName(arr), JSONExtractArrayRaw(arr)[3] from format(JSONEachRow ##### input_format_json_infer_incomplete_types_as_strings {#input_format_json_infer_incomplete_types_as_strings} - 启用此设置后,在模式推断期间,对于数据样本中仅包含 `Null`/`{}`/`[]` 的 JSON 键,可以使用 String 类型。 在 JSON 格式中,如果启用了所有相关设置(默认均启用),任何值都可以按 String 类型读取。这样,在模式推断时,对于类型未知的键使用 String 类型,就可以避免出现类似 `Cannot determine type for column 'column_name' by first 25000 rows of data, most likely this column contains only Nulls or empty Arrays/Maps` 的错误。 @@ -883,7 +867,6 @@ DESC format(CSV, 'Hello world!,World hello!') Date 和 DateTime: - ```sql DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00","2022-01-01 00:00:00.000"') ``` @@ -956,7 +939,6 @@ DESC format(CSV, $$"[{'key1' : [[42, 42], []], 'key2' : [[null], [42]]}]"$$) └──────┴───────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - 如果 ClickHouse 无法根据引号中的内容确定类型,而数据又只包含 null 值时,ClickHouse 会将其视为 String: ```sql @@ -1062,7 +1044,6 @@ DESC format(CSV, '42,42.42'); └──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ### TSV/TSKV {#tsv-tskv} 在 TSV/TSKV 格式中,ClickHouse 会根据制表符分隔符从行中提取列值,然后使用递归解析器解析该值以确定最合适的类型。若无法确定类型,ClickHouse 会将该值视为 String。 @@ -1116,7 +1097,6 @@ DESC format(TSV, '2020-01-01 2020-01-01 00:00:00 2022-01-01 00:00:00.000') └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - 数组: ```sql @@ -1189,7 +1169,6 @@ DESC format(TSV, $$[{'key1' : [(42, 'Hello'), (24, NULL)], 'key2' : [(NULL, ',') └──────┴─────────────────────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - 如果 ClickHouse 
无法确定类型,因为数据仅包含 null 值,则会将其视为 String: ```sql @@ -1279,7 +1258,6 @@ $$) **示例:** - 整数、浮点数、布尔值、字符串: ```sql @@ -1358,7 +1336,6 @@ DESC format(Values, $$({'key1' : 42, 'key2' : 24})$$) └──────┴──────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - 嵌套数组、元组和映射: ```sql @@ -1434,7 +1411,6 @@ $$) 表头自动检测示例(启用 `input_format_custom_detect_header` 后): - ```sql SET format_custom_row_before_delimiter = '', format_custom_row_after_delimiter = '\n', @@ -1514,7 +1490,6 @@ SET format_regexp = '^Line: value_1=(.+?), value_2=(.+?), value_3=(.+?)', format_regexp_escaping_rule = 'CSV' ``` - DESC format(Regexp, $$Line: value_1=42, value_2="Some string 1", value_3="[1, NULL, 3]" Line: value_1=2, value_2="Some string 2", value_3="[4, 5, NULL]"$$) @@ -1579,7 +1554,6 @@ DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : nul #### schema_inference_make_columns_nullable $ {#schema-inference-make-columns-nullable} - 控制在对缺少空值信息的格式进行 schema 推断时,是否将推断出的类型设为 `Nullable`。可能的取值: * 0 - 推断类型永远不会是 `Nullable`, @@ -1646,7 +1620,6 @@ DESC format(JSONEachRow, $$ └─────────┴───────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - #### input_format_try_infer_integers {#input-format-try-infer-integers} :::note @@ -1725,7 +1698,6 @@ DESC format(JSONEachRow, $$ **示例** - ```sql SET input_format_try_infer_datetimes = 0; DESC format(JSONEachRow, $$ @@ -1796,7 +1768,6 @@ DESC format(JSONEachRow, $$ 注意:在进行模式推断时解析日期时间值,会遵守设置 [date_time_input_format](/operations/settings/settings-formats.md#date_time_input_format)。 - #### input_format_try_infer_dates {#input-format-try-infer-dates} 启用后,ClickHouse 会在对文本格式进行 schema 推断时,尝试从字符串字段中推断出 `Date` 类型。 @@ -1871,7 +1842,6 @@ $$) └──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## 自描述格式 {#self-describing-formats} 自描述格式在数据本身中就包含关于数据结构的信息, @@ -1961,7 +1931,6 @@ $$) 在 Avro 格式中,ClickHouse 
从数据中读取模式(schema),并使用以下类型映射将其转换为 ClickHouse 的模式: - | Avro 数据类型 | ClickHouse 数据类型 | |------------------------------------|--------------------------------------------------------------------------------| | `boolean` | [Bool](../sql-reference/data-types/boolean.md) | @@ -2015,8 +1984,6 @@ $$) 在 Arrow 格式中,ClickHouse 从数据中读取 schema,并使用以下类型映射将其转换为 ClickHouse 的 schema: - - | Arrow 数据类型 | ClickHouse 数据类型 | |---------------------------------|---------------------------------------------------------| | `BOOL` | [Bool](../sql-reference/data-types/boolean.md) | @@ -2069,8 +2036,6 @@ $$) Native 格式在 ClickHouse 内部使用,并在数据中包含模式(schema)。 在模式推断时,ClickHouse 直接从数据中读取模式,而不进行任何转换。 - - ## 具有外部模式的格式 {#formats-with-external-schema} 此类格式需要在单独的文件中,使用特定的模式语言来描述数据的模式。 @@ -2117,8 +2082,6 @@ Native 格式在 ClickHouse 内部使用,并在数据中包含模式(schema | `struct` | [Tuple](../sql-reference/data-types/tuple.md) | | `union(T, Void)`, `union(Void, T)` | [Nullable(T)](../sql-reference/data-types/nullable.md) | - - ## 强类型二进制格式 {#strong-typed-binary-formats} 在此类格式中,每个序列化值都包含其类型的信息(以及可能包含其名称的信息),但不会包含关于整个表的信息。 @@ -2165,8 +2128,6 @@ ClickHouse 使用以下类型对应关系: 默认情况下,所有推断出的类型都会被包装在 `Nullable` 中,但可以通过设置 `schema_inference_make_columns_nullable` 来更改这一行为。 - - ## 具有固定 schema 的格式 {#formats-with-constant-schema} 此类格式中的数据始终使用相同的 schema。 @@ -2219,7 +2180,6 @@ DESC format(JSONAsObject, '{"x" : 42, "y" : "Hello, World!"}'); └──────┴──────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` - ## Schema 推断模式 {#schema-inference-modes} 从一组数据文件中进行 Schema 推断时,可以使用两种不同的工作模式:`default` 和 `union`。 @@ -2330,7 +2290,6 @@ DESC format(JSONAsObject, '{"x" : 42, "y" : "Hello, World!"}'); * 如果 ClickHouse 无法从某个文件推断出 schema,将会抛出异常。 * 如果你有大量文件,从所有文件中读取 schema 可能会花费很多时间。 - ## 自动格式检测 {#automatic-format-detection} 如果未指定数据格式且无法通过文件扩展名确定,ClickHouse 将尝试根据文件内容检测文件格式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md index 3de0f498e2b..897c93304e0 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/third-party/gui.md @@ -7,12 +7,8 @@ title: '第三方开发者提供的图形界面' doc_type: 'reference' --- - - # 第三方开发者的可视化界面 {#visual-interfaces-from-third-party-developers} - - ## 开源 {#open-source} ### agx {#agx} @@ -117,8 +113,6 @@ doc_type: 'reference' ### LightHouse {#lighthouse} - - [LightHouse](https://github.com/VKCOM/lighthouse) 是一个适用于 ClickHouse 的轻量级 Web 界面。 功能: @@ -201,8 +195,6 @@ ClickHouse 数据源插件为 ClickHouse 作为后端数据库提供支持。 ### MindsDB Studio {#mindsdb} - - [MindsDB](https://mindsdb.com/) 是一个面向包括 ClickHouse 在内的数据库的开源 AI 层,可以让你轻松开发、训练和部署最先进的机器学习模型。MindsDB Studio(GUI)可以从数据库中训练新模型、解释模型生成的预测结果、识别潜在的数据偏差,并使用可解释 AI 功能评估和可视化模型精度,从而更快速地调整和优化你的机器学习模型。 ### DBM {#dbm} @@ -303,8 +295,6 @@ ClickHouse 数据源插件为 ClickHouse 作为后端数据库提供支持。 ### CKibana {#ckibana} - - [CKibana](https://github.com/TongchengOpenSource/ckibana) 是一款轻量级服务,可让你使用原生 Kibana UI 轻松搜索、探索和可视化 ClickHouse 数据。 功能特性: @@ -329,8 +319,6 @@ ClickHouse 数据源插件为 ClickHouse 作为后端数据库提供支持。 [Telescope 源码](https://github.com/iamtelescope/telescope) · [在线演示](https://demo.iamtelescope.net) - - ## 商业版 {#commercial} ### DataGrip {#datagrip} @@ -410,8 +398,6 @@ SeekTable 对于个人/个体使用是[免费的](https://www.seektable.com/help [TABLUM.IO](https://tablum.io/) 是一款用于 ETL 和可视化的在线查询与分析工具。它支持连接 ClickHouse,可通过通用的 SQL 控制台查询数据,也可以从静态文件和第三方服务加载数据。TABLUM.IO 可以将查询结果可视化为图表和表格。 - - 功能: - ETL:从常见数据库、本地和远程文件以及 API 调用中加载数据。 - 功能强大的 SQL 控制台,支持语法高亮和可视化查询构建器。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md index c1f85a23bb5..0940f5cd31a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/interfaces/third-party/proxy.md @@ -7,12 +7,8 @@ title: '第三方开发的代理服务器' doc_type: 'reference' --- - - # 第三方开发的代理服务器 {#proxy-servers-from-third-party-developers} - - ## chproxy {#chproxy} [chproxy](https://github.com/Vertamedia/chproxy) 是一个用于 ClickHouse 数据库的 HTTP 代理和负载均衡器。 @@ -25,8 +21,6 @@ doc_type: 'reference' 由 Go 语言实现。 - - ## KittenHouse {#kittenhouse} [KittenHouse](https://github.com/VKCOM/kittenhouse) 旨在在无法或不方便在应用程序端对 `INSERT` 数据进行缓冲时,作为 ClickHouse 与应用服务器之间的本地代理。 @@ -39,8 +33,6 @@ doc_type: 'reference' 使用 Go 语言实现。 - - ## ClickHouse-Bulk {#clickhouse-bulk} [ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) 是一个简单的 ClickHouse 数据写入收集器。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/intro.md b/i18n/zh/docusaurus-plugin-content-docs/current/intro.md index 232912a3736..08d574b7995 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/intro.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/intro.md @@ -14,7 +14,6 @@ import Image from '@theme/IdealImage'; ClickHouse® 是一款高性能的列式存储 SQL 数据库管理系统(DBMS),用于联机分析处理(OLAP)。它既可以作为[开源软件](https://github.com/ClickHouse/ClickHouse),也可以作为[云服务](https://clickhouse.com/cloud)提供。 - ## 什么是分析? 
{#what-are-analytics} Analytics(也称为 OLAP,即联机分析处理,Online Analytical Processing)是指在海量数据集上执行包含复杂计算(例如聚合、字符串处理、算术运算)的 SQL 查询。 @@ -23,8 +22,6 @@ Analytics(也称为 OLAP,即联机分析处理,Online Analytical Processin 在许多应用场景中,[分析查询必须是“实时”的](https://clickhouse.com/engineering-resources/what-is-real-time-analytics),即在不到一秒内返回结果。 - - ## 行式存储 vs 列式存储 {#row-oriented-vs-column-oriented-storage} 只有采用合适的数据组织方式,才能达到这样的性能水平。 @@ -65,51 +62,36 @@ LIMIT 8; **列式 DBMS** - 由于每一列的值在磁盘上依次顺序存储,执行上面的查询时不会加载不必要的数据。 由于按数据块进行存储并从磁盘传输到内存的方式与分析型查询的数据访问模式相匹配,查询只会从磁盘读取所需的列,从而避免为未使用的数据执行不必要的 I/O 操作。相比之下,在[基于行的存储](https://benchmark.clickhouse.com/)中,会读取整行数据(包括无关的列),效率要低得多: - - ## 数据复制与完整性 {#data-replication-and-integrity} ClickHouse 使用异步多主复制架构,确保数据在多个节点上冗余存储。数据写入任一可用副本后,其余所有副本会在后台获取各自的副本数据。系统会在不同副本上维护完全一致的数据。在大多数故障情况下,系统能够自动完成恢复;在复杂场景下,则采用半自动方式完成恢复。 - - ## 基于角色的访问控制 {#role-based-access-control} ClickHouse 通过 SQL 查询实现用户账号管理,并支持配置基于角色的访问控制,方式类似于 ANSI SQL 标准和主流关系型数据库管理系统中的实现。 - - ## SQL 支持 {#sql-support} ClickHouse 支持一种[基于 SQL 的声明式查询语言](/sql-reference),在很多方面与 ANSI SQL 标准保持一致。支持的查询子句包括 [GROUP BY](/sql-reference/statements/select/group-by)、[ORDER BY](/sql-reference/statements/select/order-by)、[FROM](/sql-reference/statements/select/from) 中的子查询、[JOIN](/sql-reference/statements/select/join) 子句、[IN](/sql-reference/operators/in) 运算符、[窗口函数](/sql-reference/window-functions) 以及标量子查询。 - - ## 近似计算 {#approximate-calculation} ClickHouse 提供了一些以精度换取性能的方式。例如,其中一些聚合函数可以近似计算不同值的数量、中位数和分位数。此外,可以在数据的一个样本上执行查询,从而快速得到近似结果。最后,可以只在有限数量的键上执行聚合,而不是对所有键进行聚合。根据键分布偏斜程度的不同,这种方式在显著减少相较于精确计算所需资源的同时,仍然可以提供相当精确的结果。 - - ## 自适应 Join 算法 {#adaptive-join-algorithms} ClickHouse 会自适应地选择 join 算法:它首先使用快速的哈希 join,当存在多张大表时,会回退为合并 join。 - - ## 卓越的查询性能 {#superior-query-performance} ClickHouse 以其极快的查询性能而闻名。 要了解 ClickHouse 为何如此之快,请参阅 [Why is ClickHouse fast?](/concepts/why-clickhouse-is-so-fast.mdx) 指南。 - - - ## 相关资源 {#related-resources} - [ClickHouse 中的财务函数视频](https://www.youtube.com/watch?v=BePLPVa0w_o) diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md index 620efdcc913..3048c98f010 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/geohash.md @@ -6,16 +6,12 @@ title: '用于处理 Geohash 的函数' doc_type: 'reference' --- - - ## Geohash {#geohash} [Geohash](https://en.wikipedia.org/wiki/Geohash) 是一种地理编码系统,它将地球表面划分为网格状的区域(bucket),并将每个单元编码为由字母和数字组成的短字符串。它是一种分层数据结构,因此 geohash 字符串越长,表示的地理位置就越精确。 如果需要手动将地理坐标转换为 geohash 字符串,可以使用 [geohash.org](http://geohash.co/)。 - - ## geohashEncode {#geohashencode} 将纬度和经度编码为 [geohash](#geohash) 字符串。 @@ -58,7 +54,6 @@ SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res; └──────────────┘ ``` - ## geohashDecode {#geohashdecode} 将任意 [geohash](#geohash) 编码字符串解码为经度和纬度。 @@ -89,7 +84,6 @@ SELECT geohashDecode('ezs42') AS res; └─────────────────────────────────┘ ``` - ## geohashesInBox {#geohashesinbox} 返回一个由指定精度的 [geohash](#geohash) 编码字符串组成的数组,这些字符串对应的区域位于给定矩形区域内或与其边界相交,本质上是将一个二维网格扁平化为数组。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md index b6582bb68fb..73aee96e10a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/h3.md @@ -6,8 +6,6 @@ title: '用于处理 H3 索引的函数' doc_type: 'reference' --- - - ## H3 索引 {#h3-index} [H3](https://h3geo.org/) 是一种地理空间索引系统,将地球表面划分为等大的六边形网格单元。该系统是分层的,即顶层的每个六边形(“父”)可以被划分为七个更小但同样等大的六边形(“子”),以此类推。 @@ -20,8 +18,6 @@ H3 索引主要用于对地理位置进行分桶(bucketing)以及其他地 关于 H3 系统的完整说明可在 [Uber Engineering 网站](https://www.uber.com/blog/h3/) 上找到。 - - ## h3IsValid {#h3isvalid} 检查给定数值是否为有效的 [H3](#h3-index) 索引。 @@ -57,7 +53,6 @@ SELECT 
h3IsValid(630814730351855103) AS h3IsValid; └───────────┘ ``` - ## h3GetResolution {#h3getresolution} 返回给定 [H3](#h3-index) 索引的分辨率。 @@ -93,7 +88,6 @@ SELECT h3GetResolution(639821929606596015) AS resolution; └────────────┘ ``` - ## h3EdgeAngle {#h3edgeangle} 计算 [H3](#h3-index) 六边形边长以角度制表示的平均值。 @@ -128,7 +122,6 @@ SELECT h3EdgeAngle(10) AS edgeAngle; └───────────────────────┘ ``` - ## h3EdgeLengthM {#h3edgelengthm} 计算 [H3](#h3-index) 六边形单元格边的平均长度(单位:米)。 @@ -163,7 +156,6 @@ SELECT h3EdgeLengthM(15) AS edgeLengthM; └─────────────┘ ``` - ## h3EdgeLengthKm {#h3edgelengthkm} 计算一个 [H3](#h3-index) 六边形边长的平均值(单位:千米)。 @@ -198,7 +190,6 @@ SELECT h3EdgeLengthKm(15) AS edgeLengthKm; └──────────────┘ ``` - ## geoToH3 {#geotoh3} 返回指定分辨率下 `(lat, lon)` 点的 [H3](#h3-index) 索引。 @@ -238,7 +229,6 @@ SELECT geoToH3(55.71290588, 37.79506683, 15) AS h3Index; └────────────────────┘ ``` - ## h3ToGeo {#h3togeo} 返回与提供的 [H3](#h3-index) 索引对应的中心点纬度和经度。 @@ -275,7 +265,6 @@ SELECT h3ToGeo(644325524701193974) AS coordinates; └───────────────────────────────────────┘ ``` - ## h3ToGeoBoundary {#h3togeoboundary} 返回由 `(lat, lon)` 坐标对组成的数组,这些坐标对应于给定 H3 索引的边界。 @@ -310,7 +299,6 @@ SELECT h3ToGeoBoundary(644325524701193974) AS coordinates; └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3kRing {#h3kring} 以给定六边形为中心,随机列出其半径为 `k` 范围内的所有 [H3](#h3-index) 六边形。 @@ -352,7 +340,6 @@ SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index; └────────────────────┘ ``` - ## h3PolygonToCells {#h3polygontocells} 返回在指定分辨率下位于给定几何体(环或(多)多边形)内的六边形单元。 @@ -397,7 +384,6 @@ SELECT h3PolygonToCells([(-122.4089866999972145,37.813318999983238),(-122.354473 └────────────────────┘ ``` - ## h3GetBaseCell {#h3getbasecell} 返回 [H3](#h3-index) 索引的基础单元编号。 @@ -432,7 +418,6 @@ SELECT h3GetBaseCell(612916788725809151) AS basecell; 
└──────────┘ ``` - ## h3HexAreaM2 {#h3hexaream2} 返回在给定分辨率下的六边形平均面积(以平方米计)。 @@ -467,7 +452,6 @@ SELECT h3HexAreaM2(13) AS 面积; └──────┘ ``` - ## h3HexAreaKm2 {#h3hexareakm2} 返回给定分辨率下六边形的平均面积(平方千米)。 @@ -502,7 +486,6 @@ SELECT h3HexAreaKm2(13) AS area; └───────────┘ ``` - ## h3IndexesAreNeighbors {#h3indexesareneighbors} 返回给定的 [H3](#h3-index) 索引是否互为邻居。 @@ -539,7 +522,6 @@ SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n; └───┘ ``` - ## h3ToChildren {#h3tochildren} 返回给定 [H3](#h3-index) 索引的子索引数组。 @@ -575,7 +557,6 @@ SELECT h3ToChildren(599405990164561919, 6) AS children; └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3ToParent {#h3toparent} 返回包含给定 [H3](#h3-index) 索引的父级(更粗粒度)索引。 @@ -611,7 +592,6 @@ SELECT h3ToParent(599405990164561919, 3) AS parent; └────────────────────┘ ``` - ## h3ToString {#h3tostring} 将索引的 `H3Index` 表示形式转换为字符串形式。 @@ -644,7 +624,6 @@ SELECT h3ToString(617420388352917503) AS h3_string; └─────────────────┘ ``` - ## stringToH3 {#stringtoh3} 将字符串形式转换为 `H3Index`(UInt64)表示。 @@ -679,7 +658,6 @@ SELECT stringToH3('89184926cc3ffff') AS index; └────────────────────┘ ``` - ## h3GetResolution {#h3getresolution-1} 返回 [H3](#h3-index) 索引的分辨率。 @@ -714,7 +692,6 @@ SELECT h3GetResolution(617420388352917503) AS res; └─────┘ ``` - ## h3IsResClassIII {#h3isresclassiii} 返回 [H3](#h3-index) 索引的分辨率是否为 Class III 朝向。 @@ -750,7 +727,6 @@ SELECT h3IsResClassIII(617420388352917503) AS res; └─────┘ ``` - ## h3IsPentagon {#h3ispentagon} 返回该 [H3](#h3-index) 索引是否表示五边形单元格。 @@ -786,7 +762,6 @@ SELECT h3IsPentagon(644721767722457330) AS pentagon; └──────────┘ ``` - ## h3GetFaces {#h3getfaces} 返回与给定 [H3](#h3-index) 索引相交的二十面体的面。 @@ -821,7 +796,6 @@ SELECT h3GetFaces(599686042433355775) AS faces; └───────┘ ``` - ## h3CellAreaM2 {#h3cellaream2} 返回与给定 H3 索引对应的单元格的精确面积(单位:平方米)。 @@ -856,7 +830,6 @@ SELECT h3CellAreaM2(579205133326352383) AS area; └────────────────────┘ 
``` - ## h3CellAreaRads2 {#h3cellarearads2} 返回给定输入 H3 索引所对应单元格的精确面积,单位为平方弧度。 @@ -891,7 +864,6 @@ SELECT h3CellAreaRads2(579205133326352383) AS area; └─────────────────────┘ ``` - ## h3ToCenterChild {#h3tocenterchild} 在指定分辨率下,返回给定 [H3](#h3-index) 索引所包含的中心子(更高分辨率)[H3](#h3-index) 索引。 @@ -927,7 +899,6 @@ SELECT h3ToCenterChild(577023702256844799,1) AS centerToChild; └────────────────────┘ ``` - ## h3ExactEdgeLengthM {#h3exactedgelengthm} 返回由输入的 h3 索引表示的单向边的精确边长(单位:米)。 @@ -962,7 +933,6 @@ SELECT h3ExactEdgeLengthM(1310277011704381439) AS exactEdgeLengthM;; └────────────────────┘ ``` - ## h3ExactEdgeLengthKm {#h3exactedgelengthkm} 返回由输入 h3 索引表示的单向边的精确长度,单位为千米。 @@ -997,7 +967,6 @@ SELECT h3ExactEdgeLengthKm(1310277011704381439) AS exactEdgeLengthKm;; └────────────────────┘ ``` - ## h3ExactEdgeLengthRads {#h3exactedgelengthrads} 返回输入 h3 索引所表示的有向边的精确长度(以弧度为单位)。 @@ -1032,7 +1001,6 @@ SELECT h3ExactEdgeLengthRads(1310277011704381439) AS exactEdgeLengthRads;; └──────────────────────┘ ``` - ## h3NumHexagons {#h3numhexagons} 返回在给定分辨率下的唯一 H3 索引数量。 @@ -1067,7 +1035,6 @@ SELECT h3NumHexagons(3) AS numHexagons; └─────────────┘ ``` - ## h3PointDistM {#h3pointdistm} 返回成对 GeoCoord 点(纬度/经度)之间的“大圆(great circle)”或“haversine”距离,单位为米。 @@ -1103,7 +1070,6 @@ SELECT h3PointDistM(-10.0 ,0.0, 10.0, 0.0) AS h3PointDistM; └───────────────────┘ ``` - ## h3PointDistKm {#h3pointdistkm} 返回 GeoCoord 点对(纬度/经度)之间的大圆(great circle)或半正矢(haversine)距离,单位为千米。 @@ -1139,7 +1105,6 @@ SELECT h3PointDistKm(-10.0 ,0.0, 10.0, 0.0) AS h3PointDistKm; └───────────────────┘ ``` - ## h3PointDistRads {#h3pointdistrads} 返回成对 GeoCoord 点(纬度/经度)之间的“大圆”(great circle)或“半正矢”(haversine)距离,单位为弧度。 @@ -1175,7 +1140,6 @@ SELECT h3PointDistRads(-10.0 ,0.0, 10.0, 0.0) AS h3PointDistRads; └────────────────────┘ ``` - ## h3GetRes0Indexes {#h3getres0indexes} 返回一个数组,包含所有分辨率为 0 的 H3 索引。 @@ -1206,7 +1170,6 @@ SELECT h3GetRes0Indexes AS indexes ; └─────────────────────────────────────────────┘ ``` - ## h3GetPentagonIndexes 
{#h3getpentagonindexes} 返回在指定分辨率下的所有五边形 H3 索引。 @@ -1241,7 +1204,6 @@ SELECT h3GetPentagonIndexes(3) AS indexes; └────────────────────────────────────────────────────────────────┘ ``` - ## h3Line {#h3line} 返回位于提供的两个索引之间的一系列索引。 @@ -1277,7 +1239,6 @@ h3Line(start,end) └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3Distance {#h3distance} 返回以网格单元数表示的两个索引之间的距离。 @@ -1315,7 +1276,6 @@ h3Distance(start,end) └──────────┘ ``` - ## h3HexRing {#h3hexring} 返回以提供的原点 `h3Index` 为中心、半径为 `k` 的六边形环中各索引。 @@ -1353,7 +1313,6 @@ h3HexRing(index, k) └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdge {#h3getunidirectionaledge} 基于给定的起点和终点返回单向边 H3 索引,出错时返回 0。 @@ -1389,7 +1348,6 @@ h3GetUnidirectionalEdge(originIndex, destinationIndex) └─────────────────────┘ ``` - ## h3UnidirectionalEdgeIsValid {#h3unidirectionaledgeisvalid} 判断给定的 H3Index 是否为有效的单向边索引。如果是单向边索引则返回 1,否则返回 0。 @@ -1425,7 +1383,6 @@ h3UnidirectionalEdgeisValid(index) └────────────┘ ``` - ## h3GetOriginIndexFromUnidirectionalEdge {#h3getoriginindexfromunidirectionaledge} 从单向边 H3Index 返回起点六边形索引。 @@ -1460,7 +1417,6 @@ h3GetOriginIndexFromUnidirectionalEdge(edge) └────────────────────┘ ``` - ## h3GetDestinationIndexFromUnidirectionalEdge {#h3getdestinationindexfromunidirectionaledge} 从单向边 `H3Index` 获取目标六边形索引。 @@ -1495,7 +1451,6 @@ h3GetDestinationIndexFromUnidirectionalEdge(edge) └────────────────────┘ ``` - ## h3GetIndexesFromUnidirectionalEdge {#h3getindexesfromunidirectionaledge} 返回给定单向边 H3Index 的起点和终点六边形索引。 @@ -1535,7 +1490,6 @@ h3GetIndexesFromUnidirectionalEdge(edge) └─────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdgesFromHexagon {#h3getunidirectionaledgesfromhexagon} 返回指定 H3Index 的所有单向边。 @@ -1570,7 +1524,6 @@ h3GetUnidirectionalEdgesFromHexagon(index) 
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## h3GetUnidirectionalEdgeBoundary {#h3getunidirectionaledgeboundary} 返回定义该单向边的坐标。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md index 164d8cf344b..411b8fd83d9 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/polygon.md @@ -6,8 +6,6 @@ title: '用于处理多边形的函数' doc_type: 'reference' --- - - ## WKT {#wkt} 基于各种[地理数据类型](../../data-types/geo.md)返回一个 WKT(Well-Known Text)几何对象。支持的 WKT 对象有: @@ -75,7 +73,6 @@ SELECT wkt([[[(0., 0.), (10., 0.), (10., 10.), (0., 10.)], [(4., 4.), (5., 4.), MULTIPOLYGON(((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10))) ``` - ## readWKTMultiPolygon {#readwktmultipolygon} 将 WKT(Well-Known Text)格式的 MultiPolygon 转换为 MultiPolygon 类型。 @@ -101,7 +98,6 @@ SELECT MultiPolygon - ## readWKTPolygon {#readwktpolygon} 将 WKT(Well-Known Text)MultiPolygon 转换为 Polygon 类型。 @@ -127,7 +123,6 @@ FORMAT Markdown 多边形 - ## readWKTPoint {#readwktpoint} ClickHouse 中的 `readWKTPoint` 函数会解析点(Point)几何对象的 Well-Known Text (WKT) 表示,并返回一个使用 ClickHouse 内部格式表示的点。 @@ -156,7 +151,6 @@ SELECT readWKTPoint('POINT (1.2 3.4)'); (1.2,3.4) ``` - ## readWKTLineString {#readwktlinestring} 解析 LineString 几何对象的 Well-Known Text(WKT)表示,并将其返回为 ClickHouse 的内部格式。 @@ -185,7 +179,6 @@ SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'); [(1,1),(2,2),(3,3),(1,1)] ``` - ## readWKTMultiLineString {#readwktmultilinestring} 解析 MultiLineString 几何对象的 Well-Known Text (WKT) 表示,并以 ClickHouse 的内部格式返回。 @@ -214,7 +207,6 @@ SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6) [[(1,1),(2,2),(3,3)],[(4,4),(5,5),(6,6)]] ``` - ## readWKTRing {#readwktring} 
解析 Polygon 几何体的 Well-Known Text (WKT) 表示形式,并返回一个使用 ClickHouse 内部格式的环(闭合线串,linestring)。 @@ -243,7 +235,6 @@ SELECT readWKTRing('POLYGON ((1 1, 2 2, 3 3, 1 1))'); [(1,1),(2,2),(3,3),(1,1)] ``` - ## polygonsWithinSpherical {#polygonswithinspherical} 根据一个多边形是否完全位于另一个多边形内部返回布尔值 true 或 false。请参阅 [https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/within/within_2.html](https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/within/within_2.html) @@ -258,7 +249,6 @@ SELECT polygonsWithinSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535879 0 ``` - ## readWKBMultiPolygon {#readwkbmultipolygon} 将 WKB(Well-Known Binary)MultiPolygon 转换为 MultiPolygon 类型。 @@ -284,7 +274,6 @@ SELECT MultiPolygon - ## readWKBPolygon {#readwkbpolygon} 将 WKB(Well-Known Binary)格式的 MultiPolygon 转换为 Polygon 类型。 @@ -310,7 +299,6 @@ FORMAT Markdown Polygon(多边形) - ## readWKBPoint {#readwkbpoint} ClickHouse 中的 `readWKBPoint` 函数会解析 Point 几何对象的 Well-Known Binary (WKB) 格式表示,并以 ClickHouse 内部格式返回一个点。 @@ -339,7 +327,6 @@ SELECT readWKBPoint(unhex('0101000000333333333333f33f3333333333330b40')); (1.2,3.4) ``` - ## readWKBLineString {#readwkblinestring} 解析 LineString 几何对象的 Well-Known Binary (WKB) 表示形式,并将其转换为 ClickHouse 的内部格式返回。 @@ -368,7 +355,6 @@ SELECT readWKBLineString(unhex('010200000004000000000000000000f03f000000000000f0 [(1,1),(2,2),(3,3),(1,1)] ``` - ## readWKBMultiLineString {#readwkbmultilinestring} 解析 MultiLineString 几何体的 Well-Known Binary (WKB) 表示,并将其返回为 ClickHouse 的内部格式。 @@ -403,7 +389,6 @@ SELECT readWKBMultiLineString(unhex('0105000000020000000102000000030000000000000 UInt8 类型,0 表示 false,1 表示 true - ## polygonsDistanceSpherical {#polygonsdistancespherical} 计算两个多边形上两点之间的最小距离,其中一个点属于第一个多边形,另一个点属于第二个多边形。这里的 Spherical 表示将坐标解释为位于一个纯理想球体上的坐标,这与真实的地球并不一致。使用这种坐标系可以加快执行速度,但结果当然不够精确。 @@ -426,7 +411,6 @@ SELECT polygonsDistanceSpherical([[[(0, 0), (0, 0.1), (0.1, 0.1), (0.1, 0)]]], [ Float64 - ## polygonsDistanceCartesian 
{#polygonsdistancecartesian} 计算两个多边形之间的距离 @@ -449,7 +433,6 @@ SELECT polygonsDistanceCartesian([[[(0, 0), (0, 0.1), (0.1, 0.1), (0.1, 0)]]], [ Float64 - ## polygonsEqualsCartesian {#polygonsequalscartesian} 若两个多边形相等,则返回 true @@ -472,7 +455,6 @@ SELECT polygonsEqualsCartesian([[[(1., 1.), (1., 4.), (4., 4.), (4., 1.)]]], [[[ UInt8 类型,0 表示 false,1 表示 true - ## polygonsSymDifferenceSpherical {#polygonssymdifferencespherical} 计算两个多边形在空间集合论中的对称差(XOR) @@ -495,7 +477,6 @@ Polygons MultiPolygon - ## polygonsSymDifferenceCartesian {#polygonssymdifferencecartesian} 与 `polygonsSymDifferenceSpherical` 相同,但其中的坐标使用笛卡尔坐标系,这种方式更接近对真实地球的建模。 @@ -518,7 +499,6 @@ Polygons MultiPolygon - ## polygonsIntersectionSpherical {#polygonsintersectionspherical} 计算球面坐标系下多边形之间的交集(AND)。 @@ -541,7 +521,6 @@ Polygons MultiPolygon - ## polygonsWithinCartesian {#polygonswithincartesian} 如果第二个多边形位于第一个多边形之内,则返回 true。 @@ -564,7 +543,6 @@ SELECT polygonsWithinCartesian([[[(2., 2.), (2., 3.), (3., 3.), (3., 2.)]]], [[[ UInt8,0 表示 false,1 表示 true - ## polygonsIntersectCartesian {#polygonsintersectcartesian} 如果两个多边形相交(共享任意公共区域或边界),则返回 true。 @@ -587,7 +565,6 @@ SELECT polygonsIntersectCartesian([[[(2., 2.), (2., 3.), (3., 3.), (3., 2.)]]], UInt8,0 表示 false,1 表示 true - ## polygonsIntersectSpherical {#polygonsintersectspherical} 如果两个多边形相交(共享任意公共区域或边界),则返回 true。参考:[https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/intersects.html](https://www.boost.org/doc/libs/1_62_0/libs/geometry/doc/html/geometry/reference/algorithms/intersects.html) @@ -610,7 +587,6 @@ SELECT polygonsIntersectSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535 UInt8,0 表示 false,1 表示 true - ## polygonConvexHullCartesian {#polygonconvexhullcartesian} 计算多边形的凸包。[参考](https://www.boost.org/doc/libs/1_61_0/libs/geometry/doc/html/geometry/reference/algorithms/convex_hull.html) @@ -635,7 +611,6 @@ MultiPolygon Polygon - ## polygonAreaSpherical {#polygonareaspherical} 计算多边形的球面面积。 @@ -658,7 +633,6 @@ Polygon 
Float - ## polygonsUnionSpherical {#polygonsunionspherical} 计算并集(OR)。 @@ -681,7 +655,6 @@ Polygons MultiPolygon - ## polygonPerimeterSpherical {#polygonperimeterspherical} 计算多边形的周长。 @@ -692,18 +665,12 @@ MultiPolygon 以下是表示津巴布韦的多边形: - - ```text POLYGON((30.0107 -15.6462,30.0502 -15.6401,30.09 -15.6294,30.1301 -15.6237,30.1699 -15.6322,30.1956 -15.6491,30.2072 -15.6532,30.2231 -15.6497,30.231 -15.6447,30.2461 -15.6321,30.2549 -15.6289,30.2801 -15.6323,30.2962 -15.639,30.3281 -15.6524,30.3567 -15.6515,30.3963 -15.636,30.3977 -15.7168,30.3993 -15.812,30.4013 -15.9317,30.4026 -16.0012,30.5148 -16.0004,30.5866 -16,30.7497 -15.9989,30.8574 -15.9981,30.9019 -16.0071,30.9422 -16.0345,30.9583 -16.0511,30.9731 -16.062,30.9898 -16.0643,31.012 -16.0549,31.0237 -16.0452,31.0422 -16.0249,31.0569 -16.0176,31.0654 -16.0196,31.0733 -16.0255,31.0809 -16.0259,31.089 -16.0119,31.1141 -15.9969,31.1585 -16.0002,31.26 -16.0235,31.2789 -16.0303,31.2953 -16.0417,31.3096 -16.059,31.3284 -16.0928,31.3409 -16.1067,31.3603 -16.1169,31.3703 -16.1237,31.3746 -16.1329,31.3778 -16.1422,31.384 -16.1488,31.3877 -16.1496,31.3956 -16.1477,31.3996 -16.1473,31.4043 -16.1499,31.4041 -16.1545,31.4027 -16.1594,31.4046 -16.1623,31.4241 -16.1647,31.4457 -16.165,31.4657 -16.1677,31.4806 -16.178,31.5192 -16.1965,31.6861 -16.2072,31.7107 -16.2179,31.7382 -16.2398,31.7988 -16.3037,31.8181 -16.3196,31.8601 -16.3408,31.8719 -16.3504,31.8807 -16.368,31.8856 -16.4063,31.8944 -16.4215,31.9103 -16.4289,32.0141 -16.4449,32.2118 -16.4402,32.2905 -16.4518,32.3937 -16.4918,32.5521 -16.5534,32.6718 -16.5998,32.6831 -16.6099,32.6879 -16.6243,32.6886 -16.6473,32.6987 -16.6868,32.7252 -16.7064,32.7309 -16.7087,32.7313 -16.7088,32.7399 -16.7032,32.7538 -16.6979,32.7693 -16.6955,32.8007 -16.6973,32.862 -16.7105,32.8934 -16.7124,32.9096 -16.7081,32.9396 -16.6898,32.9562 -16.6831,32.9685 -16.6816,32.9616 -16.7103,32.9334 -16.8158,32.9162 -16.8479,32.9005 -16.8678,32.8288 -16.9351,32.8301 -16.9415,32.8868 -17.0382,32.9285 
-17.1095,32.9541 -17.1672,32.9678 -17.2289,32.9691 -17.2661,32.9694 -17.2761,32.9732 -17.2979,32.9836 -17.3178,32.9924 -17.3247,33.0147 -17.3367,33.0216 -17.3456,33.0225 -17.3615,33.0163 -17.3772,33.0117 -17.384,32.9974 -17.405,32.9582 -17.4785,32.9517 -17.4862,32.943 -17.4916,32.9366 -17.4983,32.9367 -17.5094,32.9472 -17.5432,32.9517 -17.5514,32.9691 -17.5646,33.0066 -17.581,33.0204 -17.5986,33.0245 -17.6192,33.0206 -17.6385,33.0041 -17.6756,33.0002 -17.7139,33.0032 -17.7577,32.9991 -17.7943,32.9736 -17.8106,32.957 -17.818,32.9461 -17.8347,32.9397 -17.8555,32.9369 -17.875,32.9384 -17.8946,32.9503 -17.9226,32.9521 -17.9402,32.9481 -17.9533,32.9404 -17.96,32.9324 -17.9649,32.9274 -17.9729,32.929 -17.9823,32.9412 -17.9963,32.9403 -18.0048,32.9349 -18.0246,32.9371 -18.0471,32.9723 -18.1503,32.9755 -18.1833,32.9749 -18.1908,32.9659 -18.2122,32.9582 -18.2254,32.9523 -18.233,32.9505 -18.2413,32.955 -18.2563,32.9702 -18.2775,33.0169 -18.3137,33.035 -18.3329,33.0428 -18.352,33.0381 -18.3631,33.0092 -18.3839,32.9882 -18.4132,32.9854 -18.4125,32.9868 -18.4223,32.9995 -18.4367,33.003 -18.4469,32.9964 -18.4671,32.9786 -18.4801,32.9566 -18.4899,32.9371 -18.501,32.9193 -18.51,32.9003 -18.5153,32.8831 -18.5221,32.8707 -18.5358,32.8683 -18.5526,32.8717 -18.5732,32.8845 -18.609,32.9146 -18.6659,32.9223 -18.6932,32.9202 -18.7262,32.9133 -18.753,32.9025 -18.7745,32.8852 -18.7878,32.8589 -18.79,32.8179 -18.787,32.7876 -18.7913,32.6914 -18.8343,32.6899 -18.8432,32.6968 -18.8972,32.7032 -18.9119,32.7158 -18.9198,32.7051 -18.9275,32.6922 -18.9343,32.6825 -18.9427,32.6811 -18.955,32.6886 -18.9773,32.6903 -18.9882,32.6886 -19.001,32.6911 -19.0143,32.699 -19.0222,32.7103 -19.026,32.7239 -19.0266,32.786 -19.0177,32.8034 -19.0196,32.8142 -19.0238,32.82) -19.0283,32.823 -19.0352,32.8253 -19.0468,32.8302 -19.0591,32.8381 -19.0669,32.8475 -19.0739,32.8559 -19.0837,32.8623 -19.1181,32.8332 -19.242,32.8322 -19.2667,32.8287 -19.2846,32.8207 -19.3013,32.8061 -19.3234,32.7688 -19.3636,32.7665 
-19.3734,32.7685 -19.4028,32.7622 -19.4434,32.7634 -19.464,32.7739 -19.4759,32.7931 -19.4767,32.8113 -19.4745,32.8254 -19.4792,32.8322 -19.5009,32.8325 -19.5193,32.8254 -19.5916,32.8257 -19.6008,32.8282 -19.6106,32.8296 -19.6237,32.8254 -19.6333,32.8195 -19.642,32.8163 -19.6521,32.8196 -19.6743,32.831 -19.6852,32.8491 -19.6891,32.8722 -19.6902,32.8947 -19.6843,32.9246 -19.6553,32.9432 -19.6493,32.961 -19.6588,32.9624 -19.6791,32.9541 -19.7178,32.9624 -19.7354,32.9791 -19.7514,33.0006 -19.7643,33.0228 -19.7731,33.0328 -19.7842,33.0296 -19.8034,33.0229 -19.8269,33.0213 -19.8681,33.002 -19.927,32.9984 -20.0009,33.0044 -20.0243,33.0073 -20.032,32.9537 -20.0302,32.9401 -20.0415,32.9343 -20.0721,32.9265 -20.0865,32.9107 -20.0911,32.8944 -20.094,32.8853 -20.103,32.8779 -20.1517,32.8729 -20.1672,32.8593 -20.1909,32.8571 -20.2006,32.8583 -20.2075,32.8651 -20.2209,32.8656 -20.2289,32.8584 -20.2595,32.853 -20.2739,32.8452 -20.2867,32.8008 -20.3386,32.7359 -20.4142,32.7044 -20.4718,32.6718 -20.5318,32.6465 -20.558,32.6037 -20.5648,32.5565 -20.5593,32.5131 -20.5646,32.4816 -20.603,32.4711 -20.6455,32.4691 -20.6868,32.4835 -20.7942,32.4972 -20.8981,32.491 -20.9363,32.4677 -20.9802,32.4171 -21.0409,32.3398 -21.1341,32.3453 -21.1428,32.3599 -21.1514,32.3689 -21.163,32.3734 -21.1636,32.3777 -21.1634,32.3806 -21.1655,32.3805 -21.1722,32.3769 -21.1785,32.373 -21.184,32.3717 -21.1879,32.4446 -21.3047,32.4458 -21.309,32.4472 -21.3137,32.4085 -21.2903,32.373 -21.3279,32.3245 -21.3782,32.2722 -21.4325,32.2197 -21.4869,32.1673 -21.5413,32.1148 -21.5956,32.0624 -21.65,32.01 -21.7045,31.9576 -21.7588,31.9052 -21.8132,31.8527 -21.8676,31.8003 -21.922,31.7478 -21.9764,31.6955 -22.0307,31.6431 -22.0852,31.5907 -22.1396,31.5382 -22.1939,31.4858 -22.2483,31.4338 -22.302,31.3687 -22.345,31.2889 -22.3973,31.2656 -22.3655,31.2556 -22.358,31.2457 -22.3575,31.2296 -22.364,31.2215 -22.3649,31.2135 -22.3619,31.1979 -22.3526,31.1907 -22.3506,31.1837 -22.3456,31.1633 -22.3226,31.1526 -22.3164,31.1377 
-22.3185,31.1045 -22.3334,31.097 -22.3349,31.0876 -22.3369,31.0703 -22.3337,31.0361 -22.3196,30.9272 -22.2957,30.8671 -22.2896,30.8379 -22.2823,30.8053 -22.2945,30.6939 -22.3028,30.6743 -22.3086,30.6474 -22.3264,30.6324 -22.3307,30.6256 -22.3286,30.6103 -22.3187,30.6011 -22.3164,30.5722 -22.3166,30.5074 -22.3096,30.4885 -22.3102,30.4692 -22.3151,30.4317 -22.3312,30.4127 -22.3369,30.3721 -22.3435,30.335 -22.3447,30.3008 -22.337,30.2693 -22.3164,30.2553 -22.3047,30.2404 -22.2962,30.2217 -22.2909,30.197 -22.2891,30.1527 -22.2948,30.1351 -22.2936,30.1111 -22.2823,30.0826 -22.2629,30.0679 -22.2571,30.0381 -22.2538,30.0359 -22.2506,30.0345 -22.2461,30.0155 -22.227,30.0053 -22.2223,29.9838 -22.2177,29.974 -22.214,29.9467 -22.1983,29.9321 -22.1944,29.896 -22.1914,29.8715 -22.1793,29.8373 -22.1724,29.7792 -22.1364,29.7589 -22.1309,29.6914 -22.1341,29.6796 -22.1383,29.6614 -22.1265,29.6411 -22.1292,29.604 -22.1451,29.5702 -22.142,29.551 -22.146,29.5425 -22.1625,29.5318 -22.1724,29.5069 -22.1701,29.4569 -22.1588,29.4361 -22.1631,29.3995 -22.1822,29.378 -22.1929,29.3633 -22.1923,29.3569 -22.1909,29.3501 -22.1867,29.2736 -22.1251,29.2673 -22.1158,29.2596 -22.0961,29.2541 -22.0871,29.2444 -22.0757,29.2393 -22.0726,29.1449 -22.0753,29.108 -22.0692,29.0708 -22.051,29.0405 -22.0209,29.0216 -21.9828,29.0138 -21.9404,29.0179 -21.8981,29.0289 -21.8766,29.0454 -21.8526,29.0576 -21.8292,29.0553 -21.81,29.0387 -21.7979,28.9987 -21.786,28.9808 -21.7748,28.9519 -21.7683,28.891 -21.7649,28.8609 -21.7574,28.7142 -21.6935,28.6684 -21.68,28.6297 -21.6513,28.6157 -21.6471,28.5859 -21.6444,28.554 -21.6366,28.5429 -21.6383,28.5325 -21.6431,28.4973 -21.6515,28.4814 -21.6574,28.4646 -21.6603,28.4431 -21.6558,28.3618 -21.6163,28.3219 -21.6035,28.2849 -21.5969,28.1657 -21.5952,28.0908 -21.5813,28.0329 -21.5779,28.0166 -21.5729,28.0026 -21.5642,27.9904 -21.5519,27.9847 -21.5429,27.9757 -21.5226,27.9706 -21.5144,27.9637 -21.5105,27.9581 -21.5115,27.9532 -21.5105,27.9493 -21.5008,27.9544 
-21.4878,27.9504 -21.482,27.9433 -21.4799,27.9399 -21.478,27.9419 -21.4685,27.9496 -21.4565,27.953 -21.4487,27.9502 -21.4383,27.9205 -21.3812,27.9042 -21.3647,27.8978 -21.3554,27.8962 -21.3479,27.8967 -21.3324,27.8944 -21.3243,27.885 -21.3102,27.8491 -21.2697,27.8236 -21.2317,27.7938 -21.1974,27.7244 -21.1497,27.7092 -21.1345,27.6748 -21.0901,27.6666 -21.0712,27.6668 -21.0538,27.679 -21.0007,27.6804 -20.9796,27.6727 -20.9235,27.6726 -20.9137,27.6751 -20.8913,27.6748 -20.8799,27.676 -20.8667,27.6818 -20.8576,27.689 -20.849,27.6944 -20.8377,27.7096 -20.7567,27.7073 -20.7167,27.6825 -20.6373,27.6904 -20.6015,27.7026 -20.5661,27.7056 -20.5267,27.6981 -20.5091,27.6838 -20.4961,27.666 -20.4891,27.6258 -20.4886,27.5909 -20.4733,27.5341 -20.483,27.4539 -20.4733,27.3407 -20.473,27.306 -20.4774,27.2684 -20.4958,27.284 -20.3515,27.266 -20.2342,27.2149 -20.1105,27.2018 -20.093,27.1837 -20.0823,27.1629 -20.0766,27.1419 -20.0733,27.1297 -20.0729,27.1198 -20.0739,27.1096 -20.0732,27.0973 -20.0689,27.0865 -20.0605,27.0692 -20.0374,27.0601 -20.0276,27.0267 -20.0101,26.9943 -20.0068,26.9611 -20.0072,26.9251 -20.0009,26.8119 -19.9464,26.7745 -19.9398,26.7508 -19.9396,26.731 -19.9359,26.7139 -19.9274,26.6986 -19.9125,26.6848 -19.8945,26.6772 -19.8868,26.6738 -19.8834,26.6594 -19.8757,26.6141 -19.8634,26.5956 -19.8556,26.5819 -19.8421,26.5748 -19.8195,26.5663 -19.8008,26.5493 -19.7841,26.5089 -19.7593,26.4897 -19.7519,26.4503 -19.7433,26.4319 -19.7365,26.4128 -19.7196,26.3852 -19.6791,26.3627 -19.6676,26.3323 -19.6624,26.3244 -19.6591,26.3122 -19.6514,26.3125 -19.6496,26.3191 -19.6463,26.3263 -19.6339,26.3335 -19.613,26.331 -19.605,26.3211 -19.592,26.3132 -19.5842,26.3035 -19.5773,26.2926 -19.5725,26.2391 -19.5715,26.1945 -19.5602,26.1555 -19.5372,26.1303 -19.5011,26.0344 -19.2437,26.0114 -19.1998,25.9811 -19.1618,25.9565 -19.1221,25.9486 -19.1033,25.9449 -19.0792,25.9481 -19.0587,25.9644 -19.0216,25.9678 -19.001,25.9674 -18.9999,25.9407 -18.9213,25.8153 -18.814,25.7795 
-18.7388,25.7734 -18.6656,25.7619 -18.6303,25.7369 -18.6087,25.6983 -18.5902,25.6695 -18.566,25.6221 -18.5011,25.6084 -18.4877,25.5744 -18.4657,25.5085 -18.3991,25.4956 -18.3789,25.4905 -18.3655,25.4812 -18.3234,25.4732 -18.3034,25.4409 -18.2532,25.4088 -18.176,25.3875 -18.139,25.3574 -18.1158,25.3234 -18.0966,25.2964 -18.0686,25.255 -18.0011,25.2261 -17.9319,25.2194 -17.908,25.2194 -17.8798,25.2598 -17.7941,25.2667 -17.8009,25.2854 -17.8093,25.3159 -17.8321,25.3355 -17.8412,25.3453 -17.8426,25.3765 -17.8412,25.4095 -17.853,25.4203 -17.8549,25.4956 -17.8549,25.5007 -17.856,25.5102 -17.8612,25.5165 -17.8623,25.5221 -17.8601,25.5309 -17.851,25.5368 -17.8487,25.604 -17.8362,25.657 -17.8139,25.6814 -17.8115,25.6942 -17.8194,25.7064 -17.8299,25.7438 -17.8394,25.766 -17.8498,25.786 -17.8622,25.7947 -17.8727,25.8044 -17.8882,25.8497 -17.9067,25.8636 -17.9238,25.8475 -17.9294,25.8462 -17.9437,25.8535 -17.96,25.8636 -17.9716,25.9245 -17.999,25.967 -18.0005,25.9785 -17.999,26.0337 -17.9716,26.0406 -17.9785,26.0466 -17.9663,26.0625 -17.9629,26.0812 -17.9624,26.0952 -17.9585,26.0962 -17.9546,26.0942 -17.9419,26.0952 -17.9381,26.1012 -17.9358,26.1186 -17.9316,26.1354 -17.9226,26.1586 -17.9183,26.1675 -17.9136,26.203 -17.8872,26.2119 -17.8828,26.2211 -17.8863,26.2282 -17.8947,26.2339 -17.904,26.2392 -17.9102,26.2483 -17.9134,26.2943 -17.9185,26.3038 -17.9228,26.312 -17.9284,26.3183 -17.9344,26.3255 -17.936,26.3627 -17.9306,26.4086 -17.939,26.4855 -17.9793,26.5271 -17.992,26.5536 -17.9965,26.5702 -18.0029,26.5834 -18.0132,26.5989 -18.03,26.6127 -18.0412,26.6288 -18.0492,26.6857 -18.0668,26.7 -18.0692,26.7119 -18.0658,26.7406 -18.0405,26.7536 -18.033,26.7697 -18.029,26.794 -18.0262,26.8883 -17.9846,26.912 -17.992,26.9487 -17.9689,26.9592 -17.9647,27.0063 -17.9627,27.0213 -17.9585,27.0485 -17.9443,27.0782 -17.917,27.1154 -17.8822,27.149 -17.8425,27.1465 -17.8189,27.1453 -17.7941,27.147 -17.7839,27.1571 -17.7693,27.4221 -17.5048,27.5243 -17.4151,27.5773 -17.3631,27.6045 
-17.3128,27.6249 -17.2333,27.6412 -17.1985,27.7773 -17.0012,27.8169 -16.9596,27.8686 -16.9297,28.023 -16.8654,28.1139 -16.8276,28.2125 -16.7486,28.2801 -16.7065,28.6433 -16.5688,28.6907 -16.5603,28.7188 -16.5603,28.7328 -16.5581,28.7414 -16.5507,28.7611 -16.5323,28.7693 -16.5152,28.8089 -16.4863,28.8225 -16.4708,28.8291 -16.4346,28.8331 -16.4264,28.8572 -16.3882,28.857 -16.3655,28.8405 -16.3236,28.8368 -16.3063,28.8403 -16.2847,28.8642 -16.2312,28.8471 -16.2027,28.8525 -16.1628,28.8654 -16.1212,28.871 -16.0872,28.8685 -16.0822,28.8638 -16.0766,28.8593 -16.0696,28.8572 -16.0605,28.8603 -16.0494,28.8741 -16.0289,28.8772 -16.022,28.8989 -15.9955,28.9324 -15.9637,28.9469 -15.9572,28.9513 -15.9553,28.9728 -15.9514,29.0181 -15.9506,29.0423 -15.9463,29.0551 -15.9344,29.0763 -15.8954,29.0862 -15.8846,29.1022 -15.8709,29.1217 -15.8593,29.1419 -15.8545,29.151 -15.8488,29.1863 -15.8128,29.407 -15.7142,29.4221 -15.711,29.5085 -15.7036,29.5262 -15.6928,29.5634 -15.6621,29.5872 -15.6557,29.6086 -15.6584,29.628 -15.6636,29.6485 -15.6666,29.6728 -15.6633,29.73 -15.6447,29.7733 -15.6381,29.8143 -15.6197,29.8373 -15.6148,29.8818 -15.6188,29.9675 -15.6415,30.0107 -15.6462)) ``` - - #### polygonPerimeterSpherical 函数的使用方法 {#usage-of-polygon-perimeter-spherical} - - ```sql SELECT round(polygonPerimeterSpherical([(30.010654, -15.646227), (30.050238, -15.640129), (30.090029, -15.629381), (30.130129, -15.623696), (30.16992, -15.632171), (30.195552, -15.649121), (30.207231, -15.653152), (30.223147, -15.649741), (30.231002, -15.644677), (30.246091, -15.632068), (30.254876, -15.628864), (30.280094, -15.632275), (30.296196, -15.639042), (30.32805, -15.652428), (30.356679, -15.651498), (30.396263, -15.635995), (30.39771, -15.716817), (30.39926, -15.812005), (30.401327, -15.931688), (30.402568, -16.001244), (30.514809, -16.000418), (30.586587, -16.000004), (30.74973, -15.998867), (30.857424, -15.998144), (30.901865, -16.007136), (30.942173, -16.034524), (30.958296, -16.05106), (30.973075, 
-16.062016), (30.989767, -16.06429), (31.012039, -16.054885), (31.023718, -16.045169), (31.042218, -16.024912), (31.056895, -16.017574), (31.065421, -16.019641), (31.073328, -16.025532), (31.080872, -16.025946), (31.089037, -16.01189), (31.1141, -15.996904), (31.15849, -16.000211), (31.259983, -16.023465), (31.278897, -16.030287), (31.29533, -16.041655), (31.309592, -16.059019), (31.328351, -16.092815), (31.340908, -16.106664), (31.360339, -16.116896), (31.37026, -16.123718), (31.374601, -16.132916), (31.377754, -16.142218), (31.384006, -16.148832), (31.387727, -16.149556), (31.395582, -16.147695), (31.399613, -16.147282), (31.404315, -16.149866), (31.404057, -16.154517), (31.402713, -16.159374), (31.404574, -16.162268), (31.424107, -16.164749), (31.445708, -16.164955), (31.465655, -16.167746), (31.480641, -16.177978), (31.519192, -16.196478), (31.686107, -16.207227), (31.710705, -16.217872), (31.738197, -16.239783), (31.798761, -16.303655), (31.818088, -16.319571), (31.86005, -16.340759), (31.871935, -16.35037), (31.88072, -16.368044), (31.88563, -16.406284), (31.894363, -16.421477), (31.910279, -16.428919), (32.014149, -16.444938), (32.211759, -16.440184), (32.290463, -16.45176), (32.393661, -16.491757), (32.5521, -16.553355), (32.671783, -16.599761), (32.6831, -16.609889), (32.687906, -16.624255), (32.68863, -16.647303), (32.698655, -16.686784), (32.725217, -16.706421), (32.73095, -16.708656), (32.731314, -16.708798), (32.739893, -16.703217), (32.753845, -16.697946), (32.769348, -16.695466), (32.800664, -16.697326), (32.862004, -16.710452), (32.893372, -16.712415), (32.909598, -16.708075), (32.93957, -16.689781), (32.95621, -16.683063), (32.968509, -16.681615999999998), (32.961585, -16.710348), (32.933369, -16.815768), (32.916213, -16.847911), (32.900503, -16.867755), (32.828776, -16.935141), (32.83012, -16.941549), (32.886757, -17.038184), (32.928512, -17.109497), (32.954143, -17.167168), (32.967786, -17.22887), (32.96909, -17.266115), (32.969439, -17.276102), 
(32.973212, -17.297909), (32.983599, -17.317753), (32.992384, -17.324678), (33.014656, -17.336667), (33.021633, -17.345555), (33.022459, -17.361471), (33.016258, -17.377181), (33.011651, -17.383991), (32.997448, -17.404983), (32.958174, -17.478467), (32.951663, -17.486218), (32.942981, -17.491593), (32.936573, -17.498311), (32.936676, -17.509369), (32.947218, -17.543166), (32.951663, -17.551434), (32.969129, -17.56456), (33.006646, -17.580993), (33.020392, -17.598563), (33.024526, -17.619233), (33.020599, -17.638457), (33.004063, -17.675561), (33.000238, -17.713905), (33.003184, -17.757726), (32.999102, -17.794313), (32.973573, -17.810643), (32.957037, -17.817981), (32.946082, -17.834724), (32.939674, -17.855498), (32.936883, -17.875032), (32.938433, -17.894566), (32.950267, -17.922574), (32.952128, -17.940247), (32.948149, -17.95327), (32.940397, -17.959988), (32.932439, -17.964949), (32.927375, -17.972907), (32.928977, -17.982312), (32.941224, -17.996265), (32.940294, -18.004843), (32.934919, -18.024583), (32.93709, -18.047114), (32.972282, -18.150261), (32.975537, -18.183333), (32.974865, -18.190775), (32.965925, -18.212169), (32.958174, -18.225398), (32.952283, -18.233046), (32.950525999999996, -18.241314), (32.95497, -18.256301), (32.970163, -18.277488), (33.016878, -18.313661), (33.034965, -18.332885), (33.042768, -18.352005), (33.038066, -18.363064), (33.00923, -18.383941), (32.988198, -18.41319), (32.985356, -18.412467), (32.986803, -18.422285), (32.999515, -18.436651), (33.003029, -18.446883), (32.996414, -18.46714), (32.978586, -18.48006), (32.956624, -18.489878), (32.937142, -18.50104), (32.919313, -18.510032), (32.900296, -18.515303), (32.88314, -18.522124), (32.870737, -18.535767), (32.868257, -18.552613), (32.871668, -18.57318), (32.884483, -18.609044), (32.914559, -18.665888), (32.92231, -18.693173), (32.920243, -18.726246), (32.913267, -18.753014), (32.902518, -18.774512), (32.885207, -18.787844), (32.858852, -18.790015), (32.817924, -18.787018), 
(32.787642, -18.791255), (32.69142, -18.83425), (32.68987, -18.843241), (32.696794, -18.897192), (32.703202, -18.911868), (32.71576, -18.919826), (32.705063, -18.927474), (32.692247, -18.934295), (32.682532, -18.942667), (32.681085, -18.954966), (32.68863, -18.97729), (32.690283, -18.988246), (32.68863, -19.000958), (32.691058, -19.01429), (32.698965, -19.022249), (32.710282, -19.025969), (32.723873, -19.026589), (32.785988, -19.017701), (32.803351, -19.019561), (32.814203, -19.023799), (32.819991, -19.028346), (32.822988, -19.035168), (32.825262, -19.046847), (32.830223, -19.059146), (32.83813, -19.066897), (32.847483, -19.073925), (32.855906, -19.083744), (32.862262, -19.118057), (32.83322, -19.241977), (32.832187, -19.266678), (32.828673, -19.284558), (32.820715, -19.301301), (32.806142, -19.323419), (32.768831, -19.363623), (32.766454, -19.373442), (32.768521, -19.402794), (32.762217, -19.443412), (32.763354, -19.463979), (32.773947, -19.475864), (32.793119, -19.476691), (32.811309, -19.474521), (32.825365, -19.479172), (32.832187, -19.500876), (32.832497000000004, -19.519273), (32.825365, -19.59162), (32.825675, -19.600818), (32.828156, -19.610636), (32.829603, -19.623659), (32.825365, -19.633271), (32.819474, -19.641952), (32.81627, -19.652081), (32.819629, -19.674302), (32.83105, -19.685154), (32.849137, -19.689081), (32.872184, -19.690218), (32.894715, -19.684327), (32.924584, -19.655285), (32.943188, -19.64929), (32.960964, -19.658799), (32.962411, -19.679056), (32.954143, -19.717813), (32.962411, -19.735383), (32.979051, -19.751403), (33.0006, -19.764322), (33.022769, -19.773107), (33.032795, -19.784166), (33.029642, -19.80339), (33.022873, -19.826851), (33.021322, -19.868088), (33.001995, -19.927), (32.998378, -20.000897), (33.004373, -20.024255), (33.007266, -20.032006), (32.95373, -20.030249), (32.940087, -20.041515), (32.934299, -20.072107), (32.926548, -20.086473), (32.910683, -20.091124), (32.894405, -20.094018), (32.88531, -20.10301), (32.877869, 
-20.151689), (32.872908, -20.167192), (32.859265, -20.190859), (32.857095, -20.200575), (32.858335, -20.207499), (32.865053, -20.220935), (32.86557, -20.228893), (32.858438, -20.259486), (32.852961, -20.273852), (32.845209, -20.286668), (32.800767, -20.338551), (32.735862, -20.414205), (32.704443, -20.471773), (32.671783, -20.531821), (32.646462, -20.557969), (32.603674, -20.56479), (32.556545, -20.559312), (32.513136, -20.564583), (32.481614, -20.603031), (32.471072, -20.645509), (32.469108, -20.68685), (32.483474, -20.794233), (32.49722, -20.898103), (32.491019, -20.936344), (32.467661, -20.980165), (32.417122, -21.040937), (32.339814, -21.134058), (32.345343, -21.142843), (32.359864, -21.151421), (32.368856, -21.162997), (32.373352, -21.163617), (32.377744, -21.16341), (32.380638, -21.165477), (32.380535, -21.172195), (32.376866, -21.178499), (32.37299, -21.183977), (32.37175, -21.187905), (32.444613, -21.304693), (32.445849, -21.308994), (32.447197, -21.313685), (32.408543, -21.290327), (32.37299, -21.327948), (32.324517, -21.378177), (32.272221, -21.432541), (32.219718, -21.486904), (32.167318, -21.541268), (32.114814, -21.595632), (32.062415, -21.649995), (32.010015, -21.704462), (31.957615, -21.758826), (31.905215, -21.813189), (31.852712, -21.867553), (31.800312, -21.92202), (31.747808, -21.976384), (31.695512, -22.030747), (31.643112, -22.085214), (31.590712, -22.139578), (31.538209, -22.193941), (31.485809, -22.248305), (31.433822, -22.302048), (31.36871, -22.345043), (31.288922, -22.39734), (31.265616, -22.365507), (31.255642, -22.357962), (31.24572, -22.357549), (31.229597, -22.363957), (31.221536, -22.364887), (31.213474, -22.36189), (31.197868, -22.352588), (31.190685, -22.350624), (31.183657, -22.34556), (31.163348, -22.322616), (31.152599, -22.316414), (31.137717, -22.318482), (31.10454, -22.333364), (31.097048, -22.334922), (31.087642, -22.336878), (31.07033, -22.333674), (31.036121, -22.319618), (30.927187, -22.295744), (30.867087, -22.289646), 
(30.83789, -22.282308), (30.805282, -22.294504), (30.693919, -22.302772), (30.674282, -22.30856), (30.647410999999998, -22.32644), (30.632424, -22.330677), (30.625551, -22.32861), (30.610307, -22.318688), (30.601108, -22.316414), (30.57217, -22.316621), (30.507367, -22.309593), (30.488454, -22.310213), (30.46923, -22.315071), (30.431713, -22.331194), (30.412696, -22.336878), (30.372078, -22.343493), (30.334975, -22.344733), (30.300765, -22.336982), (30.269346, -22.316414), (30.25529, -22.304736), (30.240407, -22.296157), (30.2217, -22.290886), (30.196999, -22.289129), (30.15266, -22.294814), (30.13509, -22.293574), (30.111113, -22.282308), (30.082587, -22.262878), (30.067911, -22.25709), (30.038145, -22.253783), (30.035872, -22.250579), (30.034528, -22.246135), (30.015511, -22.227014), (30.005279, -22.22226), (29.983782, -22.217713), (29.973963, -22.213992), (29.946678, -22.198282), (29.932105, -22.194355), (29.896035, -22.191358), (29.871489, -22.179265), (29.837331, -22.172444), (29.779246, -22.136374), (29.758886, -22.130896), (29.691448, -22.1341), (29.679614, -22.138338), (29.661424, -22.126452), (29.641064, -22.129242), (29.60396, -22.145055), (29.570164, -22.141955), (29.551043, -22.145986), (29.542517, -22.162522), (29.53182, -22.172444), (29.506912, -22.170067), (29.456889, -22.158801), (29.436115, -22.163142), (29.399528, -22.182159), (29.378031, -22.192908), (29.363250999999998, -22.192288), (29.356947, -22.190944000000002), (29.350074, -22.186707), (29.273644, -22.125108), (29.26734, -22.115807), (29.259588, -22.096066), (29.254111, -22.087074), (29.244395, -22.075706), (29.239331, -22.072605), (29.144867, -22.075292), (29.10797, -22.069194), (29.070763, -22.051004), (29.040532, -22.020929), (29.021567, -21.982791), (29.013815, -21.940417), (29.017949, -21.898145), (29.028905, -21.876648), (29.045441, -21.852567), (29.057637, -21.829209), (29.05526, -21.809985), (29.038723, -21.797893), (28.998726, -21.786008), (28.980846, -21.774845), (28.951907, 
-21.768334), (28.891032, -21.764924), (28.860853, -21.757379), (28.714195, -21.693507), (28.66841, -21.679968), (28.629704, -21.651339), (28.6157, -21.647101), (28.585934, -21.644414), (28.553998, -21.636559), (28.542939, -21.638316), (28.532501, -21.643071), (28.497309, -21.651546), (28.481393, -21.657437), (28.464598, -21.660331), (28.443101, -21.655783), (28.361762, -21.616302), (28.321919, -21.603486), (28.284867, -21.596872), (28.165702, -21.595218), (28.090771, -21.581266), (28.032893, -21.577855), (28.016563, -21.572894), (28.002559, -21.564212), (27.990415, -21.551913), (27.984731, -21.542922), (27.975739, -21.522561), (27.970571, -21.514396), (27.963698, -21.510469), (27.958066, -21.511502), (27.953208, -21.510469), (27.949281, -21.500754), (27.954448, -21.487835), (27.950418, -21.482047), (27.943338, -21.479876), (27.939876, -21.478016), (27.941943, -21.468508), (27.949642, -21.456519), (27.953001, -21.448664), (27.950211, -21.438329), (27.920549, -21.381174), (27.904219, -21.364741), (27.897811, -21.35544), (27.896157, -21.347895), (27.896674, -21.332392), (27.8944, -21.32433), (27.884995, -21.310171), (27.849132, -21.269657), (27.823604, -21.231726), (27.793838, -21.197413), (27.724385, -21.149664), (27.709192, -21.134471), (27.674775, -21.090133), (27.666611, -21.071219), (27.666817, -21.053753), (27.678961, -21.000733), (27.680356, -20.979649), (27.672657, -20.923528), (27.672605, -20.913709), (27.675085, -20.891282), (27.674775, -20.879913), (27.676016, -20.866684), (27.681803, -20.857589), (27.689038, -20.849011), (27.694412, -20.837744999999998), (27.709605, -20.756716), (27.707332, -20.716719), (27.682475, -20.637344), (27.690382, -20.60148), (27.702629, -20.566134), (27.705575, -20.526653), (27.698133, -20.509083), (27.683767, -20.49606), (27.66599, -20.489136), (27.625786, -20.488619), (27.590853, -20.473323), (27.534112, -20.483038), (27.45391, -20.473323), (27.340739, -20.473013), (27.306012, -20.477354), (27.268392, -20.49575), (27.283998, 
-20.35147), (27.266015, -20.234164), (27.214907, -20.110451), (27.201781, -20.092984), (27.183746, -20.082339), (27.16292, -20.076551), (27.141888, -20.073347), (27.129692, -20.072934), (27.119771, -20.073864), (27.109642, -20.073244), (27.097343, -20.068903), (27.086491, -20.060532), (27.069231, -20.03738), (27.060136, -20.027562), (27.02665, -20.010095), (26.9943, -20.006788), (26.961072, -20.007201), (26.925054, -20.000897), (26.811882, -19.94643), (26.774469, -19.939815), (26.750801, -19.939609), (26.730957, -19.935888), (26.713904, -19.927413), (26.698608, -19.91253), (26.684758, -19.894547), (26.67717, -19.886815), (26.673803, -19.883385), (26.659437, -19.875737), (26.614065, -19.863438), (26.595565, -19.855583), (26.581922, -19.842147), (26.574791, -19.819513), (26.566316, -19.800806), (26.549263, -19.784063), (26.508852, -19.759258), (26.489731, -19.75192), (26.450251, -19.743342), (26.431854, -19.73652), (26.412837, -19.71957), (26.385242, -19.679056), (26.362711, -19.667584), (26.332325, -19.662416), (26.324367, -19.659109), (26.312171, -19.651358), (26.312481, -19.649601), (26.319096, -19.646293), (26.326331, -19.633891), (26.333462, -19.613014), (26.330981, -19.604952), (26.32106, -19.592033), (26.313205, -19.584178), (26.30349, -19.577254), (26.292638, -19.572499), (26.239101, -19.571466), (26.194452, -19.560200000000002), (26.155488, -19.537153), (26.13027, -19.501082), (26.034359, -19.243734), (26.011414, -19.199809), (25.981132, -19.161775), (25.956534, -19.122088), (25.948576, -19.103277), (25.944855, -19.079196), (25.948059, -19.058732), (25.964389, -19.021629), (25.9678, -19.000958), (25.967449, -18.999925), (25.940721, -18.921273), (25.815251, -18.813993), (25.779491, -18.738752), (25.773393, -18.665578), (25.761921, -18.630335), (25.736909, -18.608734), (25.698255, -18.590234), (25.669523, -18.566049), (25.622084, -18.501143), (25.608442, -18.487708), (25.574439, -18.465693), (25.508499, -18.399134), (25.49558, -18.378877), (25.490516, 
-18.365545), (25.481163, -18.323377), (25.473204, -18.303429), (25.440855, -18.2532), (25.408816, -18.175995), (25.387525, -18.138995), (25.357449, -18.115844), (25.323446, -18.09662), (25.296368, -18.068612), (25.255026, -18.001122), (25.226088, -17.931876), (25.21937, -17.908001), (25.21937, -17.879786), (25.259781, -17.794107), (25.266705, -17.800928), (25.285412, -17.809299), (25.315901, -17.83214), (25.335538, -17.841235), (25.345254, -17.842579), (25.376466, -17.841235), (25.409539, -17.853018), (25.420288, -17.854878), (25.49558, -17.854878), (25.500748, -17.856015), (25.510153, -17.861183), (25.516458, -17.862319), (25.522142, -17.860149), (25.530927, -17.850951), (25.536818, -17.848677), (25.603997, -17.836171), (25.657017, -17.81395), (25.681409, -17.81147), (25.694224, -17.819428), (25.70642, -17.829867), (25.743834, -17.839375), (25.765951, -17.849814), (25.786002, -17.862216), (25.794683, -17.872655), (25.804399, -17.888158), (25.849667, -17.906658), (25.86362, -17.923814), (25.847497, -17.929395), (25.846153, -17.943658), (25.853490999999998, -17.959988), (25.86362, -17.971563), (25.924495, -17.998952), (25.966973, -18.000502), (25.978548, -17.998952), (26.033739, -17.971563), (26.04056, -17.978488), (26.046554, -17.966292), (26.062471, -17.962882), (26.081178, -17.962365), (26.095234, -17.958541), (26.096164, -17.954614), (26.0942, -17.941901), (26.095234, -17.938077), (26.101228, -17.935803), (26.118591, -17.931566), (26.135438, -17.922574), (26.158589, -17.918337), (26.167477, -17.913582), (26.203031, -17.887227), (26.211919, -17.882783), (26.221117, -17.886297), (26.228249, -17.894669), (26.233933, -17.903971), (26.239204, -17.910172), (26.248299, -17.913376), (26.294291, -17.918543), (26.3038, -17.922781), (26.311965, -17.928362), (26.318269, -17.934356), (26.325504, -17.93601), (26.362711, -17.930636), (26.408599, -17.939007), (26.485494, -17.979315), (26.527145, -17.992027), (26.553604, -17.996471), (26.570243, -18.002879), (26.583369, 
-18.013215), (26.598872, -18.029958), (26.612721, -18.041223), (26.628844, -18.049181), (26.685689, -18.066751), (26.700003, -18.069232), (26.71194, -18.065821), (26.740569, -18.0405), (26.753591, -18.032955), (26.769714, -18.029028), (26.794002, -18.026237), (26.88826, -17.984586),(26.912031,-17.992027),(26.94867,-17.968876),(26.95916,-17.964742),(27.006289,-17.962675),(27.021275,-17.958541),(27.048457,-17.944278),(27.078171,-17.916993),(27.11543,-17.882163),(27.149019,-17.842476),(27.146539,-17.818911),(27.145299,-17.794107),(27.146952,-17.783875),(27.157081,-17.769302),(27.422078,-17.504822),(27.524294,-17.415112),(27.577314,-17.363125),(27.604495,-17.312792),(27.624856,-17.233314),(27.641186,-17.198484),(27.777301,-17.001183),(27.816886,-16.959636),(27.868562,-16.929663),(28.022993,-16.865393),(28.113922,-16.827551),(28.21252,-16.748589),(28.280113,-16.706524),(28.643295,-16.568755),(28.690734,-16.56028),(28.718794,-16.56028),(28.73285,-16.55811),(28.741377,-16.550668),(28.761117,-16.532271),(28.769282,-16.515218),(28.808866,-16.486279),(28.822509,-16.470776),(28.829124,-16.434603),(28.833051,-16.426438),(28.857236,-16.388198),(28.857029,-16.36546),(28.840492,-16.323602),(28.836772,-16.306342),(28.840286,-16.284741),(28.86416,-16.231205),(28.847107,-16.202679),(28.852481,-16.162785),(28.8654,-16.121237),(28.870981,-16.087234),(28.868501,-16.08217),(28.86385,-16.076589),(28.859303,-16.069561),(28.857236,-16.060466),(28.860336,-16.049407),(28.874082,-16.028943),(28.877183,-16.022018),(28.898887,-15.995457),(28.932373,-15.963727),(28.946862,-15.957235),(28.951287,-15.955252),(28.972784,-15.951428),(29.018053,-15.950602),(29.042341,-15.946261),(29.055053,-15.934375),(29.076344,-15.895411),(29.086162,-15.884559),(29.102182,-15.870916),(29.121716,-15.859341),(29.141869,-15.854483),(29.150964,-15.848799),(29.186311,-15.812832),(29.406969,-15.714233),(29.422059,-15.711030000000001),(29.508462,-15.703588),(29.526239,-15.692839),(29.563446,-15.662144),(29.587217,-15.65573
6),(29.608559,-15.658422999999999),(29.62799,-15.663591),(29.648505,-15.666588),(29.672793,-15.663281),(29.73005,-15.644677),(29.773252,-15.638062),(29.814283,-15.619666),(29.837331,-15.614808),(29.881773,-15.618839),(29.967504,-15.641473),(30.010654,-15.646227)]),6) ``` @@ -712,14 +679,10 @@ SELECT round(polygonPerimeterSpherical([(30.010654, -15.646227), (30.050238, -15 0.45539 ``` - - ### 输入参数 {#input-parameters-15} ### 返回值 {#returned-value-22} - - ## polygonsIntersectionCartesian {#polygonsintersectioncartesian} 计算多边形之间的交集。 @@ -742,7 +705,6 @@ Polygons MultiPolygon - ## polygonAreaCartesian {#polygonareacartesian} 计算多边形的面积 @@ -765,7 +727,6 @@ Polygon Float64 - ## polygonPerimeterCartesian {#polygonperimetercartesian} 计算多边形的周长。 @@ -788,7 +749,6 @@ Polygon Float64 - ## polygonsUnionCartesian {#polygonsunioncartesian} 计算多边形的并集。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md index f23ec4d089d..86acf6ea36e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/geo/s2.md @@ -6,20 +6,14 @@ description: '用于处理 S2 索引的函数文档' doc_type: 'reference' --- - - # 用于处理 S2 索引的函数 {#functions-for-working-with-s2-index} - - ## S2Index {#s2index} [S2](https://s2geometry.io/) 是一种地理索引系统,它将所有地理数据表示在一个球面上(类似于地球仪)。 在 S2 库中,点由 S2 索引来表示——这是一个特定的数值,用于在内部编码单位球面上的一个点,而不是采用传统的(纬度,经度)对。要获取以(纬度,经度)格式给出的点对应的 S2 点索引,请使用 [geoToS2](#geotos2) 函数。此外,你也可以使用 [s2ToGeo](#s2togeo) 函数来获取与给定 S2 点索引相对应的地理坐标。 - - ## geoToS2 {#geotos2} 返回与给定坐标 `(longitude, latitude)` 对应的 [S2](#s2index) 点索引值。 @@ -55,7 +49,6 @@ SELECT geoToS2(37.79506683, 55.71290588) AS s2Index; └─────────────────────┘ ``` - ## s2ToGeo {#s2togeo} 返回与提供的 [S2](#s2index) 点索引相对应的地理坐标 `(longitude, latitude)`(经度、纬度)。 @@ -92,7 +85,6 @@ SELECT s2ToGeo(4704772434919038107) AS s2Coodrinates; 
└──────────────────────────────────────┘ ``` - ## s2GetNeighbors {#s2getneighbors} 返回与给定的 [S2](#s2index) 对应的 S2 邻居索引。S2 系统中的每个单元都是由四条测地线围成的四边形,因此,每个单元有 4 个邻居。 @@ -127,7 +119,6 @@ SELECT s2GetNeighbors(5074766849661468672) AS s2Neighbors; └───────────────────────────────────────────────────────────────────────────────────┘ ``` - ## s2CellsIntersect {#s2cellsintersect} 判断提供的两个 [S2](#s2index) 单元是否相交。 @@ -163,7 +154,6 @@ SELECT s2CellsIntersect(9926595209846587392, 9926594385212866560) AS intersect; └───────────┘ ``` - ## s2CapContains {#s2capcontains} 用于判断一个球冠是否包含某个 S2 点。球冠表示由平面截取得到的球面的一部分。它由球面上的一个点以及以度数表示的半径定义。 @@ -201,7 +191,6 @@ SELECT s2CapContains(1157339245694594829, 1.0, 1157347770437378819) AS capContai └─────────────┘ ``` - ## s2CapUnion {#s2capunion} 确定能包含给定两个输入 cap 的最小 cap。cap 表示被平面截下的球面部分。它由球面上的一个点和以度数表示的半径来定义。 @@ -238,7 +227,6 @@ SELECT s2CapUnion(3814912406305146967, 1.0, 1157347770437378819, 1.0) AS capUnio └────────────────────────────────────────┘ ``` - ## s2RectAdd {#s2rectadd} 增加边界矩形的范围,使其包含给定的 S2 点。在 S2 系统中,矩形由一种名为 `S2LatLngRect` 的 `S2Region` 类型表示,用于表示纬度-经度空间中的矩形。 @@ -276,7 +264,6 @@ SELECT s2RectAdd(5178914411069187297, 5177056748191934217, 5179056748191934217) └───────────────────────────────────────────┘ ``` - ## s2RectContains {#s2rectcontains} 判断给定矩形是否包含某个 S2 点。在 S2 系统中,矩形由一种名为 `S2LatLngRect` 的 S2Region 类型表示,用于表示纬度-经度空间中的矩形区域。 @@ -314,7 +301,6 @@ SELECT s2RectContains(5179062030687166815, 5177056748191934217, 5177914411069187 └──────────────┘ ``` - ## s2RectUnion {#s2rectunion} 返回包含当前矩形与给定矩形并集的最小矩形。在 S2 系统中,矩形由一种名为 `S2LatLngRect` 的 `S2Region` 类型表示,用于表示纬度-经度空间中的矩形。 @@ -351,7 +337,6 @@ SELECT s2RectUnion(5178914411069187297, 5177056748191934217, 5179062030687166815 └───────────────────────────────────────────┘ ``` - ## s2RectIntersection {#s2rectintersection} 返回包含当前矩形与给定矩形交集的最小矩形。在 S2 系统中,矩形由一种名为 `S2LatLngRect` 的 S2Region 类型表示,用于表示纬度-经度空间中的矩形。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md index c5cfa9c94eb..fe1a632eab0 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/in-functions.md @@ -6,12 +6,8 @@ title: '用于实现 IN 运算符的函数' doc_type: 'reference' --- - - # 用于实现 IN 运算符的函数 {#functions-for-implementing-the-in-operator} - - ## in, notIn, globalIn, globalNotIn {#in-notin-globalin-globalnotin} 请参阅 [IN 运算符](/sql-reference/operators/in) 部分。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md index 378e66e10df..853ca0da0a8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/machine-learning-functions.md @@ -6,30 +6,20 @@ title: '机器学习函数' doc_type: 'reference' --- - - # 机器学习函数 {#machine-learning-functions} - - ## evalMLMethod {#evalmlmethod} 使用已训练好的回归模型进行预测时,请使用 `evalMLMethod` 函数。相关内容请参阅 `linearRegression` 中的链接。 - - ## stochasticLinearRegression {#stochasticlinearregression} [stochasticLinearRegression](/sql-reference/aggregate-functions/reference/stochasticlinearregression) 聚合函数使用线性模型和 MSE 损失函数实现随机梯度下降算法。使用 `evalMLMethod` 基于新数据进行预测。 - - ## stochasticLogisticRegression {#stochasticlogisticregression} [stochasticLogisticRegression](/sql-reference/aggregate-functions/reference/stochasticlogisticregression) 聚合函数实现了用于二分类问题的随机梯度下降算法。使用 `evalMLMethod` 对新数据进行预测。 - - ## naiveBayesClassifier {#naivebayesclassifier} 使用基于 n-gram 和拉普拉斯平滑的朴素贝叶斯模型对输入文本进行分类。在使用之前,必须先在 ClickHouse 中完成模型配置。 @@ -130,7 +120,6 @@ SELECT naiveBayesClassifier('language', 'How are you?'); **模型训练指南** - **文件格式** 在人类可读格式下,对于 `n=1` 且为 `token` 模式,模型可能如下所示: diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md index 117d9a13fff..6600bd63836 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/numeric-indexed-vector-functions.md @@ -6,14 +6,10 @@ title: 'NumericIndexedVector 函数' doc_type: 'reference' --- - - # NumericIndexedVector {#numericindexedvector} NumericIndexedVector 是一种抽象数据结构,用于封装向量并实现向量聚合和逐元素运算。其存储方式为位切片索引(Bit-Sliced Index)。关于其理论基础和使用场景,请参考论文 [Large-Scale Metric Computation in Online Controlled Experiment Platform](https://arxiv.org/pdf/2405.08411)。 - - ## BSI {#bit-sliced-index} 在 BSI(Bit-Sliced Index,比特切片索引)存储方式中,数据首先以 [Bit-Sliced Index](https://dl.acm.org/doi/abs/10.1145/253260.253268) 形式存储,然后再使用 [Roaring Bitmap](https://github.com/RoaringBitmap/RoaringBitmap) 进行压缩。聚合运算和逐点运算直接在压缩数据上进行,这可以显著提升存储和查询效率。 @@ -27,8 +23,6 @@ NumericIndexedVector 是一种抽象数据结构,用于封装向量并实现 - Bit-Sliced Index 机制会将值转换为二进制。对于浮点类型,转换采用定点数表示,这可能会带来精度损失。可以通过自定义小数部分所使用的比特数来调整精度,默认是 24 位,这在大多数场景下已经足够。在使用带有 `-State` 的聚合函数 groupNumericIndexedVector 构造 NumericIndexedVector 时,可以自定义整数位和小数位的位数。 - 索引有三种情况:非零值、零值和不存在。在 NumericIndexedVector 中,仅存储非零值和零值。此外,在两个 NumericIndexedVector 之间进行逐点运算时,不存在的索引值会被视为 0。在除法场景中,当除数为零时,结果为零。 - - ## 创建 numericIndexedVector 对象 {#create-numeric-indexed-vector-object} 有两种方式可以创建这种结构:一种是使用带有 `-State` 的聚合函数 `groupNumericIndexedVector`。 @@ -37,8 +31,6 @@ NumericIndexedVector 是一种抽象数据结构,用于封装向量并实现 另一种方式是使用 `numericIndexedVectorBuild` 从一个 map 构建该结构。 `groupNumericIndexedVectorState` 函数允许通过参数自定义整数位和小数位的位数,而 `numericIndexedVectorBuild` 则不支持这一点。 - - ## groupNumericIndexedVector {#group-numeric-indexed-vector} 从两个数据列构造一个 NumericIndexedVector,并以 `Float64` 类型返回所有值的和。如果添加后缀 `State`,则返回一个 NumericIndexedVector 对象。 @@ -107,7 +99,6 @@ SELECT 
groupNumericIndexedVectorStateIf('BSI', 32, 0)(UserID, PlayTime, day = '2 更多详细信息请参见 https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } - {/*AUTOGENERATED_START*/ } ## numericIndexedVectorAllValueSum {#numericIndexedVectorAllValueSum} @@ -144,7 +135,6 @@ SELECT numericIndexedVectorAllValueSum(numericIndexedVectorBuild(mapFromArrays([ └─────┘ ``` - ## numericIndexedVectorBuild {#numericIndexedVectorBuild} 引入版本:v25.7 @@ -179,7 +169,6 @@ SELECT numericIndexedVectorBuild(mapFromArrays([1, 2, 3], [10, 20, 30])) AS res, └─────┴────────────────────────────────────────────────────────────┘ ``` - ## numericIndexedVectorCardinality {#numericIndexedVectorCardinality} 首次引入于:v25.7 @@ -214,7 +203,6 @@ SELECT numericIndexedVectorCardinality(numericIndexedVectorBuild(mapFromArrays([ └─────┘ ``` - ## numericIndexedVectorGetValue {#numericIndexedVectorGetValue} 自 v25.7 版本引入 @@ -250,7 +238,6 @@ SELECT numericIndexedVectorGetValue(numericIndexedVectorBuild(mapFromArrays([1, └─────┘ ``` - ## numericIndexedVectorPointwiseAdd {#numericIndexedVectorPointwiseAdd} 引入版本:v25.7 @@ -291,7 +278,6 @@ SELECT └───────────────────────┴──────────────────┘ ``` - ## numericIndexedVectorPointwiseDivide {#numericIndexedVectorPointwiseDivide} 引入于:v25.7 @@ -332,7 +318,6 @@ SELECT └─────────────┴─────────────────┘ ``` - ## numericIndexedVectorPointwiseEqual {#numericIndexedVectorPointwiseEqual} 引入于:v25.7 @@ -374,7 +359,6 @@ SELECT └───────┴───────┘ ``` - ## numericIndexedVectorPointwiseGreater {#numericIndexedVectorPointwiseGreater} 引入版本:v25.7 @@ -416,7 +400,6 @@ SELECT └───────────┴───────┘ ``` - ## numericIndexedVectorPointwiseGreaterEqual {#numericIndexedVectorPointwiseGreaterEqual} 引入版本:v25.7 @@ -458,7 +441,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseLess {#numericIndexedVectorPointwiseLess} 引入于:v25.7 @@ -500,7 +482,6 @@ SELECT └───────────┴───────┘ ``` - ## numericIndexedVectorPointwiseLessEqual 
{#numericIndexedVectorPointwiseLessEqual} 引入版本:v25.7 @@ -542,7 +523,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseMultiply {#numericIndexedVectorPointwiseMultiply} 在 v25.7 中引入 @@ -583,7 +563,6 @@ SELECT └───────────────┴──────────────────┘ ``` - ## numericIndexedVectorPointwiseNotEqual {#numericIndexedVectorPointwiseNotEqual} 引入版本:v25.7 @@ -625,7 +604,6 @@ SELECT └───────────────┴───────────┘ ``` - ## numericIndexedVectorPointwiseSubtract {#numericIndexedVectorPointwiseSubtract} 自 v25.7 起引入 @@ -666,7 +644,6 @@ SELECT └────────────────────────┴─────────────────┘ ``` - ## numericIndexedVectorShortDebugString {#numericIndexedVectorShortDebugString} 自 v25.7 引入 @@ -702,7 +679,6 @@ SELECT numericIndexedVectorShortDebugString(numericIndexedVectorBuild(mapFromArr res: {"vector_type":"BSI","index_type":"char8_t","value_type":"char8_t","integer_bit_num":8,"fraction_bit_num":0,"zero_indexes_info":{"cardinality":"0"},"non_zero_indexes_info":{"total_cardinality":"3","all_value_sum":60,"number_of_bitmaps":"8","bitmap_info":{"cardinality":{"0":"0","1":"2","2":"2","3":"2","4":"2","5":"0","6":"0","7":"0"}}}} ``` - ## numericIndexedVectorToMap {#numericIndexedVectorToMap} 自 v25.7 引入 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md index f62d9042814..f781df82532 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/overview.md @@ -7,8 +7,6 @@ title: '常规函数' doc_type: 'reference' --- - - # 常规函数 {#regular-functions} 函数至少有\*两种类型 —— 常规函数(通常就直接称为“函数”)和聚合函数。这是完全不同的概念。常规函数的工作方式就像是对每一行单独应用(对于每一行,函数的结果不依赖于其他行)。聚合函数则会从多行中累积一组值(即它们依赖于整组行)。 @@ -19,26 +17,18 @@ doc_type: 'reference' 还有第三类函数,其中 ['arrayJoin' 函数](../functions/array-join.md)就属于该类。另外,[表函数](../table-functions/index.md)也可以单独归为一类。 ::: - - ## 强类型 
{#strong-typing} 与标准 SQL 不同,ClickHouse 使用强类型。换句话说,它不会在类型之间进行隐式转换。每个函数仅适用于特定的一组类型。因此,有时需要使用类型转换函数。 - - ## 公共子表达式消除 {#common-subexpression-elimination} 查询中所有具有相同 AST(相同的抽象语法树结构或相同的语法解析结果)的表达式都被视为具有相同的值。此类表达式会被合并后只执行一次。相同的子查询也会以这种方式被消除。 - - ## 结果类型 {#types-of-results} 所有函数都返回单个值作为结果(既不会返回多个值,也不会不返回值)。结果类型通常仅由参数类型决定,而不是由参数值决定。例外是 `tupleElement` 函数(`a.N` 运算符)和 `toFixedString` 函数。 - - ## 常量 {#constants} 为简化处理,某些函数在部分参数上只能使用常量。例如,LIKE 运算符的右侧参数必须是常量。 @@ -48,8 +38,6 @@ doc_type: 'reference' 对于常量参数和非常量参数,函数可以采用不同的实现方式(执行不同的代码)。但是,对于一个常量和一个仅包含相同值的实际列,它们的计算结果应当一致。 - - ## NULL 处理 {#null-processing} 函数具有以下行为: @@ -57,14 +45,10 @@ doc_type: 'reference' - 如果函数的至少一个参数为 `NULL`,则函数结果也为 `NULL`。 - 某些函数具有在各自描述中单独说明的特殊行为。在 ClickHouse 源代码中,这些函数将 `UseDefaultImplementationForNulls` 设为 `false`。 - - ## 不变性 {#constancy} 函数不能更改其参数的值——任何修改都会通过返回结果体现出来。因此,单独计算各个函数的结果与这些函数在查询中的书写顺序无关。 - - ## 高阶函数 {#higher-order-functions} ### `->` 运算符和 lambda(params, expr) 函数 {#arrow-operator-and-lambda} @@ -82,7 +66,6 @@ str -> str != Referer 对于某些函数,可以省略第一个参数(lambda 函数)。在这种情况下,默认认为执行的是恒等映射。 - ## 用户自定义函数(UDF) {#user-defined-functions-udfs} ClickHouse 支持用户自定义函数(UDF)。请参阅[用户自定义函数(UDF)](../functions/udf.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md index 202d24fe20e..752f2f86ea8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/time-window-functions.md @@ -10,7 +10,6 @@ keywords: ['时间窗口'] import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # 时间窗口函数 {#time-window-functions} @@ -26,7 +25,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; 
参见:https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } - {/*AUTOGENERATED_START*/ } ## hop {#hop} @@ -66,7 +64,6 @@ SELECT hop(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) ('2024-07-03 00:00:00','2024-07-05 00:00:00') ``` - ## hopEnd {#hopEnd} 引入版本:v22.1 @@ -104,7 +101,6 @@ SELECT hopEnd(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) 2024-07-05 00:00:00 ``` - ## hopStart {#hopStart} 引入版本:v22.1 @@ -142,7 +138,6 @@ SELECT hopStart(now(), INTERVAL '1' DAY, INTERVAL '2' DAY) 2024-07-03 00:00:00 ``` - ## tumble {#tumble} 引入自:v21.12 @@ -177,7 +172,6 @@ SELECT tumble(now(), toIntervalDay('1')) ('2024-07-04 00:00:00','2024-07-05 00:00:00') ``` - ## tumbleEnd {#tumbleEnd} 引入于:v22.1 @@ -212,7 +206,6 @@ SELECT tumbleEnd(now(), toIntervalDay('1')) 2024-07-05 00:00:00 ``` - ## tumbleStart {#tumbleStart} 引入于:v22.1 @@ -249,7 +242,6 @@ SELECT tumbleStart(now(), toIntervalDay('1')) {/* 自动生成结束 */ } - ## 相关内容 {#related-content} - [时间序列用例指南](/use-cases/time-series) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md index 02408b34a53..f8ab53d9ace 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-functions.md @@ -16,7 +16,6 @@ doc_type: 'reference' 参见:https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } - {/*AUTOGENERATED_START*/ } ## flattenTuple {#flattenTuple} @@ -57,7 +56,6 @@ SELECT flattenTuple(t) FROM tab; └────────────────┘ ``` - ## tuple {#tuple} 引入版本:v @@ -92,7 +90,6 @@ SELECT tuple(1, 2) (1,2) ``` - ## tupleConcat {#tupleConcat} 自 v23.8 起引入 @@ -125,7 +122,6 @@ SELECT tupleConcat((1, 2), ('a',), (true, false)) (1, 2, 'a', true, false) ``` - ## tupleDivide {#tupleDivide} 引入版本:v21.11 @@ 
-163,7 +159,6 @@ SELECT tupleDivide((1, 2), (2, 3)) (0.5, 0.6666666666666666) ``` - ## tupleDivideByNumber {#tupleDivideByNumber} 自 v21.11 引入 @@ -201,7 +196,6 @@ SELECT tupleDivideByNumber((1, 2), 0.5) (2, 4) ``` - ## tupleElement {#tupleElement} 引入版本:v1.1 @@ -277,7 +271,6 @@ SELECT (1, 'hello').2 你好 ``` - ## tupleHammingDistance {#tupleHammingDistance} 引入于:v21.1 @@ -341,7 +334,6 @@ SELECT tupleHammingDistance(wordShingleMinHash(string), wordShingleMinHashCaseIn 2 ``` - ## tupleIntDiv {#tupleIntDiv} 引入版本:v23.8 @@ -387,7 +379,6 @@ SELECT tupleIntDiv((15, 10, 5), (5.5, 5.5, 5.5)) (2, 1, 0) ``` - ## tupleIntDivByNumber {#tupleIntDivByNumber} 引入版本:v23.8 @@ -433,7 +424,6 @@ SELECT tupleIntDivByNumber((15.2, 10.7, 5.5), 5.8) (2, 1, 0) ``` - ## tupleIntDivOrZero {#tupleIntDivOrZero} 引入版本:v23.8 @@ -469,7 +459,6 @@ SELECT tupleIntDivOrZero((5, 10, 15), (0, 0, 0)) (0, 0, 0) ``` - ## tupleIntDivOrZeroByNumber {#tupleIntDivOrZeroByNumber} 首次引入于:v23.8 @@ -515,7 +504,6 @@ SELECT tupleIntDivOrZeroByNumber((15, 10, 5), 0) (0, 0, 0) ``` - ## tupleMinus {#tupleMinus} 自 v21.11 版本引入 @@ -551,7 +539,6 @@ SELECT tupleMinus((1, 2), (2, 3)) (-1, -1) ``` - ## tupleModulo {#tupleModulo} 引入自:v23.8 @@ -585,7 +572,6 @@ SELECT tupleModulo((15, 10, 5), (5, 3, 2)) (0, 1, 1) ``` - ## tupleModuloByNumber {#tupleModuloByNumber} 自 v23.8 起引入 @@ -619,7 +605,6 @@ SELECT tupleModuloByNumber((15, 10, 5), 2) (1, 0, 1) ``` - ## tupleMultiply {#tupleMultiply} 自 v21.11 引入 @@ -653,7 +638,6 @@ SELECT tupleMultiply((1, 2), (2, 3)) (2, 6) ``` - ## tupleMultiplyByNumber {#tupleMultiplyByNumber} 自 v21.11 起提供 @@ -687,7 +671,6 @@ SELECT tupleMultiplyByNumber((1, 2), -2.1) (-2.1, -4.2) ``` - ## tupleNames {#tupleNames} 引入版本:v @@ -717,7 +700,6 @@ SELECT tupleNames(tuple(1 as a, 2 as b)) ['a','b'] ``` - ## tupleNegate {#tupleNegate} 引入版本:v21.11 @@ -750,7 +732,6 @@ SELECT tupleNegate((1, 2)) (-1, -2) ``` - ## tuplePlus {#tuplePlus} 自 v21.11 引入 @@ -786,7 +767,6 @@ SELECT tuplePlus((1, 2), (2, 3)) (3, 5) ``` - ## 
tupleToNameValuePairs {#tupleToNameValuePairs} 引入于:v21.9 @@ -833,7 +813,6 @@ SELECT tupleToNameValuePairs(tuple(3, 2, 1)) {/*AUTOGENERATED_END*/ } - ## untuple {#untuple} 在调用处对 [tuple](/sql-reference/data-types/tuple) 元素执行语法替换。 @@ -910,7 +889,6 @@ SELECT untuple((* EXCEPT (v2, v3),)) FROM kv; └─────┴────┴────┴────┴───────────┘ ``` - ## 距离函数 {#distance-functions} 所有受支持的函数都在[距离函数文档](../../sql-reference/functions/distance-functions.md)中进行了说明。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md index 70e0d104913..32392883d21 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/tuple-map-functions.md @@ -3,12 +3,12 @@ description: 'Tuple Map 函数文档' sidebar_label: 'Map' slug: /sql-reference/functions/tuple-map-functions title: 'Map 函数' -doc_type: 'reference' +doc_type: '参考' --- ## map {#map} -根据键值对创建一个 [Map(key, value)](../data-types/map.md) 类型的值。 +从键值对构造一个类型为 [Map(key, value)](../data-types/map.md) 的值。 **语法** @@ -18,12 +18,12 @@ map(key1, value1[, key2, value2, ...]) **参数** -* `key_n` — 映射项的键。可以是任意受支持的 [Map](../data-types/map.md) 键类型。 -* `value_n` — 映射项的值。可以是任意受支持的 [Map](../data-types/map.md) 值类型。 +* `key_n` — map 条目的键。任意 Map 支持的键类型。([Map](../data-types/map.md)) +* `value_n` — map 条目的值。任意 Map 支持的值类型。([Map](../data-types/map.md)) **返回值** -* 一个包含 `key:value` 键值对的 Map。[Map(key, value)](../data-types/map.md)。 +* 一个包含 `key:value` 键值对的 map。[Map(key, value)](../data-types/map.md)。 **示例** @@ -45,15 +45,15 @@ SELECT map('key1', number, 'key2', number * 2) FROM numbers(3); ## mapFromArrays {#mapfromarrays} -从键数组或 Map 和值数组或 Map 创建一个 Map。 +从键的数组或 map 与值的数组或 map 构造一个 map。 -该函数是语法 `CAST([...], 'Map(key_type, value_type)')` 的一种便捷替代方式。 -例如,可以不写: +该函数是语法 `CAST([...], 'Map(key_type, value_type)')` 的便捷替代方案。 
+例如,与其编写 * `CAST((['aa', 'bb'], [4, 5]), 'Map(String, UInt32)')`,或 * `CAST([('aa',4), ('bb',5)], 'Map(String, UInt32)')` -而写成 `mapFromArrays(['aa', 'bb'], [4, 5])`。 +可以写作 `mapFromArrays(['aa', 'bb'], [4, 5])`。 **语法** @@ -61,16 +61,16 @@ SELECT map('key1', number, 'key2', number * 2) FROM numbers(3); mapFromArrays(keys, values) ``` -别名: `MAP_FROM_ARRAYS(keys, values)` +别名:`MAP_FROM_ARRAYS(keys, values)` **参数** -* `keys` — 用于构建 map 的键的数组或 map,类型为 [Array](../data-types/array.md) 或 [Map](../data-types/map.md)。如果 `keys` 是数组,可接受其类型为 `Array(Nullable(T))` 或 `Array(LowCardinality(Nullable(T)))`,前提是其中不包含 NULL 值。 -* `values` — 用于构建 map 的值的数组或 map,类型为 [Array](../data-types/array.md) 或 [Map](../data-types/map.md)。 +* `keys` — 用于创建 map 的键的数组或 map,类型为 [Array](../data-types/array.md) 或 [Map](../data-types/map.md)。如果 `keys` 是数组,我们接受其类型为 `Array(Nullable(T))` 或 `Array(LowCardinality(Nullable(T)))`,只要其中不包含 NULL 值即可。 +* `values` - 用于创建 map 的值的数组或 map,类型为 [Array](../data-types/array.md) 或 [Map](../data-types/map.md)。 **返回值** -* 一个 map,其键和值由键数组和值数组/map 构造而成。 +* 一个 map,其中的键和值由键数组和值数组或 map 构造而成。 **示例** @@ -88,7 +88,7 @@ SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]) └───────────────────────────────────────────┘ ``` -`mapFromArrays` 也接受类型为 [Map](../data-types/map.md) 的参数。这些参数在执行时会被转换为元组数组。 +`mapFromArrays` 也接受类型为 [Map](../data-types/map.md) 的参数。在执行期间,这些参数会被转换为元组数组。 ```sql SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3)) @@ -116,11 +116,11 @@ SELECT mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3]) ## extractKeyValuePairs {#extractkeyvaluepairs} -将由键值对组成的字符串转换为 [Map(String, String)](../data-types/map.md)。 -解析过程能够容忍噪声(例如日志文件中的多余内容)。 +将一个由键值对组成的字符串转换为 [Map(String, String)](../data-types/map.md)。 +解析过程对噪声具有一定的容错能力(例如日志文件)。 输入字符串中的键值对由一个键、紧随其后的键值分隔符以及一个值组成。 -键值对之间由键值对分隔符进行分隔。 -键和值可以使用引号包裹。 +各个键值对之间由键值对分隔符分隔。 +键和值都可以带引号。 **语法** @@ -136,21 +136,21 @@ extractKeyValuePairs(data[, key_value_delimiter[, pair_delimiter[, quoting_chara **参数** * `data` - 
要从中提取键值对的字符串。[String](../data-types/string.md) 或 [FixedString](../data-types/fixedstring.md)。 -* `key_value_delimiter` - 分隔键和值的单个字符。默认为 `:`。[String](../data-types/string.md) 或 [FixedString](../data-types/fixedstring.md)。 -* `pair_delimiters` - 分隔各个键值对的字符集合。默认是 ` `、`,` 和 `;`。[String](../data-types/string.md) 或 [FixedString](../data-types/fixedstring.md)。 -* `quoting_character` - 用作引用符号的单个字符。默认为 `"`。[String](../data-types/string.md) 或 [FixedString](../data-types/fixedstring.md)。 -* `unexpected_quoting_character_strategy` - 在 `read_key` 和 `read_value` 阶段处理出现在非预期位置的引用符号的策略。可选值:`invalid`、`accept` 和 `promote`。`invalid` 会丢弃键/值并切换回 `WAITING_KEY` 状态。`accept` 会将其视为普通字符。`promote` 会切换到 `READ_QUOTED_{KEY/VALUE}` 状态并从下一个字符开始。 +* `key_value_delimiter` - 用于分隔键和值的单个字符。默认值为 `:`。[String](../data-types/string.md) 或 [FixedString](../data-types/fixedstring.md)。 +* `pair_delimiters` - 用于分隔各个键值对的字符集合。默认值为 ` `、`,` 和 `;`。[String](../data-types/string.md) 或 [FixedString](../data-types/fixedstring.md)。 +* `quoting_character` - 用作引用字符的单个字符。默认值为 `"`。[String](../data-types/string.md) 或 [FixedString](../data-types/fixedstring.md)。 +* `unexpected_quoting_character_strategy` - 在 `read_key` 和 `read_value` 阶段处理在意外位置出现的引用字符的策略。可选值:"invalid"、"accept" 和 "promote"。invalid 将丢弃键/值并回到 `WAITING_KEY` 状态;accept 将把它当作普通字符处理;promote 将切换到 `READ_QUOTED_{KEY/VALUE}` 状态,并从下一个字符开始。 **返回值** -* 一个键值对的 `Map`。类型:[Map(String, String)](../data-types/map.md) +* 键值对的 Map。类型:[Map(String, String)](../data-types/map.md) **示例** 查询 ```sql -SELECT extractKeyValuePairs('姓名:neymar, 年龄:31 球队:psg,国籍:brazil') AS kv +SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') AS kv ``` 结果: @@ -161,10 +161,10 @@ SELECT extractKeyValuePairs('姓名:neymar, 年龄:31 球队:psg,国籍:brazil') └─────────────────────────────────────────────────────────────────────────┘ ``` -使用单引号 `'` 作为引号字符: +使用单引号 `'` 作为引号: ```sql -SELECT extractKeyValuePairs('姓名:\'neymar\';\'年龄\':31;球队:psg;国籍:brazil,最后键:最后值', ':', ';,', '\'') AS kv +SELECT 
extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') AS kv ``` 结果: @@ -175,7 +175,7 @@ SELECT extractKeyValuePairs('姓名:\'neymar\';\'年龄\':31;球队:psg;国籍:b └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -unexpected_quoting_character_strategy 示例: +unexpected_quoting_character_strategy 示例: unexpected_quoting_character_strategy=invalid @@ -243,7 +243,7 @@ SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'PROMOTE') AS kv; └──────────────┘ ``` -在不支持转义序列的场景下使用转义序列: +在不支持转义序列的环境中的转义序列: ```sql SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv @@ -257,7 +257,7 @@ SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv └────────────────────────┘ ``` -要还原经 `toString` 序列化的字符串 map 键值对: +要还原使用 `toString` 序列化的、以字符串为键的 map 键值对: ```sql SELECT @@ -270,7 +270,7 @@ FORMAT Vertical; 结果: ```response -行 1: +第 1 行: ────── m: {'John':'33','Paula':'31'} map_serialized: {'John':'33','Paula':'31'} @@ -281,20 +281,20 @@ map_restored: {'John':'33','Paula':'31'} 与 `extractKeyValuePairs` 相同,但支持转义。 -支持的转义序列包括:`\x`、`\N`、`\a`、`\b`、`\e`、`\f`、`\n`、`\r`、`\t`、`\v` 和 `\0`。 -非标准转义序列会原样返回(包括反斜杠),除非它们是以下之一: -`\\`、`'`、`"`、反引号 `backtick`、`/`、`=` 或 ASCII 控制字符(c <= 31)。 +支持的转义序列:`\x`、`\N`、`\a`、`\b`、`\e`、`\f`、`\n`、`\r`、`\t`、`\v` 和 `\0`。 +非标准转义序列会原样返回(包括反斜杠),除非它们是下列之一: +`\\`、`'`、`"`、`backtick`、`/`、`=` 或 ASCII 控制字符(c <= 31)。 -此函数适用于预转义和后转义都不合适的用例。例如,考虑如下输入字符串:`a: "aaaa\"bbb"`。期望的输出是:`a: aaaa\"bbbb`。 +当预转义和后转义都不适用时,可以使用此函数。例如,考虑如下输入字符串:`a: "aaaa\"bbb"`。期望输出为:`a: aaaa\"bbbb`。 -* 预转义:对其进行预转义处理后将输出:`a: "aaaa"bbb"`,然后 `extractKeyValuePairs` 会输出:`a: aaaa` -* 后转义:`extractKeyValuePairs` 会输出 `a: aaaa\`,而后转义会保持其原样。 +* 预转义:预转义之后的输出为:`a: "aaaa"bbb"`,然后 `extractKeyValuePairs` 会输出:`a: aaaa` +* 后转义:`extractKeyValuePairs` 会输出 `a: aaaa\`,而后转义会保持其不变。 -键中的前导转义序列会被跳过,而在值中则视为无效。 +在 key 中,前导转义序列会被忽略;在 value 中,它们将被视为无效。 **示例** -在启用转义序列支持时的转义序列行为: +启用转义序列支持时的转义序列示例: ```sql SELECT 
extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv @@ -320,11 +320,11 @@ mapAdd(arg1, arg2 [, ...]) **参数** -参数是由两个[数组](/sql-reference/data-types/array)组成的[map](../data-types/map.md)或[tuple](/sql-reference/data-types/tuple),第一个数组中的元素表示键,第二个数组包含每个键对应的值。所有键数组的类型必须相同,所有值数组中的元素必须能够统一提升为同一种类型([Int64](/sql-reference/data-types/int-uint#integer-ranges)、[UInt64](/sql-reference/data-types/int-uint#integer-ranges) 或 [Float64](/sql-reference/data-types/float))。该统一提升后的公共类型将作为结果数组的类型。 +参数是由两个[数组](/sql-reference/data-types/array)组成的[map](../data-types/map.md)或[tuple](/sql-reference/data-types/tuple),其中第一个数组中的元素表示键,第二个数组中包含每个键对应的值。所有键数组的类型必须相同,且所有值数组应包含可以统一提升为同一类型([Int64](/sql-reference/data-types/int-uint#integer-ranges)、[UInt64](/sql-reference/data-types/int-uint#integer-ranges) 或 [Float64](/sql-reference/data-types/float))的元素。该统一提升后的类型将作为结果数组的元素类型。 **返回值** -* 根据传入的参数返回一个[map](../data-types/map.md)或[tuple](/sql-reference/data-types/tuple),其中第一个数组包含排好序的键,第二个数组包含对应的值。 +* 根据参数返回一个[map](../data-types/map.md)或[tuple](/sql-reference/data-types/tuple),其中第一个数组包含排序后的键,第二个数组包含对应的值。 **示例** @@ -342,7 +342,7 @@ SELECT mapAdd(map(1,1), map(1,1)); └──────────────────────────────┘ ``` -使用元组查询: +使用元组的查询: ```sql SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) AS res, toTypeName(res) AS type; @@ -358,7 +358,7 @@ SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) AS res, toTy ## mapSubtract {#mapsubtract} -收集所有键,并对相应的值执行减法运算。 +收集所有键并将对应的值相减。 **语法** @@ -368,11 +368,11 @@ mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...]) **参数** -参数是两个[数组](/sql-reference/data-types/array)组成的[map](../data-types/map.md)或[tuple](/sql-reference/data-types/tuple),第一个数组中的元素表示键,第二个数组包含每个键对应的值。所有键数组应具有相同的类型,所有值数组中的元素应可以提升为同一种类型([Int64](/sql-reference/data-types/int-uint#integer-ranges)、[UInt64](/sql-reference/data-types/int-uint#integer-ranges) 或 [Float64](/sql-reference/data-types/float))。这一公共提升类型会被用作结果数组元素的类型。 
+参数为由两个[数组](/sql-reference/data-types/array)组成的[map](../data-types/map.md)或[tuple](/sql-reference/data-types/tuple),第一个数组中的元素表示键,第二个数组中包含每个键对应的值。所有键数组必须具有相同的类型,所有值数组中的元素必须都能提升为同一种类型([Int64](/sql-reference/data-types/int-uint#integer-ranges)、[UInt64](/sql-reference/data-types/int-uint#integer-ranges) 或 [Float64](/sql-reference/data-types/float))。该统一提升后的类型将作为结果数组的类型。 **返回值** -* 根据参数,返回一个[map](../data-types/map.md)或[tuple](/sql-reference/data-types/tuple),其中第一个数组包含排序后的键,第二个数组包含对应的值。 +* 根据参数返回一个[map](../data-types/map.md)或[tuple](/sql-reference/data-types/tuple),其中第一个数组包含排序后的键,第二个数组包含对应的值。 **示例** @@ -390,7 +390,7 @@ SELECT mapSubtract(map(1,1), map(1,1)); └───────────────────────────────────┘ ``` -使用元组映射进行查询: +包含元组映射的查询: ```sql SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) AS res, toTypeName(res) AS type; @@ -406,38 +406,38 @@ SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt3 ## mapPopulateSeries {#mappopulateseries} -为具有整数键的 map 填充缺失的键值对。 -为了支持将键扩展到超过当前最大值的情况,可以指定一个最大键。 -更具体地说,该函数返回一个 map,其中键从最小键到最大键(或指定的 `max` 参数)形成一个步长为 1 的序列,并具有相应的值。 -如果某个键未指定值,则使用默认值作为其对应的值。 -如果键重复出现,则仅将第一个值(按出现顺序)与该键关联。 +使用整数键为 `map` 中缺失的键值对填充数据。 +为了支持将键扩展到当前最大值之外的范围,可以指定一个最大键。 +更具体地说,该函数返回一个 `map`,其中键从最小键到最大键(或指定的 `max` 参数)构成步长为 1 的序列,并带有相应的值。 +如果某个键没有指定值,则使用默认值作为对应的值。 +如果键有重复,只会将第一个值(按照出现顺序)与该键关联。 -**语法** +**Syntax** ```sql mapPopulateSeries(map[, max]) mapPopulateSeries(keys, values[, max]) ``` -对于数组参数,每一行中 `keys` 和 `values` 的元素数量必须相同。 +对于数组参数,`keys` 和 `values` 中的元素数量在每一行中必须相同。 **参数** -参数可以是 [Map](../data-types/map.md),也可以是两个 [Array](/sql-reference/data-types/array),其中第一个数组包含键,第二个数组包含每个键对应的值。 +参数可以是 [Map](../data-types/map.md) 或两个 [Array](/sql-reference/data-types/array),其中第一个数组为键,第二个数组为对应的值。 映射数组: * `map` — 具有整数键的 Map。[Map](../data-types/map.md)。 -或 +或者 * `keys` — 键的数组。[Array](/sql-reference/data-types/array)([Int](/sql-reference/data-types/int-uint#integer-ranges))。 * `values` — 
值的数组。[Array](/sql-reference/data-types/array)([Int](/sql-reference/data-types/int-uint#integer-ranges))。 -* `max` — 最大键值,可选。[Int8, Int16, Int32, Int64, Int128, Int256](/sql-reference/data-types/int-uint#integer-ranges)。 +* `max` — 最大键值。可选。[Int8, Int16, Int32, Int64, Int128, Int256](/sql-reference/data-types/int-uint#integer-ranges)。 **返回值** -* 根据参数,返回一个 [Map](../data-types/map.md),或者一个由两个 [Array](/sql-reference/data-types/array) 组成的 [Tuple](/sql-reference/data-types/tuple):按排序顺序排列的键,以及与这些键相对应的值。 +* 根据传入的参数,返回一个 [Map](../data-types/map.md) 或一个由两个 [Array](/sql-reference/data-types/array) 组成的 [Tuple](/sql-reference/data-types/tuple):按排序顺序排列的键,以及与这些键对应的值。 **示例** @@ -455,7 +455,7 @@ SELECT mapPopulateSeries(map(1, 10, 5, 20), 6); └─────────────────────────────────────────┘ ``` -对映射数组进行查询: +对映射数组的查询: ```sql SELECT mapPopulateSeries([1,2,4], [11,22,44], 5) AS res, toTypeName(res) AS type; @@ -471,10 +471,10 @@ SELECT mapPopulateSeries([1,2,4], [11,22,44], 5) AS res, toTypeName(res) AS type ## mapKeys {#mapkeys} -返回给定 `Map` 的所有键。 +返回给定 map 的所有键。 -通过启用设置 [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns) 可以优化该函数。 -启用该设置后,函数只读取 [keys](/sql-reference/data-types/map#reading-subcolumns-of-map) 子列,而不是整个 `Map` 列。 +通过启用 [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns) 这一设置,可以对该函数进行优化。 +启用该设置后,函数只会读取 [keys](/sql-reference/data-types/map#reading-subcolumns-of-map) 子列,而不是整个 map。 查询 `SELECT mapKeys(m) FROM table` 会被重写为 `SELECT m.keys FROM table`。 **语法** @@ -485,11 +485,11 @@ mapKeys(map) **参数** -* `map` — Map 类型。参见 [Map](../data-types/map.md)。 +* `map` — Map 类型。[Map](../data-types/map.md)。 **返回值** -* 包含 `map` 中所有键的数组。参见 [Array](../data-types/array.md)。 +* 包含 `map` 中所有键的数组。[Array](../data-types/array.md)。 **示例** @@ -514,7 +514,7 @@ SELECT mapKeys(a) FROM tab; ## mapContains {#mapcontains} -判断给定的 `map` 是否包含指定的键。 +返回一个布尔值,用于表示给定的 `map` 中是否包含指定的键。 **语法** @@ -526,12 +526,12 @@ Alias: 
`mapContainsKey(map, key)` **参数** -* `map` — Map 类型。参见 [Map](../data-types/map.md)。 -* `key` — 键。类型必须与 `map` 的键类型一致。 +* `map` — Map 映射类型。参见 [Map](../data-types/map.md)。 +* `key` — 键。类型必须与 `map` 的键类型匹配。 **返回值** -* 若 `map` 包含 `key`,则返回 `1`,否则返回 `0`。返回类型为 [UInt8](../data-types/int-uint.md)。 +* 如果 `map` 中包含 `key`,则返回 `1`,否则返回 `0`。参见 [UInt8](../data-types/int-uint.md)。 **示例** @@ -566,11 +566,11 @@ mapContainsKeyLike(map, pattern) **参数** * `map` — Map 类型。[Map](../data-types/map.md)。 -* `pattern` - 要匹配的字符串模式。 +* `pattern` - 要匹配的字符串模式。 **返回值** -* 如果 `map` 中存在符合指定模式的键 `key`,则返回 `1`,否则返回 `0`。 +* 如果 `map` 中包含符合指定模式的键,则返回 `1`,否则返回 `0`。 **示例** @@ -595,7 +595,7 @@ SELECT mapContainsKeyLike(a, 'a%') FROM tab; ## mapExtractKeyLike {#mapextractkeylike} -给定一个具有字符串键的 `map` 和一个 `LIKE` 模式,此函数返回一个 `map`,其中仅包含键名匹配该模式的元素。 +给定一个具有字符串键的 `map` 以及一个 LIKE 模式,此函数返回一个仅包含其键匹配该模式元素的 `map`。 **语法** @@ -605,12 +605,12 @@ mapExtractKeyLike(map, pattern) **参数** -* `map` — Map 类型。参见 [Map](../data-types/map.md)。 +* `map` — Map。[Map](../data-types/map.md)。 * `pattern` - 要匹配的字符串模式。 **返回值** -* 一个 map,包含键名匹配指定模式的元素。如果没有元素匹配该模式,则返回空 map。 +* 一个 Map,包含键与指定模式匹配的元素。如果没有元素匹配该模式,则返回空 Map。 **示例** @@ -635,10 +635,10 @@ SELECT mapExtractKeyLike(a, 'a%') FROM tab; ## mapValues {#mapvalues} -返回给定 Map 的所有值。 +返回给定 map 的所有值。 -可以通过启用设置 [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns) 来优化该函数。 -启用该设置后,函数只会读取 [values](/sql-reference/data-types/map#reading-subcolumns-of-map) 子列,而不是整个 Map。 +通过启用 [optimize_functions_to_subcolumns](/operations/settings/settings#optimize_functions_to_subcolumns) 设置,可以对该函数进行优化。 +启用该设置后,函数只会读取 [values](/sql-reference/data-types/map#reading-subcolumns-of-map) 子列,而不是整个 map。 查询 `SELECT mapValues(m) FROM table` 会被转换为 `SELECT m.values FROM table`。 **语法** @@ -649,11 +649,11 @@ mapValues(map) **参数** -* `map` — Map 类型。参见 [Map](../data-types/map.md)。 +* `map` — Map 类型。[Map](../data-types/map.md)。 **返回值** -* 包含 `map` 中所有值的 Array。参见 
[Array](../data-types/array.md)。 +* 包含 `map` 中所有值的数组。[Array](../data-types/array.md)。 **示例** @@ -678,7 +678,7 @@ SELECT mapValues(a) FROM tab; ## mapContainsValue {#mapcontainsvalue} -返回给定键是否存在于给定的 `map` 中。 +返回给定的 `key` 是否存在于给定的 `map` 中。 **语法** @@ -690,12 +690,12 @@ mapContainsValue(map, value) **参数** -* `map` — 映射。参见 [Map](../data-types/map.md)。 -* `value` — 值。类型必须与 `map` 的值类型一致。 +* `map` — Map。[Map](../data-types/map.md)。 +* `value` — 值。类型必须与 `map` 的值类型相匹配。 **返回值** -* 当 `map` 包含 `value` 时返回 `1`,否则返回 `0`。参见 [UInt8](../data-types/int-uint.md)。 +* 若 `map` 包含 `value`,则为 `1`,否则为 `0`。[UInt8](../data-types/int-uint.md)。 **示例** @@ -729,12 +729,12 @@ mapContainsValueLike(map, pattern) **参数** -* `map` — Map 类型。参见 [Map](../data-types/map.md)。 -* `pattern` - 字符串匹配模式。 +* `map` — Map。[Map](../data-types/map.md)。 +* `pattern` - 用于匹配的字符串模式。 **返回值** -* 如果 `map` 中存在符合指定模式的值,则返回 `1`,否则返回 `0`。 +* 如果 `map` 中包含符合指定模式的 `value`,则返回 `1`,否则返回 `0`。 **示例** @@ -759,7 +759,7 @@ SELECT mapContainsValueLike(a, 'a%') FROM tab; ## mapExtractValueLike {#mapextractvaluelike} -给定一个值为字符串的 `map` 和一个 `LIKE` 模式,此函数返回一个 `map`,其中仅包含值与该模式匹配的元素。 +给定一个值为字符串的 map 和一个 LIKE 模式,该函数返回一个仅包含其值匹配该模式的元素的 map。 **语法** @@ -774,7 +774,7 @@ mapExtractValueLike(map, pattern) **返回值** -* 一个包含其值与指定模式匹配的元素的 Map。如果没有元素匹配该模式,则返回空 Map。 +* 一个包含其值匹配指定模式的元素的 Map。如果没有元素匹配该模式,则返回空 Map。 **示例** @@ -799,7 +799,7 @@ SELECT mapExtractValueLike(a, 'a%') FROM tab; ## mapApply {#mapapply} -将函数应用于映射中的每个元素。 +将一个函数应用于 `map` 的每个元素。 **语法** @@ -814,7 +814,7 @@ mapApply(func, map) **返回值** -* 返回一个 Map,该 Map 是对原始 Map 的每个元素应用 `func(map1[i], ..., mapN[i])` 所得到的结果。 +* 返回一个 `map`,通过对原始 `map` 中每个元素应用 `func(map1[i], ..., mapN[i])` 得到。 **示例** @@ -841,7 +841,7 @@ FROM ## mapFilter {#mapfilter} -通过对映射中的每个元素应用函数来对映射进行过滤。 +通过对 map 中的每个元素应用函数来过滤 map。 **语法** @@ -851,12 +851,12 @@ mapFilter(func, map) **参数** -* `func` - [Lambda 函数](/sql-reference/functions/overview#higher-order-functions)。 -* `map` — [Map 类型](../data-types/map.md)。 +* `func` - [Lambda 
函数](/sql-reference/functions/overview#higher-order-functions)。 +* `map` — [Map](../data-types/map.md)。 **返回值** -* 返回一个 map,其中仅包含那些在调用 `func(map1[i], ..., mapN[i])` 时返回非 0 值的 `map` 中的元素。 +* 返回一个 map,其中仅包含 `map` 中那些使 `func(map1[i], ..., mapN[i])` 返回非 0 值的元素。 **示例** @@ -896,7 +896,7 @@ mapUpdate(map1, map2) **返回值** -* 返回 `map1`,其值被更新为 `map2` 中对应键的值。 +* 返回 `map1`,其值根据 `map2` 中对应键的值进行了更新。 **示例** @@ -916,10 +916,10 @@ SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) AS map; ## mapConcat {#mapconcat} -根据键是否相等连接多个 `Map`。 -如果多个输入 `Map` 中存在具有相同键的元素,则所有这些元素都会被添加到结果 `Map` 中,但通过运算符 `[]` 只能访问到第一个元素。 +根据键是否相同来合并多个 `map`。 +如果在多个输入 `map` 中存在具有相同键的元素,则所有元素都会被添加到结果 `map` 中,但通过操作符 `[]` 只能访问第一个元素。 -**语法** +**语法** ```sql mapConcat(maps) @@ -927,11 +927,11 @@ mapConcat(maps) **参数** -* `maps` – 任意数量的 [Map](../data-types/map.md)。 +* `maps` – 任意数量的 [Maps](../data-types/map.md)。 **返回值** -* 返回一个将所有作为参数传入的 map 拼接在一起后的 map。 +* 返回一个 map,它是对作为参数传入的多个 map 进行连接得到的结果。 **示例** @@ -965,11 +965,11 @@ SELECT mapConcat(map('key1', 1, 'key2', 2), map('key1', 3)) AS map, map['key1']; ## mapExists([func,], map) {#mapexistsfunc-map} -如果在 `map` 中至少存在一对键值对,使得 `func(key, value)` 的返回值不为 0,则返回 1,否则返回 0。 +如果在 `map` 中存在至少一组键值对,使得 `func(key, value)` 的返回值不为 0,则返回 1,否则返回 0。 :::note `mapExists` 是一个[高阶函数](/sql-reference/functions/overview#higher-order-functions)。 -你可以将 lambda 函数作为其第一个参数传入。 +可以将一个 lambda 函数作为第一个参数传入。 ::: **示例** @@ -990,11 +990,11 @@ SELECT mapExists((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res ## mapAll([func,] map) {#mapallfunc-map} -如果对 `map` 中的所有键值对,`func(key, value)` 的返回值都不为 0,则返回 1,否则返回 0。 +如果对 `map` 中所有键值对调用 `func(key, value)` 的结果都不为 0,则返回 1,否则返回 0。 :::note -注意,`mapAll` 是一个[高阶函数](/sql-reference/functions/overview#higher-order-functions)。 -你可以将一个 lambda 函数作为其第一个参数传入。 +请注意,`mapAll` 是一个[高阶函数](/sql-reference/functions/overview#higher-order-functions)。 +可以将一个 lambda 函数作为第一个参数传递给它。 ::: **示例** @@ -1015,8 +1015,8 @@ SELECT mapAll((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS 
res ## mapSort([func,], map) {#mapsortfunc-map} -按升序对 map 中的元素进行排序。 -如果指定了 `func` 函数,则排序顺序由将 `func` 函数应用到 map 的键和值上所得的结果决定。 +将 map 中的元素按升序排序。 +如果指定了 `func` 函数,则排序顺序由 `func` 应用于 map 的键和值后得到的结果来决定。 **示例** @@ -1040,28 +1040,28 @@ SELECT mapSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map; └──────────────────────────────┘ ``` -有关更多详细信息,请参阅 `arraySort` 函数的[参考文档](/sql-reference/functions/array-functions#arraySort)。 +更多详情请参阅 [`arraySort` 函数的参考文档](/sql-reference/functions/array-functions#arraySort)。 ## mapPartialSort {#mappartialsort} -对 map 中的元素进行升序排序,并提供额外的 `limit` 参数以支持部分排序。 -如果指定了函数 `func`,则排序顺序由将 `func` 应用于 map 的键和值所得到的结果来决定。 +按升序对 map 的元素进行排序,并通过额外的 `limit` 参数实现部分排序。 +如果指定了 `func` 函数,则排序顺序由将 `func` 函数应用于 map 的键和值所得结果来决定。 **语法** ```sql -mapPartialSort([func,] limit,map) +mapPartialSort([func,] limit, map) ``` **参数** -* `func` – 可选函数,作用于 map 的键和值。[Lambda function](/sql-reference/functions/overview#higher-order-functions)。 -* `limit` – 范围为 [1..limit] 的元素会被排序。[(U)Int](../data-types/int-uint.md)。 +* `func` – 可选函数,应用于 map 的键和值。[Lambda function](/sql-reference/functions/overview#higher-order-functions)。 +* `limit` – 区间 [1..limit] 内的元素会被排序。[(U)Int](../data-types/int-uint.md)。 * `map` – 要排序的 map。[Map](../data-types/map.md)。 **返回值** -* 部分排序的 map。[Map](../data-types/map.md)。 +* 部分排序后的 map。[Map](../data-types/map.md)。 **示例** @@ -1077,8 +1077,8 @@ SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)); ## mapReverseSort([func,], map) {#mapreversesortfunc-map} -对 `map` 的元素进行降序排序。 -如果指定了 `func` 函数,则排序顺序由 `func` 应用于 `map` 的键和值后得到的结果决定。 +按降序对 map 中的元素进行排序。 +如果指定了 `func` 函数,则根据将 `func` 函数应用到 map 的键和值后得到的结果来确定排序顺序。 **示例** @@ -1102,12 +1102,12 @@ SELECT mapReverseSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map; └──────────────────────────────┘ ``` -有关更多详细信息,请参阅函数 [arrayReverseSort](/sql-reference/functions/array-functions#arrayReverseSort)。 +更多详细信息请参阅 [arrayReverseSort](/sql-reference/functions/array-functions#arrayReverseSort) 函数。 ## 
mapPartialReverseSort {#mappartialreversesort} -按降序对 `map` 的元素进行排序,并允许通过额外的 `limit` 参数进行部分排序。 -如果指定了 `func` 函数,则排序顺序由将 `func` 函数应用到 `map` 的键和值后得到的结果决定。 +对 map 中的元素按降序排序,并带有额外的 `limit` 参数,用于执行部分排序。 +如果指定了 `func` 函数,排序顺序根据将 `func` 函数应用到 map 的键和值所得的结果来确定。 **语法** @@ -1117,13 +1117,13 @@ mapPartialReverseSort([func,] limit, map) **参数** -* `func` – 可选函数,应用于 map 的键和值。[Lambda function](/sql-reference/functions/overview#higher-order-functions)。 -* `limit` – 索引范围为 [1..limit] 的元素会被排序。[(U)Int](../data-types/int-uint.md)。 -* `map` – 要排序的 map。[Map](../data-types/map.md)。 +* `func` – 可选,用于作用于 map 的键和值的函数。[Lambda function](/sql-reference/functions/overview#higher-order-functions)。 +* `limit` – 范围为 [1..limit] 的元素会被排序。[(U)Int](../data-types/int-uint.md)。 +* `map` – 要排序的 Map。[Map](../data-types/map.md)。 **返回值** -* 部分排序的 map。[Map](../data-types/map.md)。 +* 部分排序的 Map。[Map](../data-types/map.md)。 **示例** @@ -1138,11 +1138,1072 @@ SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)); ``` {/* - 下面标签中的内容会在文档框架构建期间 - 被基于 system.functions 生成的文档替换。请不要修改或删除这些标签。 + 以下标签内的内容会在文档框架构建期间被替换为 + 由 system.functions 生成的文档。请不要修改或删除这些标签。 参见:https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } {/*AUTOGENERATED_START*/ } +## extractKeyValuePairs {#extractKeyValuePairs} + +引入版本:v + +从任意字符串中提取键值对。该字符串不需要完全符合键值对格式。 + +它可以包含噪声(例如日志文件)。需要通过函数参数指定要解析的键值对格式。 + +一个键值对由键、紧随其后的 `key_value_delimiter` 以及一个值组成。也支持带引号的键和值。键值对之间必须由键值对分隔符分隔。 + +**语法** + +```sql + extractKeyValuePairs(data, [key_value_delimiter], [pair_delimiter], [quoting_character]) +``` + +**参数** + +* `data` - 要从中提取键值对的字符串。[String](../../sql-reference/data-types/string.md) 或 [FixedString](../../sql-reference/data-types/fixedstring.md)。 + * `key_value_delimiter` - 用作键与值之间分隔符的字符。默认值为 `:`。[String](../../sql-reference/data-types/string.md) 或 [FixedString](../../sql-reference/data-types/fixedstring.md)。 + * `pair_delimiters` - 用作各键值对之间分隔符的字符集合。默认值为 `\space`、`,` 和 
`;`。[String](../../sql-reference/data-types/string.md) 或 [FixedString](../../sql-reference/data-types/fixedstring.md)。 + * `quoting_character` - 用作引用符号的字符。默认值为 `"。[String](../../sql-reference/data-types/string.md) 或 [FixedString](../../sql-reference/data-types/fixedstring.md)。 + * `unexpected_quoting_character_strategy` - 在 `read_key` 和 `read_value` 阶段处理出现在非预期位置的引用符号的策略。可选值:`invalid`、`accept` 和 `promote`。`invalid` 会丢弃键/值并切换回 `WAITING_KEY` 状态;`accept` 会将其视为普通字符;`promote` 会切换到 `READ_QUOTED_{KEY/VALUE}` 状态并从下一个字符开始。默认值为 `INVALID`。 + +**返回值** + +* 以 Map(String, String) 形式返回提取出的键值对。 + +**示例** + +查询: + +**简单示例** + +```sql + arthur :) select extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv + + SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv + + 查询 id: f9e0ca6f-3178-4ee2-aa2c-a5517abb9cee + + ┌─kv──────────────────────────────────────────────────────────────────────┐ + │ {'name':'neymar','age':'31','team':'psg','nationality':'brazil'} │ + └─────────────────────────────────────────────────────────────────────────┘ +``` + +**将单引号用作引号字符** + +```sql + arthur :) select extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv + + SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv + + Query id: 0e22bf6b-9844-414a-99dc-32bf647abd5e + + ┌─kv───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ + │ {'name':'neymar','age':'31','team':'psg','nationality':'brazil','last_key':'last_value'} │ + └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +unexpected_quoting_character_strategy 示例: + +unexpected_quoting_character_strategy=invalid + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'INVALID') as kv; +``` + 
+```text + ┌─kv────────────────┐ + │ {'abc':'5'} │ + └───────────────────┘ +``` + +```sql + SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'INVALID') as kv; +``` + +```text + ┌─kv──┐ + │ {} │ + └─────┘ +``` + +unexpected_quoting_character_strategy=accept + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'ACCEPT') as kv; +``` + +```text + ┌─kv────────────────┐ + │ {'name"abc':'5'} │ + └───────────────────┘ +``` + +```sql + SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'ACCEPT') as kv; +``` + +```text + ┌─kv─────────────────┐ + │ {'name"abc"':'5'} │ + └────────────────────┘ +``` + +unexpected_quoting_character_strategy=promote + +```sql + SELECT extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'PROMOTE') as kv; +``` + +```text + ┌─kv──┐ + │ {} │ + └─────┘ +``` + +```sql + SELECT extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'PROMOTE') as kv; +``` + +```text + ┌─kv───────────┐ + │ {'abc':'5'} │ + └──────────────┘ +``` + +**不支持转义序列时的转义序列** + +```sql + arthur :) select extractKeyValuePairs('age:a\\x0A\\n\\0') as kv + + SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv + + Query id: e9fd26ee-b41f-4a11-b17f-25af6fd5d356 + + ┌─kv────────────────────┐ + │ {'age':'a\\x0A\\n\\0'} │ + └───────────────────────┘ +``` + +**语法** + +```sql +``` + +**别名**: `str_to_map`, `mapFromString` + +**参数** + +* 无。 + +**返回值** + +**示例** + +## extractKeyValuePairsWithEscaping {#extractKeyValuePairsWithEscaping} + +引入版本: v + +与 `extractKeyValuePairs` 相同,但支持转义。 + +支持的转义序列:`\x`、`\N`、`\a`、`\b`、`\e`、`\f`、`\n`、`\r`、`\t`、`\v` 和 `\0`。 +非标准转义序列将按原样返回(包括反斜杠),除非它们是以下字符之一: +`\\`、`'`、`"`、`backtick`、`/`、`=` 或 ASCII 控制字符(`c <= 31`)。 + +此函数适用于预转义和后转义都不适用的场景。例如,考虑以下输入字符串:`a: "aaaa\"bbb"`。预期输出为:`a: aaaa\"bbbb`。 + +* 预转义:预转义后将输出:`a: "aaaa"bbb"`,然后 `extractKeyValuePairs` 将输出:`a: aaaa` + * 后转义:`extractKeyValuePairs` 将输出 `a: aaaa\`,后转义将保持不变。 + +键中的前导转义序列将被跳过,值中的前导转义序列将被视为无效。 + +**启用转义序列支持的转义序列** + +```sql + arthur :) select 
extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') as kv + + SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv + + Query id: 44c114f0-5658-4c75-ab87-4574de3a1645 + + ┌─kv───────────────┐ + │ {'age':'a\n\n\0'} │ + └──────────────────┘ +``` + +**语法** + +```sql +``` + +**参数** + +* 无 + +**返回值** + +**示例** + +## map {#map} + +引入版本:v21.1 + +从键值对创建一个类型为 `Map(key, value)` 的值。 + +**语法** + +```sql +map(key1, value1[, key2, value2, ...]) +``` + +**参数** + +* `key_n` — Map 条目的键。[`Any`](/sql-reference/data-types) +* `value_n` — Map 条目的值。[`Any`](/sql-reference/data-types) + +**返回值** + +返回一个包含键值对的 Map。[`Map(Any, Any)`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +SELECT map('key1', number, 'key2', number * 2) FROM numbers(3) +``` + +```response title=Response +{'key1':0,'key2':0} +{'key1':1,'key2':2} +{'key1':2,'key2':4} +``` + +## mapAdd {#mapAdd} + +在 v20.7 中引入 + +收集所有键并对相应的值求和。 + +**语法** + +```sql +mapAdd(arg1[, arg2, ...]) +``` + +**参数** + +* `arg1[, arg2, ...]` — `Map` 类型或由两个数组组成的元组,其中第一个数组中的元素表示键,第二个数组包含每个键对应的值。[`Map(K, V)`](/sql-reference/data-types/map) 或 [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**返回值** + +返回一个 `Map` 或元组,其中第一个数组包含已排序的键,第二个数组包含对应的值。[`Map(K, V)`](/sql-reference/data-types/map) 或 [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**示例** + +**使用 Map 类型** + +```sql title=Query +SELECT mapAdd(map(1, 1), map(1, 1)) +``` + +```response title=Response +{1:2} +``` + +**使用元组** + +```sql title=Query +SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) +``` + +```response title=Response +([1, 2], [2, 2]) +``` + +## mapAll {#mapAll} + +引入版本:v23.4 + +判断某个条件是否对 map 中的所有键值对都成立。 +`mapAll` 是一个高阶函数。 +你可以将一个 lambda 函数作为第一个参数传递给它。 + +**语法** + +```sql +mapAll([func,] map) +``` + +**参数** + +* `func` — Lambda 函数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — 要检查的映射(Map)。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + 
+如果所有键值对都满足条件,则返回 `1`,否则返回 `0`。[`UInt8`](/sql-reference/data-types/int-uint) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapAll((k, v) -> v = 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +0 +``` + +## mapApply {#mapApply} + +引入于:v22.3 版本 + +将一个函数应用于 map 中的每个元素。 + +**语法** + +```sql +mapApply(func, map) +``` + +**参数** + +* `func` — Lambda 函数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — 要应用函数的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回一个新的 Map,通过对原始 Map 的每个元素应用 `func` 得到。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapApply((k, v) -> (k, v * 2), map('k1', 1, 'k2', 2)) +``` + +```response title=Response +{'k1':2,'k2':4} +``` + +## mapConcat {#mapConcat} + +首次引入版本:v23.4 + +根据键是否相等来连接多个 map。 +如果多个输入 map 中存在相同键的元素,所有这些元素都会被添加到结果 map 中,但通过 `[]` 运算符只能访问到第一个元素。 + +**语法** + +```sql +mapConcat(maps) +``` + +**参数** + +* `maps` — 任意数量的 `Map`。[`Map`](/sql-reference/data-types/map) + +**返回值** + +返回一个 `Map`,其中包含作为参数传入的所有 `Map` 的合并结果。[`Map`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapConcat(map('k1', 'v1'), map('k2', 'v2')) +``` + +```response title=Response +{'k1':'v1','k2':'v2'} +``` + +## mapContainsKey {#mapContainsKey} + +引入版本:v21.2 + +判断 `map` 中是否包含某个键。 + +**语法** + +```sql +mapContains(map, key) +``` + +**别名**:`mapContains` + +**参数** + +* `map` — 要搜索的 Map。[`Map(K, V)`](/sql-reference/data-types/map) +* `key` — 要搜索的键。类型必须与该 Map 的键类型匹配。[`Any`](/sql-reference/data-types) + +**返回值** + +如果 Map 中包含该键,则返回 1,否则返回 0。[`UInt8`](/sql-reference/data-types/int-uint) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapContainsKey(map('k1', 'v1', 'k2', 'v2'), 'k1') +``` + +```response title=Response +1 +``` + +## mapContainsKeyLike {#mapContainsKeyLike} + +引入版本:v23.4 + +检查 `map` 中是否存在与指定模式通过 `LIKE` 匹配的键。 + +**语法** + +```sql +mapContainsKeyLike(map, pattern) +``` + +**参数** + +* `map` — 要搜索的映射。[`Map(K, 
V)`](/sql-reference/data-types/map) +* `pattern` — 用于匹配键的模式。[`const String`](/sql-reference/data-types/string) + +**返回值** + +如果 `map` 中包含与 `pattern` 匹配的键,则返回 `1`,否则返回 `0`。[`UInt8`](/sql-reference/data-types/int-uint) + +**示例** + +**用法示例** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapContainsKeyLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapContainsKeyLike(a, 'a%')─┐ +│ 1 │ +│ 0 │ +└─────────────────────────────┘ +``` + +## mapContainsValue {#mapContainsValue} + +引入版本:v25.6 + +用于判断某个值是否存在于 map 中。 + +**语法** + +```sql +mapContainsValue(map, value) +``` + +**参数** + +* `map` — 要在其中查找的 Map。[`Map(K, V)`](/sql-reference/data-types/map) +* `value` — 要查找的值。其类型必须与 map 的值类型匹配。[`Any`](/sql-reference/data-types) + +**返回值** + +如果 map 中包含该值则返回 `1`,否则返回 `0`。[`UInt8`](/sql-reference/data-types/int-uint) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapContainsValue(map('k1', 'v1', 'k2', 'v2'), 'v1') +``` + +```response title=Response +1 +``` + +## mapContainsValueLike {#mapContainsValueLike} + +自 v25.5 引入 + +检查 map 中是否存在符合指定 `LIKE` 模式的值。 + +**语法** + +```sql +mapContainsValueLike(map, pattern) +``` + +**参数** + +* `map` — 要搜索的 Map。[`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — 用于匹配 `map` 中值的模式。[`const String`](/sql-reference/data-types/string) + +**返回值** + +如果 `map` 中包含与 `pattern` 匹配的值,则返回 `1`,否则返回 `0`。[`UInt8`](/sql-reference/data-types/int-uint) + +**示例** + +**用法示例** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapContainsValueLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapContainsV⋯ke(a, 'a%')─┐ +│ 1 │ +│ 0 │ +└──────────────────────────┘ +``` + +## mapExists {#mapExists} + +引入版本:v23.4 + +用于判断在一个 map 中是否存在至少一对键值对满足指定条件。 
+`mapExists` 是一个高阶函数。 +你可以将一个 lambda 函数作为第一个参数传递给它。 + +**语法** + +```sql +mapExists([func,] map) +``` + +**参数** + +* `func` — 可选。Lambda 函数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — 要检查的 Map 类型值。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +如果至少有一个键值对满足条件,则返回 `1`,否则返回 `0`。[`UInt8`](/sql-reference/data-types/int-uint) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapExists((k, v) -> v = 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +1 +``` + +## mapExtractKeyLike {#mapExtractKeyLike} + +引入版本:v23.4 + +给定一个键为字符串的 map 和一个 `LIKE` 模式,此函数返回一个仅包含键与该模式匹配元素的 map。 + +**语法** + +```sql +mapExtractKeyLike(map, pattern) +``` + +**参数** + +* `map` — 要从中提取数据的 Map。[`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — 用于匹配键的模式字符串。[`const String`](/sql-reference/data-types/string) + +**返回值** + +返回一个 map,其中包含键与指定模式匹配的元素。如果没有元素匹配该模式,则返回一个空 map。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'}); + +SELECT mapExtractKeyLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapExtractKeyLike(a, 'a%')─┐ +│ {'abc':'abc'} │ +│ {} │ +└────────────────────────────┘ +``` + +## mapExtractValueLike {#mapExtractValueLike} + +引入版本:v25.5 + +给定一个值为字符串的 map 和一个 `LIKE` 模式,此函数返回一个 map,其中只包含值与该模式匹配的元素。 + +**语法** + +```sql +mapExtractValueLike(map, pattern) +``` + +**参数** + +* `map` — 要从中提取元素的 Map。[`Map(K, V)`](/sql-reference/data-types/map) +* `pattern` — 用于匹配值的模式。[`const String`](/sql-reference/data-types/string) + +**返回值** + +返回一个 Map,其中仅包含值与指定模式匹配的元素。如果没有元素匹配该模式,则返回空 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +CREATE TABLE tab (a Map(String, String)) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab VALUES ({'abc':'abc','def':'def'}), 
({'hij':'hij','klm':'klm'}); + +SELECT mapExtractValueLike(a, 'a%') FROM tab; +``` + +```response title=Response +┌─mapExtractValueLike(a, 'a%')─┐ +│ {'abc':'abc'} │ +│ {} │ +└──────────────────────────────┘ +``` + +## mapFilter {#mapFilter} + +在 v22.3 中引入 + +通过对每个 map 元素应用函数来过滤该 map。 + +**语法** + +```sql +mapFilter(func, map) +``` + +**参数** + +* `func` — Lambda 函数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — 要筛选的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回一个 map,仅包含那些使 `func` 返回非 `0` 值的元素。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**使用示例** + +```sql title=Query +SELECT mapFilter((k, v) -> v > 1, map('k1', 1, 'k2', 2)) +``` + +```response title=Response +{'k2':2} +``` + +## mapFromArrays {#mapFromArrays} + +引入版本:v23.3 + +从键的数组或 Map 与值的数组或 Map 创建一个 Map。 +该函数是语法 `CAST([...], 'Map(key_type, value_type)')` 的一种更便捷的替代写法。 + +**语法** + +```sql +mapFromArrays(keys, values) +``` + +**别名**:`MAP_FROM_ARRAYS` + +**参数** + +* `keys` — 用于创建映射的键的数组或 `Map` 类型。[`Array`](/sql-reference/data-types/array) 或 [`Map`](/sql-reference/data-types/map) +* `values` — 用于创建映射的值的数组或 `Map` 类型。[`Array`](/sql-reference/data-types/array) 或 [`Map`](/sql-reference/data-types/map) + +**返回值** + +返回一个 `Map`,其键和值由键数组和值数组/`Map` 构造而成。[`Map`](/sql-reference/data-types/map) + +**示例** + +**基本用法** + +```sql title=Query +SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]) +``` + +```response title=Response +{'a':1,'b':2,'c':3} +``` + +**使用 map 作为输入** + +```sql title=Query +SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3)) +``` + +```response title=Response +{1:('a', 1), 2:('b', 2), 3:('c', 3)} +``` + +## mapKeys {#mapKeys} + +引入版本:v21.2 + +返回给定 map 的键。 +通过启用设置 [`optimize_functions_to_subcolumns`](/operations/settings/settings#optimize_functions_to_subcolumns),可以对该函数进行优化。 +启用该设置后,函数只会读取 `keys` 子列,而不是整个 map。 +查询 `SELECT mapKeys(m) FROM table` 会被转换为 `SELECT m.keys FROM table`。 + +**语法** + +```sql +mapKeys(map) +``` + +**参数** + 
+* `map` — 要从中提取键的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回一个包含该 map 所有键的数组。[`Array(T)`](/sql-reference/data-types/array) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapKeys(map('k1', 'v1', 'k2', 'v2')) +``` + +```response title=Response +['k1','k2'] +``` + +## mapPartialReverseSort {#mapPartialReverseSort} + +自 v23.4 引入 + +对 map 的元素按降序排序,并带有一个额外的 limit 参数,用于进行部分排序。 +如果指定了 func 函数,则根据将 func 函数应用于 map 的键和值所得到的结果来确定排序顺序。 + +**语法** + +```sql +mapPartialReverseSort([func,] limit, map) +``` + +**参数** + +* `func` — 可选。Lambda 函数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `limit` — 对范围 `[1..limit]` 内的元素进行排序。[`(U)Int*`](/sql-reference/data-types/int-uint) +* `map` — 要排序的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回一个按降序部分排序后的 map。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k1':3,'k3':2,'k2':1} +``` + +## mapPartialSort {#mapPartialSort} + +自 v23.4 版本引入 + +对 map 的元素按升序排序,并接受一个额外的 limit 参数,用于执行部分排序。 +如果指定了函数 func,则排序顺序由函数 func 作用于 map 的键和值后得到的结果来决定。 + +**语法** + +```sql +mapPartialSort([func,] limit, map) +``` + +**参数** + +* `func` — 可选。Lambda 函数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `limit` — 范围 `[1..limit]` 内的元素将被排序。[`(U)Int*`](/sql-reference/data-types/int-uint) +* `map` — 要排序的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回一个部分有序的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k2':1,'k3':2,'k1':3} +``` + +## mapPopulateSeries {#mapPopulateSeries} + +引入版本:v20.10 + +在具有整数键的 map 中填充缺失的键值对。 +为了支持将键扩展到当前最大值之外,可以指定一个最大键。 +更具体地说,该函数返回一个 map,其键从最小键到最大键(或指定的 max 参数)构成步长为 1 的序列,并具有对应的值。 +如果某个键未指定值,则使用默认值作为该键的值。 
+如果键出现重复,则只将第一个值(按出现顺序)与该键关联。 + +**语法** + +```sql +mapPopulateSeries(map[, max]) | mapPopulateSeries(keys, values[, max]) +``` + +**参数** + +* `map` — 具有整数键的 Map。[`Map((U)Int*, V)`](/sql-reference/data-types/map) +* `keys` — 键数组。[`Array(T)`](/sql-reference/data-types/array) +* `values` — 值数组。[`Array(T)`](/sql-reference/data-types/array) +* `max` — 可选。键的最大值。[`Int8`](/sql-reference/data-types/int-uint) 或 [`Int16`](/sql-reference/data-types/int-uint) 或 [`Int32`](/sql-reference/data-types/int-uint) 或 [`Int64`](/sql-reference/data-types/int-uint) 或 [`Int128`](/sql-reference/data-types/int-uint) 或 [`Int256`](/sql-reference/data-types/int-uint) + +**返回值** + +返回一个 Map,或由两个数组组成的元组:第一个数组包含按排序后顺序排列的键,第二个数组包含对应键的值。[`Map(K, V)`](/sql-reference/data-types/map) 或 [`Tuple(Array(UInt*), Array(Any))`](/sql-reference/data-types/tuple) + +**示例** + +**使用 Map 类型** + +```sql title=Query +SELECT mapPopulateSeries(map(1, 10, 5, 20), 6) +``` + +```response title=Response +{1:10, 2:0, 3:0, 4:0, 5:20, 6:0} +``` + +**使用映射数组** + +```sql title=Query +SELECT mapPopulateSeries([1, 2, 4], [11, 22, 44], 5) +``` + +```response title=Response +([1, 2, 3, 4, 5], [11, 22, 0, 44, 0]) +``` + +## mapReverseSort {#mapReverseSort} + +引入版本:v23.4 + +对 map 中的元素进行降序排序。 +如果指定了函数 func,则排序顺序由函数 func 作用于 map 的键和值所产生的结果来决定。 + +**语法** + +```sql +mapReverseSort([func,] map) +``` + +**参数** + +* `func` — 可选。Lambda 函数。[`Lambda function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — 要排序的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回按降序排序后的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapReverseSort((k, v) -> v, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k1':3,'k3':2,'k2':1} +``` + +## mapSort {#mapSort} + +引入于:v23.4 + +按升序对 map 的元素进行排序。 +如果指定了函数 func,则排序顺序由将 func 函数应用于 map 的键和值后得到的结果决定。 + +**语法** + +```sql +mapSort([func,] map) +``` + +**参数** + +* `func` — 可选。Lambda 函数。[`Lambda 
function`](/sql-reference/functions/overview#arrow-operator-and-lambda) +* `map` — 要排序的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回按升序排序的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**用法示例** + +```sql title=Query +SELECT mapSort((k, v) -> v, map('k1', 3, 'k2', 1, 'k3', 2)) +``` + +```response title=Response +{'k2':1,'k3':2,'k1':3} +``` + +## mapSubtract {#mapSubtract} + +自 v20.7 版本引入。 + +收集所有键并对相应的值进行相减运算。 + +**语法** + +```sql +mapSubtract(arg1[, arg2, ...]) +``` + +**参数** + +* `arg1[, arg2, ...]` — 类型为 `Map` 或由两个数组组成的 `Tuple`,其中第一个数组的元素表示键,第二个数组包含每个键对应的值。[`Map(K, V)`](/sql-reference/data-types/map) 或 [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**返回值** + +返回一个 `Map` 或 `Tuple`,其中第一个数组包含排序后的键,第二个数组包含对应的值。[`Map(K, V)`](/sql-reference/data-types/map) 或 [`Tuple(Array(T), Array(T))`](/sql-reference/data-types/tuple) + +**示例** + +**使用 Map 类型** + +```sql title=Query +SELECT mapSubtract(map(1, 1), map(1, 1)) +``` + +```response title=Response +{1:0} +``` + +**使用 tuple map 时** + +```sql title=Query +SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) +``` + +```response title=Response +([1, 2], [-1, 0]) +``` + +## mapUpdate {#mapUpdate} + +引入版本:v22.3 + +对于两个 `map`,返回在第一个 `map` 的基础上,用第二个 `map` 中对应键的值更新后的结果。 + +**语法** + +```sql +mapUpdate(map1, map2) +``` + +**参数** + +* `map1` — 要更新的映射。[`Map(K, V)`](/sql-reference/data-types/map) +* `map2` — 用于更新的映射。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回按 `map2` 中对应键的值更新后的 `map1`。[`Map(K, V)`](/sql-reference/data-types/map) + +**示例** + +**基本用法** + +```sql title=Query +SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) +``` + +```response title=Response +{'key3':0,'key1':10,'key2':10} +``` + +## mapValues {#mapValues} + +首次引入于:v21.2 + +返回给定 map 中所有的值。 +通过启用 [`optimize_functions_to_subcolumns`](/operations/settings/settings#optimize_functions_to_subcolumns) 这个设置,可以对该函数进行优化。 +启用该设置后,函数只会读取 `values` 
子列,而不是整个 map。 +查询 `SELECT mapValues(m) FROM table` 会被重写为 `SELECT m.values FROM table`。 + +**语法** + +```sql +mapValues(map) +``` + +**参数** + +* `map` — 要从中提取值的 Map。[`Map(K, V)`](/sql-reference/data-types/map) + +**返回值** + +返回一个数组,包含该 Map 中的所有值。[`Array(T)`](/sql-reference/data-types/array) + +**示例** + +**使用示例** + +```sql title=Query +SELECT mapValues(map('k1', 'v1', 'k2', 'v2')) +``` + +```response title=Response +['v1','v2'] +``` + {/*AUTOGENERATED_END*/ } diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md index e6e06a4ac64..2c3c1770f24 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/type-conversion-functions.md @@ -6,12 +6,8 @@ title: '类型转换函数' doc_type: 'reference' --- - - # 类型转换函数 {#type-conversion-functions} - - ## 数据转换的常见问题 {#common-issues-with-data-conversion} ClickHouse 通常遵循[与 C++ 程序相同的隐式转换行为](https://en.cppreference.com/w/cpp/language/implicit_conversion)。 @@ -54,7 +50,6 @@ SETTINGS cast_keep_nullable = 1 └──────────────────┴─────────────────────┴──────────────────┘ ``` - ## 关于 `toString` 函数的说明 {#to-string-functions} `toString` 函数族用于在数字、字符串(但不包括定长字符串)、日期以及带时间的日期之间进行转换。 @@ -65,8 +60,6 @@ SETTINGS cast_keep_nullable = 1 - 在带时间的日期与数字之间进行转换时,带时间的日期对应于自 Unix 纪元开始以来的秒数。 - 对于 `DateTime` 参数,`toString` 函数可以接收第二个字符串参数,其中包含时区名称,例如:`Europe/Amsterdam`。在这种情况下,时间将按照指定的时区进行格式化。 - - ## 关于 `toDate`/`toDateTime` 函数的说明 {#to-date-and-date-time-functions} `toDate`/`toDateTime` 函数的日期和日期时间格式定义如下: @@ -116,7 +109,6 @@ LIMIT 10 另请参阅 [`toUnixTimestamp`](#toUnixTimestamp) 函数。 - ## toBool {#tobool} 将输入值转换为 [`Bool`](../data-types/boolean.md) 类型的值。出现错误时抛出异常。 @@ -167,7 +159,6 @@ toBool('false'): false toBool('FALSE'): false ``` - ## toInt8 {#toint8} 将输入值转换为 
[`Int8`](../data-types/int-uint.md) 类型的值。发生错误时会抛出异常。 @@ -234,7 +225,6 @@ toInt8('-8'): -8 * [`toInt8OrNull`](#toInt8OrNull)。 * [`toInt8OrDefault`](#toint8ordefault)。 - ## toInt8OrZero {#toint8orzero} 与 [`toInt8`](#toint8) 类似,此函数将输入值转换为 [Int8](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `0`。 @@ -297,7 +287,6 @@ toInt8OrZero('abc'): 0 * [`toInt8OrNull`](#toInt8OrNull). * [`toInt8OrDefault`](#toint8ordefault). - ## toInt8OrNull {#toInt8OrNull} 类似于 [`toInt8`](#toint8),此函数将输入值转换为 [Int8](../data-types/int-uint.md) 类型的值,但在出错时会返回 `NULL`。 @@ -360,7 +349,6 @@ toInt8OrNull('abc'): ᴺᵁᴸᴸ * [`toInt8OrZero`](#toint8orzero)。 * [`toInt8OrDefault`](#toint8ordefault)。 - ## toInt8OrDefault {#toint8ordefault} 与 [`toInt8`](#toint8) 类似,此函数将输入值转换为 [Int8](../data-types/int-uint.md) 类型的值,但在出错时返回默认值。 @@ -428,7 +416,6 @@ toInt8OrDefault('abc', CAST('-1', 'Int8')): -1 * [`toInt8OrZero`](#toint8orzero)。 * [`toInt8OrNull`](#toInt8OrNull)。 - ## toInt16 {#toint16} 将输入值转换为 [`Int16`](../data-types/int-uint.md) 类型的值。在出错时抛出异常。 @@ -495,7 +482,6 @@ toInt16('-16'): -16 * [`toInt16OrNull`](#toint16ornull)。 * [`toInt16OrDefault`](#toint16ordefault)。 - ## toInt16OrZero {#toint16orzero} 与 [`toInt16`](#toint16) 类似,此函数将输入值转换为 [Int16](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `0`。 @@ -558,7 +544,6 @@ toInt16OrZero('abc'): 0 * [`toInt16OrNull`](#toint16ornull)。 * [`toInt16OrDefault`](#toint16ordefault)。 - ## toInt16OrNull {#toint16ornull} 与 [`toInt16`](#toint16) 类似,此函数将输入值转换为 [Int16](../data-types/int-uint.md) 类型的值,但在出错时会返回 `NULL`。 @@ -621,7 +606,6 @@ toInt16OrNull('abc'): ᴺᵁᴸᴸ * [`toInt16OrZero`](#toint16orzero). * [`toInt16OrDefault`](#toint16ordefault). - ## toInt16OrDefault {#toint16ordefault} 与 [`toInt16`](#toint16) 类似,此函数将输入值转换为 [Int16](../data-types/int-uint.md) 类型的值,但在发生错误时返回默认值。 @@ -689,7 +673,6 @@ toInt16OrDefault('abc', CAST('-1', 'Int16')): -1 * [`toInt16OrZero`](#toint16orzero). * [`toInt16OrNull`](#toint16ornull). 
- ## toInt32 {#toint32} 将输入值转换为 [`Int32`](../data-types/int-uint.md) 类型的值。如果发生错误,则会抛出异常。 @@ -756,7 +739,6 @@ toInt32('-32'): -32 * [`toInt32OrNull`](#toint32ornull)。 * [`toInt32OrDefault`](#toint32ordefault)。 - ## toInt32OrZero {#toint32orzero} 与 [`toInt32`](#toint32) 类似,此函数将输入值转换为 [Int32](../data-types/int-uint.md) 类型的值,但如果发生错误则返回 `0`。 @@ -819,7 +801,6 @@ toInt32OrZero('abc'): 0 * [`toInt32OrNull`](#toint32ornull)。 * [`toInt32OrDefault`](#toint32ordefault)。 - ## toInt32OrNull {#toint32ornull} 与 [`toInt32`](#toint32) 类似,该函数将输入值转换为 [Int32](../data-types/int-uint.md) 类型的值,但在出错时返回 `NULL`。 @@ -882,7 +863,6 @@ toInt32OrNull('abc'): ᴺᵁᴸᴸ * [`toInt32OrZero`](#toint32orzero). * [`toInt32OrDefault`](#toint32ordefault). - ## toInt32OrDefault {#toint32ordefault} 与 [`toInt32`](#toint32) 类似,此函数将输入值转换为 [Int32](../data-types/int-uint.md) 类型的值,但在发生错误时返回默认值。 @@ -950,7 +930,6 @@ toInt32OrDefault('abc', CAST('-1', 'Int32')): -1 * [`toInt32OrZero`](#toint32orzero). * [`toInt32OrNull`](#toint32ornull). - ## toInt64 {#toint64} 将输入值转换为 [`Int64`](../data-types/int-uint.md) 类型的值。出错时会抛出异常。 @@ -1017,7 +996,6 @@ toInt64('-64'): -64 * [`toInt64OrNull`](#toint64ornull)。 * [`toInt64OrDefault`](#toint64ordefault)。 - ## toInt64OrZero {#toint64orzero} 与 [`toInt64`](#toint64) 类似,此函数将输入值转换为 [Int64](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `0`。 @@ -1080,7 +1058,6 @@ toInt64OrZero('abc'): 0 * [`toInt64OrNull`](#toint64ornull)。 * [`toInt64OrDefault`](#toint64ordefault)。 - ## toInt64OrNull {#toint64ornull} 与 [`toInt64`](#toint64) 类似,此函数将输入值转换为 [Int64](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `NULL`。 @@ -1143,7 +1120,6 @@ toInt64OrNull('abc'): ᴺᵁᴸᴸ * [`toInt64OrZero`](#toint64orzero)。 * [`toInt64OrDefault`](#toint64ordefault)。 - ## toInt64OrDefault {#toint64ordefault} 与 [`toInt64`](#toint64) 类似,此函数将输入值转换为 [Int64](../data-types/int-uint.md) 类型的值,但在出错时返回默认值。 @@ -1211,7 +1187,6 @@ toInt64OrDefault('abc', CAST('-1', 'Int64')): -1 * [`toInt64OrZero`](#toint64orzero)。 * [`toInt64OrNull`](#toint64ornull)。 - ## 
toInt128 {#toint128} 将输入值转换为 [`Int128`](../data-types/int-uint.md) 类型的值。发生错误时抛出异常。 @@ -1277,7 +1252,6 @@ toInt128('-128'): -128 * [`toInt128OrNull`](#toint128ornull)。 * [`toInt128OrDefault`](#toint128ordefault)。 - ## toInt128OrZero {#toint128orzero} 与 [`toInt128`](#toint128) 类似,此函数将输入值转换为 [Int128](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `0`。 @@ -1340,7 +1314,6 @@ toInt128OrZero('abc'): 0 * [`toInt128OrNull`](#toint128ornull)。 * [`toInt128OrDefault`](#toint128ordefault)。 - ## toInt128OrNull {#toint128ornull} 与 [`toInt128`](#toint128) 类似,此函数将输入值转换为 [Int128](../data-types/int-uint.md) 类型的值,如果发生错误则返回 `NULL`。 @@ -1403,7 +1376,6 @@ toInt128OrNull('abc'): ᴺᵁᴸᴸ * [`toInt128OrZero`](#toint128orzero)。 * [`toInt128OrDefault`](#toint128ordefault)。 - ## toInt128OrDefault {#toint128ordefault} 与 [`toInt128`](#toint128) 类似,此函数将输入值转换为 [Int128](../data-types/int-uint.md) 类型的值,但在出错时返回默认值。 @@ -1472,7 +1444,6 @@ toInt128OrDefault('abc', CAST('-1', 'Int128')): -1 * [`toInt128OrZero`](#toint128orzero)。 * [`toInt128OrNull`](#toint128ornull)。 - ## toInt256 {#toint256} 将输入值转换为 [`Int256`](../data-types/int-uint.md) 类型的值。在出错时会抛出异常。 @@ -1538,7 +1509,6 @@ toInt256('-256'): -256 * [`toInt256OrNull`](#toint256ornull)。 * [`toInt256OrDefault`](#toint256ordefault)。 - ## toInt256OrZero {#toint256orzero} 与 [`toInt256`](#toint256) 类似,此函数将输入值转换为 [Int256](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `0`。 @@ -1601,7 +1571,6 @@ toInt256OrZero('abc'): 0 * [`toInt256OrNull`](#toint256ornull). * [`toInt256OrDefault`](#toint256ordefault). - ## toInt256OrNull {#toint256ornull} 与 [`toInt256`](#toint256) 类似,此函数将输入值转换为 [Int256](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `NULL`。 @@ -1664,7 +1633,6 @@ toInt256OrNull('abc'): ᴺᵁᴸᴸ * [`toInt256OrZero`](#toint256orzero). * [`toInt256OrDefault`](#toint256ordefault). 
- ## toInt256OrDefault {#toint256ordefault} 与 [`toInt256`](#toint256) 类似,此函数将输入值转换为 [Int256](../data-types/int-uint.md) 类型的值,但在出错时返回默认值。 @@ -1732,7 +1700,6 @@ toInt256OrDefault('abc', CAST('-1', 'Int256')): -1 * [`toInt256OrZero`](#toint256orzero)。 * [`toInt256OrNull`](#toint256ornull)。 - ## toUInt8 {#touint8} 将输入值转换为 [`UInt8`](../data-types/int-uint.md) 类型的值。出错时抛出异常。 @@ -1799,7 +1766,6 @@ toUInt8('8'): 8 * [`toUInt8OrNull`](#touint8ornull)。 * [`toUInt8OrDefault`](#touint8ordefault)。 - ## toUInt8OrZero {#touint8orzero} 与 [`toUInt8`](#touint8) 类似,此函数将输入值转换为 [UInt8](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `0`。 @@ -1862,7 +1828,6 @@ toUInt8OrZero('abc'): 0 * [`toUInt8OrNull`](#touint8ornull)。 * [`toUInt8OrDefault`](#touint8ordefault)。 - ## toUInt8OrNull {#touint8ornull} 与 [`toUInt8`](#touint8) 类似,此函数将输入值转换为 [UInt8](../data-types/int-uint.md) 类型的值,但在出错时返回 `NULL`。 @@ -1925,7 +1890,6 @@ toUInt8OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt8OrZero`](#touint8orzero)。 * [`toUInt8OrDefault`](#touint8ordefault)。 - ## toUInt8OrDefault {#touint8ordefault} 与 [`toUInt8`](#touint8) 类似,此函数将输入值转换为 [UInt8](../data-types/int-uint.md) 类型的值,但在出错时返回默认值。 @@ -1993,7 +1957,6 @@ toUInt8OrDefault('abc', CAST('0', 'UInt8')): 0 * [`toUInt8OrZero`](#touint8orzero)。 * [`toUInt8OrNull`](#touint8ornull)。 - ## toUInt16 {#touint16} 将输入值转换为 [`UInt16`](../data-types/int-uint.md) 类型的值。如果出错则抛出异常。 @@ -2060,7 +2023,6 @@ toUInt16('16'): 16 * [`toUInt16OrNull`](#touint16ornull)。 * [`toUInt16OrDefault`](#touint16ordefault)。 - ## toUInt16OrZero {#touint16orzero} 与 [`toUInt16`](#touint16) 类似,此函数将输入值转换为 [UInt16](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `0`。 @@ -2123,7 +2085,6 @@ toUInt16OrZero('abc'): 0 * [`toUInt16OrNull`](#touint16ornull). * [`toUInt16OrDefault`](#touint16ordefault). 
- ## toUInt16OrNull {#touint16ornull} 与 [`toUInt16`](#touint16) 类似,此函数将输入值转换为 [UInt16](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `NULL`。 @@ -2186,7 +2147,6 @@ toUInt16OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt16OrZero`](#touint16orzero)。 * [`toUInt16OrDefault`](#touint16ordefault)。 - ## toUInt16OrDefault {#touint16ordefault} 与 [`toUInt16`](#touint16) 类似,此函数将输入值转换为类型为 [UInt16](../data-types/int-uint.md) 的值,但在出错时返回默认值。 @@ -2254,7 +2214,6 @@ toUInt16OrDefault('abc', CAST('0', 'UInt16')): 0 * [`toUInt16OrZero`](#touint16orzero)。 * [`toUInt16OrNull`](#touint16ornull)。 - ## toUInt32 {#touint32} 将输入值转换为 [`UInt32`](../data-types/int-uint.md) 类型的值。如果发生错误,则抛出异常。 @@ -2321,7 +2280,6 @@ toUInt32('32'): 32 * [`toUInt32OrNull`](#touint32ornull)。 * [`toUInt32OrDefault`](#touint32ordefault)。 - ## toUInt32OrZero {#touint32orzero} 与 [`toUInt32`](#touint32) 类似,此函数将输入值转换为 [UInt32](../data-types/int-uint.md) 类型的值,但在出错时返回 `0`。 @@ -2385,7 +2343,6 @@ toUInt32OrZero('abc'): 0 * [`toUInt32OrNull`](#touint32ornull)。 * [`toUInt32OrDefault`](#touint32ordefault)。 - ## toUInt32OrNull {#touint32ornull} 类似于 [`toUInt32`](#touint32),此函数将输入值转换为 [UInt32](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `NULL`。 @@ -2448,7 +2405,6 @@ toUInt32OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt32OrZero`](#touint32orzero)。 * [`toUInt32OrDefault`](#touint32ordefault)。 - ## toUInt32OrDefault {#touint32ordefault} 与 [`toUInt32`](#touint32) 类似,此函数将输入值转换为 [UInt32](../data-types/int-uint.md) 类型的值,但在发生错误时返回默认值。 @@ -2516,7 +2472,6 @@ toUInt32OrDefault('abc', CAST('0', 'UInt32')): 0 * [`toUInt32OrZero`](#touint32orzero)。 * [`toUInt32OrNull`](#touint32ornull)。 - ## toUInt64 {#touint64} 将输入值转换为 [`UInt64`](../data-types/int-uint.md) 类型的值。出错时抛出异常。 @@ -2583,7 +2538,6 @@ toUInt64('64'): 64 * [`toUInt64OrNull`](#touint64ornull)。 * [`toUInt64OrDefault`](#touint64ordefault)。 - ## toUInt64OrZero {#touint64orzero} 与 [`toUInt64`](#touint64) 类似,该函数将输入值转换为 [UInt64](../data-types/int-uint.md) 类型的值,但在出错时返回 `0`。 @@ -2646,7 +2600,6 @@ toUInt64OrZero('abc'): 0 * 
[`toUInt64OrNull`](#touint64ornull)。 * [`toUInt64OrDefault`](#touint64ordefault)。 - ## toUInt64OrNull {#touint64ornull} 与 [`toUInt64`](#touint64) 类似,此函数将输入值转换为 [UInt64](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `NULL`。 @@ -2709,7 +2662,6 @@ toUInt64OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt64OrZero`](#touint64orzero)。 * [`toUInt64OrDefault`](#touint64ordefault)。 - ## toUInt64OrDefault {#touint64ordefault} 与 [`toUInt64`](#touint64) 类似,此函数将输入值转换为 [UInt64](../data-types/int-uint.md) 类型的值,但在发生错误时返回默认值。 @@ -2777,7 +2729,6 @@ toUInt64OrDefault('abc', CAST('0', 'UInt64')): 0 * [`toUInt64OrZero`](#touint64orzero)。 * [`toUInt64OrNull`](#touint64ornull)。 - ## toUInt128 {#touint128} 将输入值转换为 [`UInt128`](../data-types/int-uint.md) 类型的值。如果发生错误,会抛出异常。 @@ -2843,7 +2794,6 @@ toUInt128('128'): 128 * [`toUInt128OrNull`](#touint128ornull)。 * [`toUInt128OrDefault`](#touint128ordefault)。 - ## toUInt128OrZero {#touint128orzero} 与 [`toUInt128`](#touint128) 类似,此函数将输入值转换为 [UInt128](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `0`。 @@ -2906,7 +2856,6 @@ toUInt128OrZero('abc'): 0 * [`toUInt128OrNull`](#touint128ornull)。 * [`toUInt128OrDefault`](#touint128ordefault)。 - ## toUInt128OrNull {#touint128ornull} 与 [`toUInt128`](#touint128) 类似,此函数将输入值转换为 [UInt128](../data-types/int-uint.md) 类型的值,但在出错时返回 `NULL`。 @@ -2969,7 +2918,6 @@ toUInt128OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt128OrZero`](#touint128orzero)。 * [`toUInt128OrDefault`](#touint128ordefault)。 - ## toUInt128OrDefault {#touint128ordefault} 与 [`toUInt128`](#toint128) 类似,此函数将输入值转换为 [UInt128](../data-types/int-uint.md) 类型的值,但在出错时返回默认值。 @@ -3038,7 +2986,6 @@ toUInt128OrDefault('abc', CAST('0', 'UInt128')): 0 * [`toUInt128OrZero`](#touint128orzero)。 * [`toUInt128OrNull`](#touint128ornull)。 - ## toUInt256 {#touint256} 将输入值转换为 [`UInt256`](../data-types/int-uint.md) 类型的值。如果发生错误,则会抛出异常。 @@ -3104,7 +3051,6 @@ toUInt256('256'): 256 * [`toUInt256OrNull`](#touint256ornull)。 * [`toUInt256OrDefault`](#touint256ordefault)。 - ## toUInt256OrZero {#touint256orzero} 与 
[`toUInt256`](#touint256) 类似,此函数将输入值转换为 [UInt256](../data-types/int-uint.md) 类型的值;如果发生错误,则返回 `0`。 @@ -3167,7 +3113,6 @@ toUInt256OrZero('abc'): 0 * [`toUInt256OrNull`](#touint256ornull)。 * [`toUInt256OrDefault`](#touint256ordefault)。 - ## toUInt256OrNull {#touint256ornull} 与 [`toUInt256`](#touint256) 类似,此函数将输入值转换为 [UInt256](../data-types/int-uint.md) 类型的值,但在发生错误时返回 `NULL`。 @@ -3230,7 +3175,6 @@ toUInt256OrNull('abc'): ᴺᵁᴸᴸ * [`toUInt256OrZero`](#touint256orzero)。 * [`toUInt256OrDefault`](#touint256ordefault)。 - ## toUInt256OrDefault {#touint256ordefault} 类似于 [`toUInt256`](#touint256),此函数将输入值转换为 [UInt256](../data-types/int-uint.md) 类型的值,但在出错时返回默认值。 @@ -3298,7 +3242,6 @@ toUInt256OrDefault('abc', CAST('0', 'UInt256')): 0 * [`toUInt256OrZero`](#touint256orzero)。 * [`toUInt256OrNull`](#touint256ornull)。 - ## toFloat32 {#tofloat32} 将输入转换为 [`Float32`](../data-types/float.md) 类型的值。发生错误时会抛出异常。 @@ -3356,7 +3299,6 @@ toFloat32('NaN'): nan * [`toFloat32OrNull`](#tofloat32ornull)。 * [`toFloat32OrDefault`](#tofloat32ordefault)。 - ## toFloat32OrZero {#tofloat32orzero} 与 [`toFloat32`](#tofloat32) 类似,此函数将输入值转换为 [Float32](../data-types/float.md) 类型的值,但在发生错误时返回 `0`。 @@ -3409,7 +3351,6 @@ toFloat32OrZero('abc'): 0 * [`toFloat32OrNull`](#tofloat32ornull). * [`toFloat32OrDefault`](#tofloat32ordefault). - ## toFloat32OrNull {#tofloat32ornull} 与 [`toFloat32`](#tofloat32) 类似,此函数将输入值转换为 [Float32](../data-types/float.md) 类型的值,但在发生错误时返回 `NULL`。 @@ -3462,7 +3403,6 @@ toFloat32OrNull('abc'): ᴺᵁᴸᴸ * [`toFloat32OrZero`](#tofloat32orzero). * [`toFloat32OrDefault`](#tofloat32ordefault). - ## toFloat32OrDefault {#tofloat32ordefault} 与 [`toFloat32`](#tofloat32) 类似,此函数将输入值转换为 [Float32](../data-types/float.md) 类型的值,但在出错时返回默认值。 @@ -3520,7 +3460,6 @@ toFloat32OrDefault('abc', CAST('0', 'Float32')): 0 * [`toFloat32OrZero`](#tofloat32orzero). * [`toFloat32OrNull`](#tofloat32ornull). 
- ## toFloat64 {#tofloat64} 将输入值转换为 [`Float64`](../data-types/float.md) 类型的值。出错时会抛出异常。 @@ -3578,7 +3517,6 @@ toFloat64('NaN'): nan * [`toFloat64OrNull`](#tofloat64ornull)。 * [`toFloat64OrDefault`](#tofloat64ordefault)。 - ## toFloat64OrZero {#tofloat64orzero} 与 [`toFloat64`](#tofloat64) 类似,此函数将输入值转换为 [Float64](../data-types/float.md) 类型的值,但在发生错误时返回 `0`。 @@ -3631,7 +3569,6 @@ toFloat64OrZero('abc'): 0 * [`toFloat64OrNull`](#tofloat64ornull). * [`toFloat64OrDefault`](#tofloat64ordefault). - ## toFloat64OrNull {#tofloat64ornull} 与 [`toFloat64`](#tofloat64) 类似,此函数将输入值转换为 [Float64](../data-types/float.md) 类型的值,但在发生错误时返回 `NULL`。 @@ -3684,7 +3621,6 @@ toFloat64OrNull('abc'): ᴺᵁᴸᴸ * [`toFloat64OrZero`](#tofloat64orzero). * [`toFloat64OrDefault`](#tofloat64ordefault). - ## toFloat64OrDefault {#tofloat64ordefault} 与 [`toFloat64`](#tofloat64) 类似,此函数将输入值转换为 [Float64](../data-types/float.md) 类型的值,但在出错时会返回默认值。 @@ -3742,7 +3678,6 @@ toFloat64OrDefault('abc', CAST('0', 'Float64')): 0 * [`toFloat64OrZero`](#tofloat64orzero). * [`toFloat64OrNull`](#tofloat64ornull). - ## toBFloat16 {#tobfloat16} 将输入值转换为 [`BFloat16`](/sql-reference/data-types/float#bfloat16) 类型的值。 @@ -3790,7 +3725,6 @@ SELECT toBFloat16('42.7'); * [`toBFloat16OrZero`](#tobfloat16orzero)。 * [`toBFloat16OrNull`](#tobfloat16ornull)。 - ## toBFloat16OrZero {#tobfloat16orzero} 将 `String` 类型的输入值转换为 [`BFloat16`](/sql-reference/data-types/float#bfloat16) 类型的值。 @@ -3844,7 +3778,6 @@ SELECT toBFloat16OrZero('12.3456789'); * [`toBFloat16`](#tobfloat16)。 * [`toBFloat16OrNull`](#tobfloat16ornull)。 - ## toBFloat16OrNull {#tobfloat16ornull} 将 String 类型的输入值转换为 [`BFloat16`](/sql-reference/data-types/float#bfloat16) 类型的值, @@ -3898,7 +3831,6 @@ SELECT toBFloat16OrNull('12.3456789'); * [`toBFloat16`](#tobfloat16)。 * [`toBFloat16OrZero`](#tobfloat16orzero)。 - ## toDate {#todate} 将参数转换为 [Date](../data-types/date.md) 数据类型。 @@ -4002,7 +3934,6 @@ SELECT toDate(10000000000.) 
`toDate` 函数还可以写成以下形式: - ```sql SELECT now() AS time, @@ -4017,7 +3948,6 @@ SELECT └─────────────────────┴───────────────┴─────────────┴─────────────────────┘ ``` - ## toDateOrZero {#todateorzero} 与 [toDate](#todate) 相同,但在收到无效参数时返回 [Date](../data-types/date.md) 的下界值。仅支持 [String](../data-types/string.md) 类型的参数。 @@ -4038,7 +3968,6 @@ SELECT toDateOrZero('2022-12-30'), toDateOrZero(''); └────────────────────────────┴──────────────────┘ ``` - ## toDateOrNull {#todateornull} 与 [toDate](#todate) 相同,但在收到无效参数时返回 `NULL`。仅支持 [String](../data-types/string.md) 类型参数。 @@ -4059,7 +3988,6 @@ SELECT toDateOrNull('2022-12-30'), toDateOrNull(''); └────────────────────────────┴──────────────────┘ ``` - ## toDateOrDefault {#todateordefault} 与 [toDate](#todate) 类似,但在转换失败时会返回一个默认值。该默认值为第二个参数(如果提供),否则为 [Date](../data-types/date.md) 的下边界。 @@ -4086,7 +4014,6 @@ SELECT toDateOrDefault('2022-12-30'), toDateOrDefault('', '2023-01-01'::Date); └───────────────────────────────┴─────────────────────────────────────────────────┘ ``` - ## toDateTime {#todatetime} 将输入值转换为 [DateTime](../data-types/datetime.md)。 @@ -4128,7 +4055,6 @@ SELECT toDateTime('2022-12-30 13:44:17'), toDateTime(1685457500, 'UTC'); └───────────────────────────────────┴───────────────────────────────┘ ``` - ## toDateTimeOrZero {#todatetimeorzero} 与 [toDateTime](#todatetime) 相同,但在收到无效参数时返回 [DateTime](../data-types/datetime.md) 的下界值。仅支持 [String](../data-types/string.md) 参数。 @@ -4149,7 +4075,6 @@ SELECT toDateTimeOrZero('2022-12-30 13:44:17'), toDateTimeOrZero(''); └─────────────────────────────────────────┴──────────────────────┘ ``` - ## toDateTimeOrNull {#todatetimeornull} 与 [toDateTime](#todatetime) 相同,但在传入无效参数时返回 `NULL`。仅支持 [String](../data-types/string.md) 类型参数。 @@ -4170,7 +4095,6 @@ SELECT toDateTimeOrNull('2022-12-30 13:44:17'), toDateTimeOrNull(''); └─────────────────────────────────────────┴──────────────────────┘ ``` - ## toDateTimeOrDefault {#todatetimeordefault} 类似于 
[toDateTime](#todatetime),但在转换失败时会返回一个默认值:如果指定了第三个参数,则使用该参数,否则使用 [DateTime](../data-types/datetime.md) 的最小值。 @@ -4197,7 +4121,6 @@ SELECT toDateTimeOrDefault('2022-12-30 13:44:17'), toDateTimeOrDefault('', 'UTC' └────────────────────────────────────────────┴─────────────────────────────────────────────────────────────────────────┘ ``` - ## toDate32 {#todate32} 将参数转换为 [Date32](../data-types/date32.md) 数据类型。若值超出范围,`toDate32` 会返回 [Date32](../data-types/date32.md) 所支持的边界值。若参数类型为 [Date](../data-types/date.md),则会同时考虑 Date 类型的取值边界。 @@ -4254,7 +4177,6 @@ SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value); └────────────┴────────────────────────────────────────────┘ ``` - ## toDate32OrZero {#todate32orzero} 与 [toDate32](#todate32) 相同,但如果接收到无效参数,则返回 [Date32](../data-types/date32.md) 的最小值。 @@ -4275,7 +4197,6 @@ SELECT toDate32OrZero('1899-01-01'), toDate32OrZero(''); └──────────────────────────────┴────────────────────┘ ``` - ## toDate32OrNull {#todate32ornull} 与 [toDate32](#todate32) 相同,但在收到无效参数时返回 `NULL`。 @@ -4296,7 +4217,6 @@ SELECT toDate32OrNull('1955-01-01'), toDate32OrNull(''); └──────────────────────────────┴────────────────────┘ ``` - ## toDate32OrDefault {#todate32ordefault} 将参数转换为 [Date32](../data-types/date32.md) 数据类型。如果值超出范围,`toDate32OrDefault` 会返回 [Date32](../data-types/date32.md) 支持的下边界值。如果参数为 [Date](../data-types/date.md) 类型,则会考虑其取值范围的边界。若接收到无效参数,则返回默认值。 @@ -4319,7 +4239,6 @@ SELECT └─────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ ``` - ## toDateTime64 {#todatetime64} 将输入值转换为 [DateTime64](../data-types/datetime64.md) 类型的值。 @@ -4390,7 +4309,6 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN └─────────────────────────┴─────────────────────────────────────────────────────────────────────┘ ``` - ## toDateTime64OrZero {#todatetime64orzero} 与 [toDateTime64](#todatetime64) 类似,此函数将输入值转换为 [DateTime64](../data-types/datetime64.md) 类型的值,但若参数无效,则返回 
[DateTime64](../data-types/datetime64.md) 的最小值。 @@ -4433,7 +4351,6 @@ SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg * [toDateTime64OrNull](#todatetime64ornull)。 * [toDateTime64OrDefault](#todatetime64ordefault)。 - ## toDateTime64OrNull {#todatetime64ornull} 与 [toDateTime64](#todatetime64) 类似,此函数将输入值转换为 [DateTime64](../data-types/datetime64.md) 类型的值,但如果接收到无效参数则返回 `NULL`。 @@ -4478,7 +4395,6 @@ SELECT * [toDateTime64OrZero](#todatetime64orzero)。 * [toDateTime64OrDefault](#todatetime64ordefault)。 - ## toDateTime64OrDefault {#todatetime64ordefault} 与 [toDateTime64](#todatetime64) 类似,该函数将输入值转换为 [DateTime64](../data-types/datetime64.md) 类型的值, @@ -4526,7 +4442,6 @@ SELECT * [toDateTime64OrZero](#todatetime64orzero)。 * [toDateTime64OrNull](#todatetime64ornull)。 - ## toDecimal32 {#todecimal32} 将输入值转换为 [`Decimal(9, S)`](../data-types/decimal.md) 类型且小数位数为 `S` 的值。发生错误时抛出异常。 @@ -4599,7 +4514,6 @@ type_c: Decimal(9, 3) * [`toDecimal32OrNull`](#todecimal32ornull)。 * [`toDecimal32OrDefault`](#todecimal32ordefault)。 - ## toDecimal32OrZero {#todecimal32orzero} 与 [`toDecimal32`](#todecimal32) 类似,此函数将输入值转换为类型为 [Decimal(9, S)](../data-types/decimal.md) 的值,但在发生错误时返回 `0`。 @@ -4665,7 +4579,6 @@ toTypeName(b): Decimal(9, 5) * [`toDecimal32OrNull`](#todecimal32ornull)。 * [`toDecimal32OrDefault`](#todecimal32ordefault)。 - ## toDecimal32OrNull {#todecimal32ornull} 与 [`toDecimal32`](#todecimal32) 类似,此函数将输入值转换为类型为 [Nullable(Decimal(9, S))](../data-types/decimal.md) 的值,但在出错时返回 `0`。 @@ -4731,7 +4644,6 @@ toTypeName(b): Nullable(Decimal(9, 5)) * [`toDecimal32OrZero`](#todecimal32orzero)。 * [`toDecimal32OrDefault`](#todecimal32ordefault)。 - ## toDecimal32OrDefault {#todecimal32ordefault} 与 [`toDecimal32`](#todecimal32) 类似,此函数将输入值转换为类型为 [Decimal(9, S)](../data-types/decimal.md) 的值,但在发生错误时返回默认值。 @@ -4804,7 +4716,6 @@ toTypeName(b): Decimal(9, 0) * [`toDecimal32OrZero`](#todecimal32orzero). * [`toDecimal32OrNull`](#todecimal32ornull). 
- ## toDecimal64 {#todecimal64} 将输入值转换为类型为 [`Decimal(18, S)`](../data-types/decimal.md)、小数位数为 `S` 的值。若发生错误则抛出异常。 @@ -4877,7 +4788,6 @@ type_c: Decimal(18, 3) * [`toDecimal64OrNull`](#todecimal64ornull)。 * [`toDecimal64OrDefault`](#todecimal64ordefault)。 - ## toDecimal64OrZero {#todecimal64orzero} 类似于 [`toDecimal64`](#todecimal64),此函数将输入值转换为 [Decimal(18, S)](../data-types/decimal.md) 类型的值,但在出错时返回 `0`。 @@ -4943,7 +4853,6 @@ toTypeName(b): Decimal(18, 18) * [`toDecimal64OrNull`](#todecimal64ornull)。 * [`toDecimal64OrDefault`](#todecimal64ordefault)。 - ## toDecimal64OrNull {#todecimal64ornull} 与 [`toDecimal64`](#todecimal64) 类似,此函数将输入值转换为 [Nullable(Decimal(18, S))](../data-types/decimal.md) 类型的值,但在发生错误时返回 `0`。 @@ -5009,7 +4918,6 @@ toTypeName(b): Nullable(Decimal(18, 18)) * [`toDecimal64OrZero`](#todecimal64orzero)。 * [`toDecimal64OrDefault`](#todecimal64ordefault)。 - ## toDecimal64OrDefault {#todecimal64ordefault} 类似于 [`toDecimal64`](#todecimal64),此函数将输入值转换为 [Decimal(18, S)](../data-types/decimal.md) 类型的值,但在发生错误时返回默认值。 @@ -5082,7 +4990,6 @@ toTypeName(b): Decimal(18, 0) * [`toDecimal64OrZero`](#todecimal64orzero)。 * [`toDecimal64OrNull`](#todecimal64ornull)。 - ## toDecimal128 {#todecimal128} 将输入值转换为类型为 [`Decimal(38, S)`](../data-types/decimal.md)、标度(scale)为 `S` 的值。出错时抛出异常。 @@ -5155,7 +5062,6 @@ type_c: Decimal(38, 3) * [`toDecimal128OrNull`](#todecimal128ornull)。 * [`toDecimal128OrDefault`](#todecimal128ordefault)。 - ## toDecimal128OrZero {#todecimal128orzero} 与 [`toDecimal128`](#todecimal128) 类似,此函数将输入值转换为 [Decimal(38, S)](../data-types/decimal.md) 类型的值,但在出错时返回 `0`。 @@ -5221,7 +5127,6 @@ toTypeName(b): Decimal(38, 38) * [`toDecimal128OrNull`](#todecimal128ornull)。 * [`toDecimal128OrDefault`](#todecimal128ordefault)。 - ## toDecimal128OrNull {#todecimal128ornull} 与 [`toDecimal128`](#todecimal128) 类似,此函数将输入值转换为 [Nullable(Decimal(38, S))](../data-types/decimal.md) 类型的值,但在发生错误时返回 `0`。 @@ -5287,7 +5192,6 @@ toTypeName(b): Nullable(Decimal(38, 38)) * 
[`toDecimal128OrZero`](#todecimal128orzero)。 * [`toDecimal128OrDefault`](#todecimal128ordefault)。 - ## toDecimal128OrDefault {#todecimal128ordefault} 与 [`toDecimal128`](#todecimal128) 类似,此函数将输入值转换为 [Decimal(38, S)](../data-types/decimal.md) 类型的值,但在发生错误时返回默认值。 @@ -5360,7 +5264,6 @@ toTypeName(b): Decimal(38, 0) * [`toDecimal128OrZero`](#todecimal128orzero)。 * [`toDecimal128OrNull`](#todecimal128ornull)。 - ## toDecimal256 {#todecimal256} 将输入值转换为类型为 [`Decimal(76, S)`](../data-types/decimal.md)、小数位数为 `S` 的值。发生错误时抛出异常。 @@ -5433,7 +5336,6 @@ type_c: Decimal(76, 3) * [`toDecimal256OrNull`](#todecimal256ornull)。 * [`toDecimal256OrDefault`](#todecimal256ordefault)。 - ## toDecimal256OrZero {#todecimal256orzero} 与 [`toDecimal256`](#todecimal256) 类似,此函数将输入值转换为 [Decimal(76, S)](../data-types/decimal.md) 类型的值,但在发生错误时返回 `0`。 @@ -5499,7 +5401,6 @@ toTypeName(b): Decimal(76, 76) * [`toDecimal256OrNull`](#todecimal256ornull)。 * [`toDecimal256OrDefault`](#todecimal256ordefault)。 - ## toDecimal256OrNull {#todecimal256ornull} 类似于 [`toDecimal256`](#todecimal256),此函数将输入值转换为类型为 [Nullable(Decimal(76, S))](../data-types/decimal.md) 的值,但在出错时返回 `0`。 @@ -5565,7 +5466,6 @@ toTypeName(b): Nullable(Decimal(76, 76)) * [`toDecimal256OrZero`](#todecimal256orzero)。 * [`toDecimal256OrDefault`](#todecimal256ordefault)。 - ## toDecimal256OrDefault {#todecimal256ordefault} 与 [`toDecimal256`](#todecimal256) 类似,此函数将输入值转换为 [Decimal(76, S)](../data-types/decimal.md) 类型的值,但在发生错误时返回默认值。 @@ -5638,7 +5538,6 @@ toTypeName(b): Decimal(76, 0) * [`toDecimal256OrZero`](#todecimal256orzero)。 * [`toDecimal256OrNull`](#todecimal256ornull)。 - ## toString {#tostring} 将值转换为其字符串表示。 @@ -5683,7 +5582,6 @@ LIMIT 10; └─────────────────────┴───────────────────┴─────────────────────┘ ``` - ## toFixedString {#tofixedstring} 将一个 [String](../data-types/string.md) 类型的参数转换为 [FixedString(N)](../data-types/fixedstring.md) 类型(长度固定为 N 的字符串)。\ @@ -5720,7 +5618,6 @@ SELECT toFixedString('foo', 8) AS s; └───────────────┘ ``` - ## 
toStringCutToZero {#tostringcuttozero} 接受一个 String 或 FixedString 参数。返回将内容在遇到的第一个零字节处截断后的 String。 @@ -5761,7 +5658,6 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut; └────────────┴───────┘ ``` - ## toDecimalString {#todecimalstring} 将数值转换为 String 类型,输出中的小数位数由用户指定。 @@ -5800,7 +5696,6 @@ SELECT toDecimalString(CAST('64.32', 'Float64'), 5); └─────────────────────────────────────────────┘ ``` - ## reinterpretAsUInt8 {#reinterpretasuint8} 通过将输入值视为 `UInt8` 类型的值来执行字节重解释操作。与 [`CAST`](#cast) 不同,此函数不会尝试保留原始数值——如果目标类型无法表示该输入值,则输出将毫无意义。 @@ -5839,7 +5734,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt16 {#reinterpretasuint16} 通过将输入值视为 `UInt16` 类型的值来执行字节重解释操作。不同于 [`CAST`](#cast),该函数不会尝试保留原始数值——如果目标类型无法表示输入值,则输出将毫无意义。 @@ -5878,7 +5772,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt32 {#reinterpretasuint32} 通过将输入值视为 UInt32 类型的值来进行字节级重解释。与 [`CAST`](#cast) 不同,此函数不会尝试保留原始值——如果目标类型无法表示输入值,则输出将毫无意义。 @@ -5917,7 +5810,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt64 {#reinterpretasuint64} 通过将输入值按字节重新解释为 `UInt64` 类型的值来执行转换。与 [`CAST`](#cast) 不同,该函数不会尝试保留原始数值含义——如果目标类型无法表示输入类型,则输出结果将没有任何实际意义。 @@ -5956,7 +5848,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt128 {#reinterpretasuint128} 按字节重新解释输入值,将其视为 `UInt128` 类型的值。与 [`CAST`](#cast) 不同,此函数不会尝试保留原始值——如果目标类型无法表示输入值,则输出结果将毫无意义。 @@ -5995,7 +5886,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsUInt256 {#reinterpretasuint256} 通过将输入值视为 `UInt256` 类型来执行字节重解释操作。与 [`CAST`](#cast) 不同,该函数不会尝试保留原始值——如果目标类型无法表示输入类型,输出将毫无意义。 @@ -6034,7 +5924,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt8 {#reinterpretasint8} 通过将输入值视为 Int8 类型的值来执行字节重解释。与 [`CAST`](#cast) 不同,该函数不会尝试保留原始值——如果目标类型无法表示该输入值,则输出将毫无意义。 @@ -6073,7 +5962,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt16 
{#reinterpretasint16} 通过将输入值视为 `Int16` 类型的值来执行字节重解释。与 [`CAST`](#cast) 不同,该函数不会尝试保留原始值——如果目标类型无法表示输入值,则输出将没有意义。 @@ -6112,7 +6000,6 @@ SELECT └───┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt32 {#reinterpretasint32} 通过将输入值的字节按 `Int32` 类型重新解释来执行转换。与 [`CAST`](#cast) 不同,该函数不会尝试保留原始值——如果目标类型无法表示输入类型,则输出将是无意义的值。 @@ -6151,7 +6038,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt64 {#reinterpretasint64} 按字节重新解释输入值,将其视为 `Int64` 类型的值。不同于 [`CAST`](#cast),该函数不会尝试保留原始值——如果目标类型无法表示该输入值,则输出将毫无意义。 @@ -6190,7 +6076,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt128 {#reinterpretasint128} 通过将输入值视为 `Int128` 类型的值来执行字节重解释。不同于 [`CAST`](#cast),该函数不会尝试保留原始值——如果目标类型无法表示输入值,则输出将毫无意义。 @@ -6229,7 +6114,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsInt256 {#reinterpretasint256} 通过将输入值视为 `Int256` 类型的值来对字节进行重解释。与 [`CAST`](#cast) 不同,该函数不会尝试保留原始值——如果目标类型无法表示输入值,则输出将没有意义。 @@ -6268,7 +6152,6 @@ SELECT └─────┴───────────────┴─────┴─────────────────┘ ``` - ## reinterpretAsFloat32 {#reinterpretasfloat32} 通过将输入值视为 `Float32` 类型的值来按字节重解释。与 [`CAST`](#cast) 不同,该函数不会尝试保留原始值——如果目标类型无法表示输入值,则输出将毫无意义。 @@ -6303,7 +6186,6 @@ SELECT reinterpretAsUInt32(toFloat32(0.2)) AS x, reinterpretAsFloat32(x); └────────────┴─────────────────────────┘ ``` - ## reinterpretAsFloat64 {#reinterpretasfloat64} 通过将输入值按字节重新解释为 `Float64` 类型来进行转换。与 [`CAST`](#cast) 不同,该函数不会尝试保留原始值——如果目标类型无法表示输入值,则输出将毫无意义。 @@ -6338,7 +6220,6 @@ SELECT reinterpretAsUInt64(toFloat64(0.2)) AS x, reinterpretAsFloat64(x); └─────────────────────┴─────────────────────────┘ ``` - ## reinterpretAsDate {#reinterpretasdate} 接受一个字符串、固定字符串或数值,并按主机字节序(小端序)将其字节解释为一个数字。函数将该数字视为自 Unix 纪元起始以来的天数,并返回对应的日期。 @@ -6379,7 +6260,6 @@ SELECT reinterpretAsDate(65), reinterpretAsDate('A'); └───────────────────────┴────────────────────────┘ ``` - ## reinterpretAsDateTime {#reinterpretasdatetime} 这些函数接受一个字符串,并将其开头的字节按照主机字节序(小端序)解释为一个数字。返回一个日期时间值,将该数字视为自 
Unix 纪元起经过的秒数。 @@ -6420,7 +6300,6 @@ SELECT reinterpretAsDateTime(65), reinterpretAsDateTime('A'); └───────────────────────────┴────────────────────────────┘ ``` - ## reinterpretAsString {#reinterpretasstring} 此函数接收一个数字、日期或带时间的日期,并返回一个字符串,该字符串包含以主机字节序(小端序)表示对应值的字节序列。末尾的 null 字节会被去除。例如,一个 UInt32 类型且值为 255 的数据,其对应的字符串长度为 1 个字节。 @@ -6457,7 +6336,6 @@ SELECT └────────────────────────────────────────────────────────┴───────────────────────────────────────────┘ ``` - ## reinterpretAsFixedString {#reinterpretasfixedstring} 此函数接受一个数字、日期或带时间的日期,并返回一个 `FixedString`,其内容为按主机字节序(小端序)表示相应值的字节序列。末尾的空字节会被丢弃。例如,当类型为 `UInt32` 的值为 255 时,返回的 `FixedString` 长度为 1 字节。 @@ -6494,7 +6372,6 @@ SELECT └─────────────────────────────────────────────────────────────┴────────────────────────────────────────────────┘ ``` - ## reinterpretAsUUID {#reinterpretasuuid} :::note @@ -6555,7 +6432,6 @@ SELECT uuid = uuid2; └─────────────────────┘ ``` - ## reinterpret {#reinterpret} 对 `x` 的值使用相同的源内存字节序列,并将其按目标类型重新解释。 @@ -6607,7 +6483,6 @@ SELECT reinterpret(x'3108b4403108d4403108b4403108d440', 'Array(Float32)') AS str └────────────────────────────┘ ``` - ## CAST {#cast} 将输入值转换为指定的数据类型。与 [reinterpret](#reinterpret) 函数不同,`CAST` 会尝试改用新的数据类型来表示相同的值。如果无法完成转换,则会抛出异常。 @@ -6713,7 +6588,6 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null; * [cast_keep_nullable](../../operations/settings/settings.md/#cast_keep_nullable) 设置 - ## accurateCast(x, T) {#accuratecastx-t} 将 `x` 转换为数据类型 `T`。 @@ -6748,7 +6622,6 @@ SELECT accurateCast(-1, 'UInt8') AS uint8; 代码:70. 
DB::Exception:从 localhost:9000 接收。DB::Exception:Int8 列中的值无法安全地转换为 UInt8 类型:处理 accurateCast(-1, 'UInt8') AS uint8 时。 ``` - ## accurateCastOrNull(x, T) {#accuratecastornullx-t} 将输入值 `x` 转换为指定的数据类型 `T`。始终返回 [Nullable](../data-types/nullable.md) 类型,如果转换结果无法用目标类型表示,则返回 [NULL](/sql-reference/syntax#null)。 @@ -6801,7 +6674,6 @@ SELECT └───────┴──────┴──────────────┘ ``` - ## accurateCastOrDefault(x, T[, default_value]) {#accuratecastordefaultx-t-default_value} 将输入值 `x` 转换为指定的数据类型 `T`。如果转换后的值无法用目标类型表示,则返回该类型的默认值;如果指定了 `default_value`,则返回 `default_value`。 @@ -6858,7 +6730,6 @@ SELECT └───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘ ``` - ## toInterval {#toInterval} 根据数值和时间间隔单位(例如 'second' 或 'day')构造一个 [Interval](../../sql-reference/data-types/special-data-types/interval.md) 数据类型的值。 @@ -6906,7 +6777,6 @@ SELECT toDateTime('2025-01-01 00:00:00') + toInterval(1, 'hour') └────────────────────────────────────────────────────────────┘ ``` - ## toIntervalYear {#tointervalyear} 返回一个长度为 `n` 年、数据类型为 [IntervalYear](../data-types/special-data-types/interval.md) 的时间间隔。 @@ -6944,7 +6814,6 @@ SELECT date + interval_to_year AS result └────────────┘ ``` - ## toIntervalQuarter {#tointervalquarter} 返回一个由 `n` 个季度组成、数据类型为 [IntervalQuarter](../data-types/special-data-types/interval.md) 的时间间隔。 @@ -6982,7 +6851,6 @@ SELECT date + interval_to_quarter AS result └────────────┘ ``` - ## toIntervalMonth {#tointervalmonth} 返回一个长度为 `n` 个月、数据类型为 [IntervalMonth](../data-types/special-data-types/interval.md) 的时间间隔。 @@ -7020,7 +6888,6 @@ SELECT date + interval_to_month AS result └────────────┘ ``` - ## toIntervalWeek {#tointervalweek} 返回一个长度为 `n` 周、数据类型为 [IntervalWeek](../data-types/special-data-types/interval.md) 的时间间隔。 @@ -7058,7 +6925,6 @@ SELECT date + interval_to_week AS result └────────────┘ ``` - ## toIntervalDay {#tointervalday} 返回一个由 `n` 天组成、数据类型为 [IntervalDay](../data-types/special-data-types/interval.md) 的时间间隔。 @@ -7096,7 +6962,6 @@ SELECT date + 
interval_to_days AS result └────────────┘ ``` - ## toIntervalHour {#tointervalhour} 返回一个表示 `n` 小时的 [IntervalHour](../data-types/special-data-types/interval.md) 类型时间间隔。 @@ -7134,7 +6999,6 @@ SELECT date + interval_to_hours AS result └─────────────────────┘ ``` - ## toIntervalMinute {#tointervalminute} 返回一个表示 `n` 分钟的 [IntervalMinute](../data-types/special-data-types/interval.md) 类型时间间隔。 @@ -7172,7 +7036,6 @@ SELECT date + interval_to_minutes AS result └─────────────────────┘ ``` - ## toIntervalSecond {#tointervalsecond} 返回一个 `n` 秒的时间间隔,数据类型为 [IntervalSecond](../data-types/special-data-types/interval.md)。 @@ -7210,7 +7073,6 @@ SELECT date + interval_to_seconds AS result └─────────────────────┘ ``` - ## toIntervalMillisecond {#tointervalmillisecond} 返回一个时长为 `n` 毫秒、数据类型为 [IntervalMillisecond](../data-types/special-data-types/interval.md) 的时间间隔。 @@ -7248,7 +7110,6 @@ SELECT date + interval_to_milliseconds AS result └─────────────────────────┘ ``` - ## toIntervalMicrosecond {#tointervalmicrosecond} 返回一个表示 `n` 微秒的区间,数据类型为 [IntervalMicrosecond](../data-types/special-data-types/interval.md)。 @@ -7286,7 +7147,6 @@ SELECT date + interval_to_microseconds AS result └────────────────────────────┘ ``` - ## toIntervalNanosecond {#tointervalnanosecond} 返回一个长度为 `n` 纳秒、数据类型为 [IntervalNanosecond](../data-types/special-data-types/interval.md) 的时间间隔。 @@ -7324,7 +7184,6 @@ SELECT date + interval_to_nanoseconds AS result └───────────────────────────────┘ ``` - ## parseDateTime {#parsedatetime} 根据 [MySQL 格式字符串](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format),将 [String](../data-types/string.md) 转换为 [DateTime](../data-types/datetime.md)。 @@ -7365,21 +7224,16 @@ SELECT parseDateTime('2021-01-04+23:00:00', '%Y-%m-%d+%H:%i:%s') 别名为:`TO_TIMESTAMP`。 - ## parseDateTimeOrZero {#parsedatetimeorzero} 与 [parseDateTime](#parsedatetime) 相同,唯一区别是当遇到无法处理的日期格式时,会返回零日期值。 - - ## parseDateTimeOrNull {#parsedatetimeornull} 与 [parseDateTime](#parsedatetime) 
的行为相同,只是在遇到无法处理的日期格式时会返回 `NULL`。 别名:`str_to_date`。 - - ## parseDateTimeInJodaSyntax {#parsedatetimeinjodasyntax} 与 [parseDateTime](#parsedatetime) 类似,只是该函数使用的是 [Joda](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) 的格式字符串,而不是 MySQL 语法。 @@ -7420,19 +7274,14 @@ SELECT parseDateTimeInJodaSyntax('2023-02-24 14:53:31', 'yyyy-MM-dd HH:mm:ss', ' └─────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## parseDateTimeInJodaSyntaxOrZero {#parsedatetimeinjodasyntaxorzero} 与 [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) 的行为相同,区别在于当遇到无法处理的日期格式时会返回零日期。 - - ## parseDateTimeInJodaSyntaxOrNull {#parsedatetimeinjodasyntaxornull} 与 [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) 的行为相同,只是当遇到无法处理的日期格式时会返回 `NULL`。 - - ## parseDateTime64 {#parsedatetime64} 根据 [MySQL 格式字符串](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format),将 [String](../data-types/string.md) 转换为 [DateTime64](../data-types/datetime64.md)。 @@ -7454,19 +7303,14 @@ parseDateTime64(str[, format[, timezone]]) 返回一个根据 MySQL 风格的格式字符串从输入字符串解析得到的 [DateTime64](../data-types/datetime64.md) 值。 返回值的精度为 6。 - ## parseDateTime64OrZero {#parsedatetime64orzero} 与 [parseDateTime64](#parsedatetime64) 的行为相同,不同之处在于当遇到无法处理的日期格式时,它会返回零日期。 - - ## parseDateTime64OrNull {#parsedatetime64ornull} 与 [parseDateTime64](#parsedatetime64) 的行为相同,区别在于当遇到无法处理的日期格式时,它会返回 `NULL`。 - - ## parseDateTime64InJodaSyntax {#parsedatetime64injodasyntax} 根据 [Joda 格式字符串](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html),将 [String](../data-types/string.md) 转换为 [DateTime64](../data-types/datetime64.md)。 @@ -7488,19 +7332,14 @@ parseDateTime64InJodaSyntax(str[, format[, timezone]]) 返回一个根据 Joda 风格的格式字符串从输入字符串解析得到的 [DateTime64](../data-types/datetime64.md) 值。\ 返回值的精度等于格式字符串中 `S` 占位符的数量(最多为 6)。 - ## parseDateTime64InJodaSyntaxOrZero {#parsedatetime64injodasyntaxorzero} 与 
[parseDateTime64InJodaSyntax](#parsedatetime64injodasyntax) 的行为相同,不同之处在于当遇到无法处理的日期格式时,它会返回零日期值。 - - ## parseDateTime64InJodaSyntaxOrNull {#parsedatetime64injodasyntaxornull} 与 [parseDateTime64InJodaSyntax](#parsedatetime64injodasyntax) 相同,唯一不同是当遇到无法处理的日期格式时,它会返回 `NULL`。 - - ## parseDateTimeBestEffort {#parsedatetimebesteffort} ## parseDateTime32BestEffort {#parsedatetime32besteffort} @@ -7606,7 +7445,6 @@ SELECT toYear(now()) AS year, parseDateTimeBestEffort('10 20:19'); 结果: - ```response ┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐ │ 2023 │ 2023-01-10 20:19:00 │ @@ -7643,39 +7481,28 @@ FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around); * [由 @xkcd 发布的 ISO 8601 公告](https://xkcd.com/1179/) * [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2) - ## parseDateTimeBestEffortUS {#parsedatetimebesteffortus} 此函数在处理 ISO 日期格式(例如 `YYYY-MM-DD hh:mm:ss`)以及其他可以无歧义提取出月份和日期组件的日期格式(例如 `YYYYMMDDhhmmss`、`YYYY-MM`、`DD hh` 或 `YYYY-MM-DD hh:mm:ss ±h:mm`)时,其行为与 [parseDateTimeBestEffort](#parsedatetimebesteffort) 相同。如果无法无歧义地提取月份和日期组件(例如 `MM/DD/YYYY`、`MM-DD-YYYY` 或 `MM-DD-YY`),则本函数会优先按美国日期格式进行解析,而不是按 `DD/MM/YYYY`、`DD-MM-YYYY` 或 `DD-MM-YY` 解析。作为上述后一种情况的一个例外,如果“月份”大于 12 且小于等于 31,则本函数会退回到 [parseDateTimeBestEffort](#parsedatetimebesteffort) 的行为,例如 `15/08/2020` 会被解析为 `2020-08-15`。 - - ## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} ## parseDateTime32BestEffortOrNull {#parsedatetime32besteffortornull} 与 [parseDateTimeBestEffort](#parsedatetimebesteffort) 相同,区别在于当遇到无法处理的日期格式时返回 `NULL`。 - - ## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} ## parseDateTime32BestEffortOrZero {#parsedatetime32besteffortorzero} 与 [parseDateTimeBestEffort](#parsedatetimebesteffort) 相同,但在遇到无法解析的日期格式时,会返回 0 日期或 0 日期时间。 - - ## parseDateTimeBestEffortUSOrNull {#parsedatetimebesteffortusornull} 与 [parseDateTimeBestEffortUS](#parsedatetimebesteffortus) 函数相同,唯一不同在于当遇到无法解析的日期格式时返回 `NULL`。 - - ## parseDateTimeBestEffortUSOrZero 
{#parsedatetimebesteffortusorzero} 与 [parseDateTimeBestEffortUS](#parsedatetimebesteffortus) 函数相同,区别在于,当遇到无法处理的日期格式时,它会返回零日期(`1970-01-01`)或带时间的零日期(`1970-01-01 00:00:00`)。 - - ## parseDateTime64BestEffort {#parsedatetime64besteffort} 与 [parseDateTimeBestEffort](#parsedatetimebesteffort) 函数相同,但额外支持解析毫秒和微秒,并返回 [DateTime](/sql-reference/data-types/datetime) 数据类型。 @@ -7722,37 +7549,26 @@ FORMAT PrettyCompactMonoBlock; └────────────────────────────┴────────────────────────────────┘ ``` - ## parseDateTime64BestEffortUS {#parsedatetime64besteffortus} 与 [parseDateTime64BestEffort](#parsedatetime64besteffort) 相同,但在存在歧义时,该函数会优先按美国日期格式(`MM/DD/YYYY` 等)进行解析。 - - ## parseDateTime64BestEffortOrNull {#parsedatetime64besteffortornull} 与 [parseDateTime64BestEffort](#parsedatetime64besteffort) 相同,只是当遇到无法处理的日期格式时会返回 `NULL`。 - - ## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero} 与 [parseDateTime64BestEffort](#parsedatetime64besteffort) 相同,不同之处在于当遇到无法处理的日期格式时,会返回零日期或零日期时间。 - - ## parseDateTime64BestEffortUSOrNull {#parsedatetime64besteffortusornull} 与 [parseDateTime64BestEffort](#parsedatetime64besteffort) 相同,不同之处在于,当存在歧义时,此函数优先采用美国日期格式(`MM/DD/YYYY` 等),并在遇到无法处理的日期格式时返回 `NULL`。 - - ## parseDateTime64BestEffortUSOrZero {#parsedatetime64besteffortusorzero} 与 [parseDateTime64BestEffort](#parsedatetime64besteffort) 基本相同,只是在存在歧义时,此函数优先使用美国日期格式(`MM/DD/YYYY` 等),并在遇到无法处理的日期格式时返回零日期或零日期时间值。 - - ## toLowCardinality {#tolowcardinality} 将输入参数转换为同一数据类型的 [LowCardinality](../data-types/lowcardinality.md) 版本。 @@ -7789,7 +7605,6 @@ SELECT toLowCardinality('1'); └───────────────────────┘ ``` - ## toUnixTimestamp {#toUnixTimestamp} 将 `String`、`Date` 或 `DateTime` 转换为 Unix 时间戳(自 `1970-01-01 00:00:00 UTC` 起的秒数),返回 `UInt32` 类型的值。 @@ -7837,7 +7652,6 @@ from_date: 1509840000 from_date32: 1509840000 ``` - ## toUnixTimestamp64Second {#tounixtimestamp64second} 将 `DateTime64` 转换为具有固定秒级精度的 `Int64` 值。输入值会根据其自身精度被按比例缩放。 @@ -7877,7 +7691,6 @@ SELECT toUnixTimestamp64Second(dt64); 
└───────────────────────────────┘ ``` - ## toUnixTimestamp64Milli {#tounixtimestamp64milli} 将 `DateTime64` 转换为具有固定毫秒精度的 `Int64` 整数值。输入值会根据其小数精度被相应放大或缩小。 @@ -7917,7 +7730,6 @@ SELECT toUnixTimestamp64Milli(dt64); └──────────────────────────────┘ ``` - ## toUnixTimestamp64Micro {#tounixtimestamp64micro} 将 `DateTime64` 转换为具有固定微秒精度的 `Int64` 值。输入值会根据其精度按比例放大或缩小。 @@ -7957,7 +7769,6 @@ SELECT toUnixTimestamp64Micro(dt64); └──────────────────────────────┘ ``` - ## toUnixTimestamp64Nano {#tounixtimestamp64nano} 将 `DateTime64` 转换为具有固定纳秒级精度的 `Int64` 值。输入值会根据其精度按比例放大或缩小。 @@ -7997,7 +7808,6 @@ SELECT toUnixTimestamp64Nano(dt64); └─────────────────────────────┘ ``` - ## fromUnixTimestamp64Second {#fromunixtimestamp64second} 将 `Int64` 转换为具有固定秒级精度、可选时区的 `DateTime64` 值。输入值会根据其当前精度被相应地放大或缩小。 @@ -8040,7 +7850,6 @@ SELECT └─────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Milli {#fromunixtimestamp64milli} 将 `Int64` 转换为具有固定毫秒精度、可选时区的 `DateTime64` 值。输入值会根据其精度被相应地放大或缩小。 @@ -8083,7 +7892,6 @@ SELECT └─────────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Micro {#fromunixtimestamp64micro} 将 `Int64` 转换为具有固定微秒精度的 `DateTime64` 值,并可选指定时区。输入值会根据其精度被相应地按比例放大或缩小。 @@ -8126,7 +7934,6 @@ SELECT └────────────────────────────┴──────────────────────┘ ``` - ## fromUnixTimestamp64Nano {#fromunixtimestamp64nano} 将 `Int64` 转换为具有固定纳秒精度并可选时区的 `DateTime64` 值。输入值会根据其精度按比例放大或缩小。 @@ -8169,7 +7976,6 @@ SELECT └───────────────────────────────┴──────────────────────┘ ``` - ## formatRow {#formatrow} 按指定格式将任意表达式转换为字符串。 @@ -8241,7 +8047,6 @@ SETTINGS format_custom_result_before_delimiter='\n', format_custom_resul 注意:此函数仅支持行式格式。 - ## formatRowNoNewline {#formatrownonewline} 通过指定的格式将任意表达式转换为字符串。与 `formatRow` 的区别在于,该函数会在存在最后一个 `\n` 时将其去除。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md index 6a7b71f8825..ed7145be943 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/udf.md @@ -10,7 +10,6 @@ import PrivatePreviewBadge from '@theme/badges/PrivatePreviewBadge'; import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - # 用户定义函数(UDF) {#executable-user-defined-functions} @@ -48,12 +47,8 @@ ClickHouse 可以调用任意外部可执行程序或脚本来处理数据。 命令必须从 `STDIN` 读取参数,并将结果输出到 `STDOUT`。命令必须以迭代方式处理参数。也就是说,在处理完一块参数后,它必须等待下一块参数。 - - ## 可执行用户定义函数 {#executable-user-defined-functions} - - ## 示例 {#examples} ### 使用内联脚本的 UDF {#udf-inline} @@ -193,7 +188,6 @@ SELECT test_function_python(toUInt64(2)); 使用命名参数和 [JSONEachRow](/interfaces/formats/JSONEachRow) 格式,通过 XML 或 YAML 配置创建 `test_function_sum_json`。 - 文件 `test_function.xml`(在默认路径配置下位于 `/etc/clickhouse-server/test_function.xml`)。 @@ -332,7 +326,6 @@ if __name__ == "__main__": SELECT test_function_parameter_python(1)(2); ``` - ```text title="Result" ┌─test_function_parameter_python(1)(2)─┐ │ 参数 1 值 2 │ @@ -412,15 +405,12 @@ SELECT test_shell(number) FROM numbers(10); └────────────────────┘ ``` - ## 错误处理 {#error-handling} 如果数据无效,某些函数可能会抛出异常。 在这种情况下,查询会被取消,并向客户端返回错误信息。 对于分布式处理,当某个服务器上发生异常时,其他服务器也会尝试中止该查询。 - - ## 参数表达式的求值 {#evaluation-of-argument-expressions} 在几乎所有编程语言中,对于某些运算符,其某个参数可能不会被求值。 @@ -428,8 +418,6 @@ SELECT test_shell(number) FROM numbers(10); 在 ClickHouse 中,函数(运算符)的参数始终会被求值。 这是因为 ClickHouse 一次会对成块的列数据进行求值,而不是分别对每一行单独计算。 - - ## 分布式查询处理中的函数执行 {#performing-functions-for-distributed-query-processing} 对于分布式查询处理,会尽可能多地在远程服务器上执行查询处理阶段,其余阶段(合并中间结果及其后的所有步骤)在请求方服务器上执行。 @@ -446,13 +434,9 @@ SELECT test_shell(number) FROM numbers(10); 如果查询中的某个函数默认在请求方服务器上执行,但您需要在远程服务器上执行它,可以将其封装在 `any` 聚合函数中,或者将其加入到 `GROUP BY` 的键中。 - - ## SQL 用户自定义函数 {#sql-user-defined-functions} 可以使用 [CREATE FUNCTION](../statements/create/function.md) 语句,基于 lambda 表达式创建自定义函数。要删除这些函数,请使用 [DROP FUNCTION](../statements/drop.md#drop-function) 语句。 - - ## 相关内容 {#related-content} - [ClickHouse 
Cloud 中的用户自定义函数](https://clickhouse.com/blog/user-defined-functions-clickhouse-udfs) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md index 59ab27c374f..31f3e1e1e6f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/functions/ulid-functions.md @@ -6,8 +6,6 @@ title: '用于处理 ULID 的函数' doc_type: 'reference' --- - - # 用于处理 ULID 的函数 {#functions-for-working-with-ulids} :::note @@ -20,7 +18,6 @@ doc_type: 'reference' 请参见:https://github.com/ClickHouse/clickhouse-docs/blob/main/contribute/autogenerated-documentation-from-source.md */ } - {/*AUTOGENERATED_START*/ } ## ULIDStringToDateTime {#ULIDStringToDateTime} @@ -58,7 +55,6 @@ SELECT ULIDStringToDateTime('01GNB2S2FGN2P93QPXDNB4EN2R') └────────────────────────────────────────────────────┘ ``` - ## generateULID {#generateULID} 自 v23.2 起引入。 @@ -107,7 +103,6 @@ SELECT generateULID(1), generateULID(2) {/*AUTOGENERATED_END*/ } - ## 另请参阅 {#see-also} - [UUID](../../sql-reference/functions/uuid-functions.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md index 1d5424db198..1b6b9b1c964 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/operators/index.md @@ -128,7 +128,6 @@ SELECT └──────────────────────────┴──────────────────────────┘ ``` - ## 用于处理数据集的运算符 {#operators-for-working-with-data-sets} 请参阅 [IN 运算符](../../sql-reference/operators/in.md) 和 [EXISTS](../../sql-reference/operators/exists.md) 运算符。 @@ -203,7 +202,6 @@ SELECT number AS a FROM numbers(10) WHERE a > ANY (SELECT number FROM numbers(3, └───┘ ``` - ## 日期和时间运算符 
{#operators-for-working-with-dates-and-times} ### EXTRACT {#extract} @@ -270,7 +268,6 @@ FROM test.Orders; 更多示例请参见[测试用例](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql)。 - ### INTERVAL {#interval} 创建一个 [Interval](../../sql-reference/data-types/special-data-types/interval.md) 类型的值,用于与 [Date](../../sql-reference/data-types/date.md) 和 [DateTime](../../sql-reference/data-types/datetime.md) 类型值进行算术运算。 @@ -345,7 +342,6 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul') AS time, time + 60 * 6 * [Interval](../../sql-reference/data-types/special-data-types/interval.md) 数据类型 * [toInterval](/sql-reference/functions/type-conversion-functions#tointervalyear) 类型转换函数 - ## 逻辑 AND 运算符 {#logical-and-operator} 语法 `SELECT a AND b` — 通过函数 [and](/sql-reference/functions/logical-functions#and) 计算 `a` 与 `b` 的逻辑与结果。 @@ -382,7 +378,6 @@ END `transform` 函数不支持 `NULL`。 - ## 连接运算符 {#concatenation-operator} `s1 || s2` – 等同于 `concat(s1, s2) function.` @@ -433,7 +428,6 @@ SELECT x+100 FROM t_null WHERE y IS NULL └──────────────┘ ``` - ### IS NOT NULL {#is_not_null} * 对于 [Nullable](../../sql-reference/data-types/nullable.md) 类型的值,`IS NOT NULL` 运算符返回: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md index cf46467cca1..44249b3c575 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/column.md @@ -32,7 +32,6 @@ ALTER [TEMPORARY] TABLE [db].name [ON CLUSTER cluster] ADD|DROP|RENAME|CLEAR|COM * [MATERIALIZE COLUMN](#materialize-column) — 在缺少该列的数据部分中物化该列。 这些操作的详细说明见下文。 - ## ADD COLUMN {#add-column} ```sql @@ -69,7 +68,6 @@ ToDrop UInt32 Added3 UInt32 ``` - ## 删除列(DROP COLUMN) {#drop-column} ```sql @@ -90,7 +88,6 @@ DROP COLUMN [IF EXISTS] name ALTER TABLE visits DROP COLUMN 
browser ``` - ## 重命名列 {#rename-column} ```sql @@ -107,7 +104,6 @@ ALTER TABLE visits DROP COLUMN browser ALTER TABLE visits RENAME COLUMN webBrowser TO browser ``` - ## CLEAR COLUMN(清空列) {#clear-column} ```sql @@ -124,7 +120,6 @@ CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() ``` - ## 备注列 {#comment-column} ```sql @@ -143,7 +138,6 @@ COMMENT COLUMN [IF EXISTS] name '文本注释' ALTER TABLE visits COMMENT COLUMN browser '此列显示访问站点所使用的浏览器。' ``` - ## 修改列 {#modify-column} ```sql @@ -223,7 +217,6 @@ DESCRIBE users; 在将 Nullable 列更改为 Non-Nullable 时请务必小心。请确保其中没有任何 NULL 值,否则在读取该列时会导致问题。在这种情况下,可以通过 Kill 该 mutation,并将该列恢复为 Nullable 类型来规避问题。 ::: - ## MODIFY COLUMN REMOVE {#modify-column-remove} 移除某个列属性:`DEFAULT`、`ALIAS`、`MATERIALIZED`、`CODEC`、`COMMENT`、`TTL`、`SETTINGS`。 @@ -246,7 +239,6 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; * [REMOVE TTL](ttl.md) - ## MODIFY COLUMN MODIFY SETTING {#modify-column-modify-setting} 修改列的设置。 @@ -265,7 +257,6 @@ ALTER TABLE table_name MODIFY COLUMN column_name MODIFY SETTING name=value,...; ALTER TABLE table_name MODIFY COLUMN column_name MODIFY SETTING max_compress_block_size = 1048576; ``` - ## MODIFY COLUMN RESET SETTING {#modify-column-reset-setting} 重置列的设置,同时从该表的 CREATE 查询中的列表达式里移除该设置的声明。 @@ -284,7 +275,6 @@ ALTER TABLE 表名 MODIFY COLUMN 列名 RESET SETTING 名称,...; ALTER TABLE 表名 MODIFY COLUMN 列名 RESET SETTING max_compress_block_size; ``` - ## MATERIALIZE COLUMN {#materialize-column} 对具有 `DEFAULT` 或 `MATERIALIZED` 值表达式的列进行物化(materialize)。当使用 `ALTER TABLE table_name ADD COLUMN column_name MATERIALIZED` 添加物化列时,现有行中缺少物化值的部分不会被自动填充。在添加或更新 `DEFAULT` 或 `MATERIALIZED` 表达式之后(这只会更新元数据而不会更改现有数据),可以使用 `MATERIALIZE COLUMN` 语句重写已有列数据。请注意,对排序键中的列进行物化是无效操作,因为这可能破坏排序顺序。 @@ -345,7 +335,6 @@ SELECT groupArray(x), groupArray(s) FROM tmp; * [MATERIALIZED](/sql-reference/statements/create/view#materialized-view). 
- ## 限制 {#limitations} `ALTER` 查询允许在嵌套数据结构中创建和删除单个元素(列),但不能整体创建或删除整个嵌套数据结构。要添加一个嵌套数据结构,可以添加名称类似于 `name.nested_name` 且类型为 `Array(T)` 的列。一个嵌套数据结构等价于多个数组列,这些列在点号之前具有相同的前缀名称。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md index 8e296906b74..4a80076b467 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/comment.md @@ -9,21 +9,16 @@ keywords: ['ALTER TABLE', 'MODIFY COMMENT'] doc_type: 'reference' --- - - # ALTER TABLE ... MODIFY COMMENT {#alter-table-modify-comment} 添加、修改或删除表注释(无论之前是否设置过)。表注释的更改会同时反映在 [`system.tables`](../../../operations/system-tables/tables.md) 和 `SHOW CREATE TABLE` 查询的结果中。 - - ## 语法 {#syntax} ```sql ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment' ``` - ## 示例 {#examples} 要创建包含注释的表: @@ -79,7 +74,6 @@ WHERE database = currentDatabase() AND name = 'table_with_comment'; └──────┘ ``` - ## 注意事项 {#caveats} 对于 Replicated 表,不同副本上的注释(comment)可以不同。 @@ -87,8 +81,6 @@ WHERE database = currentDatabase() AND name = 'table_with_comment'; 该功能自 23.9 版本起可用。在更早的 ClickHouse 版本中不可用。 - - ## 相关内容 {#related-content} - [`COMMENT`](/sql-reference/statements/create/table#comment-clause) 子句 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md index 4309cc26b96..20af0b268f8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/database-comment.md @@ -8,21 +8,16 @@ keywords: ['ALTER DATABASE', 'MODIFY COMMENT'] doc_type: 'reference' --- - - # ALTER DATABASE ... 
MODIFY COMMENT {#alter-database-modify-comment} 添加、修改或删除数据库注释(无论之前是否已设置)。注释的变更会同时体现在 [`system.databases`](/operations/system-tables/databases.md) 表和 `SHOW CREATE DATABASE` 查询中。 - - ## 语法 {#syntax} ```sql ALTER DATABASE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment' ``` - ## 示例 {#examples} 要创建带注释的 `DATABASE`: @@ -73,7 +68,6 @@ WHERE name = 'database_with_comment'; └──────┘ ``` - ## 相关内容 {#related-content} - [`COMMENT`](/sql-reference/statements/create/table#comment-clause) 子句 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md index 154c62b2eca..adb3e90db87 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/delete.md @@ -7,8 +7,6 @@ title: 'ALTER TABLE ... DELETE 语句' doc_type: 'reference' --- - - # ALTER TABLE ... 
DELETE 语句 {#alter-table-delete-statement} ```sql @@ -33,7 +31,6 @@ ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr * [ALTER 查询的同步性](/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) * [mutations_sync](/operations/settings/settings.md/#mutations_sync) 设置 - ## 相关内容 {#related-content} - 博客文章:[在 ClickHouse 中处理更新和删除](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md index a11e595c458..9e4a9f7af93 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/index.md @@ -7,8 +7,6 @@ title: 'ALTER' doc_type: 'reference' --- - - # ALTER {#alter} 大多数 `ALTER TABLE` 查询用于修改表设置或数据: @@ -51,8 +49,6 @@ doc_type: 'reference' | [ALTER TABLE ... MODIFY COMMENT](/sql-reference/statements/alter/comment.md) | 无论之前是否设置过注释,都可以向表添加、修改或删除注释。 | | [ALTER NAMED COLLECTION](/sql-reference/statements/alter/named-collection.md) | 修改[命名集合](/operations/named-collections.md)。 | - - ## 变更 {#mutations} 用于操作表数据的 `ALTER` 查询是通过一种称为“变更(mutations)”的机制实现的,最典型的是 [ALTER TABLE ... DELETE](/sql-reference/statements/alter/delete.md) 和 [ALTER TABLE ... 
UPDATE](/sql-reference/statements/alter/update.md)。它们是类似于 [MergeTree](/engines/table-engines/mergetree-family/index.md) 表中合并操作的异步后台进程,用来生成新的“已变更”数据分片版本。 @@ -66,8 +62,6 @@ doc_type: 'reference' 已完成变更的条目不会立即被删除(保留条目的数量由存储引擎参数 `finished_mutations_to_keep` 决定)。更旧的变更条目会被删除。 - - ## ALTER 查询的同步性 {#synchronicity-of-alter-queries} 对于非复制表,所有 `ALTER` 查询都会以同步方式执行。对于复制表,查询只是向 `ZooKeeper` 中添加相应操作的指令,而这些操作本身会尽快执行。不过,查询可以等待这些操作在所有副本上完成。 @@ -82,8 +76,6 @@ doc_type: 'reference' 对于所有 `ALTER` 查询,如果 `alter_sync = 2`,并且某些副本处于非活动状态的时间超过 `replication_wait_for_inactive_replica_timeout` 设置中指定的时长,则会抛出 `UNFINISHED` 异常。 ::: - - ## 相关内容 {#related-content} - 博客:[在 ClickHouse 中处理更新和删除](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md index 7c8f118c033..dc02b7f3d8a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/projection.md @@ -21,8 +21,6 @@ doc_type: 'reference' 你可以在[此页面](/guides/best-practices/sparse-primary-indexes.md/#option-3-projections)中查看更多关于投影内部工作机制的技术细节。 - - ## 未使用主键的过滤示例 {#example-filtering-without-using-primary-keys} 创建表: @@ -79,7 +77,6 @@ LIMIT 2 SELECT query, projections FROM system.query_log WHERE query_id='' ``` - ## 预聚合查询示例 {#example-pre-aggregation-query} 创建包含 Projection 的表: @@ -157,7 +154,6 @@ GROUP BY user_agent SELECT query, projections FROM system.query_log WHERE query_id='' ``` - ## 带有 `_part_offset` 字段的常规投影 {#normal-projection-with-part-offset-field} 创建一个带有常规投影并使用 `_part_offset` 字段的表: @@ -202,31 +198,22 @@ WHERE _part_starting_offset + _part_offset IN ( SETTINGS enable_shared_storage_snapshot_in_query = 1 ``` - # 投影操作 {#manipulating-projections} 
可以执行以下关于[投影](/engines/table-engines/mergetree-family/mergetree.md/#projections)的操作: - - ## ADD PROJECTION {#add-projection} `ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - 在表的元数据中添加投影定义。 - - ## DROP PROJECTION {#drop-projection} `ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` - 从表的元数据中移除投影描述,并从磁盘中删除投影文件。以[变更](/sql-reference/statements/alter/index.md#mutations)的形式实现。 - - ## MATERIALIZE PROJECTION {#materialize-projection} `ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` —— 此查询会在分区 `partition_name` 中重建投影 `name`。其实现方式为一次[变更操作](/sql-reference/statements/alter/index.md#mutations)。 - - ## CLEAR PROJECTION {#clear-projection} `ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - 在不删除其定义的情况下,从磁盘中删除投影文件。其实现方式为一种[变更](/sql-reference/statements/alter/index.md#mutations)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md index 68060d01198..8df6fba648a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/skipping-index.md @@ -8,32 +8,22 @@ toc_hidden_folder: true doc_type: 'reference' --- - - # 对数据跳过索引的操作 {#manipulating-data-skipping-indices} 可以执行以下操作: - - ## ADD INDEX {#add-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - 向表的元数据中添加索引描述。 - - ## DROP INDEX {#drop-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` - 从表的元数据中移除索引定义,并从磁盘上删除索引文件。该操作作为一次[mutation](/sql-reference/statements/alter/index.md#mutations)来实现。 - - 
## MATERIALIZE INDEX {#materialize-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` - 为指定的 `partition_name` 重建名为 `name` 的二级索引。该操作作为一次[变更](/sql-reference/statements/alter/index.md#mutations)实现。如果省略 `IN PARTITION` 子句,则会为整张表的数据重建索引。 - - ## CLEAR INDEX {#clear-index} `ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` - 从磁盘中删除二级索引文件,但不会移除索引定义。该操作实现为一次[mutation](/sql-reference/statements/alter/index.md#mutations)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md index 71b97e82039..6cc2b85d037 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/update.md @@ -7,8 +7,6 @@ title: 'ALTER TABLE ... UPDATE 语句' doc_type: 'reference' --- - - # ALTER TABLE ... UPDATE 语句 {#alter-table-update-statements} ```sql @@ -33,7 +31,6 @@ ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] 
[IN P * [ALTER 查询的同步方式](/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) * [mutations_sync](/operations/settings/settings.md/#mutations_sync) 设置 - ## 相关内容 {#related-content} - 博客文章:[在 ClickHouse 中处理更新和删除](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md index a8e8946c9f6..91ac39ecd75 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/user.md @@ -30,7 +30,6 @@ ALTER USER [IF EXISTS] name1 [RENAME TO new_name |, name2 [,...]] 要使用 `ALTER USER` 语句,您必须具有 [ALTER USER](../../../sql-reference/statements/grant.md#access-management) 权限。 - ## GRANTEES 子句 {#grantees-clause} 指定允许从该用户处接收[权限](../../../sql-reference/statements/grant.md#privileges)的用户或角色,前提是该用户自身也已通过带有 [GRANT OPTION](../../../sql-reference/statements/grant.md#granting-privilege-syntax) 的授权获得所有必需的访问权限。`GRANTEES` 子句的选项: @@ -42,8 +41,6 @@ ALTER USER [IF EXISTS] name1 [RENAME TO new_name |, name2 [,...]] 您可以使用 `EXCEPT` 表达式排除任意用户或角色。例如,`ALTER USER user1 GRANTEES ANY EXCEPT user2`。这意味着如果 `user1` 拥有一些通过 `GRANT OPTION` 授予的权限,则它可以将这些权限授予除 `user2` 之外的任意用户或角色。 - - ## 示例 {#examples} 将已分配的角色设为默认角色: @@ -104,7 +101,6 @@ ALTER USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by ' ALTER USER user1 RESET AUTHENTICATION METHODS TO NEW ``` - ## VALID UNTIL 子句 {#valid-until-clause} 用于为身份验证方法指定到期日期以及(可选的)时间。它接受一个字符串作为参数。建议使用 `YYYY-MM-DD [hh:mm:ss] [timezone]` 格式表示日期时间。默认情况下,此参数为 `'infinity'`。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md index 84c339d6ab0..44fbf0c4fd7 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/alter/view.md @@ -7,8 +7,6 @@ title: 'ALTER TABLE ... MODIFY QUERY 语句' doc_type: 'reference' --- - - # ALTER TABLE ... MODIFY QUERY 语句 {#alter-table-modify-query-statement} 您可以使用 `ALTER TABLE ... MODIFY QUERY` 语句修改创建 [物化视图](/sql-reference/statements/create/view#materialized-view) 时指定的 `SELECT` 查询,而不会中断数据摄取过程。 @@ -92,7 +90,6 @@ ALTER TABLE mv MODIFY QUERY GROUP BY ts, event_type, browser; ``` - INSERT INTO events SELECT Date '2020-01-03' + interval number * 900 second, ['imp', 'click'][number%2+1], @@ -172,7 +169,6 @@ browser 该应用程序功能非常受限,因为只能修改 `SELECT` 子句,无法添加新的列。 ``` - ```sql CREATE TABLE src_table (`a` UInt32) ENGINE = MergeTree ORDER BY a; CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table; @@ -204,7 +200,6 @@ SELECT * FROM mv; └───┘ ``` - ## ALTER TABLE ... MODIFY REFRESH 语句 {#alter-table--modify-refresh-statement} `ALTER TABLE ... MODIFY REFRESH` 语句用于修改 [可刷新物化视图](../create/view.md#refreshable-materialized-view) 的刷新参数。参见[更改刷新参数](../create/view.md#changing-refresh-parameters)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md index e9a9643233d..e3f3d8e51e5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/check-grant.md @@ -9,8 +9,6 @@ doc_type: 'reference' `CHECK GRANT` 查询用于检查当前用户或角色是否已被授予特定权限。 - - ## 语法 {#syntax} 查询的基本语法如下: @@ -21,7 +19,6 @@ CHECK GRANT privilege[(column_name [,...])] [,...] 
ON {db.table[*]|db[*].*|*.*|t * `privilege` — 权限的类型。 - ## 示例 {#examples} 如果用户曾被授予该权限,`check_grant` 的返回值为 `1`。否则,`check_grant` 的返回值为 `0`。 @@ -50,6 +47,5 @@ CHECK GRANT SELECT(col2) ON table_2; └────────┘ ``` - ## 通配符 {#wildcard} 在授予权限时,可以使用星号(`*`)来代替表名或数据库名。有关通配符规则,请参阅 [WILDCARD GRANTS](../../sql-reference/statements/grant.md#wildcard-grants)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md index 763ce2da947..b8c91d788b4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/check-table.md @@ -17,8 +17,6 @@ ClickHouse 中的 `CHECK TABLE` 查询用于对特定表或其分区执行校验 该查询不会提升系统性能,如果未完全理解其影响,请不要执行该查询。 ::: - - ## 语法 {#syntax} 查询的基本语法如下: @@ -55,7 +53,6 @@ CHECK TABLE table_name [PARTITION partition_expression | PART part_name] [FORMAT `*Log` 家族中的引擎在发生故障时不提供自动数据恢复。使用 `CHECK TABLE` 查询来及时跟踪数据丢失情况。 - ## 示例 {#examples} 默认情况下,`CHECK TABLE` 查询会显示表的整体检查状态: @@ -152,7 +149,6 @@ FORMAT PrettyCompactMonoBlock SETTINGS check_query_single_value_result = 0 ``` - ```text ┌─database─┬─table────┬─part_path───┬─is_passed─┬─message─┐ │ default │ t2 │ all_1_95_3 │ 1 │ │ @@ -168,7 +164,6 @@ SETTINGS check_query_single_value_result = 0 └──────────┴──────────┴─────────────┴───────────┴─────────┘ ``` - ## 如果数据已损坏 {#if-the-data-is-corrupted} 如果表已损坏,可以将未损坏的数据复制到另一张表。操作步骤如下: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md index 324871ff5a5..770f14055ed 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/role.md @@ -17,7 +17,6 @@ CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [, name2 
[,...]] [ON CLUSTER clus [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | PROFILE 'profile_name'] [,...] ``` - ## 管理角色 {#managing-roles} 一个用户可以被分配多个角色。用户可以通过 [SET ROLE](../../../sql-reference/statements/set-role.md) 语句,以任意组合应用其已分配的角色。最终的权限范围是所有已应用角色所拥有的全部权限的并集。如果某个用户在其用户账号上被直接授予了权限,这些权限也会与通过角色授予的权限合并。 @@ -28,8 +27,6 @@ CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus 要删除角色,请使用 [DROP ROLE](/sql-reference/statements/drop#drop-role) 语句。被删除的角色会自动从所有被分配了该角色的用户和角色中移除。 - - ## 示例 {#examples} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md index ad1886eb1fb..9bd725de164 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/row-policy.md @@ -24,13 +24,10 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}] ``` - ## USING 子句 {#using-clause} 允许指定条件来过滤行。只有当对某行计算该条件的结果为非零值时,用户才能看到该行。 - - ## TO 子句 {#to-clause} 在 `TO` 部分中,可以提供该策略适用的用户和角色列表。例如:`CREATE ROW POLICY ... 
TO accountant, john@localhost`。 @@ -49,8 +46,6 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter` ::: - - ## AS 子句 {#as-clause} 允许在同一张表上针对同一用户同时启用多个策略。因此,我们需要一种方法将多个策略中的条件组合起来。 @@ -96,13 +91,10 @@ CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio 使用户 `peter` 只能在 `b=1` 且 `c=2` 时查看 table1 的行,尽管 mydb 中的其他任何表对该用户只会应用 `b=1` 的策略。 - ## ON CLUSTER 子句 {#on-cluster-clause} 允许在集群中创建行策略,参见 [Distributed DDL](../../../sql-reference/distributed-ddl.md)。 - - ## 示例 {#examples} `CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md index 52182e0d9bd..e899f0fb320 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/table.md @@ -16,7 +16,6 @@ import TabItem from '@theme/TabItem'; 默认情况下,表只会在当前服务器上创建。分布式 DDL 查询是通过 `ON CLUSTER` 子句实现的,该子句[单独说明](../../../sql-reference/distributed-ddl.md)。 - ## 语法形式 {#syntax-forms} ### 使用显式 Schema {#with-explicit-schema} @@ -101,7 +100,6 @@ SELECT x, toTypeName(x) FROM t1; └───┴───────────────┘ ``` - ## NULL 或 NOT NULL 修饰符 {#null-or-not-null-modifiers} 在列定义中,可以在数据类型之后使用 `NULL` 和 `NOT NULL` 修饰符,用于指定该列是否可以为 [Nullable](/sql-reference/data-types/nullable) 类型。 @@ -110,8 +108,6 @@ SELECT x, toTypeName(x) FROM t1; 另请参阅 [data_type_default_nullable](../../../operations/settings/settings.md#data_type_default_nullable) 设置。 - - ## 默认值 {#default_values} 列定义可以以 `DEFAULT expr`、`MATERIALIZED expr` 或 `ALIAS expr` 的形式指定默认值表达式。示例:`URLDomain String DEFAULT domain(URL)`。 @@ -218,7 +214,6 @@ FROM test FORMAT Vertical; ``` - 第 1 行: ────── id: 1 @@ -264,7 +259,6 @@ SELECT * FROM test SETTINGS 
asterisk_include_alias_columns=1; └────┴────────────┴──────────┘ ```` - ## 主键 {#primary-key} 在创建表时,可以定义[主键](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries)。主键可以通过两种方式指定: @@ -295,7 +289,6 @@ PRIMARY KEY(expr1[, expr2,...]); 无法在同一个查询中同时使用这两种方式。 ::: - ## 约束 {#constraints} 除了列描述之外,还可以定义约束条件: @@ -340,13 +333,10 @@ ORDER BY (name_len, name); `ASSUME CONSTRAINT` **并不会强制约束成立**,它只是告知优化器该约束被认为是成立的。如果该约束实际上并不成立,则查询结果可能会不正确。因此,仅当可以确定约束确实成立时,才应使用 `ASSUME CONSTRAINT`。 - ## TTL 表达式 {#ttl-expression} 定义值的存储时间。只能为 MergeTree 系列的表指定。有关详细说明,请参阅[列和表的 TTL](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)。 - - ## 列压缩编解码器 {#column_compression_codec} 默认情况下,ClickHouse 在自托管版本中使用 `lz4` 压缩,在 ClickHouse Cloud 中使用 `zstd` 压缩。 @@ -428,7 +418,6 @@ ClickHouse 支持通用 codec 和专用 codec。 `DEFLATE_QPL` — 由 Intel® Query Processing Library 实现的 [Deflate 压缩算法](https://github.com/intel/qpl)。存在一些限制: - - 默认情况下,DEFLATE_QPL 是禁用的,只有在启用配置项 [enable_deflate_qpl_codec](../../../operations/settings/settings.md#enable_deflate_qpl_codec) 后才能使用。 - DEFLATE_QPL 要求 ClickHouse 在构建时启用 SSE 4.2 指令集(默认即为如此)。更多详情参见 [Build Clickhouse with DEFLATE_QPL](/development/building_and_benchmarking_deflate_qpl)。 - 当系统具有 Intel® IAA(In-Memory Analytics Accelerator,内存内分析加速器)卸载设备时,DEFLATE_QPL 的效果最佳。更多详情参见 [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) 和 [Benchmark with DEFLATE_QPL](/development/building_and_benchmarking_deflate_qpl)。 @@ -456,8 +445,6 @@ ClickHouse 支持通用 codec 和专用 codec。 #### FPC {#fpc} - - `FPC(level, float_size)` - 在序列中不断选择两个预测器中效果更好的一个来预测下一个浮点值,然后将实际值与预测值做 XOR,再对结果进行前导零压缩。类似于 Gorilla,当存储变化缓慢的一系列浮点值时,这种方式非常高效。对于 64 位值(double),FPC 比 Gorilla 更快;对于 32 位值,性能可能有所差异。`level` 可选值范围为 1-28,默认值为 12。`float_size` 可选值为 4、8,当类型是 Float 时默认值为 `sizeof(type)`,其他情况下为 4。关于该算法的详细描述,请参见 [High Throughput Compression of Double-Precision Floating-Point 
Data](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf)。 #### T64 {#t64} @@ -523,7 +510,6 @@ CREATE TABLE mytable ENGINE = MergeTree ORDER BY x; ``` - ## 临时表 {#temporary-tables} :::note @@ -554,7 +540,6 @@ CREATE [OR REPLACE] TEMPORARY TABLE [IF NOT EXISTS] table_name 可以使用 [ENGINE = Memory](../../../engines/table-engines/special/memory.md) 引擎的表来替代临时表。 - ## REPLACE TABLE {#replace-table} `REPLACE` 语句允许以[原子方式](/concepts/glossary#atomicity)更新一张表。 @@ -721,7 +706,6 @@ WHERE CounterID <12345; - ## COMMENT 子句 {#comment-clause} 在创建表时,可以为表添加注释。 @@ -754,7 +738,6 @@ SELECT name, comment FROM system.tables WHERE name = 't1'; └──────┴─────────────────┘ ``` - ## 相关内容 {#related-content} - 博客:[使用表结构和编解码器优化 ClickHouse](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md index 5cc1e27b989..68901e0c936 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/user.md @@ -26,7 +26,6 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus `ON CLUSTER` 子句可用于在整个集群中创建用户,参见 [Distributed DDL](../../../sql-reference/distributed-ddl.md)。 - ## 身份验证 {#identification} 可以通过多种方式对用户进行身份验证: @@ -73,7 +72,6 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus * 至少包含 1 个特殊字符 ::: - ## 示例 {#examples} 1. 以下用户名为 `name1`,且不需要密码——显然几乎没有任何安全保障: @@ -162,14 +160,10 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [, name2 [,...]] [ON CLUSTER clus CREATE USER user1 IDENTIFIED WITH plaintext_password by '1', bcrypt_password by '2', plaintext_password by '3'' ``` - - Notes: 1. 
较旧版本的 ClickHouse 可能不支持多种认证方法的语法。因此,如果 ClickHouse 服务器中已经存在此类用户并被降级到不支持该语法的版本,这些用户将变得不可用,且部分与用户相关的操作将无法正常工作。为了平滑降级,必须在降级之前将所有用户配置为仅包含单一认证方法。或者,如果服务器在未按正确流程操作的情况下已经被降级,则应删除这些有问题的用户。 2. 出于安全原因,`no_password` 不能与其他认证方法共存。因此,只有在 `no_password` 是查询中唯一的认证方法时,才能指定 `no_password`。 - - ## 用户主机 {#user-host} 用户主机是指可以与 ClickHouse 服务器建立连接的主机。可以在查询中的 `HOST` 子句中通过以下方式指定主机: @@ -191,8 +185,6 @@ Notes: ClickHouse 会将 `user_name@'address'` 视为一个完整的用户名。因此,从技术上讲,可以创建多个具有相同 `user_name`、但在 `@` 后部分不同的用户。不过,不建议这样做。 ::: - - ## VALID UNTIL 子句 {#valid-until-clause} 用于为某个认证方法指定过期日期,以及可选的过期时间。它接受一个字符串作为参数。建议使用 `YYYY-MM-DD [hh:mm:ss] [timezone]` 格式表示日期时间。默认情况下,该参数为 `'infinity'`。 @@ -206,8 +198,6 @@ ClickHouse 会将 `user_name@'address'` 视为一个完整的用户名。因此 - ```CREATE USER name1 VALID UNTIL '2025-01-01 12:00:00 `Asia/Tokyo`'``` - `CREATE USER name1 IDENTIFIED WITH plaintext_password BY 'no_expiration', bcrypt_password BY 'expiration_set' VALID UNTIL '2025-01-01''` - - ## GRANTEES 子句 {#grantees-clause} 指定允许从该用户处接收[权限](../../../sql-reference/statements/grant.md#privileges)的用户或角色,前提是该用户本身也已经通过 [GRANT OPTION](../../../sql-reference/statements/grant.md#granting-privilege-syntax) 获得了所有所需的访问权限。`GRANTEES` 子句的选项: @@ -219,8 +209,6 @@ ClickHouse 会将 `user_name@'address'` 视为一个完整的用户名。因此 你可以使用 `EXCEPT` 表达式排除任意用户或角色。例如,`CREATE USER user1 GRANTEES ANY EXCEPT user2`。这意味着如果 `user1` 拥有通过 `GRANT OPTION` 授予的某些权限,它就可以将这些权限授予除 `user2` 之外的任何人。 - - ## 示例 {#examples-1} 创建一个名为 `mira`、受密码 `qwerty` 保护的用户账号: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md index 5928be1633c..909534d4727 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/create/view.md @@ -11,13 +11,10 @@ import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import DeprecatedBadge from 
'@theme/badges/DeprecatedBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # CREATE VIEW {#create-view} 创建一个新视图。视图可以是[普通视图](#normal-view)、[物化视图](#materialized-view)、[可刷新的物化视图](#refreshable-materialized-view)以及[窗口视图](/sql-reference/statements/create/view#window-view)。 - - ## 普通视图 {#normal-view} 语法: @@ -49,7 +46,6 @@ SELECT a, b, c FROM view SELECT a, b, c FROM (SELECT ...) ``` - ## 参数化视图 {#parameterized-view} 参数化视图与普通视图类似,但在创建时可以指定不会立即解析的参数。这类视图可以配合表函数使用:将视图名称作为函数名,将参数值作为函数参数传入。 @@ -64,7 +60,6 @@ CREATE VIEW view AS SELECT * FROM TABLE WHERE Column1={column1:datatype1} and Co SELECT * FROM view(column1=value1, column2=value2 ...) ``` - ## 物化视图 {#materialized-view} ```sql @@ -119,7 +114,6 @@ ClickHouse 中的物化视图在实现上更类似于插入触发器。如果视 要删除视图,请使用 [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view)。尽管 `DROP TABLE` 对 VIEW 也同样可用。 - ## SQL 安全性 {#sql_security} `DEFINER` 和 `SQL SECURITY` 允许你指定在执行视图底层查询时要使用的 ClickHouse 用户。 @@ -166,7 +160,6 @@ SQL SECURITY INVOKER AS SELECT ... ``` - ## 实时视图 {#live-view} @@ -175,8 +168,6 @@ AS SELECT ... 为方便查阅,旧版文档位于[此处](https://pastila.nl/?00f32652/fdf07272a7b54bda7e13b919264e449f.md) - - ## 可刷新物化视图 {#refreshable-materialized-view} ```sql @@ -245,7 +236,6 @@ REFRESH EVERY 1 DAY OFFSET 2 HOUR RANDOMIZE FOR 1 HOUR -- 每天在 01:30 至 02 在 `APPEND` 模式下,可以通过 `SETTINGS all_replicas = 1` 禁用这种协调。这样会使各个副本彼此独立地执行刷新。在这种情况下,不再需要使用 ReplicatedMergeTree。 - 在非 `APPEND` 模式下,仅支持协调刷新。对于非协调刷新,请使用 `Atomic` 数据库以及 `CREATE ... ON CLUSTER` 查询,在所有副本上创建可刷新物化视图。 协调是通过 Keeper 完成的。znode 路径由 [default_replica_path](../../../operations/server-configuration-parameters/settings.md#default_replica_path) 服务器设置决定。 @@ -317,7 +307,6 @@ ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] 
[DEPEND ### 其他操作 {#other-operations} - 所有可刷新的物化视图的状态都可以在表 [`system.view_refreshes`](../../../operations/system-tables/view_refreshes.md) 中查看。该表包含刷新进度(如果正在运行)、上次和下次刷新时间,以及在刷新失败时的异常消息。 要手动停止、启动、触发或取消刷新,请使用 [`SYSTEM STOP|START|REFRESH|WAIT|CANCEL VIEW`](../system.md#refreshable-materialized-views)。 @@ -328,8 +317,6 @@ ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPEND 趣闻:刷新查询可以从正在刷新的视图中读取数据,读取到的是刷新前版本的数据。这意味着你可以实现康威生命游戏(Conway's Game of Life):https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA== ::: - - ## 窗口视图 {#window-view} @@ -391,7 +378,6 @@ CREATE WINDOW VIEW test.wv TO test.dst WATERMARK=ASCENDING ALLOWED_LATENESS=INTE 请注意,由延迟触发产生的元素应被视为对先前计算结果的更新。与在窗口结束时触发不同,窗口视图会在延迟事件到达时立即触发。因此,同一个窗口将产生多次输出。用户需要将这些重复结果纳入考虑,或对其进行去重处理。 - 你可以使用 `ALTER TABLE ... MODIFY QUERY` 语句修改在 window view 中定义的 `SELECT` 查询。新的 `SELECT` 查询所产生的数据结构,在使用或不使用 `TO [db.]name` 子句时,都必须与原始的 `SELECT` 查询保持一致。请注意,当前窗口中的数据将会丢失,因为中间状态无法复用。 ### 监控新窗口 {#monitoring-new-windows} @@ -462,14 +448,11 @@ Window View 在以下场景中非常有用: * **监控**:按时间对指标日志进行聚合和计算,并将结果输出到目标表。Dashboard 可以将该目标表作为数据源表使用。 * **分析**:在时间窗口内自动聚合和预处理数据,这在分析海量日志时尤其有用。预处理可以消除多个查询中的重复计算,降低查询延迟。 - ## 相关内容 {#related-content} - 博客文章:[在 ClickHouse 中处理时间序列数据](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) - 博客文章:[使用 ClickHouse 构建可观测性解决方案(第二部分:链路追踪)](https://clickhouse.com/blog/storing-traces-and-spans-open-telemetry-in-clickhouse) - - ## 临时视图 {#temporary-views} ClickHouse 支持具有以下特性的 **临时视图**(在适用情况下与临时表的行为一致): diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md index bf1cf3dd9e5..d6d9ca6d2dd 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/delete.md @@ -16,7 +16,6 @@ DELETE FROM [db.]table [ON CLUSTER cluster] [IN 
PARTITION partition_expr] WHERE 之所以称为「轻量级 `DELETE`」,是为了与 [ALTER TABLE ... DELETE](/sql-reference/statements/alter/delete) 命令区分开来,后者是一个重量级的操作。 - ## 示例 {#examples} ```sql @@ -24,7 +23,6 @@ DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE DELETE FROM hits WHERE Title LIKE '%hello%'; ``` - ## 轻量级 `DELETE` 不会立即删除数据 {#lightweight-delete-does-not-delete-data-immediately} 轻量级 `DELETE` 是作为一种[变更(mutation)](/sql-reference/statements/alter#mutations)实现的,它会将行标记为已删除,但不会立即物理删除这些行。 @@ -35,24 +33,18 @@ DELETE FROM hits WHERE Title LIKE '%hello%'; 如果需要保证数据在可预测的时间内从存储中删除,可以考虑使用表设置 [`min_age_to_force_merge_seconds`](/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds)。或者可以使用 [ALTER TABLE ... DELETE](/sql-reference/statements/alter/delete) 命令。请注意,使用 `ALTER TABLE ... DELETE` 删除数据可能会消耗大量资源,因为它会重新创建所有受影响的数据部分。 - - ## 删除大量数据 {#deleting-large-amounts-of-data} 大规模删除操作可能会对 ClickHouse 的性能产生负面影响。如果打算删除表中的所有行,请考虑使用 [`TRUNCATE TABLE`](/sql-reference/statements/truncate) 命令。 如果预计需要频繁执行删除操作,请考虑使用[自定义分区键](/engines/table-engines/mergetree-family/custom-partitioning-key)。然后可以使用 [`ALTER TABLE ... 
DROP PARTITION`](/sql-reference/statements/alter/partition#drop-partitionpart) 命令快速删除与该分区关联的所有行。 - - ## 轻量级 `DELETE` 的限制 {#limitations-of-lightweight-delete} ### 带有投影的轻量级 `DELETE` {#lightweight-deletes-with-projections} 默认情况下,`DELETE` 不适用于包含投影的表。这是因为投影中的行也可能会受到 `DELETE` 操作的影响。不过,可以使用 [MergeTree 设置](/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` 来更改此行为。 - - ## 使用轻量级 `DELETE` 时的性能注意事项 {#performance-considerations-when-using-lightweight-delete} **使用轻量级 `DELETE` 语句删除大量数据可能会对 `SELECT` 查询性能产生负面影响。** @@ -64,8 +56,6 @@ DELETE FROM hits WHERE Title LIKE '%hello%'; - 受影响的表包含非常多的数据分片(data parts)。 - 在紧凑分片(Compact part)中存在大量数据。在紧凑分片中,所有列都存储在同一个文件中。 - - ## 删除权限 {#delete-permissions} `DELETE` 语句需要具有 `ALTER DELETE` 权限。要为指定用户在特定表上启用 `DELETE` 语句,请运行以下命令: @@ -74,7 +64,6 @@ DELETE FROM hits WHERE Title LIKE '%hello%'; 授予 ALTER DELETE 权限 于 db.table 给 username; ``` - ## ClickHouse 内部是如何实现轻量级 DELETE 的 {#how-lightweight-deletes-work-internally-in-clickhouse} 1. **对受影响的行应用“掩码”** @@ -103,8 +92,6 @@ DELETE FROM hits WHERE Title LIKE '%hello%'; 从上述步骤可以看出,使用掩码技术的轻量级 `DELETE` 相比传统的 `ALTER TABLE ... 
DELETE` 性能更好,因为它不会为受影响的 part 重写所有列的文件。 - - ## 相关内容 {#related-content} - 博客文章:[在 ClickHouse 中处理更新和删除](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md index 1644faaf2eb..543b5747d24 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/explain.md @@ -58,7 +58,6 @@ Union ReadFromStorage (SystemNumbers) ``` - ## EXPLAIN 类型 {#explain-types} - `AST` — 抽象语法树。 @@ -101,7 +100,6 @@ EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today(); ExpressionList ``` - ### EXPLAIN SYNTAX {#explain-syntax} 在语法分析之后显示查询的抽象语法树(AST)。 @@ -146,7 +144,6 @@ ALL INNER JOIN system.numbers AS __table2 ON __table1.number = __table2.number ALL INNER JOIN system.numbers AS __table3 ON __table2.number = __table3.number ``` - ### EXPLAIN QUERY TREE {#explain-query-tree} 设置: @@ -176,25 +173,25 @@ QUERY id: 0 TABLE id: 3, table_name: default.test_table ``` - ### EXPLAIN PLAN {#explain-plan} -输出查询计划的各个步骤。 +输出查询计划步骤。 -Settings: +设置: -* `header` — 输出每个步骤的表头。默认值:0。 -* `description` — 输出步骤描述。默认值:1。 -* `indexes` — 显示已使用的索引、为每个已应用索引所过滤的分区片段数量以及过滤的粒度数量。默认值:0。支持 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表。从 ClickHouse >= v25.9 开始,仅当与 `SETTINGS use_query_condition_cache = 0, use_skip_indexes_on_data_read = 0` 一起使用时,此语句才会输出有意义的结果。 -* `projections` — 显示所有已分析的投影,以及它们基于投影主键条件在分区片段级别上的过滤效果。对于每个投影,本节会包含根据该投影主键评估的分区片段数量、行数、标记数和范围数等统计信息。同时还会显示在不读取投影本身的情况下,由于该过滤而被跳过的数据分区片段数量。投影究竟是实际用于读取,还是仅用于过滤分析,可以通过 `description` 字段来判断。默认值:0。支持 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表。 -* `actions` — 输出有关步骤中各项操作的详细信息。默认值:0。 -* `json` — 以 [JSON](/interfaces/formats/JSON) 格式将查询计划步骤输出为一行。默认值:0。建议使用 [TabSeparatedRaw (TSVRaw)](/interfaces/formats/TabSeparatedRaw) 
格式,以避免不必要的转义。 -* `input_headers` - 输出每个步骤的输入表头。默认值:0。通常只对开发者调试与输入输出表头不匹配相关的问题有用。 -* `column_structure` - 在列名和类型的基础上,同时输出表头中列的结构。默认值:0。通常只对开发者调试与输入输出表头不匹配相关的问题有用。 +* `header` — 为每个步骤打印输出头部信息。默认值:0。 +* `description` — 打印步骤描述。默认值:1。 +* `indexes` — 显示已使用的索引、每个应用索引过滤的分区片段数量以及过滤的粒度数量。默认值:0。支持 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表。从 ClickHouse >= v25.9 开始,仅在与 `SETTINGS use_query_condition_cache = 0, use_skip_indexes_on_data_read = 0` 一起使用时,该语句才会输出有意义的结果。 +* `projections` — 显示所有已分析的投影,以及它们基于投影主键条件在分区片段级别过滤方面的效果。对于每个投影,本部分包含统计信息,例如使用该投影主键进行评估的分区片段、行、标记和范围数量。它还会显示由于该过滤而被跳过的数据分区片段数量,而无需从投影本身读取数据。投影是实际用于读取,还是仅用于过滤分析,可以通过 `description` 字段判断。默认值:0。支持 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表。 +* `actions` — 打印关于步骤执行行为的详细信息。默认值:0。 +* `json` — 以 [JSON](/interfaces/formats/JSON) 格式将查询计划步骤输出为一行。默认值:0。建议使用 [TabSeparatedRaw (TSVRaw)](/interfaces/formats/TabSeparatedRaw) 格式以避免不必要的转义。 +* `input_headers` - 为每个步骤打印输入头部信息。默认值:0。主要仅对开发人员在调试与输入输出头部不匹配相关的问题时有用。 +* `column_structure` - 除名称和类型外,还打印头部中列的结构信息。默认值:0。主要仅对开发人员在调试与输入输出头部不匹配相关的问题时有用。 +* `distributed` — 显示在远程节点上针对分布式表或并行副本执行的查询计划。默认值:0。 当 `json=1` 时,步骤名称将包含一个带有唯一步骤标识符的额外后缀。 -Example: +示例: ```sql EXPLAIN SELECT sum(number) FROM numbers(10) GROUP BY number % 4; @@ -211,10 +208,10 @@ Union ``` :::note -不支持对步骤和查询成本进行估算。 +不支持执行步骤和查询代价估算。 ::: -当 `json = 1` 时,查询计划以 JSON 格式表示。每个节点是一个字典,始终包含键 `Node Type` 和 `Plans`。`Node Type` 是表示步骤名称的字符串,`Plans` 是一个包含子步骤描述的数组。根据节点类型和设置,还可以添加其他可选键。 +当 `json = 1` 时,查询计划以 JSON 格式表示。每个节点是一个字典对象,并且始终包含键 `Node Type` 和 `Plans`。`Node Type` 是表示步骤名称的字符串。`Plans` 是一个数组,包含子步骤的描述。根据节点类型和设置,还可以添加其他可选键。 示例: @@ -255,7 +252,7 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw; ] ``` -当 `description` = 1 时,会向该步骤添加 `Description` 键: +将 `description` 设为 1 时,会向该步骤添加 `Description` 键: ```json { @@ -264,7 +261,7 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw; } ``` -当 `header` = 1 时,会在该步骤中新增一个名为 `Header` 
的键,其值为列数组。 +当 `header` = 1 时,`Header` 键会作为列数组添加到该步骤中。

示例:

@@ -401,8 +398,7 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
 ]
 ```

-
-当 `actions` = 1 时,所添加的键取决于步骤类型。
+当 `actions` = 1 时,添加的键取决于步骤类型。

 示例:

@@ -461,6 +457,50 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
 ]
 ```

+当 `distributed` = 1 时,输出不仅包含本地查询计划,还包含将在远程节点上执行的查询计划。这对于分析和调试分布式查询非常有用。
+
+分布式表示例:
+
+```sql
+EXPLAIN distributed=1 SELECT * FROM remote('127.0.0.{1,2}', numbers(2)) WHERE number = 1;
+```
+
+```sql
+Union
+  Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + Projection)))))
+    Filter ((WHERE + Change column names to column identifiers))
+      ReadFromSystemNumbers
+  Expression ((Project names + (Projection + Change column names to column identifiers)))
+    ReadFromRemote (Read from remote replica)
+      Expression ((Project names + Projection))
+        Filter ((WHERE + Change column names to column identifiers))
+          ReadFromSystemNumbers
+```
+
+并行副本示例:
+
+```sql
+SET enable_parallel_replicas = 2, max_parallel_replicas = 2, cluster_for_parallel_replicas = 'default';
+
+EXPLAIN distributed=1 SELECT sum(number) FROM test_table GROUP BY number % 4;
+```
+
+```sql
+Expression ((Project names + Projection))
+  MergingAggregated
+    Union
+      Aggregating
+        Expression ((Before GROUP BY + Change column names to column identifiers))
+          ReadFromMergeTree (default.test_table)
+      ReadFromRemoteParallelReplicas
+        BlocksMarshalling
+          Aggregating
+            Expression ((Before GROUP BY + Change column names to column identifiers))
+              ReadFromMergeTree (default.test_table)
+```
+
+在这两个示例中,查询计划显示了整个执行流程,包括本地和远程步骤。
+

 ### EXPLAIN PIPELINE {#explain-pipeline}

@@ -494,7 +534,6 @@ ExpressionTransform
   NumbersRange × 2 0 → 1
 ```

-
 ### EXPLAIN ESTIMATE {#explain-estimate}

 显示在处理查询时预计将从表中读取的行数、标记数和分区片段数。适用于 [MergeTree](/engines/table-engines/mergetree-family/mergetree) 系列表。

@@ -523,7 +562,6 @@ EXPLAIN ESTIMATE SELECT * FROM ttt;
 └──────────┴───────┴───────┴──────┴───────┘
 ```

-
 ### EXPLAIN TABLE OVERRIDE {#explain-table-override}

 显示通过 `table function` 访问的表,在应用 `table override` 之后的表结构结果。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md index 5eed1a308df..376c8b2cacf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/grant.md @@ -9,7 +9,6 @@ doc_type: 'reference' import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # GRANT 语句 {#grant-statement} - 向 ClickHouse 用户账户或角色授予[权限](#privileges)。 @@ -30,7 +29,6 @@ GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.ta `WITH GRANT OPTION` 子句为 `user` 或 `role` 授予执行 `GRANT` 查询的权限。用户可以授予与自己权限范围相同或更小范围的权限。 `WITH REPLACE OPTION` 子句会将 `user` 或 `role` 的现有权限替换为新权限;如果未指定该子句,则会将新权限追加到现有权限上,而不是进行替换。 - ## 分配角色的语法 {#assigning-role-syntax} ```sql @@ -43,7 +41,6 @@ GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_US `WITH ADMIN OPTION` 子句向 `user` 或 `role` 授予 [ADMIN OPTION](#admin-option) 权限。 `WITH REPLACE OPTION` 子句会用新的角色替换该 `user` 或 `role` 现有的角色;如果未指定该子句,则会在原有角色基础上追加新角色。 - ## GRANT CURRENT GRANTS 语法 {#grant-current-grants-syntax} ```sql @@ -57,7 +54,6 @@ GRANT CURRENT GRANTS{(privilege[(column_name [,...])] [,...] 
ON {db.table|db.*|* 使用 `CURRENT GRANTS` 语句可以为指定的用户或角色授予所有列出的权限。 如果未指定任何权限,则该用户或角色将获得 `CURRENT_USER` 的所有可用权限。 - ## 用法 {#usage} 要使用 `GRANT`,您的账户必须具有 `GRANT OPTION` 权限。您只能在自身账户权限范围内授予权限。 @@ -87,7 +83,6 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION 你可以在一个查询中为多个账号授予多个权限。查询 `GRANT SELECT, INSERT ON *.* TO john, robin` 允许账号 `john` 和 `robin` 在服务器上所有数据库的所有表上执行 `INSERT` 和 `SELECT` 查询。 - ## 通配符授权 {#wildcard-grants} 在指定权限时,可以使用星号(`*`)来代替表名或数据库名。例如,`GRANT SELECT ON db.* TO john` 查询允许 `john` 在 `db` 数据库中的所有表上执行 `SELECT` 查询。 @@ -139,7 +134,6 @@ GRANT SELECT ON *suffix TO john -- 错误 GRANT SELECT(foo) ON db.table* TO john -- 错误 ``` - ## 权限 {#privileges} 权限是授予用户以执行特定类型查询的许可。 @@ -399,7 +393,6 @@ GRANT SELECT(x,y) ON db.table TO john 该权限允许 `john` 执行任何涉及 `db.table` 中 `x` 和/或 `y` 列数据的 `SELECT` 查询,例如 `SELECT x FROM db.table`。`john` 不能执行 `SELECT z FROM db.table`。也不能执行 `SELECT * FROM db.table`。在处理此查询时,ClickHouse 不会返回任何数据,连 `x` 和 `y` 也不会返回。唯一的例外是当表只包含 `x` 和 `y` 列时,在这种情况下,ClickHouse 会返回该表中的所有数据。 - ### INSERT {#insert} 允许执行 [INSERT](../../sql-reference/statements/insert-into.md) 查询。 @@ -418,7 +411,6 @@ GRANT INSERT(x,y) ON db.table TO john 授予的权限允许 `john` 向 `db.table` 表中的 `x` 和/或 `y` 列插入数据。 - ### ALTER {#alter} 允许根据以下权限层级执行 [ALTER](../../sql-reference/statements/alter/index.md) 查询: @@ -510,7 +502,6 @@ GRANT CLUSTER ON *.* TO ``` - ### DROP {#drop} 允许按照以下权限层级执行 [DROP](../../sql-reference/statements/drop.md) 和 [DETACH](../../sql-reference/statements/detach.md) 查询语句: @@ -739,7 +730,6 @@ GRANT CURRENT GRANTS(READ ON S3) TO alice * **不允许部分撤销:** 不能只撤销已授予过滤模式中的一部分。如有需要,必须撤销整个授权,然后使用新的模式重新授权。 * **不允许使用通配符授权:** 不能使用 `GRANT READ ON *('regexp')` 或类似仅包含通配符的模式。必须提供具体的数据源。 - ### dictGet {#dictget} - `dictGet`。别名:`dictHas`、`dictGetHierarchy`、`dictIsIn` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md index 51895b15c42..75819ce6e7c 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/insert-into.md @@ -7,8 +7,6 @@ title: 'INSERT INTO 语句' doc_type: '参考' --- - - # INSERT INTO 语句 {#insert-into-statement} 将数据插入表中。 @@ -105,13 +103,10 @@ INSERT INTO table SETTINGS ... FORMAT format_name data_set ::: - ## 约束 {#constraints} 如果表定义了[约束](../../sql-reference/statements/create/table.md#constraints),则会针对插入数据的每一行检查相应的约束表达式。如果任一约束未被满足,服务器将抛出一个包含约束名称和表达式的异常,并停止执行该查询。 - - ## 插入 SELECT 查询结果 {#inserting-the-results-of-select} **语法** @@ -138,7 +133,6 @@ INSERT INTO x WITH y AS (SELECT * FROM numbers(10)) SELECT * FROM y; WITH y AS (SELECT * FROM numbers(10)) INSERT INTO x SELECT * FROM y; ``` - ## 从文件中插入数据 {#inserting-data-from-a-file} **语法** @@ -197,7 +191,6 @@ INSERT INTO infile_globs FROM INFILE 'input_?.csv' FORMAT CSV; ::: - ## 使用表函数插入数据 {#inserting-using-a-table-function} 可以向由[表函数](../../sql-reference/table-functions/index.md)引用的表中插入数据。 @@ -227,7 +220,6 @@ SELECT * FROM simple_table; └─────┴───────────────────────┘ ``` - ## 在 ClickHouse Cloud 中插入数据 {#inserting-into-clickhouse-cloud} 默认情况下,ClickHouse Cloud 上的服务会提供多个副本以实现高可用性。当连接到某个服务时,连接会建立到这些副本中的一个。 @@ -242,15 +234,12 @@ SELECT .... 
SETTINGS select_sequential_consistency = 1; 请注意,使用 `select_sequential_consistency` 会增加 ClickHouse Keeper(ClickHouse Cloud 内部使用的组件)的负载,并且可能会视该服务的负载情况导致性能下降。除非确有必要,否则我们不建议启用此设置。推荐的做法是在同一会话中执行读写操作,或者使用基于原生协议(从而支持粘性连接)的客户端驱动程序。 - ## 在复制部署中执行插入 {#inserting-into-a-replicated-setup} 在复制部署中,数据在完成复制后才会在其他副本上可见。`INSERT` 执行后,会立即开始复制过程(在其他副本上下载数据)。这与 ClickHouse Cloud 不同,后者会将数据直接写入共享存储,由副本订阅元数据变更。 请注意,对于复制部署,`INSERT` 操作有时可能会花费相当长的时间(大约一秒量级),因为它需要向 ClickHouse Keeper 提交以完成分布式共识。将 S3 用作存储也会引入额外的延迟。 - - ## 性能注意事项 {#performance-considerations} `INSERT` 会按照主键对输入数据进行排序,并根据分区键将其拆分为多个分区。如果一次性向多个分区插入数据,可能会显著降低 `INSERT` 查询的性能。为避免这种情况: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md index 79fb1538aaa..55faf066dec 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/parallel_with.md @@ -7,14 +7,10 @@ title: 'PARALLEL WITH 子句' doc_type: 'reference' --- - - # PARALLEL WITH 子句 {#parallel-with-clause} 允许并行执行多个语句。 - - ## 语法 {#syntax} ```sql @@ -25,7 +21,6 @@ doc_type: 'reference' 在许多情况下,并行执行语句可能比按顺序执行相同的一组语句更快。例如,`statement1 PARALLEL WITH statement2 PARALLEL WITH statement3` 往往比 `statement1; statement2; statement3` 更快。 - ## 示例 {#examples} 并行创建两个表: @@ -44,13 +39,10 @@ PARALLEL WITH DROP TABLE table2; ``` - ## 设置 {#settings} [max_threads](../../operations/settings/settings.md#max_threads) 设置用于控制要启动的线程数。 - - ## 与 UNION 的比较 {#comparison-with-union} `PARALLEL WITH` 子句与 [UNION](select/union.md) 有些相似,`UNION` 也会并行执行其操作数。但它们之间存在一些差异: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md index 6289d3738b4..9f9bf8ce48c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/revoke.md @@ -7,14 +7,10 @@ title: 'REVOKE 语句' doc_type: 'reference' --- - - # REVOKE 语句 {#revoke-statement} 从用户或角色撤销已授予的权限。 - - ## 语法 {#syntax} **撤销用户权限** @@ -29,7 +25,6 @@ REVOKE [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...] ON {db.t REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR] role [,...] FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] ``` - ## 描述 {#description} 要撤销某些权限时,可以使用作用范围比计划撤销的权限更宽泛的权限。例如,如果某个用户拥有 `SELECT (x,y)` 权限,管理员可以执行 `REVOKE SELECT(x,y) ...`,或者 `REVOKE SELECT * ...`,甚至执行 `REVOKE ALL PRIVILEGES ...` 查询来撤销该权限。 @@ -38,8 +33,6 @@ REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR] role [,...] FROM {user | rol 可以仅撤销权限的一部分。例如,如果用户拥有 `SELECT *.*` 权限,你可以撤销其中从某些表或某个数据库读取数据的权限。 - - ## 示例 {#examples} 为用户账户 `john` 授予在除 `accounts` 数据库之外的所有数据库上执行 `SELECT` 的权限: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md index 881f885065a..8ddfd0ee175 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/apply_modifier.md @@ -7,21 +7,16 @@ keywords: ['APPLY', 'modifier'] doc_type: 'reference' --- - - # APPLY 修饰符 {#apply} > 允许对查询的外部表表达式返回的每一行调用某个函数。 - - ## 语法 {#syntax} ```sql SELECT APPLY( ) FROM [db.]table_name ``` - ## 示例 {#example} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md index 568cc673a39..a677f6a4297 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/array-join.md @@ -6,8 +6,6 @@ title: 'ARRAY JOIN 子句' doc_type: 'reference' --- - - # ARRAY JOIN 子句 {#array-join-clause} 对于包含数组列的表,一个常见操作是生成一个新表:在该新表中,原始数组列中的每个数组元素各占一行,而其他列的值会被复制重复。这是 `ARRAY JOIN` 子句的基本用例。 @@ -29,7 +27,6 @@ FROM * `ARRAY JOIN` - 在默认情况下,`JOIN` 结果中不包含空数组。 * `LEFT ARRAY JOIN` - `JOIN` 结果中会包含含有空数组的行。空数组的值被设置为数组元素类型的默认值(通常是 0、空字符串或 NULL)。 - ## 基本 ARRAY JOIN 示例 {#basic-array-join-examples} ### ARRAY JOIN 和 LEFT ARRAY JOIN {#array-join-left-array-join-examples} @@ -151,7 +148,6 @@ ORDER BY Reaches DESC LIMIT 10 ``` - ```text ┌──GoalID─┬─触达人数─┬─访问次数─┐ │ 53225 │ 3214 │ 1097 │ @@ -167,7 +163,6 @@ LIMIT 10 └─────────┴─────────┴────────┘ ``` - ## 使用别名 {#using-aliases} 可以在 `ARRAY JOIN` 子句中为数组指定一个别名。在这种情况下,可以通过该别名访问数组元素,但数组本身仍通过原始名称访问。示例: @@ -254,7 +249,6 @@ FROM arrays_test ARRAY JOIN arr AS a, [['a','b'],['c']] AS b SETTINGS enable_unaligned_array_join = 1; ``` - ```response ┌─s───────┬─arr─────┬─a─┬─b─────────┐ │ Hello │ [1,2] │ 1 │ ['a','b'] │ @@ -267,7 +261,6 @@ SETTINGS enable_unaligned_array_join = 1; └─────────┴─────────┴───┴───────────┘ ``` - ## ARRAY JOIN 与嵌套数据结构 {#array-join-with-nested-data-structure} `ARRAY JOIN` 也可以用于[嵌套数据结构](../../../sql-reference/data-types/nested-data-structures/index.md): @@ -371,7 +364,6 @@ FROM nested_test ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; ``` - ```response ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ │ 你好 │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ @@ -382,7 +374,6 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; └───────┴─────┴─────┴─────────┴────────────┴─────┘ ``` - ## 实现细节 {#implementation-details} 在运行 `ARRAY JOIN` 时,查询的执行顺序会被优化。尽管在查询中 `ARRAY JOIN` 必须始终写在 [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) 子句之前,但从技术上讲,它们可以按任意顺序执行,除非需要使用 `ARRAY JOIN` 的结果进行过滤。具体执行顺序由查询优化器决定。 @@ -393,8 +384,6 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; 
`arrayJoin` 始终会被执行,且不支持短路函数求值。这是因为它是一个在查询分析和执行过程中与其他所有函数分开处理的特殊函数,并且需要额外的逻辑,而这些逻辑无法与短路函数执行配合使用。原因在于结果中的行数取决于 `arrayJoin` 的结果,实现对 `arrayJoin` 的惰性执行过于复杂且代价高昂。 - - ## 相关内容 {#related-content} - 博客文章:[在 ClickHouse 中处理时序数据](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md index 945334c5d2a..bcd2fb16401 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/distinct.md @@ -6,8 +6,6 @@ title: 'DISTINCT 子句' doc_type: 'reference' --- - - # DISTINCT 子句 {#distinct-clause} 如果指定了 `SELECT DISTINCT`,查询结果中只会保留唯一的行。也就是说,在所有完全相同的行集合中,最终结果每组只会保留一行。 @@ -56,7 +54,6 @@ SELECT DISTINCT ON (a,b) * FROM t1; └───┴───┴───┘ ``` - ## DISTINCT 和 ORDER BY {#distinct-and-order-by} ClickHouse 支持在单个查询中对不同的列分别使用 `DISTINCT` 和 `ORDER BY` 子句。`DISTINCT` 子句会先于 `ORDER BY` 子句执行。 @@ -104,13 +101,10 @@ SELECT DISTINCT a FROM t1 ORDER BY b DESC; 在编写查询时请将这一实现特性考虑在内。 - ## NULL 处理 {#null-processing} `DISTINCT` 与 [NULL](/sql-reference/syntax#null) 的行为就好像 `NULL` 是一个具体的值,并且 `NULL==NULL`。换句话说,在 `DISTINCT` 的结果中,包含 `NULL` 的不同组合只会出现一次。这与大多数其他上下文中的 `NULL` 处理方式不同。 - - ## 替代方案 {#alternatives} 也可以在不使用任何聚合函数的情况下,对 `SELECT` 子句中指定的同一组值使用 [GROUP BY](/sql-reference/statements/select/group-by),从而获得相同的结果。但与基于 `GROUP BY` 的方式相比,仍存在一些差异: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md index c468ae208f5..5ee344b5e03 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/except_modifier.md @@ -7,21 +7,16 @@ keywords: ['EXCEPT', 'modifier'] doc_type: 'reference' --- - - # EXCEPT 修饰符 {#except} > 指定要从结果中排除的一个或多个列名。所有匹配的列名都会在输出中被省略。 - - ## 语法 {#syntax} ```sql SELECT EXCEPT ( col_name1 [, col_name2, col_name3, ...] ) FROM [db.]table_name ``` - ## 示例 {#examples} ```sql title="Query" diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md index 13b83d27ef2..aea8fd72294 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/format.md @@ -6,22 +6,16 @@ title: 'FORMAT 子句' doc_type: 'reference' --- - - # FORMAT 子句 {#format-clause} ClickHouse 支持多种[序列化格式](../../../interfaces/formats.md),可用于查询结果等多种用途。为 `SELECT` 的输出选择格式有多种方式,其中一种是在查询末尾指定 `FORMAT format`,从而以特定格式获取结果数据。 使用特定格式可能是为了方便使用、与其他系统集成或提升性能。 - - ## 默认格式 {#default-format} 如果省略 `FORMAT` 子句,则会使用默认格式。默认格式取决于配置以及用于访问 ClickHouse 服务器的接口。对于批处理模式下的 [HTTP 接口](../../../interfaces/http.md) 和 [命令行客户端](../../../interfaces/cli.md),默认格式为 `TabSeparated`。对于交互式模式下的命令行客户端,默认格式为 `PrettyCompact`(生成紧凑且易读的表格)。 - - ## 实现细节 {#implementation-details} 在使用命令行客户端时,数据始终以内部的高效格式(`Native`)在网络上传输。客户端会独立解析查询中的 `FORMAT` 子句并自行对数据进行格式化(从而减轻网络和服务器的额外负载)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md index b6517f4917a..40fe2bb9015 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/from.md @@ -6,8 +6,6 @@ title: 'FROM 子句' doc_type: 'reference' --- - - # FROM 子句 {#from-clause} `FROM` 子句指定要从哪些来源读取数据: @@ -29,7 +27,6 @@ 
FROM table SELECT * ``` - ## FINAL 修饰符 {#final-modifier} 当指定 `FINAL` 时,ClickHouse 会在返回结果之前对数据进行完全合并。这也会执行给定表引擎在合并过程中会进行的所有数据转换。 @@ -78,7 +75,6 @@ SET final = 1; SELECT x, y FROM mytable WHERE x > 1; ``` - ## 实现细节 {#implementation-details} 如果省略 `FROM` 子句,将会从 `system.one` 表中读取数据。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md index fa9248f33a6..2a57f669c6d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/group-by.md @@ -6,8 +6,6 @@ title: 'GROUP BY 子句' doc_type: 'reference' --- - - # GROUP BY 子句 {#group-by-clause} `GROUP BY` 子句会将 `SELECT` 查询切换到聚合模式,其工作方式如下: @@ -22,8 +20,6 @@ doc_type: 'reference' 还有另一种方式可以对表进行聚合。如果查询中只在聚合函数内部使用了表列,则可以省略 `GROUP BY` 子句,此时会假定按空键集进行聚合。此类查询总是恰好返回一行。 ::: - - ## NULL 处理 {#null-processing} 在分组操作中,ClickHouse 将 [NULL](/sql-reference/syntax#null) 视为一个具体值,并且认为 `NULL==NULL`。这与在大多数其他上下文中的 `NULL` 处理方式不同。 @@ -56,7 +52,6 @@ doc_type: 'reference' 如果你向 `GROUP BY` 传入多个键列,结果会给出所选数据的所有组合,就好像把 `NULL` 当作一个特定的取值一样。 - ## ROLLUP 修饰符 {#rollup-modifier} `ROLLUP` 修饰符用于根据 `GROUP BY` 列表中键表达式的顺序计算各级小计。小计行会追加在结果表的末尾。 @@ -130,7 +125,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH ROLLUP; * 用于实现 SQL 标准兼容性的 [group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) 设置。 - ## CUBE 修饰符 {#cube-modifier} `CUBE` 修饰符用于对 `GROUP BY` 列表中键表达式的每一种组合计算小计。这些小计行会追加在结果表的末尾。 @@ -175,7 +169,6 @@ SELECT 年, 月, 日, count(*) FROM t GROUP BY CUBE(年, 月, 日); 未包含在 `GROUP BY` 中的列会被填充为 0。 - ```text ┌─年─┬─月─┬─日─┬─count()─┐ │ 2020 │ 10 │ 15 │ 1 │ @@ -229,7 +222,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH CUBE; * 有关 SQL 标准兼容性,请参见 [group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) 设置。 - ## WITH TOTALS 修饰符 
{#with-totals-modifier} 如果指定了 `WITH TOTALS` 修饰符,将会额外计算一行数据。该行的键列包含默认值(零或空字符串),聚合函数列则包含在所有行上的聚合结果(即「总计」值)。 @@ -266,8 +258,6 @@ SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH CUBE; 你可以在子查询中使用 `WITH TOTALS`,包括位于 [JOIN](/sql-reference/statements/select/join.md) 子句中的子查询(在这种情况下,相应的总计值会被合并)。 - - ## GROUP BY ALL {#group-by-all} `GROUP BY ALL` 等同于在 GROUP BY 中列出所有在 SELECT 子句中出现且不是聚合函数的表达式。 @@ -316,7 +306,6 @@ FROM t GROUP BY substring(a, 4, 2), substring(a, 1, 2) ``` - ## 示例 {#examples} 示例: @@ -344,7 +333,6 @@ GROUP BY domain 对于遇到的每个不同的键值,`GROUP BY` 会计算一组聚合函数的结果。 - ## GROUPING SETS 修饰符 {#grouping-sets-modifier} 这是最通用的修饰符。 @@ -382,7 +370,6 @@ GROUPING SETS * 有关 SQL 标准兼容性,请参见 [group_by_use_nulls](/operations/settings/settings.md#group_by_use_nulls) 设置。 - ## 实现细节 {#implementation-details} 聚合是列式 DBMS 最重要的特性之一,因此它的实现也是 ClickHouse 中优化最充分的部分之一。默认情况下,聚合在内存中使用哈希表完成。它有 40 多种特化实现,会根据“分组键”的数据类型自动选择。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md index d07aba55b1d..86e45bcb0cf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/join.md @@ -7,8 +7,6 @@ keywords: ['INNER JOIN', 'LEFT JOIN', 'LEFT OUTER JOIN', 'RIGHT JOIN', 'RIGHT OU doc_type: 'reference' --- - - # JOIN 子句 {#join-clause} `JOIN` 子句通过使用一个或多个表中共有的值,将这些表的列组合在一起生成一个新表。它是支持 SQL 的数据库中常见的操作,对应于[关系代数](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators)中的连接(join)。对单个表自身进行连接的特殊情况通常被称为“自连接”(self-join)。 @@ -24,7 +22,6 @@ FROM `ON` 子句中的表达式和 `USING` 子句中的列称为“连接键”(join keys)。除非另有说明,`JOIN` 会从具有匹配“连接键”的行生成[笛卡尔积](https://en.wikipedia.org/wiki/Cartesian_product),这可能会产生比源表多得多的结果行。 - ## 支持的 JOIN 类型 {#supported-types-of-join} 支持所有标准的 [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) 类型: @@ -55,8 +52,6 @@ ClickHouse 还提供了额外的 
join 类型: 当 [join_algorithm](../../../operations/settings/settings.md#join_algorithm) 设置为 `partial_merge` 时,仅在严格性为 `ALL` 时才支持 `RIGHT JOIN` 和 `FULL JOIN`(不支持 `SEMI`、`ANTI`、`ANY` 和 `ASOF`)。 ::: - - ## 设置 {#settings} 可以使用 [`join_default_strictness`](../../../operations/settings/settings.md#join_default_strictness) 设置来覆盖默认的 JOIN 类型。 @@ -74,8 +69,6 @@ ClickHouse 服务器在执行 `ANY JOIN` 操作时的行为取决于 [`any_join_ 使用 `cross_to_inner_join_rewrite` 设置来定义当 ClickHouse 无法将 `CROSS JOIN` 重写为 `INNER JOIN` 时的行为。默认值为 `1`,此时允许 JOIN 继续执行,但会更慢。如果希望抛出错误,请将 `cross_to_inner_join_rewrite` 设为 `0`;若希望不执行 CROSS JOIN,而是强制重写所有逗号/CROSS JOIN,请将其设为 `2`。如果在值为 `2` 时重写失败,您将收到一条错误消息:“Please, try to simplify `WHERE` section”。 - - ## ON 部分中的条件 {#on-section-conditions} `ON` 部分可以包含多个条件,这些条件通过 `AND` 和 `OR` 运算符组合。指定连接键的条件必须: @@ -167,7 +160,6 @@ SELECT a, b, val FROM t1 INNER JOIN t2 ON t1.a = t2.key OR t1.b = t2.key; :::note - 默认情况下,支持非等号条件,只要这些条件中使用的列都来自同一张表。 例如,`t1.a = t2.key AND t1.b > 0 AND t2.b > t2.c`,因为 `t1.b > 0` 只使用了 `t1` 的列,而 `t2.b > t2.c` 只使用了 `t2` 的列。 不过,你也可以尝试对类似 `t1.a = t2.key AND t1.b > t2.key` 这种条件的实验性支持,更多细节请参阅下方章节。 @@ -188,7 +180,6 @@ SELECT a, b, val FROM t1 INNER JOIN t2 ON t1.a = t2.key OR t1.b = t2.key AND t2. └───┴────┴─────┘ ``` - ## 针对来自不同表的列使用非等值条件的 JOIN {#join-with-inequality-conditions-for-columns-from-different-tables} ClickHouse 目前除等值条件外,还支持在 `ALL/ANY/SEMI/ANTI INNER/LEFT/RIGHT/FULL JOIN` 中使用非等值条件。非等值条件仅在 `hash` 和 `grace_hash` join 算法中受支持。使用 `join_use_nulls` 时不支持非等值条件。 @@ -239,7 +230,6 @@ key2 a2 1 1 1 0 0 \N key4 f 2 3 4 0 0 \N ``` - ## JOIN 键中的 NULL 值 {#null-values-in-join-keys} `NULL` 不等于任何值,包括它本身。这意味着如果某个表中用作 `JOIN` 键的列值为 `NULL`,它不会与另一张表中同样为 `NULL` 的值相匹配。 @@ -294,7 +284,6 @@ SELECT A.name, B.score FROM A LEFT JOIN B ON isNotDistinctFrom(A.id, B.id) └─────────┴───────┘ ``` - ## ASOF JOIN 用法 {#asof-join-usage} 当你需要联接那些没有精确匹配的记录时,`ASOF JOIN` 非常有用。 @@ -349,7 +338,6 @@ USING (等值列_1, ... 
等值列_N, asof_列) 在 [Join](../../../engines/table-engines/special/join.md) 表引擎中**不**受支持。 ::: - ## PASTE JOIN 用法 {#paste-join-usage} `PASTE JOIN` 的结果是一个表,包含左侧子查询的所有列,后面紧跟右侧子查询的所有列。 @@ -408,7 +396,6 @@ SETTINGS max_block_size = 2; └───┴──────┘ ``` - ## 分布式 JOIN {#distributed-join} 在包含分布式表的 JOIN 中,有两种执行方式: @@ -418,8 +405,6 @@ SETTINGS max_block_size = 2; 在使用 `GLOBAL` 时要小心。更多信息请参见[分布式子查询](/sql-reference/operators/in#distributed-subqueries)一节。 - - ## 隐式类型转换 {#implicit-type-conversion} `INNER JOIN`、`LEFT JOIN`、`RIGHT JOIN` 和 `FULL JOIN` 查询支持对“连接键”进行隐式类型转换。但是,如果左右表的连接键无法被转换为同一种类型,则查询无法执行(例如,没有任何一种数据类型能够同时容纳来自 `UInt64` 和 `Int64`,或 `String` 和 `Int32` 的所有值)。 @@ -462,7 +447,6 @@ SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b); └────┴──────┴───────────────┴─────────────────┘ ``` - ## 使用建议 {#usage-recommendations} ### 空单元格或 NULL 单元格的处理 {#processing-of-empty-or-null-cells} @@ -510,8 +494,6 @@ SELECT a, b, toTypeName(a), toTypeName(b) FROM t_1 FULL JOIN t_2 USING (a, b); 当达到上述任一限制时,ClickHouse 会按照 [join_overflow_mode](/operations/settings/settings#join_overflow_mode) 设置中的指示进行处理。 - - ## 示例 {#examples} 示例: @@ -555,7 +537,6 @@ LIMIT 10 └───────────┴────────┴────────┘ ``` - ## 相关内容 {#related-content} - 博客:[ClickHouse:具备完整 SQL JOIN 支持的极速数据库管理系统(DBMS)- 第 1 部分](https://clickhouse.com/blog/clickhouse-fully-supports-joins) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md index a7548200b82..d559f4d6484 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/limit.md @@ -42,7 +42,6 @@ LIMIT n, m 在这两种形式中,`n` 和 `m` 都必须是非负整数。 - ## 负数 LIMIT {#negative-limits} 使用负值从结果集的*末尾*选择行: @@ -81,7 +80,6 @@ LIMIT 10 OFFSET 0.5 -- 从中点开始的 10 行 LIMIT 10 OFFSET -20 -- 跳过最后 20 行后的 10 行 ``` - ## LIMIT ... 
WITH TIES {#limit--with-ties-modifier} `WITH TIES` 修饰符会额外返回那些 `ORDER BY` 值与 LIMIT 结果中最后一行相同的行。 @@ -129,7 +127,6 @@ SELECT * FROM ( 此修饰符可以与 [`ORDER BY ... WITH FILL`](/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier) 修饰符组合使用。 - ## 注意事项 {#considerations} **非确定性结果:** 如果未使用 [`ORDER BY`](../../../sql-reference/statements/select/order-by.md) 子句,返回的行可能不固定,并且在不同的查询执行中可能会有所不同。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md index c44af361b6c..d8263da58e5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/order-by.md @@ -6,8 +6,6 @@ title: 'ORDER BY 子句' doc_type: 'reference' --- - - # ORDER BY 子句 {#order-by-clause} `ORDER BY` 子句包含: @@ -27,8 +25,6 @@ doc_type: 'reference' 对于在排序表达式上具有相同值的行,返回顺序是任意且非确定性的。 如果在 `SELECT` 语句中省略 `ORDER BY` 子句,行的顺序同样是任意且非确定性的。 - - ## 特殊值的排序 {#sorting-of-special-values} 对 `NaN` 和 `NULL` 的排序顺序有两种处理方式: @@ -74,7 +70,6 @@ doc_type: 'reference' 在对浮点数进行排序时,NaN 会与其他数值分开处理。无论是升序还是降序排序,NaN 都会排在末尾。换句话说,在升序排序时,它们被视为比所有其他数值都大而排在最后;在降序排序时,它们被视为比其余所有数值都小,但同样排在最后。 - ## 排序规则支持 {#collation-support} 对于按 [String](../../../sql-reference/data-types/string.md) 值排序,可以指定排序规则(比较方式)。示例:`ORDER BY SearchPhrase COLLATE 'tr'` —— 按关键字升序排序,使用土耳其字母表、不区分大小写,并假定字符串采用 UTF-8 编码。在 ORDER BY 中的每个表达式都可以独立指定或不指定 `COLLATE`。如果指定了 `ASC` 或 `DESC`,则应在其后写上 `COLLATE`。使用 `COLLATE` 时,排序始终为不区分大小写。 @@ -83,8 +78,6 @@ doc_type: 'reference' 我们只建议在对少量行进行最终排序时使用 `COLLATE`,因为使用 `COLLATE` 的排序效率低于按字节进行的普通排序。 - - ## 排序规则示例 {#collation-examples} 仅使用 [String](../../../sql-reference/data-types/string.md) 值的示例: @@ -229,7 +222,6 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; 使用 [Tuple](../../../sql-reference/data-types/tuple.md) 的示例: - ```response ┌─x─┬─s───────┐ │ 1 │ (1,'Z') │ @@ -262,7 +254,6 @@ 
SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; └───┴─────────┘ ``` - ## 实现细节 {#implementation-details} 如果在 `ORDER BY` 的基础上再指定足够小的 [LIMIT](../../../sql-reference/statements/select/limit.md),会占用更少的 RAM。否则,内存消耗量与用于排序的数据量成正比。对于分布式查询处理,如果省略了 [GROUP BY](/sql-reference/statements/select/group-by),排序会在远程服务器上部分完成,然后在发起请求的服务器上进行结果合并。这意味着对于分布式排序,需要排序的数据量可能会大于单个服务器上的可用内存。 @@ -273,8 +264,6 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; 外部排序的效率远低于在 RAM 中进行的排序。 - - ## 数据读取优化 {#optimization-of-data-reading} 如果 `ORDER BY` 表达式的前缀与表的排序键前缀一致,则可以通过使用 [optimize_read_in_order](../../../operations/settings/settings.md#optimize_read_in_order) 设置来优化查询。 @@ -295,8 +284,6 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; 在 `MaterializedView` 引擎的表中,该优化适用于类似 `SELECT ... FROM merge_tree_table ORDER BY pk` 的视图。但对于类似 `SELECT ... FROM view ORDER BY pk` 的查询,如果视图定义中的查询本身没有 `ORDER BY` 子句,则不支持该优化。 - - ## 带 WITH FILL 修饰符的 ORDER BY 表达式 {#order-by-expr-with-fill-modifier} 该修饰符也可以与 [LIMIT ... WITH TIES 修饰符](/sql-reference/statements/select/limit#limit--with-ties-modifier) 组合使用。 @@ -385,7 +372,6 @@ ORDER BY 结果: - ```text ┌───d1───────┬───d2───────┬─source───┐ │ 1970-01-11 │ 1970-01-02 │ original │ @@ -448,7 +434,6 @@ ORDER BY d2 WITH FILL; ``` - 结果: ```response @@ -615,7 +600,6 @@ SELECT n, source, inter FROM ( 结果: - ```text ┌───n─┬─source───┬─inter─┐ │ 0 │ │ 0 │ @@ -634,7 +618,6 @@ SELECT n, source, inter FROM ( └─────┴──────────┴───────┘ ``` - ## 按排序前缀分组填充 {#filling-grouped-by-sorting-prefix} 在某些情况下,按特定列中取值相同的行分别独立进行填充会很有用——一个很好的示例就是在时间序列中填充缺失值。 @@ -687,7 +670,6 @@ INTERPOLATE ( value AS 9999 ) 在这里,`value` 列被填充值为 `9999`,只是为了让填充的行更加显眼。 此行为通过设置 `use_with_fill_by_sorting_prefix` 参数来控制(该参数默认启用)。 - ## 相关内容 {#related-content} - 博客文章:[在 ClickHouse 中处理时间序列数据](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md index 0ebbd49ee70..e82f001a31d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/prewhere.md @@ -6,16 +6,12 @@ title: 'PREWHERE 子句' doc_type: 'reference' --- - - # PREWHERE 子句 {#prewhere-clause} PREWHERE 是一种用于更高效执行过滤的优化机制。即使未显式指定 `PREWHERE` 子句,该优化也会默认启用。其工作方式是自动将部分 [WHERE](../../../sql-reference/statements/select/where.md) 条件移动到 PREWHERE 阶段。`PREWHERE` 子句的作用只是用于在你认为自己比默认策略更了解如何进行优化时,手动控制这一优化行为。 启用 PREWHERE 优化后,首先只读取执行 PREWHERE 表达式所需的列。之后,再读取执行查询其余部分所需的其他列,但仅限于那些在至少某些行上 PREWHERE 表达式为 `true` 的数据块。如果存在大量数据块在所有行上 PREWHERE 表达式均为 `false`,并且 PREWHERE 所需的列比查询其他部分所需的列更少,那么在执行查询时通常可以显著减少从磁盘读取的数据量。 - - ## 手动控制 PREWHERE {#controlling-prewhere-manually} 该子句与 `WHERE` 子句具有相同的作用。区别在于它决定从表中读取哪些数据。对于在查询中仅被少数列使用、但能够提供强过滤效果的过滤条件,可以手动将其放入 `PREWHERE` 中进行控制,从而减少需要读取的数据量。 @@ -30,14 +26,10 @@ PREWHERE 是一种用于更高效执行过滤的优化机制。即使未显式 `PREWHERE` 部分在 `FINAL` 之前执行,因此,当在不属于表 `ORDER BY` 部分的字段上使用 `PREWHERE` 时,`FROM ... 
FINAL` 查询的结果可能会产生偏差。 ::: - - ## 限制 {#limitations} `PREWHERE` 仅支持由 [*MergeTree](../../../engines/table-engines/mergetree-family/index.md) 系列表引擎创建的表。 - - ## 示例 {#example} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md index b25e9d6ce2a..49c393241d3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/qualify.md @@ -6,22 +6,16 @@ title: 'QUALIFY 子句' doc_type: 'reference' --- - - # QUALIFY 子句 {#qualify-clause} 用于过滤窗口函数的结果。它类似于 [WHERE](../../../sql-reference/statements/select/where.md) 子句,但不同之处在于,`WHERE` 在窗口函数计算之前执行,而 `QUALIFY` 则在窗口函数计算之后执行。 在 `QUALIFY` 子句中可以通过别名引用 `SELECT` 子句中的窗口函数结果。或者,`QUALIFY` 子句也可以基于其他未在查询结果中返回的窗口函数结果进行过滤。 - - ## 限制 {#limitations} 当查询中不包含需要计算的窗口函数时,不能使用 `QUALIFY`。请改用 `WHERE`。 - - ## 示例 {#examples} 示例: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md index e44327039b6..48c3c6e3c66 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/select/where.md @@ -7,8 +7,6 @@ doc_type: 'reference' keywords: ['WHERE'] --- - - # WHERE 子句 {#where-clause} `WHERE` 子句允许过滤由 `SELECT` 的 [`FROM`](../../../sql-reference/statements/select/from.md) 子句返回的数据。 @@ -26,8 +24,6 @@ PREWHERE 是一种用于更高效执行过滤的优化手段。 即使没有显式指定 `PREWHERE` 子句,它默认也是启用的。 ::: - - ## 测试 `NULL` {#testing-for-null} 如需判断某个值是否为 [`NULL`](/sql-reference/syntax#null),请使用: @@ -36,8 +32,6 @@ PREWHERE 是一种用于更高效执行过滤的优化手段。 否则,包含 `NULL` 的表达式将永远不会为真。 - - ## 使用逻辑运算符过滤数据 {#filtering-data-with-logical-operators} 可以在 `WHERE` 
子句中使用以下[逻辑函数](/sql-reference/functions/logical-functions#and)来组合多个条件: @@ -47,15 +41,11 @@ PREWHERE 是一种用于更高效执行过滤的优化手段。 - [`or()`](/sql-reference/functions/logical-functions#or) 或 `OR` - [`xor()`](/sql-reference/functions/logical-functions#xor) - - ## 将 UInt8 列用作条件 {#using-uint8-columns-as-a-condition} 在 ClickHouse 中,`UInt8` 列可以直接作为布尔条件使用,其中 `0` 表示 `false`,任意非零值(通常为 `1`)表示 `true`。 此用法的示例见[下文](#example-uint8-column-as-condition)。 - - ## 使用比较运算符 {#using-comparison-operators} 可以使用以下[比较运算符](/sql-reference/operators#comparison-operators): @@ -76,8 +66,6 @@ PREWHERE 是一种用于更高效执行过滤的优化手段。 | `a BETWEEN b AND c` | `a >= b AND a <= c` | 区间检查(包含端点) | `price BETWEEN 100 AND 500` | | `a NOT BETWEEN b AND c` | `a < b OR a > c` | 区间外检查 | `price NOT BETWEEN 100 AND 500` | - - ## 模式匹配和条件表达式 {#pattern-matching-and-conditional-expressions} 除了比较运算符之外,还可以在 `WHERE` 子句中使用模式匹配和条件表达式。 @@ -92,8 +80,6 @@ PREWHERE 是一种用于更高效执行过滤的优化手段。 请参见[“模式匹配和条件表达式”](#examples-pattern-matching-and-conditional-expressions)了解使用示例。 - - ## 包含字面量、列或子查询的表达式 {#expressions-with-literals-columns-subqueries} `WHERE` 子句后面的表达式也可以包含[字面量](/sql-reference/syntax#literals)、列或子查询。子查询是嵌套的 `SELECT` 语句,用于返回在条件中使用的值。 @@ -119,7 +105,6 @@ WHERE category = 'Electronics' AND id IN (SELECT product_id FROM bestsellers) ``` - -- 使用逻辑运算符组合三个条件 WHERE (price > 100 OR category IN (SELECT category FROM featured)) AND in_stock = true @@ -240,7 +225,6 @@ WHERE (category = 'Electronics' OR category = 'Furniture') AND price < 400; ``` - ```response ┌─id─┬─name────┬─price─┬─category────┬─in_stock─┐ 1. 
│ 2 │ 鼠标 │ 25.5 │ 电子产品 │ true │ @@ -366,7 +350,6 @@ WHERE category = 'Electronics' AND in_stock = true; #### LIKE 示例 {#like-examples} - ```sql -- 查找名称中包含 'o' 的产品 SELECT * FROM products WHERE name LIKE '%o%'; diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md index 68e7e5be320..653e4e8f12f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/show.md @@ -17,8 +17,6 @@ doc_type: 'reference' 此外,用户需要具备 [`displaySecretsInShowAndSelect`](grant.md/#displaysecretsinshowandselect) 权限。 ::: - - ## SHOW CREATE TABLE | DICTIONARY | VIEW | DATABASE {#show-create-table--dictionary--view--database} 这些语句会返回一个 `String` 类型的单列, @@ -36,7 +34,6 @@ SHOW [CREATE] TABLE | TEMPORARY TABLE | DICTIONARY | VIEW | DATABASE [db.]table| 但不能真正用于创建表。 ::: - ## SHOW DATABASES {#show-databases} 该语句会列出所有数据库。 @@ -111,7 +108,6 @@ SHOW DATABASES LIMIT 2 * [`CREATE DATABASE`](/sql-reference/statements/create/database) - ## SHOW TABLES {#show-tables} `SHOW TABLES` 语句用于显示表列表。 @@ -190,7 +186,6 @@ SHOW TABLES FROM system LIMIT 2 * [`Create Tables`](/sql-reference/statements/create/table) * [`SHOW CREATE TABLE`](#show-create-table--dictionary--view--database) - ## SHOW COLUMNS {#show_columns} `SHOW COLUMNS` 语句用于显示列列表。 @@ -243,7 +238,6 @@ SHOW COLUMNS FROM 'orders' LIKE 'delivery_%' * [`system.columns`](../../operations/system-tables/columns.md) - ## SHOW DICTIONARIES {#show-dictionaries} `SHOW DICTIONARIES` 语句用于显示 [字典(Dictionaries)](../../sql-reference/dictionaries/index.md) 的列表。 @@ -277,7 +271,6 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2 └──────────────┘ ``` - ## SHOW INDEX {#show-index} 显示表的主键索引和数据跳过索引(data skipping index)列表。 @@ -322,7 +315,6 @@ SHOW [EXTENDED] {INDEX | INDEXES | INDICES | KEYS } {FROM | IN}
[{FROM | SHOW INDEX FROM 'tbl' ``` - ```text title="Response" ┌─table─┬─non_unique─┬─key_name─┬─seq_in_index─┬─column_name─┬─collation─┬─cardinality─┬─sub_part─┬─packed─┬─null─┬─index_type───┬─comment─┬─index_comment─┬─visible─┬─expression─┐ │ tbl │ 1 │ blf_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ BLOOM_FILTER │ │ │ YES │ d, b │ @@ -339,7 +331,6 @@ SHOW INDEX FROM 'tbl' * [`system.tables`](../../operations/system-tables/tables.md) * [`system.data_skipping_indices`](../../operations/system-tables/data_skipping_indices.md) - ## SHOW PROCESSLIST {#show-processlist} 返回 [`system.processes`](/operations/system-tables/processes) 表的内容。该表包含当前正在处理的查询列表,但不包括 `SHOW PROCESSLIST` 查询。 @@ -361,7 +352,6 @@ $ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" ::: - ## SHOW GRANTS {#show-grants} `SHOW GRANTS` 语句用于显示某个用户所拥有的权限。 @@ -378,7 +368,6 @@ SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL] `FINAL` 修饰符会合并来自用户本身及其被授予角色(包括继承)的所有权限。 - ## SHOW CREATE USER {#show-create-user} `SHOW CREATE USER` 语句会显示[创建用户](../../sql-reference/statements/create/user.md)时使用的参数。 @@ -389,7 +378,6 @@ SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL] SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER] ``` - ## SHOW CREATE ROLE {#show-create-role} `SHOW CREATE ROLE` 语句会显示在[创建角色](../../sql-reference/statements/create/role.md)时使用的参数。 @@ -400,7 +388,6 @@ SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER] SHOW CREATE ROLE name1 [, name2 ...] ``` - ## SHOW CREATE ROW POLICY {#show-create-row-policy} `SHOW CREATE ROW POLICY` 语句用于显示在[创建行策略](../../sql-reference/statements/create/row-policy.md)时使用的参数。 @@ -411,7 +398,6 @@ SHOW CREATE ROLE name1 [, name2 ...] SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...] ``` - ## SHOW CREATE QUOTA {#show-create-quota} `SHOW CREATE QUOTA` 语句显示[创建配额](../../sql-reference/statements/create/quota.md)时所使用的参数。 @@ -422,7 +408,6 @@ SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...] 
SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT] ``` - ## SHOW CREATE SETTINGS PROFILE {#show-create-settings-profile} `SHOW CREATE SETTINGS PROFILE` 语句会显示在[创建设置配置文件](../../sql-reference/statements/create/settings-profile.md)时使用的参数。 @@ -433,7 +418,6 @@ SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT] SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...] ``` - ## SHOW USERS {#show-users} `SHOW USERS` 语句返回[用户账户](../../guides/sre/user-management/index.md#user-account-management)名称的列表。 @@ -445,7 +429,6 @@ SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...] 显示用户 ``` - ## SHOW ROLES {#show-roles} `SHOW ROLES` 语句返回一份 [roles](../../guides/sre/user-management/index.md#role-management) 列表。 @@ -454,8 +437,6 @@ SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...] ### 语法 {#syntax-14} - - ```sql title="Syntax" SHOW [CURRENT|ENABLED] ROLES ``` @@ -471,7 +452,6 @@ SHOW [CURRENT|ENABLED] ROLES SHOW [SETTINGS] PROFILES ``` - ## SHOW POLICIES {#show-policies} `SHOW POLICIES` 语句返回指定表的[行策略](../../guides/sre/user-management/index.md#row-policy-management)列表。 @@ -483,7 +463,6 @@ SHOW [SETTINGS] PROFILES SHOW [ROW] POLICIES [ON [db.]table] ``` - ## SHOW QUOTAS {#show-quotas} `SHOW QUOTAS` 语句返回[配额](../../guides/sre/user-management/index.md#quotas-management)列表。 @@ -495,7 +474,6 @@ SHOW [ROW] POLICIES [ON [db.]table] 显示配额 ``` - ## SHOW QUOTA {#show-quota} `SHOW QUOTA` 语句返回所有用户或当前用户的[配额](../../operations/quotas.md)使用情况。 @@ -503,8 +481,6 @@ SHOW [ROW] POLICIES [ON [db.]table] ### 语法 {#syntax-18} - - ```sql title="Syntax" SHOW [CURRENT] QUOTA ``` @@ -519,7 +495,6 @@ SHOW [CURRENT] QUOTA SHOW ACCESS ``` - ## SHOW CLUSTER(S) {#show-clusters} `SHOW CLUSTER(S)` 语句返回一个集群列表。 @@ -578,7 +553,6 @@ host_address: 127.0.0.1 port: 9000 ``` - ## SHOW SETTINGS {#show-settings} `SHOW SETTINGS` 语句返回系统设置及其值的列表。 @@ -636,7 +610,6 @@ SHOW CHANGED SETTINGS ILIKE '%MEMORY%' └──────────────────┴────────┴─────────────┘ ``` - ## SHOW SETTING {#show-setting} `SHOW SETTING` 语句返回指定设置名称对应的值。 @@ -651,7 +624,6 @@ SHOW SETTING 
<名称> * [`system.settings`](../../operations/system-tables/settings.md) 表 - ## 显示文件系统缓存 {#show-filesystem-caches} ### 示例 {#examples-7} @@ -670,7 +642,6 @@ SHOW SETTING <名称> * [`system.settings`](../../operations/system-tables/settings.md) 表 - ## SHOW ENGINES {#show-engines} `SHOW ENGINES` 语句会输出 [`system.table_engines`](../../operations/system-tables/table_engines.md) 表的内容, @@ -686,7 +657,6 @@ SHOW ENGINES [INTO OUTFILE filename] [FORMAT format] * [system.table_engines](../../operations/system-tables/table_engines.md) 表 - ## SHOW FUNCTIONS {#show-functions} `SHOW FUNCTIONS` 语句会返回 [`system.functions`](../../operations/system-tables/functions.md) 表的内容。 @@ -703,7 +673,6 @@ SHOW FUNCTIONS [LIKE | ILIKE ''] * [`system.functions`](../../operations/system-tables/functions.md) 表 - ## SHOW MERGES {#show-merges} `SHOW MERGES` 语句返回合并任务的列表。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md index 5ce13ac986b..2ea66fa61d6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/system.md @@ -288,8 +288,9 @@ SYSTEM INSTRUMENT ADD `QueryMetricLog::startQuery` SLEEP ENTRY 0 1 #### PROFILE {#instrument-add-profile} -用于测量函数从 `ENTRY` 到 `EXIT` 之间的耗时。 -分析结果会存储在 [`system.trace_log`](../../operations/system-tables/trace_log.md) 中。 +测量函数从 `ENTRY` 到 `EXIT` 之间的耗时。 +分析结果存储在 [`system.trace_log`](../../operations/system-tables/trace_log.md) 中,并可转换为 +[Chrome Event Trace Format](../../operations/system-tables/trace_log.md#chrome-event-trace-format)。 ```sql SYSTEM INSTRUMENT ADD `QueryMetricLog::startQuery` PROFILE diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md index 5463d50a08c..29915864c4b 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/statements/update.md @@ -28,7 +28,6 @@ UPDATE [db.]table [ON CLUSTER cluster] SET column1 = expr1 [, ...] [IN PARTITION `filter_expr` 必须是 `UInt8` 类型。此查询会将指定列的值更新为对应表达式的值,更新发生在那些 `filter_expr` 为非零的行上。 值会使用 `CAST` 运算符转换为列的数据类型。不支持更新用于计算主键或分区键的列。 - ## 示例 {#examples} ```sql @@ -37,15 +36,12 @@ UPDATE hits SET Title = 'Updated Title' WHERE EventDate = today(); UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; ``` - ## 轻量级更新不会立即更新数据 {#lightweight-update-does-not-update-data-immediately} 轻量级 `UPDATE` 是通过 **补丁部件(patch parts)** 实现的,这是一种只包含已更新列和行的特殊数据部件。 轻量级 `UPDATE` 会创建补丁部件,但不会立即对存储中的原始数据进行物理修改。 更新过程类似于 `INSERT ... SELECT ...` 查询,但 `UPDATE` 查询会在补丁部件创建完成后才返回。 - - 更新后的值具有以下特性: - 在应用补丁后,通过 `SELECT` 查询中**可立即看到** - 仅在后续的合并(merge)和变更(mutation)过程中才会在物理数据部分中被**实际物化** @@ -57,14 +53,10 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; 要使用轻量级更新,必须通过表设置 [`enable_block_number_column`](/operations/settings/merge-tree-settings#enable_block_number_column) 和 [`enable_block_offset_column`](/operations/settings/merge-tree-settings#enable_block_offset_column) 启用 `_block_number` 和 `_block_offset` 列的物化。 - - ## 轻量级删除 {#lightweight-delete} [轻量级 `DELETE`](/sql-reference/statements/delete) 查询可以作为轻量级 `UPDATE` 执行,而不是作为 `ALTER UPDATE` 变更语句。轻量级 `DELETE` 的实现由 [`lightweight_delete_mode`](/operations/settings/settings#lightweight_delete_mode) 设置进行控制。 - - ## 性能注意事项 {#performance-considerations} **轻量级更新的优势:** @@ -79,15 +71,11 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; - 过于频繁的小更新可能会导致 “too many parts” 错误。建议将多个更新合并为单个查询,例如在 `WHERE` 子句中通过一个 `IN` 子句统一指定所有要更新的 id - 轻量级更新旨在用于更新少量行(大约不超过表的 10%)。如果需要更新更大数量的数据,建议使用 [`ALTER TABLE ... 
UPDATE`](/sql-reference/statements/alter/update) 变更操作 - - ## 并发操作 {#concurrent-operations} 与重型 mutation 不同,轻量级更新不会等待当前正在运行的合并/变更操作完成。 并发轻量级更新的一致性由设置 [`update_sequential_consistency`](/operations/settings/settings#update_sequential_consistency) 和 [`update_parallel_mode`](/operations/settings/settings#update_parallel_mode) 控制。 - - ## 更新权限 {#update-permissions} `UPDATE` 需要 `ALTER UPDATE` 权限。要为指定用户在特定表上启用执行 `UPDATE` 语句的权限,请运行: @@ -96,7 +84,6 @@ UPDATE wikistat SET hits = hits + 1, time = now() WHERE path = 'ClickHouse'; GRANT ALTER UPDATE ON db.table TO username; ``` - ## 实现细节 {#details-of-the-implementation} Patch part 与常规 part 相同,但只包含已更新的列以及若干系统列: @@ -132,8 +119,6 @@ Patch part 之间可以相互合并,以减少在 `SELECT` 查询中需要应 join 模式比 merge 模式更慢且需要更多内存,但使用频率较低。 - - ## 相关内容 {#related-content} - [`ALTER UPDATE`](/sql-reference/statements/alter/update) - 大规模 `UPDATE` 操作 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md index 2251d4f2bc0..949cac5a9a8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorage.md @@ -11,20 +11,16 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # azureBlobStorage 表函数 {#azureblobstorage-table-function} 提供类似表的接口,用于在 [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) 中查询/插入文件。此表函数类似于 [s3 函数](../../sql-reference/table-functions/s3.md)。 - - ## 语法 {#syntax} ```sql azureBlobStorage(- connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure, partition_strategy, partition_columns_in_data_file, extra_credentials(client_id=, tenant_id=)]) ``` - ## 参数 
{#arguments} | 参数 | 说明 | @@ -41,14 +37,10 @@ azureBlobStorage(- connection_string|storage_account_url, container_name, blobpa | `partition_columns_in_data_file` | 可选参数。仅在使用 `HIVE` 分区策略时生效。用于告知 ClickHouse 数据文件中是否会写入分区列。默认值为 `false`。 | | `extra_credentials` | 使用 `client_id` 和 `tenant_id` 进行身份验证。如果提供了 `extra_credentials`,则其优先级高于 `account_name` 和 `account_key`。 | - - ## 返回值 {#returned_value} 具有指定结构的表,用于在指定文件中读写数据。 - - ## 示例 {#examples} 与 [AzureBlobStorage](/engines/table-engines/integrations/azureBlobStorage) 表引擎类似,用户可以使用 Azurite 模拟器进行本地 Azure 存储的开发。更多详情参见[此处](https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub%2Cblob-storage)。下面我们假设可以通过主机名 `azurite1` 访问 Azurite。 @@ -88,7 +80,6 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam └─────────┘ ``` - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 @@ -96,8 +87,6 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam - `_size` — 文件大小(字节)。类型:`Nullable(UInt64)`。如果文件大小未知,则值为 `NULL`。 - `_time` — 文件最后一次修改时间。类型:`Nullable(DateTime)`。如果时间未知,则值为 `NULL`。 - - ## 分区写入 {#partitioned-write} ### 分区策略 {#partition-strategy} @@ -123,7 +112,6 @@ select _path, * from azureBlobStorage(azure_conf2, storage_account_url = 'http:/ └─────────────────────────────────────────────────────────────────────────────────┴────┴──────┴─────────┘ ``` - ## use_hive_partitioning 设置 {#hive-style-partitioning} 这是一个设置,用于让 ClickHouse 在读取时解析 Hive 风格分区文件。它对写入没有任何影响。若要在读写两侧保持对称,请使用 `partition_strategy` 参数。 @@ -138,7 +126,6 @@ select _path, * from azureBlobStorage(azure_conf2, storage_account_url = 'http:/ SELECT * FROM azureBlobStorage(config, storage_account_url='...', container='...', blob_path='http://data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## 使用共享访问签名 (SAS) {#using-shared-access-signatures-sas-sas-tokens} 共享访问签名 (Shared Access Signature,SAS) 是一个 URI,用于授予对 Azure 
Storage 容器或文件的受限访问权限。使用它可以在不共享存储账户密钥的情况下,为存储账户资源提供限定时间的访问权限。详细信息请参阅[此处](https://learn.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature)。 @@ -171,6 +158,5 @@ FROM azureBlobStorage('https://clickhousedocstest.blob.core.windows.net/?sp=r&st 1 行结果,耗时 0.153 秒。 ``` - ## 相关内容 {#related} - [AzureBlobStorage 表引擎](engines/table-engines/integrations/azureBlobStorage.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md index 8231ac64521..39a91940236 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/azureBlobStorageCluster.md @@ -7,22 +7,17 @@ title: 'azureBlobStorageCluster' doc_type: 'reference' --- - - # azureBlobStorageCluster 表函数 {#azureblobstoragecluster-table-function} 允许在指定集群中的多个节点上并行处理来自 [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) 的文件。在发起节点上,它会与集群中的所有节点建立连接,展开 S3 文件路径中的星号通配符,并动态分发各个文件。在工作节点上,它向发起节点请求下一个要处理的任务并对其进行处理。该过程会重复进行,直到所有任务处理完毕。 此表函数类似于 [s3Cluster 函数](../../sql-reference/table-functions/s3Cluster.md)。 - - ## 语法 {#syntax} ```sql azureBlobStorageCluster(cluster_name, connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure]) ``` - ## 参数 {#arguments} | Argument | Description | @@ -37,14 +32,10 @@ azureBlobStorageCluster(cluster_name, connection_string|storage_account_url, con | `compression` | 支持的取值:`none`、`gzip/gz`、`brotli/br`、`xz/LZMA`、`zstd/zst`。默认情况下,会根据文件扩展名自动检测压缩格式(等同于设置为 `auto`)。 | | `structure` | 表结构。格式为 `'column1_name column1_type, column2_name column2_type, ...'`。 | - - ## 返回值 {#returned_value} 具有指定结构的表,用于在指定文件中读取或写入数据。 - - ## 示例 {#examples} 与 
[AzureBlobStorage](/engines/table-engines/integrations/azureBlobStorage) 表引擎类似,用户可以使用 Azurite 模拟器在本地进行 Azure 存储开发。更多详情请参见[此处](https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub%2Cblob-storage)。下面我们假设 Azurite 可通过主机名 `azurite1` 访问。 @@ -58,13 +49,10 @@ SELECT count(*) FROM azureBlobStorageCluster( 'auto', 'key UInt64') ``` - ## 使用共享访问签名 (SAS) {#using-shared-access-signatures-sas-sas-tokens} 示例请参见 [azureBlobStorage](/sql-reference/table-functions/azureBlobStorage#using-shared-access-signatures-sas-sas-tokens)。 - - ## 相关内容 {#related} - [AzureBlobStorage 引擎](../../engines/table-engines/integrations/azureBlobStorage.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md index 59167d6585f..721fb5089a3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/cluster.md @@ -7,8 +7,6 @@ title: 'clusterAllReplicas' doc_type: 'reference' --- - - # clusterAllReplicas 表函数 {#clusterallreplicas-table-function} 允许在无需创建 [Distributed](../../engines/table-engines/special/distributed.md) 表的情况下,访问集群在 `remote_servers` 配置段中定义的所有分片。查询时,每个分片仅会访问一个副本。 @@ -19,12 +17,8 @@ doc_type: 'reference' 所有可用的集群都列在 [system.clusters](../../operations/system-tables/clusters.md) 表中。 ::: - - ## 语法 {#syntax} - - ```sql cluster(['cluster_name', db.table, sharding_key]) cluster(['cluster_name', db, table, sharding_key]) @@ -40,13 +34,10 @@ clusterAllReplicas(['cluster_name', db, table, sharding_key]) | `db.table` or `db`, `table` | 数据库和表的名称。 | | `sharding_key` | 分片键。可选项。如果集群包含多个分片,则必须指定。 | - ## 返回值 {#returned_value} 来自各集群的数据集。 - - ## 使用宏 {#using_macros} `cluster_name` 可以包含宏——用花括号括起的替换占位符。替换后的值取自服务器配置文件中的 [macros](../../operations/server-configuration-parameters/settings.md#macros) 部分。 @@ -57,7 +48,6 @@ 
clusterAllReplicas(['cluster_name', db, table, sharding_key]) SELECT * FROM cluster('{cluster}', default.example_table); ``` - ## 使用方式和建议 {#usage_recommendations} 使用 `cluster` 和 `clusterAllReplicas` 表函数的效率低于创建 `Distributed` 表,因为在这种情况下,每个请求都会重新建立与服务器的连接。在处理大量查询时,请务必预先创建 `Distributed` 表,而不要使用 `cluster` 和 `clusterAllReplicas` 表函数。 @@ -70,8 +60,6 @@ SELECT * FROM cluster('{cluster}', default.example_table); `host`、`port`、`user`、`password`、`compression`、`secure` 等连接设置从 `` 配置节中获取。详情参见 [Distributed 引擎](../../engines/table-engines/special/distributed.md)。 - - ## 相关内容 {#related} - [skip_unavailable_shards](../../operations/settings/settings.md#skip_unavailable_shards) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md index 336bf0e21d1..642c71df1e0 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalake.md @@ -7,14 +7,10 @@ title: 'deltaLake' doc_type: 'reference' --- - - # deltaLake 表函数 {#deltalake-table-function} 为存放在 Amazon S3、Azure Blob Storage 或本地挂载文件系统中的 [Delta Lake](https://github.com/delta-io/delta) 表提供只读的类表访问接口。 - - ## 语法 {#syntax} `deltaLake` 是 `deltaLakeS3` 的别名,为了兼容性而保留。 @@ -29,20 +25,15 @@ deltaLakeAzure(connection_string|storage_account_url, container_name, blobpath, deltaLakeLocal(path, [,format]) ``` - ## 参数 {#arguments} 参数说明分别与表函数 `s3`、`azureBlobStorage`、`HDFS` 和 `file` 中参数说明一致。 `format` 表示 Delta Lake 表中数据文件的格式。 - - ## 返回值 {#returned_value} 具有指定结构的表,用于从指定的 Delta Lake 表中读取数据。 - - ## 示例 {#examples} 从 S3 中的表 `https://clickhouse-public-datasets.s3.amazonaws.com/delta_lake/hits/` 中选择行: @@ -63,7 +54,6 @@ LIMIT 2 └───────────────────────────────────────────────────────────────────────┴───────────┘ ``` - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 @@ -72,8 
+62,6 @@ LIMIT 2 - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,则该值为 `NULL`。 - `_etag` — 文件的 ETag。类型:`LowCardinality(String)`。如果 ETag 未知,则该值为 `NULL`。 - - ## 相关 {#related} - [DeltaLake 引擎](engines/table-engines/integrations/deltalake.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md index 63aa985363d..15cc045644c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/deltalakeCluster.md @@ -7,16 +7,12 @@ title: 'deltaLakeCluster' doc_type: 'reference' --- - - # deltaLakeCluster 表函数 {#deltalakecluster-table-function} 这是对 [deltaLake](sql-reference/table-functions/deltalake.md) 表函数的扩展。 允许在指定集群中的多个节点上,并行处理 Amazon S3 中来自 [Delta Lake](https://github.com/delta-io/delta) 表的文件。在发起节点上,它会创建到集群中所有节点的连接,并动态分派每个文件。在工作节点上,它会向发起节点请求下一个待处理任务并执行处理。该过程会重复进行,直到所有任务完成为止。 - - ## 语法 {#syntax} ```sql @@ -32,21 +28,16 @@ deltaLakeAzureCluster(cluster_name, named_collection[, option=value [,..]]) `deltaLakeS3Cluster` 是 `deltaLakeCluster` 的别名,两者都用于 S3。 - ## 参数 {#arguments} - `cluster_name` — 用于构建远程和本地服务器地址集合及连接参数的集群名称。 - 其他所有参数的说明与对应的 [deltaLake](sql-reference/table-functions/deltalake.md) 表函数中的参数说明相同。 - - ## 返回值 {#returned_value} 一个具有指定结构的表,用于在集群中读取 S3 上指定 Delta Lake 表的数据。 - - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 @@ -55,8 +46,6 @@ deltaLakeAzureCluster(cluster_name, named_collection[, option=value [,..]]) - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,则值为 `NULL`。 - `_etag` — 文件的 ETag。类型:`LowCardinality(String)`。如果 ETag 未知,则值为 `NULL`。 - - ## 相关内容 {#related} - [deltaLake 引擎](engines/table-engines/integrations/deltalake.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md index 7b513e4b662..8424964cb3b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/dictionary.md @@ -7,33 +7,24 @@ title: 'dictionary' doc_type: 'reference' --- - - # dictionary 表函数 {#dictionary-table-function} 将 [dictionary](../../sql-reference/dictionaries/index.md) 数据以 ClickHouse 表的形式呈现。其工作方式与 [Dictionary](../../engines/table-engines/special/dictionary.md) 引擎相同。 - - ## 语法 {#syntax} ```sql dictionary('dict') ``` - ## 参数 {#arguments} - `dict` — 字典名。[String](../../sql-reference/data-types/string.md)。 - - ## 返回值 {#returned_value} 一个 ClickHouse 表。 - - ## 示例 {#examples} 输入表 `dictionary_source_table`: @@ -67,7 +58,6 @@ SELECT * FROM dictionary('new_dictionary'); └────┴───────┘ ``` - ## 相关内容 {#related} - [Dictionary 引擎](/engines/table-engines/special/dictionary) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md index 4c0cc0bbcb8..9b0c9414ae6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/executable.md @@ -8,8 +8,6 @@ title: 'executable' doc_type: 'reference' --- - - # 用于 UDF 的 `executable` 表函数 {#executable-table-function-for-udfs} `executable` 表函数会基于用户自定义函数(UDF)的输出创建一张表,该函数定义在一个向 **stdout** 输出行的脚本中。可执行脚本存储在 `users_scripts` 目录中,并且可以从任意数据源读取数据。请确保你的 ClickHouse 服务器具备运行该可执行脚本所需的全部软件包。例如,如果这是一个 Python 脚本,则要确保服务器已经安装了所需的 Python 包。 @@ -20,8 +18,6 @@ doc_type: 'reference' 普通 UDF 函数与 `executable` 表函数和 `Executable` 表引擎之间的一个关键区别在于,普通 UDF 函数不能改变行数。例如,如果输入是 100 行,则结果也必须返回 100 行。使用 `executable` 表函数或 `Executable` 表引擎时,你的脚本可以执行任意所需的数据转换,包括复杂聚合。 ::: - - ## 语法 {#syntax} `executable` 
表函数需要三个参数,并且可以接收一个可选的输入查询列表: @@ -90,7 +86,6 @@ SELECT * FROM executable('generate_random.py', TabSeparated, 'id UInt32, random └────┴────────────┘ ``` - ## 设置 {#settings} - `send_chunk_header` - 控制在发送要处理的数据块之前,是否先发送行数。默认值为 `false`。 @@ -100,8 +95,6 @@ SELECT * FROM executable('generate_random.py', TabSeparated, 'id UInt32, random - `command_read_timeout` - 从命令的 stdout 读取数据的超时时间,单位为毫秒。默认值为 10000。 - `command_write_timeout` - 向命令的 stdin 写入数据的超时时间,单位为毫秒。默认值为 10000。 - - ## 将查询结果传递给脚本 {#passing-query-results-to-a-script} 请务必查看 `Executable` 表引擎中关于[如何将查询结果传递给脚本](../../engines/table-engines/special/executable.md#passing-query-results-to-a-script)的示例。下面展示如何使用 `executable` 表函数来执行该示例中相同的脚本: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md index 4695003b268..3538024cdce 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/file.md @@ -10,22 +10,18 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # file 表函数 {#file-table-function} 这是一种表引擎,提供类似表的接口,可对文件执行 SELECT 和 INSERT 操作,类似于 [s3](/sql-reference/table-functions/url.md) 表函数。处理本地文件时使用 `file()`,访问对象存储(例如 S3、GCS 或 MinIO)中的 bucket 时使用 `s3()`。 `file` 函数可以在 `SELECT` 和 `INSERT` 查询中使用,用于从文件读取或向文件写入数据。 - - ## 语法 {#syntax} ```sql file([path_to_archive ::] 路径 [,格式] [,结构] [,压缩]) ``` - ## 参数 {#arguments} | 参数 | 说明 | @@ -36,14 +32,10 @@ file([path_to_archive ::] 路径 [,格式] [,结构] [,压缩]) | `structure` | 表结构。格式:`'column1_name column1_type, column2_name column2_type, ...'`。 | | `compression` | 在 `SELECT` 查询中使用时表示现有压缩类型,在 `INSERT` 查询中使用时表示期望的压缩类型。支持的压缩类型有 `gz`、`br`、`xz`、`zst`、`lz4` 和 `bz2`。 | - - ## 返回值 {#returned_value} 用于从文件读取或向文件写入数据的表。 - - ## 写入文件示例 
{#examples-for-writing-to-a-file} ### 写入 TSV 文件 {#write-to-a-tsv-file} @@ -56,7 +48,6 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) 因此,数据被写入 `test.tsv` 文件: - ```bash # cat /var/lib/clickhouse/user_files/test.tsv {#cat-varlibclickhouseuser_filestesttsv} 1 2 3 @@ -77,18 +68,14 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) 因此,数据会写入三个文件:`test_1.tsv`、`test_2.tsv` 和 `test_3.tsv`。 - ```bash # cat /var/lib/clickhouse/user_files/test_1.tsv {#cat-varlibclickhouseuser_filestest_1tsv} 3 2 1 ``` - # cat /var/lib/clickhouse/user_files/test_2.tsv {#cat-varlibclickhouseuser_filestest_2tsv} 1 3 2 - - # cat /var/lib/clickhouse/user_files/test_3.tsv {#cat-varlibclickhouseuser_filestest_3tsv} 1 2 3 @@ -96,7 +83,6 @@ VALUES (1, 2, 3), (3, 2, 1), (1, 3, 2) ``` ``` - ## 从文件读取的示例 {#examples-for-reading-from-a-file} ### 对 CSV 文件执行 SELECT 查询 {#select-from-a-csv-file} @@ -154,7 +140,6 @@ file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32'); SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); ``` - ## 路径中的通配符 {#globs-in-path} 路径可以使用通配模式。文件必须匹配整个路径模式,而不仅仅是后缀或前缀。有一个例外:如果路径指向一个已存在的目录并且未使用通配符,则会在路径末尾隐式添加一个 `*`,从而选中该目录中的所有文件。 @@ -167,8 +152,6 @@ SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); 使用 `{}` 的写法类似于 [remote](remote.md) 和 [hdfs](hdfs.md) 表函数。 - - ## 示例 {#examples} **示例** @@ -228,7 +211,6 @@ SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32'); SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32'); ``` - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 @@ -236,8 +218,6 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 - `_size` — 文件大小(以字节为单位)。类型:`Nullable(UInt64)`。如果文件大小未知,则该值为 `NULL`。 - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,则该值为 `NULL`。 - - ## use_hive_partitioning 设置 {#hive-style-partitioning} 当 `use_hive_partitioning` 被设置为 1 时,ClickHouse 会在路径中检测 Hive 
风格的分区(`/name=value/`),并允许在查询中将分区列作为虚拟列使用。这些虚拟列的名称与分区路径中的名称相同,但会以 `_` 开头。 @@ -250,7 +230,6 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 SELECT * FROM file('data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## 设置 {#settings} | 设置项 | 说明 | @@ -261,8 +240,6 @@ SELECT * FROM file('data/path/date=*/country=*/code=*/*.parquet') WHERE _date > | [engine_file_skip_empty_files](operations/settings/settings.md#engine_file_skip_empty_files) | 允许在读取时跳过空文件。默认禁用。 | | [storage_file_read_method](/operations/settings/settings#engine_file_empty_if_not_exists) | 从存储文件读取数据的方法,可选值:`read`、`pread`、`mmap`(仅适用于 clickhouse-local)。默认值:clickhouse-server 为 `pread`,clickhouse-local 为 `mmap`。 | - - ## 相关内容 {#related} - [虚拟列](engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md index 721d6ffbfdb..40d9a2d469e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fileCluster.md @@ -7,8 +7,6 @@ title: 'fileCluster' doc_type: 'reference' --- - - # fileCluster 表函数 {#filecluster-table-function} 允许在集群中的多个节点上并行处理与指定路径匹配的文件。发起节点会与工作节点建立连接,展开文件路径中的通配符(globs),并将读文件任务分派给各个工作节点。每个工作节点都会向发起节点请求下一个要处理的文件,如此循环,直到所有任务完成(所有文件都被读取)。 @@ -18,15 +16,12 @@ doc_type: 'reference' 如果这些文件在不同节点之间存在差异,则返回值无法预先确定,并且取决于各工作节点向发起节点请求任务的先后顺序。 ::: - - ## 语法 {#syntax} ```sql fileCluster(cluster_name, path[, format, structure, compression_method]) ``` - ## 参数 {#arguments} | 参数 | 说明 | @@ -37,8 +32,6 @@ fileCluster(cluster_name, path[, format, structure, compression_method]) | `structure` | 以 `'UserID UInt64, Name String'` 
形式指定的表结构。用于确定列名和类型。类型:[String](../../sql-reference/data-types/string.md)。 | | `compression_method` | 压缩方法。支持的压缩类型包括 `gz`、`br`、`xz`、`zst`、`lz4` 和 `bz2`。 | - - ## 返回值 {#returned_value} 具有指定格式和结构,并包含来自匹配指定路径的文件的数据的表。 @@ -88,13 +81,10 @@ SELECT * FROM fileCluster('my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s Str └────┴────────┘ ``` - ## 路径中的通配符 {#globs-in-path} FileCluster 同样支持 [File](../../sql-reference/table-functions/file.md#globs-in-path) 表函数所支持的所有模式。 - - ## 相关 {#related} - [file 表函数](../../sql-reference/table-functions/file.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md index 82703135d31..34113d1672d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/format.md @@ -7,35 +7,26 @@ title: 'format' doc_type: 'reference' --- - - # format 表函数 {#format-table-function} 根据指定的输入格式从参数中解析数据。如果未指定 structure 参数,则从数据中提取结构。 - - ## 语法 {#syntax} ```sql format(format_name, [structure], data) ``` - ## 参数 {#arguments} - `format_name` — 数据的[格式](/sql-reference/formats)。 - `structure` - 表结构。可选。格式为 `'column1_name column1_type, column2_name column2_type, ...'`。 - `data` — 字符串字面量或返回一个按指定格式组织的数据字符串的常量表达式。 - - ## 返回值 {#returned_value} 一个数据表,包含根据指定格式和指定或提取的结构从 `data` 参数中解析得到的数据。 - - ## 示例 {#examples} 不带 `structure` 参数: @@ -109,7 +100,6 @@ $$) └───────┴─────┘ ``` - ## 相关内容 {#related} - [格式](../../interfaces/formats.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md index d31e469b2df..c401f322488 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzJSON.md @@ -7,21 +7,16 @@ title: 'fuzzJSON' doc_type: 'reference' --- - - # fuzzJSON 表函数 {#fuzzjson-table-function} 对 JSON 字符串进行随机扰动。 - - ## 语法 {#syntax} ```sql fuzzJSON({ named_collection [, option=value [,..]] | json_str[, random_seed] }) ``` - ## 参数 {#arguments} | 参数 | 说明 | @@ -41,14 +36,10 @@ fuzzJSON({ named_collection [, option=value [,..]] | json_str[, random_seed] }) | `min_key_length` (UInt64) | 最小键长度。应至少为 1。 | | `max_key_length` (UInt64) | 最大键长度。如果已指定,应大于或等于 `min_key_length`。 | - - ## 返回值 {#returned_value} 一个表对象,包含一列经过扰动的 JSON 字符串。 - - ## 用法示例 {#usage-example} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md index ff1cd554605..1b96d0f290a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/fuzzQuery.md @@ -7,21 +7,16 @@ title: 'fuzzQuery' doc_type: 'reference' --- - - # fuzzQuery 表函数 {#fuzzquery-table-function} 对给定的查询字符串进行随机扰动,生成不同的变体。 - - ## 语法 {#syntax} ```sql fuzzQuery(query[, max_query_length[, random_seed]]) ``` - ## 参数 {#arguments} | 参数 | 描述 | @@ -30,14 +25,10 @@ fuzzQuery(query[, max_query_length[, random_seed]]) | `max_query_length` | (UInt64) - 查询语句在模糊测试过程中可能达到的最大长度。 | | `random_seed` | (UInt64) - 用于生成稳定测试结果的随机种子。 | - - ## 返回值 {#returned_value} 一个具有单个列的表对象,该列中包含扰动后的查询字符串。 - - ## 使用示例 {#usage-example} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md index 73c91240daf..d973401850f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/gcs.md @@ -8,8 +8,6 @@ title: 'gcs' doc_type: 'reference' --- - - # gcs 表函数 {#gcs-table-function} 提供一个类表接口,用于在 [Google Cloud Storage(GCS)](https://cloud.google.com/storage/) 中执行 `SELECT` 和 `INSERT` 操作。需要具备 [`Storage Object User` IAM 角色](https://cloud.google.com/storage/docs/access-control/iam-roles)。 @@ -18,8 +16,6 @@ doc_type: 'reference' 如果集群中有多个副本,可以改用 [s3Cluster 函数](../../sql-reference/table-functions/s3Cluster.md)(同样适用于 GCS)来并行化写入。 - - ## 语法 {#syntax} ```sql @@ -32,7 +28,6 @@ GCS 表函数通过 GCS XML API 和 HMAC 密钥与 Google Cloud Storage 集成 有关端点和 HMAC 的更多信息,请参阅 [Google 互操作性文档](https://cloud.google.com/storage/docs/interoperability)。 ::: - ## 参数 {#arguments} | 参数 | 描述 | @@ -65,13 +60,10 @@ GCS 路径采用此格式,是因为 Google XML API 的 endpoint 与 JSON API | `no_sign_request` | 默认禁用。 | | `expiration_window_seconds` | 默认值为 120。 | - ## 返回值 {#returned_value} 具有指定结构的表,用于从指定文件读取或向其写入数据。 - - ## 示例 {#examples} 从 GCS 文件 `https://storage.googleapis.com/my-test-bucket-768/data.csv` 中选取表的前两行: @@ -104,7 +96,6 @@ LIMIT 2; └─────────┴─────────┴─────────┘ ``` - ## 用法 {#usage} 假设我们在 GCS 中有若干文件,其 URI 如下: @@ -198,7 +189,6 @@ SELECT count(*) FROM gcs(creds, url='https://s3-object-url.csv') ``` - ## 分区写入 {#partitioned-write} 如果在向 `GCS` 表插入数据时指定了 `PARTITION BY` 表达式,则会为每个分区值创建一个单独的文件。将数据拆分为多个独立文件有助于提升读操作的效率。 @@ -225,7 +215,6 @@ INSERT INTO TABLE FUNCTION 因此,数据将被写入位于不同 bucket 中的三个文件:`my_bucket_1/file.csv`、`my_bucket_10/file.csv` 和 `my_bucket_20/file.csv`。 - ## 相关 {#related} - [S3 表函数](s3.md) - [S3 引擎](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md index d1586f816a6..b03e70e4a56 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/generate.md @@ -7,23 +7,18 @@ title: 'generateRandom' doc_type: 'reference' --- - - # generateRandom 表函数 {#generaterandom-table-function} 根据给定的 schema 生成随机数据。 可使用这些数据填充测试表。 并非所有数据类型都受支持。 - - ## 语法 {#syntax} ```sql generateRandom(['name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]]) ``` - ## 参数 {#arguments} | 参数 | 描述 | @@ -34,14 +29,10 @@ generateRandom(['name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_str | `max_string_length` | 所有生成字符串的最大长度。默认值为 `10`。 | | `max_array_length` | 所有生成数组或 Map 的最大元素数量。默认值为 `10`。 | - - ## 返回值 {#returned_value} 符合所请求 schema 的表对象。 - - ## 使用示例 {#usage-example} ```sql @@ -89,7 +80,6 @@ SELECT * FROM generateRandom(generateRandomStructure(4, 101), 101) LIMIT 3; SELECT * FROM generateRandom() LIMIT 3; ``` - ```text ┌───c1─┬─────────c2─┬─────────────────────c3─┬──────────────────────c4─┬─c5───────┐ │ -128 │ 317300854 │ 2030-08-16 08:22:20.65 │ 1994-08-16 12:08:56.745 │ R0qgiC46 │ @@ -104,7 +94,6 @@ SELECT * FROM generateRandom() LIMIT 3; SELECT * FROM generateRandom(11) LIMIT 3; ``` - ```text ┌───────────────────────────────────────c1─┬─────────────────────────────────────────────────────────────────────────────c2─┬─────────────────────────────────────────────────────────────────────────────c3─┬─────────c4─┬─────────────────────────────────────────────────────────────────────────────c5─┬──────────────────────c6─┬─c7──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─c8──────────────────────────────────────┬─────────c9─┐ │ -77422512305044606600216318673365695785 │ 636812099959807642229.503817849012019401335326013846687285151335352272727523 │ 
-34944452809785978175157829109276115789694605299387223845886143311647505037529 │ 544473976 │ 111220388331710079615337037674887514156741572807049614590010583571763691328563 │ 22016.22623506465 │ {'2052-01-31 20:25:33':4306400876908509081044405485378623663,'1993-04-16 15:58:49':164367354809499452887861212674772770279,'2101-08-19 03:07:18':-60676948945963385477105077735447194811,'2039-12-22 22:31:39':-59227773536703059515222628111999932330} │ a7b2:8f58:4d07:6707:4189:80cf:92f5:902d │ 1950-07-14 │ @@ -117,6 +106,5 @@ SELECT * FROM generateRandom(11) LIMIT 3; 在 `max_array_length` 足够大的情况下,`generateRandom(generateRandomStructure(), [random seed], max_string_length, max_array_length)` 可能会生成非常庞大的输出,这是因为复杂类型(`Array`、`Tuple`、`Map`、`Nested`)的嵌套深度可能很大(最多可达 16 层)。 ::: - ## 相关内容 {#related-content} - 博客文章:[在 ClickHouse 中生成随机测试分布数据](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md index cd0deaa5504..19c5f195e1c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfs.md @@ -10,20 +10,16 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # hdfs 表函数 {#hdfs-table-function} 基于 HDFS 中的文件创建一张表。此表函数类似于 [url](../../sql-reference/table-functions/url.md) 和 [file](../../sql-reference/table-functions/file.md) 表函数。 - - ## 语法 {#syntax} ```sql hdfs(URI, 格式, 结构) ``` - ## 参数 {#arguments} | Argument | Description | @@ -32,8 +28,6 @@ hdfs(URI, 格式, 结构) | `format` | 文件的[格式](/sql-reference/formats)。 | | `structure`| 表的结构。格式:`'column1_name column1_type, column2_name column2_type, ...'`。 | - - ## 返回值 {#returned_value} 一个具有指定结构的表,用于在指定文件中读取或写入数据。 @@ 
-55,7 +49,6 @@ LIMIT 2 └─────────┴─────────┴─────────┘ ``` - ## 路径中的通配符 {#globs_in_path} 路径可以使用通配符匹配。文件必须匹配整个路径模式,而不仅仅是后缀或前缀。 @@ -110,7 +103,6 @@ SELECT count(*) FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') ``` - ## 虚拟列 {#virtual-columns} - `_path` — 文件的路径。类型:`LowCardinality(String)`。 @@ -118,8 +110,6 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin - `_size` — 文件的大小(字节数)。类型:`Nullable(UInt64)`。如果大小未知,则该值为 `NULL`。 - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,则该值为 `NULL`。 - - ## use_hive_partitioning 设置 {#hive-style-partitioning} 当将 `use_hive_partitioning` 设置为 1 时,ClickHouse 会在路径(`/name=value/`)中检测 Hive 风格的分区方式,并允许在查询中将分区列作为虚拟列使用。这些虚拟列的名称与分区路径中的名称相同,但会以下划线 `_` 开头。 @@ -132,15 +122,12 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin SELECT * FROM HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## 存储设置 {#storage-settings} - [hdfs_truncate_on_insert](operations/settings/settings.md#hdfs_truncate_on_insert) - 允许在插入之前截断目标文件。默认关闭。 - [hdfs_create_new_file_on_insert](operations/settings/settings.md#hdfs_create_new_file_on_insert) - 如果格式带有后缀,允许在每次插入时创建一个新文件。默认关闭。 - [hdfs_skip_empty_files](operations/settings/settings.md#hdfs_skip_empty_files) - 允许在读取时跳过空文件。默认关闭。 - - ## 相关内容 {#related} - [虚拟列](../../engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md index bcdf903d9d0..c48be6d8b39 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hdfsCluster.md @@ -7,21 +7,16 @@ title: 'hdfsCluster' doc_type: 'reference' --- - 
- # hdfsCluster 表函数 {#hdfscluster-table-function} 允许在指定集群的多个节点上并行处理来自 HDFS 的文件。在发起节点上,它会与集群中所有节点建立连接,展开 HDFS 文件路径中的星号通配符,并动态分派每个文件。在工作节点上,它会向发起节点请求下一个要处理的任务并对其进行处理。该过程会重复进行,直到所有任务都完成。 - - ## 语法 {#syntax} ```sql hdfsCluster(cluster_name, URI, format, structure) ``` - ## 参数 {#arguments} | Argument | Description | @@ -31,14 +26,10 @@ hdfsCluster(cluster_name, URI, format, structure) | `format` | 文件的[格式](/sql-reference/formats)。 | | `structure` | 表的结构。格式为 `'column1_name column1_type, column2_name column2_type, ...'`。 | - - ## 返回值 {#returned_value} 具有指定结构的表,用于从指定文件中读取数据。 - - ## 示例 {#examples} 1. 假设我们有一个名为 `cluster_simple` 的 ClickHouse 集群,并且在 HDFS 上有若干文件,其 URI 如下: @@ -68,7 +59,6 @@ FROM hdfsCluster('cluster_simple', 'hdfs://hdfs1:9000/{some,another}_dir/*', 'TS 如果文件列表中包含带前导零的数字范围,请分别为每一位数字使用花括号语法,或使用 `?`。 ::: - ## 相关内容 {#related} - [HDFS 引擎](../../engines/table-engines/integrations/hdfs.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md index fb3ad64bcb5..dbd810b1f93 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudi.md @@ -7,21 +7,16 @@ title: 'hudi' doc_type: 'reference' --- - - # Hudi 表函数 {#hudi-table-function} 提供只读的类表接口,用于访问存储在 Amazon S3 中的 Apache [Hudi](https://hudi.apache.org/) 表。 - - ## 语法 {#syntax} ```sql hudi(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) ``` - ## 参数 {#arguments} | 参数 | 说明 | @@ -32,14 +27,10 @@ hudi(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,co | `structure` | 表结构。格式为 `'column1_name column1_type, column2_name column2_type, ...'`。 | | `compression` | 可选参数。支持的取值:`none`、`gzip/gz`、`brotli/br`、`xz/LZMA`、`zstd/zst`。默认情况下,将根据文件扩展名自动检测压缩格式。 | - - ## 返回值 {#returned_value} 一个具有指定结构的表,用于从 S3 中指定的 Hudi 表读取数据。 - - ## 虚拟列 
{#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 @@ -48,8 +39,6 @@ hudi(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,co - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,则该值为 `NULL`。 - `_etag` — 文件的 etag 值。类型:`LowCardinality(String)`。如果 etag 未知,则该值为 `NULL`。 - - ## 相关内容 {#related} - [Hudi 引擎](/engines/table-engines/integrations/hudi.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md index f14739635c0..ef5b438ec26 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/hudiCluster.md @@ -7,23 +7,18 @@ title: 'hudiCluster 表函数' doc_type: 'reference' --- - - # hudiCluster 表函数 {#hudicluster-table-function} 这是对 [hudi](sql-reference/table-functions/hudi.md) 表函数的扩展。 允许在指定集群中的多个节点上并行处理 Amazon S3 中 Apache [Hudi](https://hudi.apache.org/) 表的文件。在发起节点上,它会创建到集群中所有节点的连接,并动态分发每个文件。在工作节点上,它向发起节点请求下一个要处理的任务并对其进行处理。此过程会重复,直到所有任务都完成。 - - ## 语法 {#syntax} ```sql hudiCluster(cluster_name, url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) ``` - ## 参数 {#arguments} | 参数 | 说明 | @@ -35,14 +30,10 @@ hudiCluster(cluster_name, url [,aws_access_key_id, aws_secret_access_key] [,form | `structure` | 表的结构。格式:`'column1_name column1_type, column2_name column2_type, ...'`。 | | `compression` | 可选参数。支持的取值:`none`、`gzip/gz`、`brotli/br`、`xz/LZMA`、`zstd/zst`。默认情况下,将根据文件扩展名自动检测压缩格式。 | - - ## 返回值 {#returned_value} 一个具有指定结构的表,用于在集群中从 S3 上指定的 Hudi 表读取数据。 - - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 @@ -51,8 +42,6 @@ hudiCluster(cluster_name, url [,aws_access_key_id, aws_secret_access_key] [,form - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,该值为 `NULL`。 - `_etag` — 文件的 ETag。类型:`LowCardinality(String)`。如果 ETag 未知,该值为 
`NULL`。 - - ## 相关 {#related} - [Hudi 引擎](engines/table-engines/integrations/hudi.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md index 0caf3f5b60e..dc5a4fefb68 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/iceberg.md @@ -1,5 +1,5 @@ --- -description: '为存储在 Amazon S3、Azure、HDFS 或本地的 Apache Iceberg 表提供类似表的只读接口。' +description: '提供对存储在 Amazon S3、Azure、HDFS 或本地的 Apache Iceberg 表的只读的类表接口。' sidebar_label: 'iceberg' sidebar_position: 90 slug: /sql-reference/table-functions/iceberg @@ -7,14 +7,10 @@ title: 'iceberg' doc_type: 'reference' --- - - # iceberg 表函数 {#iceberg-table-function} 为存储在 Amazon S3、Azure、HDFS 或本地的 Apache [Iceberg](https://iceberg.apache.org/) 表提供类似表的只读接口。 - - ## 语法 {#syntax} ```sql @@ -34,12 +30,12 @@ icebergLocal(named_collection[, option=value [,..]]) ## 参数 {#arguments} -参数说明分别与表函数 `s3`、`azureBlobStorage`、`HDFS` 和 `file` 中参数说明一致。 +各参数的说明分别与表函数 `s3`、`azureBlobStorage`、`HDFS` 和 `file` 中对应参数的说明一致。 `format` 表示 Iceberg 表中数据文件的格式。 ### 返回值 {#returned-value} -一个具有指定结构的表,用于从指定 Iceberg 表中读取数据。 +用于从指定的 Iceberg 表中读取数据、具有指定结构的表。 ### 示例 {#example} @@ -48,13 +44,13 @@ SELECT * FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_tab ``` :::important -ClickHouse 目前通过 `icebergS3`、`icebergAzure`、`icebergHDFS` 和 `icebergLocal` 表函数,以及 `IcebergS3`、`icebergAzure`、`IcebergHDFS` 和 `IcebergLocal` 表引擎,支持读取 Iceberg 格式 v1 和 v2 的数据。 +ClickHouse 目前支持通过 `icebergS3`、`icebergAzure`、`icebergHDFS` 和 `icebergLocal` 表函数以及 `IcebergS3`、`icebergAzure`、`IcebergHDFS` 和 `IcebergLocal` 表引擎读取 Iceberg 格式的 v1 和 v2 版本。 ::: ## 定义命名集合 {#defining-a-named-collection} -以下示例演示如何配置一个命名集合,用于存储 URL 和凭据: +下面是一个示例,演示如何配置命名集合来存储 URL 和凭证: ```xml @@ -76,105 +72,143 @@ DESCRIBE icebergS3(iceberg_conf, filename = 
'test_table') ``` -## 模式演进 {#schema-evolution} +## 使用数据目录 {#iceberg-writes-catalogs} -目前,借助 CH,你可以读取其 schema 随时间发生变更的 Iceberg 表。我们当前支持读取以下类型的表:列被新增或删除,或者列的顺序发生变化。你也可以将某个原本不允许为 NULL 的列修改为允许为 NULL 的列。此外,我们支持对简单类型进行受支持的类型转换,具体包括:  +Iceberg 表也可以与多种数据目录配合使用,例如 [REST Catalog](https://iceberg.apache.org/rest-catalog-spec/)、[AWS Glue Data Catalog](https://docs.aws.amazon.com/prescriptive-guidance/latest/serverless-etl-aws-glue/aws-glue-data-catalog.html) 和 [Unity Catalog](https://www.unitycatalog.io/)。 -* int -> long -* float -> double -* decimal(P, S) -> decimal(P', S) 其中 P' > P。 +:::important +在使用目录时,大多数用户会希望使用 `DataLakeCatalog` 数据库引擎,它将 ClickHouse 连接到数据目录以发现你的表。你可以使用这个数据库引擎来代替手动使用 `IcebergS3` 表引擎创建每个表。 +::: -目前,还不能对嵌套结构本身或数组和 Map 中元素的类型进行变更。 +要配合这些目录使用 Iceberg 表,请创建一个使用 `IcebergS3` 引擎的表,并提供必要的设置。 +例如,将 REST Catalog 与 MinIO 存储一起使用: +```sql +CREATE TABLE `database_name.table_name` +ENGINE = IcebergS3( + 'http://minio:9000/warehouse-rest/table_name/', + 'minio_access_key', + 'minio_secret_key' +) +SETTINGS + storage_catalog_type="rest", + storage_warehouse="demo", + object_storage_endpoint="http://minio:9000/warehouse-rest", + storage_region="us-east-1", + storage_catalog_url="http://rest:8181/v1" +``` -## 分区剪枝 {#partition-pruning} +或者,将 AWS Glue Data Catalog 与 S3 配合使用: -ClickHouse 在针对 Iceberg 表执行 SELECT 查询时支持分区剪枝,这有助于通过跳过无关的数据文件来优化查询性能。要启用分区剪枝,请设置 `use_iceberg_partition_pruning = 1`。有关 Iceberg 分区剪枝的更多信息,请参阅:https://iceberg.apache.org/spec/#partitioning。 +```sql +CREATE TABLE `my_database.my_table` +ENGINE = IcebergS3( + 's3://my-data-bucket/warehouse/my_database/my_table/', + 'aws_access_key', + 'aws_secret_key' +) +SETTINGS + storage_catalog_type = 'glue', + storage_warehouse = 'my_database', + object_storage_endpoint = 's3://my-data-bucket/', + storage_region = 'us-east-1', + storage_catalog_url = 'https://glue.us-east-1.amazonaws.com/iceberg/v1' +``` +## 模式演进 {#schema-evolution} + +目前,借助 ClickHouse,您可以读取模式随时间发生变化的 Iceberg 
表。我们当前支持读取以下情况的表:列被添加或删除,且列的顺序发生变化。您也可以将一个原本要求必须有值的列更改为允许为 NULL 的列。此外,我们支持对简单类型进行允许的类型转换,具体包括:   + +* int -> long +* float -> double +* decimal(P, S) -> decimal(P', S) 其中 P' > P。 -## 时间回溯 {#time-travel} +目前尚不支持修改嵌套结构,或更改数组和 Map 中元素的类型。 -ClickHouse 为 Iceberg 表提供时间回溯功能,允许基于指定的时间戳或快照 ID 查询历史数据。 +## 分区裁剪 {#partition-pruning} +ClickHouse 在对 Iceberg 表执行 SELECT 查询时支持分区裁剪,通过跳过无关的数据文件来优化查询性能。要启用分区裁剪,请将 `use_iceberg_partition_pruning` 设置为 `1`。有关 Iceberg 分区裁剪的更多信息,请参阅 https://iceberg.apache.org/spec/#partitioning。 +## 时间旅行 {#time-travel} + +ClickHouse 支持 Iceberg 表的时间旅行功能,允许你基于特定的时间戳或快照 ID 查询历史数据。 ## 处理包含已删除行的表 {#deleted-rows} -目前,仅支持包含 [position deletes](https://iceberg.apache.org/spec/#position-delete-files) 的 Iceberg 表。 +目前,仅支持带有[位置删除(position deletes)](https://iceberg.apache.org/spec/#position-delete-files)的 Iceberg 表。 以下删除方式**不受支持**: -* [Equality deletes](https://iceberg.apache.org/spec/#equality-delete-files) -* [Deletion vectors](https://iceberg.apache.org/spec/#deletion-vectors)(在 v3 中引入) +- [等值删除(equality deletes)](https://iceberg.apache.org/spec/#equality-delete-files) +- [删除向量(deletion vectors)](https://iceberg.apache.org/spec/#deletion-vectors)(在 v3 中引入) ### 基本用法 {#basic-usage} ```sql -SELECT * FROM example_table ORDER BY 1 -SETTINGS iceberg_timestamp_ms = 1714636800000 + SELECT * FROM example_table ORDER BY 1 + SETTINGS iceberg_timestamp_ms = 1714636800000 ``` ```sql -SELECT * FROM example_table ORDER BY 1 -SETTINGS iceberg_snapshot_id = 3547395809148285433 + SELECT * FROM example_table ORDER BY 1 + SETTINGS iceberg_snapshot_id = 3547395809148285433 ``` -注意:在同一个查询中,不能同时指定 `iceberg_timestamp_ms` 和 `iceberg_snapshot_id` 参数。 +注意:在同一个查询中无法同时指定 `iceberg_timestamp_ms` 和 `iceberg_snapshot_id` 参数。 -### 重要注意事项 {#important-considerations} -* **快照(snapshot)** 通常在以下情况下创建: +### 重要注意事项 {#important-considerations} +* **快照(Snapshot)** 通常在以下情况下创建: * 向表中写入新数据时 - * 执行某种数据压缩(compaction)操作时 -* **模式变更通常不会创建快照** —— 在对已经发生模式演进的表使用时光回溯功能时,这一点会导致一些重要行为差异。 +* **模式更改通常不会产生快照** —— 
在对经历过模式演进(schema evolution)的表使用时间回溯(time travel)时,这会导致一些重要的行为差异。 ### 示例场景 {#example-scenarios} -所有场景都使用 Spark 编写,因为 CH 目前尚不支持向 Iceberg 表写入数据。 +所有场景都使用 Spark 编写,因为 ClickHouse(CH)目前尚不支持向 Iceberg 表写入。 -#### 场景 1:仅有模式变更且没有新快照 {#scenario-1} +#### 场景 1:在没有新快照的情况下进行架构变更 {#scenario-1} -考虑以下一系列操作: +请考虑以下操作顺序: ```sql --- 创建一个包含两列的表 - CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example ( - order_number bigint, - product_code string - ) - USING iceberg - OPTIONS ('format-version'='2') - -- - 向表中插入数据 - INSERT INTO spark_catalog.db.time_travel_example VALUES - (1, 'Mars') + -- 创建一个包含两列的表 + CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example ( + order_number bigint, + product_code string + ) + USING iceberg + OPTIONS ('format-version'='2') - ts1 = now() // 一段伪代码示例 +-- 向表中插入数据 + INSERT INTO spark_catalog.db.time_travel_example VALUES + (1, 'Mars') -- - 修改表,添加一个新列 - ALTER TABLE spark_catalog.db.time_travel_example ADD COLUMN (price double) + ts1 = now() // 伪代码示例 - ts2 = now() +-- 修改表以添加新列 + ALTER TABLE spark_catalog.db.time_travel_example ADD COLUMN (price double) + + ts2 = now() -- - 向表中插入数据 - INSERT INTO spark_catalog.db.time_travel_example VALUES (2, 'Venus', 100) +-- 向表中插入数据 + INSERT INTO spark_catalog.db.time_travel_example VALUES (2, 'Venus', 100) - ts3 = now() + ts3 = now() -- - 在每个时间点查询此表 - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts1; +-- 在每个时间戳查询表 + SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts1; +------------+------------+ |order_number|product_code| +------------+------------+ | 1| Mars| +------------+------------+ - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts2; + SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts2; +------------+------------+ |order_number|product_code| @@ -182,7 +216,7 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 | 1| Mars| +------------+------------+ - SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts3; + 
SELECT * FROM spark_catalog.db.time_travel_example TIMESTAMP AS OF ts3; +------------+------------+-----+ |order_number|product_code|price| @@ -192,14 +226,15 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 +------------+------------+-----+ ``` -在不同时间点下的查询结果: +在不同时间点的查询结果: -* 在 ts1 和 ts2:只包含最初的两列 -* 在 ts3:显示全部三列,第一行的 price 列为 NULL +* 在 ts1 和 ts2:只显示原始的两列 +* 在 ts3:显示全部三列,第一行的 price 为 NULL -#### 场景 2:历史表结构与当前表结构的差异 {#scenario-2} -在当前时刻执行的时间旅行查询,显示的表结构可能与当前表的表结构不同: +#### 场景 2:历史与当前模式的差异 {#scenario-2} + +在当前时刻执行的时间回溯查询,其显示的模式可能与当前表不同: ```sql -- 创建表 @@ -237,15 +272,15 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 +------------+------------+-----+ ``` -之所以会出现这种情况,是因为 `ALTER TABLE` 不会创建新的快照,而是对当前表,Spark 会从最新的元数据文件中读取 `schema_id` 的值,而不是从快照中读取。 +这是因为 `ALTER TABLE` 不会创建新的快照,而 Spark 在处理当前表时,会从最新的元数据文件中读取 `schema_id` 的值,而不是从某个快照中读取。 -#### 场景 3:历史与当前表结构的差异 {#scenario-3} +#### 场景 3:历史与当前模式的差异 {#scenario-3} -第二点是,在进行时间穿梭查询时,你无法获取表在写入任何数据之前的状态: +第二个问题是,在进行时间回溯(time travel)时,你无法获取在尚未向表写入任何数据之前的表状态: ```sql --- 创建一个表 +-- 创建表 CREATE TABLE IF NOT EXISTS spark_catalog.db.time_travel_example_3 ( order_number bigint, product_code string @@ -255,92 +290,83 @@ SETTINGS iceberg_snapshot_id = 3547395809148285433 ts = now(); --- 在指定时间戳查询该表 - SELECT * FROM spark_catalog.db.time_travel_example_3 TIMESTAMP AS OF ts; -- 报错:无法找到早于 ts 的快照。 +-- 在特定时间戳查询表 + SELECT * FROM spark_catalog.db.time_travel_example_3 TIMESTAMP AS OF ts; -- 以错误结束:找不到早于 ts 的快照。 ``` -在 ClickHouse 中,其行为与 Spark 一致。你可以在概念上将 Spark 的 SELECT 查询替换为 ClickHouse 的 SELECT 查询,二者的工作方式是相同的。 +在 ClickHouse 中,其行为与 Spark 一致。你可以直接将 Spark 的 Select 查询类比为 ClickHouse 的 Select 查询,它们的工作方式是相同的。 ## 元数据文件解析 {#metadata-file-resolution} -在 ClickHouse 中使用 `iceberg` 表函数时,系统需要定位描述 Iceberg 表结构的正确 metadata.json 文件。以下是该解析过程的工作原理: - -### 候选文件搜索(按优先级顺序) {#candidate-search} - -1. **直接路径指定**: - * 如果设置了 `iceberg_metadata_file_path`,系统将把该路径与 Iceberg 表目录路径组合,使用该精确路径。 - -* 提供此设置后,所有其他解析设置将被忽略。 - -2. 
**表 UUID 匹配**: - * 如果指定了 `iceberg_metadata_table_uuid`,系统将: - * 仅查找 `metadata` 目录中的 `.metadata.json` 文件 - * 筛选包含与指定 UUID 匹配的 `table-uuid` 字段的文件(不区分大小写) +在 ClickHouse 中使用 `iceberg` 表函数时,系统需要定位描述 Iceberg 表结构的正确 metadata.json 文件。下面说明该解析过程是如何进行的: -3. **默认搜索**: - * 如果未提供上述任何设置,`metadata` 目录中的所有 `.metadata.json` 文件都将成为候选文件 +### 候选文件搜索(按优先级顺序) {#candidate-search} -### 选择最新文件 {#most-recent-file} +1. **直接指定路径**: +*如果你设置了 `iceberg_metadata_file_path`,系统会将其与 Iceberg 表目录路径拼接,并使用这个精确路径。 -使用上述规则识别候选文件后,系统将确定哪个文件是最新的: +* 当提供此设置时,其他所有解析相关的设置都会被忽略。 -* 如果启用了 `iceberg_recent_metadata_file_by_last_updated_ms_field`: +2. **按表 UUID 匹配**: +*如果指定了 `iceberg_metadata_table_uuid`,系统将: + *只检查 `metadata` 目录中的 `.metadata.json` 文件 + *仅保留其中 `table-uuid` 字段与所指定 UUID 匹配的文件(不区分大小写) -* 选择具有最大 `last-updated-ms` 值的文件 +3. **默认搜索**: +*如果未提供上述任一设置,则 `metadata` 目录中的所有 `.metadata.json` 文件都将作为候选文件 -* 否则: +### 选择最新的文件 {#most-recent-file} -* 选择具有最高版本号的文件 +在使用上述规则识别候选文件后,系统会确定其中最新的一个: -* (版本号在文件名中显示为 `V`,格式为 `V.metadata.json` 或 `V-uuid.metadata.json`) +* 如果启用了 `iceberg_recent_metadata_file_by_last_updated_ms_field`: +* 选择 `last-updated-ms` 值最大的文件 +* 否则: +* 选择版本号最高的文件 +* (版本号在文件名中以 `V` 的形式出现,文件名格式为 `V.metadata.json` 或 `V-uuid.metadata.json`) -**注意**: 所有提到的设置均为表函数设置(而非全局或查询级别设置),必须按如下方式指定: +**注意**:上述所有提到的设置都是表函数的设置(而非全局或查询级设置),必须按如下方式进行指定: ```sql SELECT * FROM iceberg('s3://bucket/path/to/iceberg_table', SETTINGS iceberg_metadata_table_uuid = 'a90eed4c-f74b-4e5b-b630-096fb9d09021'); ``` -**注意**:尽管 Iceberg Catalog 通常负责元数据解析,但 ClickHouse 中的 `iceberg` 表函数会直接将存储在 S3 中的文件解释为 Iceberg 表,因此理解这些解析规则非常重要。 +**注意**:尽管 Iceberg Catalog 通常负责元数据解析工作,但 ClickHouse 中的 `iceberg` 表函数会直接将存储在 S3 中的文件解释为 Iceberg 表,因此理解这些解析规则尤为重要。 ## 元数据缓存 {#metadata-cache} -`Iceberg` 表引擎和表函数支持将关于 manifest 文件、manifest 列表和元数据 JSON 的信息缓存在内存中。此功能由设置 `use_iceberg_metadata_files_cache` 控制,默认启用。 - - +`Iceberg` 表引擎和表函数支持元数据缓存,用于存储 manifest 文件、manifest 列表以及元数据 JSON 的相关信息。该缓存保存在内存中。此功能由 `use_iceberg_metadata_files_cache` 设置项控制,默认启用。 ## 别名 {#aliases} -表函数 
`iceberg` 现在是 `icebergS3` 的别名。 - - +`iceberg` 表函数现在是 `icebergS3` 的别名。 ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 - `_file` — 文件名。类型:`LowCardinality(String)`。 -- `_size` — 文件大小(字节数)。类型:`Nullable(UInt64)`。如果文件大小未知,则该值为 `NULL`。 -- `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,则该值为 `NULL`。 -- `_etag` — 文件的 ETag。类型:`LowCardinality(String)`。如果 ETag 未知,则该值为 `NULL`。 +- `_size` — 文件大小(以字节为单位)。类型:`Nullable(UInt64)`。如果文件大小未知,该值为 `NULL`。 +- `_time` — 文件最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,该值为 `NULL`。 +- `_etag` — 文件的 ETag。类型:`LowCardinality(String)`。如果 ETag 未知,该值为 `NULL`。 +## 向 Iceberg 表写入 {#writes-into-iceberg-table} +自 25.7 版本起,ClickHouse 支持修改用户拥有的 Iceberg 表。 -## 写入 Iceberg 表 {#writes-into-iceberg-table} - -从 25.7 版本开始,ClickHouse 支持修改用户的 Iceberg 表。 - -目前这是一个实验性特性,因此需要先将其启用: +目前这是一个实验性功能,因此需要先将其手动启用: ```sql SET allow_experimental_insert_into_iceberg = 1; ``` + ### 创建表 {#create-iceberg-table} -要创建一个空的 Iceberg 表,使用与读取相同的命令,但需要显式指定 schema。 -写入支持 Iceberg 规范中的所有数据格式,例如 Parquet、Avro、ORC。 +要创建自己的空 Iceberg 表,请使用与读取相同的命令,但需要显式指定表结构(schema)。 +写入支持 Iceberg 规范中定义的所有数据格式,例如 Parquet、Avro、ORC。 ### 示例 {#example-iceberg-writes-create} @@ -356,9 +382,10 @@ ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') 注意:要创建版本提示文件,请启用 `iceberg_use_version_hint` 设置。 如果要压缩 metadata.json 文件,请在 `iceberg_metadata_compression_method` 设置中指定编解码器名称。 + ### INSERT {#writes-inserts} -创建新表之后,可以使用常规的 ClickHouse 语法进行数据插入。 +创建新表后,可使用常规的 ClickHouse 语法插入数据。 ### 示例 {#example-iceberg-writes-insert} @@ -369,25 +396,26 @@ SELECT * FROM iceberg_writes_example FORMAT VERTICAL; -第 1 行: +行 1: ────── x: Pavel y: 777 -第 2 行: +行 2: ────── x: Ivanov y: 993 ``` + ### DELETE {#iceberg-writes-delete} -在 merge-on-read 格式中删除多余行在 ClickHouse 中同样受支持。 -该查询将创建带有 position delete 文件的新快照。 +ClickHouse 也支持在 merge-on-read 格式下删除多余行。 +此查询将创建一个包含 position delete 文件的新快照。 -注意:如果希望将来使用其他 Iceberg 引擎(例如 Spark)读取这些表,需要禁用配置项 `output_format_parquet_use_custom_encoder` 和 `output_format_parquet_parallel_encoding`。 -这是因为 
Spark 是通过 Parquet field-id 来读取这些文件的,而当启用这些配置项时,ClickHouse 目前尚不支持写出 field-id。 -我们计划在未来修复此行为。 +注意:如果您希望将来使用其他 Iceberg 引擎(例如 Spark)读取表,则需要禁用 `output_format_parquet_use_custom_encoder` 和 `output_format_parquet_parallel_encoding` 这两个设置。 +这是因为 Spark 是通过 parquet 字段 ID 来读取这些文件的,而在启用这些设置时,ClickHouse 目前尚不支持写出字段 ID。 +我们计划在未来修复这一行为。 ### 示例 {#example-iceberg-writes-delete} @@ -398,15 +426,16 @@ SELECT * FROM iceberg_writes_example FORMAT VERTICAL; -第 1 行: +行 1: ────── x: Ivanov y: 993 ``` + ### 模式演进 {#iceberg-writes-schema-evolution} -ClickHouse 允许对具有简单数据类型(非 tuple、非 array、非 map)的列进行添加、删除或修改操作。 +ClickHouse 允许对简单类型(非 tuple、非 array、非 map)列执行添加、删除或修改操作。 ### 示例 {#example-iceberg-writes-evolution} @@ -414,7 +443,7 @@ ClickHouse 允许对具有简单数据类型(非 tuple、非 array、非 map ALTER TABLE iceberg_writes_example MODIFY COLUMN y Nullable(Int64); SHOW CREATE TABLE iceberg_writes_example; - ┌─语句──────────────────────────────────────────────────────┐ + ┌─statement─────────────────────────────────────────────────┐ 1. │ CREATE TABLE default.iceberg_writes_example ↴│ │↳( ↴│ │↳ `x` Nullable(String), ↴│ @@ -426,7 +455,7 @@ SHOW CREATE TABLE iceberg_writes_example; ALTER TABLE iceberg_writes_example ADD COLUMN z Nullable(Int32); SHOW CREATE TABLE iceberg_writes_example; - ┌─语句──────────────────────────────────────────────────────┐ + ┌─statement─────────────────────────────────────────────────┐ 1. │ CREATE TABLE default.iceberg_writes_example ↴│ │↳( ↴│ │↳ `x` Nullable(String), ↴│ @@ -440,42 +469,39 @@ SELECT * FROM iceberg_writes_example FORMAT VERTICAL; -第 1 行: +行 1: ────── x: Ivanov y: 993 z: ᴺᵁᴸᴸ -``` - -ALTER TABLE iceberg_writes_example DROP COLUMN z; -SHOW CREATE TABLE iceberg_writes_example; -┌─statement─────────────────────────────────────────────────┐ - -1. │ CREATE TABLE default.iceberg_writes_example ↴│ +ALTER TABLE iceberg_writes_example DROP COLUMN z; +SHOW CREATE TABLE iceberg_writes_example; + ┌─statement─────────────────────────────────────────────────┐ +1. 
│ CREATE TABLE default.iceberg_writes_example ↴│ │↳( ↴│ │↳ `x` Nullable(String), ↴│ │↳ `y` Nullable(Int64) ↴│ │↳) ↴│ - │↳ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') │ + │↳ENGINE = IcebergLocal('/home/scanhex12/iceberg_example/') │ └───────────────────────────────────────────────────────────┘ SELECT * -FROM iceberg_writes_example +FROM iceberg_writes_example FORMAT VERTICAL; -第 1 行: +行 1: ────── x: Ivanov y: 993 +``` -```` ### 压缩 {#iceberg-writes-compaction} -ClickHouse 支持对 Iceberg 表进行压缩。目前可以在更新元数据的同时将位置删除文件合并到数据文件中。先前的快照 ID 和时间戳保持不变,因此时间旅行功能仍可使用相同的值。 +ClickHouse 支持对 Iceberg 表进行压缩。目前,它可以在更新元数据的同时,将 position delete 文件合并到数据文件中。先前的快照 ID 和时间戳保持不变,因此仍然可以使用相同的值进行时间旅行(time travel)。 -使用方法: +使用方法如下: ```sql SET allow_experimental_iceberg_compaction = 1 @@ -486,25 +512,14 @@ SELECT * FROM iceberg_writes_example FORMAT VERTICAL; -Row 1: +行 1: ────── x: Ivanov y: 993 -```` - - -## 使用目录的表 {#iceberg-writes-catalogs} - -上面描述的所有写入功能,同样可以通过 REST 和 Glue 目录来使用。 -要使用这些目录,请使用 `IcebergS3` 引擎创建表,并提供必要的设置: - -```sql -CREATE TABLE `database_name.table_name` ENGINE = IcebergS3('http://minio:9000/warehouse-rest/table_name/', 'minio_access_key', 'minio_secret_key') -SETTINGS storage_catalog_type="rest", storage_warehouse="demo", object_storage_endpoint="http://minio:9000/warehouse-rest", storage_region="us-east-1", storage_catalog_url="http://rest:8181/v1", ``` ## 另请参阅 {#see-also} * [Iceberg 引擎](/engines/table-engines/integrations/iceberg.md) -* [Iceberg 集群表函数](/sql-reference/table-functions/icebergCluster.md) +* [Iceberg 集群表函数](/sql-reference/table-functions/icebergCluster.md) \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md index c1eebde7b99..0645338b042 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/icebergCluster.md @@ -7,16 +7,12 @@ title: 'icebergCluster' doc_type: 'reference' --- - - # icebergCluster 表函数 {#icebergcluster-table-function} 这是对 [iceberg](/sql-reference/table-functions/iceberg.md) 表函数的扩展。 允许在指定集群中的多个节点上并行处理来自 Apache [Iceberg](https://iceberg.apache.org/) 的文件。在发起节点上,它会创建到集群中所有节点的连接,并为每个文件进行动态分发。在工作节点上,它会向发起节点请求下一个要处理的任务并对其进行处理。这个过程会反复进行,直到所有任务都完成为止。 - - ## 语法 {#syntax} ```sql @@ -30,7 +26,6 @@ icebergHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) icebergHDFSCluster(cluster_name, named_collection[, option=value [,..]]) ``` - ## 参数 {#arguments} * `cluster_name` — 用于构建访问远程和本地服务器所需的一组地址和连接参数的集群名称。 @@ -46,7 +41,6 @@ icebergHDFSCluster(cluster_name, named_collection[, option=value [,..]]) SELECT * FROM icebergS3Cluster('cluster_simple', 'http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test') ``` - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md index f2023b4cd13..4bb893f837d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/loop.md @@ -5,12 +5,8 @@ title: 'loop' doc_type: 'reference' --- - - # `loop` 表函数 {#loop-table-function} - - ## 语法 {#syntax} ```sql @@ -20,7 +16,6 @@ SELECT ... FROM loop(table); SELECT ... FROM loop(other_table_function(...)); ``` - ## 参数 {#arguments} | 参数 | 说明 | @@ -29,14 +24,10 @@ SELECT ... 
FROM loop(other_table_function(...)); | `table` | 表名称。 | | `other_table_function(...)` | 其他表函数。例如:`SELECT * FROM loop(numbers(10));` 中的 `other_table_function(...)` 即为 `numbers(10)`。 | - - ## 返回值 {#returned_values} 在无限循环中返回查询结果。 - - ## 示例 {#examples} 从 ClickHouse 中查询数据: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md index 23d3495bd21..165728c99cb 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/merge.md @@ -7,20 +7,14 @@ title: 'merge' doc_type: 'reference' --- - - # merge 表函数 {#merge-table-function} 创建一个临时的 [Merge](../../engines/table-engines/special/merge.md) 表。 该表的表结构通过对底层表的列取并集并推导其公共数据类型来确定。 可用的虚拟列与 [Merge](../../engines/table-engines/special/merge.md) 表引擎中的相同。 - - ## 语法 {#syntax} - - ```sql merge(['db_name',] 'tables_regexp') ``` @@ -32,7 +26,6 @@ merge(['db_name',] 'tables_regexp') | `db_name` | 取值可以是(可选,默认值为 `currentDatabase()`):
- 数据库名称,
- 返回数据库名称字符串的常量表达式,例如 `currentDatabase()`,
- `REGEXP(expression)`,其中 `expression` 是用于匹配数据库名称的正则表达式。 | | `tables_regexp` | 用于匹配指定数据库或多个数据库中表名的正则表达式。 | - ## 相关内容 {#related} - [Merge](../../engines/table-engines/special/merge.md) 表引擎 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md index ae3af1a125d..f8ccc06115b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeIndex.md @@ -8,21 +8,16 @@ title: 'mergeTreeIndex' doc_type: 'reference' --- - - # mergeTreeIndex 表函数 {#mergetreeindex-table-function} 用于表示 MergeTree 表的索引文件和标记文件的内容,可用于内部检查。 - - ## 语法 {#syntax} ```sql mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) ``` - ## 参数 {#arguments} | 参数 | 说明 | @@ -32,8 +27,6 @@ mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) | `with_marks` | 是否在结果中包含带标记的列。 | | `with_minmax` | 是否在结果中包含最小-最大索引。 | - - ## 返回值 {#returned_value} 一个表对象,其列包括:源表主索引值以及(若启用)min-max 索引值的列、源表各个数据分片中所有可能文件(若启用了 marks)的标记值列,以及虚拟列: @@ -44,8 +37,6 @@ mergeTreeIndex(database, table [, with_marks = true] [, with_minmax = true]) 当某列在数据分片中不存在,或其某个子流的标记未被写入(例如在 compact 分片中)时,marks 列可能包含 `(NULL, NULL)` 值。 - - ## 使用示例 {#usage-example} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md index bf05501e44f..2d087c7d598 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mergeTreeProjection.md @@ -8,21 +8,16 @@ title: 'mergeTreeProjection' doc_type: 'reference' --- - - # mergeTreeProjection 表函数 
{#mergetreeprojection-table-function} 表示 MergeTree 表中某个投影的内容。可用于内部检查和分析。 - - ## 语法 {#syntax} ```sql mergeTreeProjection(database, table, projection) ``` - ## 参数 {#arguments} | 参数 | 描述 | @@ -31,14 +26,10 @@ mergeTreeProjection(database, table, projection) | `table` | 要读取其投影的表名称。 | | `projection` | 要读取的投影名称。 | - - ## 返回值 {#returned_value} 一个表对象,其列由给定投影提供。 - - ## 使用示例 {#usage-example} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md index 4c4c3a2ab50..a497aeea98f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mongodb.md @@ -7,21 +7,16 @@ title: 'mongodb' doc_type: 'reference' --- - - # MongoDB 表函数 {#mongodb-table-function} 可以对存储在远程 MongoDB 服务器中的数据执行 `SELECT` 查询。 - - ## 语法 {#syntax} ```sql mongodb(host:port, database, collection, user, password, structure[, options[, oid_columns]]) ``` - ## 参数 {#arguments} | 参数 | 描述 | @@ -57,13 +52,10 @@ mongodb(uri, collection, structure[, oid_columns]) | `structure` | 此函数返回的 ClickHouse 表的模式(schema)。 | | `oid_columns` | 在 WHERE 子句中应被视为 `oid` 的列的逗号分隔列表。默认为 `_id`。 | - ## 返回值 {#returned_value} 一个表对象,其列与原始 MongoDB 表的列相同。 - - ## 示例 {#examples} 假设我们在名为 `test` 的 MongoDB 数据库中有一个名为 `my_collection` 的集合,并向其中插入了几条文档: @@ -106,7 +98,6 @@ SELECT * FROM mongodb( ) ``` - ## 相关 {#related} - [`MongoDB` 表引擎](engines/table-engines/integrations/mongodb.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md index a1d61159db6..1fa7123fdaf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/mysql.md @@ -7,21 +7,16 @@ 
title: 'mysql' doc_type: 'reference' --- - - # MySQL 表函数 {#mysql-table-function} 允许对存储在远程 MySQL 服务器中的数据执行 `SELECT` 和 `INSERT` 查询。 - - ## 语法 {#syntax} ```sql mysql({host:port, database, table, user, password[, replace_query, on_duplicate_clause] | named_collection[, option=value [,..]]}) ``` - ## 参数 {#arguments} | Argument | Description | @@ -52,7 +47,6 @@ SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'us SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password'); ``` - ## 返回值 {#returned_value} 一个表对象,其列与原始 MySQL 表相同。 @@ -65,8 +59,6 @@ SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 在 `INSERT` 查询中,为了区分表函数 `mysql(...)` 与带列名列表的表名,必须使用关键字 `FUNCTION` 或 `TABLE FUNCTION`。见下方示例。 ::: - - ## 示例 {#examples} MySQL 中的表: @@ -150,7 +142,6 @@ SELECT * FROM mysql('host:port', 'database', 'table', 'user', 'password') WHERE id > (SELECT max(id) FROM mysql_copy); ``` - ## 相关内容 {#related} - [`MySQL` 表引擎](../../engines/table-engines/integrations/mysql.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md index 75b1425ff70..2a6c79b03e4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/null.md @@ -7,33 +7,24 @@ title: 'null' doc_type: 'reference' --- - - # null 表函数 {#null-table-function} 使用 [Null](../../engines/table-engines/special/null.md) 表引擎创建具有指定结构的临时表。根据 `Null` 引擎的特性,表数据会被忽略,并且该表会在查询执行完成后立即被删除。该函数用于方便编写测试和进行演示。 - - ## 语法 {#syntax} ```sql null('structure') ``` - ## 参数 {#argument} - `structure` — 列及其类型的列表。[String](../../sql-reference/data-types/string.md)。 - - ## 返回值 {#returned_value} 具有指定结构的临时 `Null` 引擎表。 - - ## 示例 {#example} 使用 `null` 函数的查询: @@ -50,7 +41,6 @@ INSERT INTO t SELECT * FROM 
numbers_mt(1000000000); DROP TABLE IF EXISTS t; ``` - ## 相关 {#related} - [Null 表引擎](../../engines/table-engines/special/null.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md index 4a21319662b..bcd6d51c802 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/odbc.md @@ -7,14 +7,10 @@ title: 'odbc' doc_type: 'reference' --- - - # odbc 表函数 {#odbc-table-function} 返回一个通过 [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity) 连接的表。 - - ## 语法 {#syntax} ```sql @@ -23,7 +19,6 @@ odbc(数据源, external_table) odbc(named_collection) ``` - ## 参数 {#arguments} | 参数 | 描述 | @@ -38,8 +33,6 @@ odbc(named_collection) 外部表中值为 `NULL` 的字段会被转换为其基础数据类型的默认值。比如,如果远程 MySQL 表的某个字段类型为 `INT NULL`,它会被转换为 0(ClickHouse `Int32` 数据类型的默认值)。 - - ## 使用示例 {#usage-example} **通过 ODBC 从本地 MySQL 安装获取数据** @@ -117,7 +110,6 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') └────────┴──────────────┴───────┴────────────────┘ ``` - ## 另请参阅 {#see-also} - [ODBC 字典](/sql-reference/dictionaries#dbms) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md index cec386b0b97..19c188a8f72 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimon.md @@ -9,15 +9,12 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # paimon 表函数 {#paimon-table-function} 为存储在 Amazon S3、Azure、HDFS 或本地的 Apache [Paimon](https://paimon.apache.org/) 表提供只读的类似表的接口。 - - ## 语法 {#syntax} ```sql @@ -32,7 +29,6 @@ paimonHDFS(path_to_table, [,format] [,compression_method]) 
paimonLocal(path_to_table, [,format] [,compression_method]) ``` - ## 参数 {#arguments} 参数说明与表函数 `s3`、`azureBlobStorage`、`HDFS` 和 `file` 中参数的说明相同。 @@ -42,8 +38,6 @@ paimonLocal(path_to_table, [,format] [,compression_method]) 一个具有指定结构的表,用于读取指定 Paimon 表中的数据。 - - ## 定义命名集合 {#defining-a-named-collection} 下面是一个示例,展示如何配置一个命名集合用于存储 URL 和凭证: @@ -67,13 +61,10 @@ SELECT * FROM paimonS3(paimon_conf, filename = 'test_table') DESCRIBE paimonS3(paimon_conf, filename = 'test_table') ``` - ## 别名 {#aliases} 表函数 `paimon` 现在是 `paimonS3` 的别名。 - - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 @@ -82,8 +73,6 @@ DESCRIBE paimonS3(paimon_conf, filename = 'test_table') - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,该值为 `NULL`。 - `_etag` — 文件的 etag。类型:`LowCardinality(String)`。如果 etag 未知,该值为 `NULL`。 - - ## 支持的数据类型 {#data-types-supported} | Paimon 数据类型 | ClickHouse 数据类型 @@ -106,8 +95,6 @@ DESCRIBE paimonS3(paimon_conf, filename = 'test_table') |ARRAY |Array | |MAP |Map | - - ## 支持的分区 {#partition-supported} Paimon 分区键支持如下数据类型: * `CHAR` @@ -125,8 +112,6 @@ Paimon 分区键支持如下数据类型: * `FLOAT` * `DOUBLE` - - ## 另请参阅 {#see-also} * [Paimon 集群表函数](/sql-reference/table-functions/paimonCluster.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md index 990c59f4397..fc96bccdc48 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/paimonCluster.md @@ -9,7 +9,6 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # paimonCluster 表函数 {#paimoncluster-table-function} @@ -18,8 +17,6 @@ import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; 允许在指定集群中的多个节点上并行处理来自 Apache [Paimon](https://paimon.apache.org/) 
的文件。在发起节点上,它会与集群中所有节点建立连接,并动态分派每个文件。在工作节点上,它会向发起节点请求下一个要处理的任务并对其进行处理。此过程会重复,直到所有任务全部完成。 - - ## 语法 {#syntax} ```sql @@ -30,7 +27,6 @@ paimonAzureCluster(cluster_name, connection_string|storage_account_url, containe paimonHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) ``` - ## 参数 {#arguments} - `cluster_name` — 用于构建远程和本地服务器地址及连接参数集合的集群名称。 @@ -40,8 +36,6 @@ paimonHDFSCluster(cluster_name, path_to_table, [,format] [,compression_method]) 一个具有指定结构的表,用于从集群中读取指定 Paimon 表的数据。 - - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md index a58c6251ea9..628bf6311f3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/postgresql.md @@ -7,21 +7,16 @@ title: 'postgresql' doc_type: 'reference' --- - - # postgresql 表函数 {#postgresql-table-function} 允许对存储在远程 PostgreSQL 服务器上的数据执行 `SELECT` 和 `INSERT` 查询。 - - ## 语法 {#syntax} ```sql postgresql({host:port, database, table, user, password[, schema, [, on_conflict]] | named_collection[, option=value [,..]]}) ``` - ## 参数 {#arguments} | 参数 | 描述 | @@ -36,8 +31,6 @@ postgresql({host:port, database, table, user, password[, schema, [, on_conflict] 参数也可以通过[命名集合](operations/named-collections.md)传递。在这种情况下,应分别指定 `host` 和 `port`。此方式推荐用于生产环境。 - - ## 返回值 {#returned_value} 一个表对象,其列与原始 PostgreSQL 表相同。 @@ -46,8 +39,6 @@ postgresql({host:port, database, table, user, password[, schema, [, on_conflict] 在 `INSERT` 语句中,为了将表函数 `postgresql(...)` 与后面带列名列表的表名区分开来,必须使用关键字 `FUNCTION` 或 `TABLE FUNCTION`。请参见下方示例。 ::: - - ## 实现细节 {#implementation-details} PostgreSQL 端的 `SELECT` 查询以 `COPY (SELECT ...) 
TO STDOUT` 的形式在只读 PostgreSQL 事务中运行,每个 `SELECT` 查询结束后提交事务。 @@ -78,7 +69,6 @@ SELECT name FROM postgresql(`postgres1:5431|postgres2:5432`, 'postgres_database' 支持为 PostgreSQL 字典数据源设置副本优先级。`map` 中数值越大,优先级越低,最高优先级为 `0`。 - ## 示例 {#examples} PostgreSQL 中的表: @@ -157,7 +147,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgrsql_user', 'password', 'nice.schema'); ``` - ## 相关 {#related} - [PostgreSQL 表引擎](../../engines/table-engines/integrations/postgresql.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md index ba1f6374087..cfc8b2046d9 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQuery.md @@ -7,14 +7,10 @@ title: 'prometheusQuery' doc_type: 'reference' --- - - # prometheusQuery 表函数 {#prometheusquery-table-function} 使用 TimeSeries 表中的数据执行 Prometheus 查询。 - - ## 语法 {#syntax} ```sql @@ -23,7 +19,6 @@ prometheusQuery(db_name.time_series_table, 'promql_query', evaluation_time) prometheusQuery('time_series_table', 'promql_query', evaluation_time) ``` - ## 参数 {#arguments} - `db_name` - 包含 TimeSeries 表的数据库名称。 @@ -31,8 +26,6 @@ prometheusQuery('time_series_table', 'promql_query', evaluation_time) - `promql_query` - 使用 [PromQL 语法](https://prometheus.io/docs/prometheus/latest/querying/basics/) 编写的查询。 - `evaluation_time` - 用于评估的时间戳。要在当前时间点评估查询,请将 `now()` 作为 `evaluation_time` 传入。 - - ## 返回值 {#returned_value} 该函数会根据传入到参数 `promql_query` 的查询结果类型返回不同的列: @@ -44,8 +37,6 @@ prometheusQuery('time_series_table', 'promql_query', evaluation_time) | scalar | scalar ValueType | prometheusQuery(mytable, '1h30m') | | string | string String | prometheusQuery(mytable, '"abc"') | - - ## 示例 {#example} ```sql diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md index 7943c01f30e..d46b95b79a4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/prometheusQueryRange.md @@ -7,14 +7,10 @@ title: 'prometheusQueryRange' doc_type: 'reference' --- - - # prometheusQuery 表函数 {#prometheusquery-table-function} 在一段评估时间范围内,使用 TimeSeries 表中的数据执行 Prometheus 查询。 - - ## 语法 {#syntax} ```sql @@ -23,7 +19,6 @@ prometheusQueryRange(db_name.time_series_table, 'promql_query', start_time, end_ prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, step) ``` - ## 参数 {#arguments} - `db_name` - TimeSeries 表所在数据库的名称。 @@ -33,8 +28,6 @@ prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, - `end_time` - 评估区间的结束时间。 - `step` - 用于在从 `start_time` 到 `end_time`(含起止时间)范围内迭代评估时间的步长。 - - ## 返回值 {#returned_value} 该函数会根据传给参数 `promql_query` 的查询结果类型返回不同的列结构: @@ -46,8 +39,6 @@ prometheusQueryRange('time_series_table', 'promql_query', start_time, end_time, | scalar | scalar ValueType | prometheusQuery(mytable, '1h30m') | | string | string String | prometheusQuery(mytable, '"abc"') | - - ## 示例 {#example} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md index ea8150dfa93..b0d59364377 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/redis.md @@ -7,21 +7,16 @@ title: 'redis' doc_type: 'reference' --- - - # redis 表函数 {#redis-table-function} 此表函数用于将 ClickHouse 与 [Redis](https://redis.io/) 集成。 - - ## 语法 
{#syntax} ```sql redis(host:port, key, structure[, db_index[, password[, pool_size]]]) ``` - ## 参数 {#arguments} | Argument | Description | @@ -39,14 +34,10 @@ redis(host:port, key, structure[, db_index[, password[, pool_size]]]) 目前 `redis` 表函数不支持使用 [Named collections](/operations/named-collections.md)。 - - ## 返回值 {#returned_value} 一个表对象,其键为 Redis 键,其余各列打包在一起作为 Redis 值。 - - ## 使用示例 {#usage-example} 从 Redis 读取: @@ -68,7 +59,6 @@ INSERT INTO TABLE FUNCTION redis( 'key String, v1 String, v2 UInt32') values ('1', '1', 1); ``` - ## 相关 {#related} - [`Redis` 表引擎](/engines/table-engines/integrations/redis.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md index ee04dc92970..e9d41be8908 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/remote.md @@ -7,16 +7,12 @@ title: 'remote, remoteSecure' doc_type: 'reference' --- - - # remote、remoteSecure 表函数 {#remote-remotesecure-table-function} 表函数 `remote` 允许按需访问远程服务器,即无需创建 [Distributed](../../engines/table-engines/special/distributed.md) 表。表函数 `remoteSecure` 与 `remote` 相同,只是通过安全连接进行访问。 这两个函数都可以在 `SELECT` 和 `INSERT` 查询中使用。 - - ## 语法 {#syntax} ```sql @@ -28,7 +24,6 @@ remoteSecure(addresses_expr, [db.table, user [, password], sharding_key]) remoteSecure(named_collection[, option=value [,..]]) ``` - ## 参数 {#parameters} | 参数 | 说明 | @@ -42,14 +37,10 @@ remoteSecure(named_collection[, option=value [,..]]) 这些参数也可以通过 [命名集合](operations/named-collections.md) 传递。 - - ## 返回值 {#returned-value} 位于远程服务器上的数据表。 - - ## 用法 {#usage} 由于表函数 `remote` 和 `remoteSecure` 会在每个请求时重新建立连接,建议改为使用 `Distributed` 表。另外,如果设置了主机名,这些名称会被解析,并且在与不同副本协同工作时,解析错误不会计入错误统计。在处理大量查询时,应始终预先创建 `Distributed` 表,而不要使用 `remote` 表函数。 @@ -81,7 +72,6 @@ localhost example01-01-1,example01-02-1 ``` - ## 示例 {#examples} 
### 从远程服务器查询数据: {#selecting-data-from-a-remote-server} @@ -171,7 +161,6 @@ remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD') SELECT * from imdb.actors ``` - ## 通配模式 {#globs-in-addresses} 花括号 `{ }` 中的模式用于生成一组分片并指定副本。如果存在多个花括号对,则会生成对应集合的笛卡尔积。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md index c62104c36ca..fc95cfe1e3a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3.md @@ -11,7 +11,6 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # s3 表函数 {#s3-table-function} 提供一个类似表的接口,用于在 [Amazon S3](https://aws.amazon.com/s3/) 和 [Google Cloud Storage](https://cloud.google.com/storage/) 中读取/写入文件。此表函数类似于 [hdfs 函数](../../sql-reference/table-functions/hdfs.md),但提供了特定于 S3 的功能。 @@ -20,8 +19,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; 在 [`INSERT INTO...SELECT`](../../sql-reference/statements/insert-into#inserting-the-results-of-select) 中使用 `s3 table function` 时,数据以流式方式读取和插入。内存中只会保留少量数据块,同时这些数据块会持续从 S3 中读取并推送到目标表中。 - - ## 语法 {#syntax} ```sql @@ -72,13 +69,10 @@ GCS URL 使用如下格式,因为 Google XML API 的端点与 JSON API 不同 | `no_sign_request` | 默认禁用。 | | `expiration_window_seconds` | 默认值为 120。 | - ## 返回值 {#returned_value} 一个具有指定结构的表,用于在指定文件中读写数据。 - - ## 示例 {#examples} 从由 S3 文件 `https://datasets-documentation.s3.eu-west-3.amazonaws.com/aapl_stock.csv` 创建的表中选取前 5 行: @@ -133,7 +127,6 @@ FROM s3( ::: - ## 使用方法 {#usage} 假设在 S3 上有若干文件,其 URI 如下: @@ -216,7 +209,6 @@ SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bu SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**/test-data.csv.gz', 'CSV', 
'name String, value UInt32', 'gzip'); ``` - 注意:可以在服务器配置文件中指定自定义 URL 映射规则。例如: ```sql @@ -252,7 +244,6 @@ SELECT count(*) FROM s3(creds, url='https://s3-object-url.csv') ``` - ## 分区写入 {#partitioned-write} ### 分区策略 {#partition-strategy} @@ -299,7 +290,6 @@ INSERT INTO TABLE FUNCTION 因此,数据会被写入到不同 bucket 下的三个文件:`my_bucket_1/file.csv`、`my_bucket_10/file.csv` 和 `my_bucket_20/file.csv`。 - ## 访问公共 bucket {#accessing-public-buckets} ClickHouse 会尝试从多种不同的来源获取凭证。 @@ -316,7 +306,6 @@ FROM s3( LIMIT 5; ``` - ## 使用 S3 凭证(ClickHouse Cloud) {#using-s3-credentials-clickhouse-cloud} 对于非公开的 bucket,用户可以向该函数传入 `aws_access_key_id` 和 `aws_secret_access_key`。例如: @@ -337,7 +326,6 @@ SELECT count() FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.co 更多示例请参见[此处](/cloud/data-sources/secure-s3#access-your-s3-bucket-with-the-clickhouseaccess-role) - ## 使用归档文件 {#working-with-archives} 假设我们在 S3 上有几个归档文件,其 URI 如下: @@ -363,13 +351,10 @@ TAR 虽然可以从任何受支持的存储位置访问 ZIP 和 TAR 归档文件,但 7Z 归档文件只能从安装了 ClickHouse 的本地文件系统读取。 ::: - ## 插入数据 {#inserting-data} 请注意,行只能插入到新文件中。不会进行合并循环或文件拆分操作。一旦文件写入完成,后续的插入操作将失败。更多详情请参见[此处](/integrations/s3#inserting-data)。 - - ## 虚拟列 {#virtual-columns} - `_path` — 文件路径。类型:`LowCardinality(String)`。对于归档文件,以如下格式显示路径:`"{path_to_archive}::{path_to_file_inside_archive}"` @@ -377,8 +362,6 @@ TAR - `_size` — 文件大小(字节数)。类型:`Nullable(UInt64)`。如果文件大小未知,该值为 `NULL`。对于归档文件,显示归档内文件未压缩时的文件大小。 - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,该值为 `NULL`。 - - ## use_hive_partitioning setting {#hive-style-partitioning} 这是一个提示配置,用于指示 ClickHouse 在读取时解析采用 Hive 风格分区的文件。它对写入没有任何影响。若要实现读写对称,请使用 `partition_strategy` 参数。 @@ -391,7 +374,6 @@ TAR SELECT * FROM s3('s3://data/path/date=*/country=*/code=*/*.parquet') WHERE date > '2020-01-01' AND country = 'Netherlands' AND code = 42; ``` - ## 访问 requester-pays 存储桶 {#accessing-requester-pays-buckets} 要访问 requester-pays(请求者付费)存储桶,所有请求中都必须携带请求头 `x-amz-request-payer = requester`。可以通过向 S3 函数传递参数 `headers('x-amz-request-payer' = 'requester')` 来实现。例如: @@ 
-410,15 +392,12 @@ FROM s3('https://coiled-datasets-rp.s3.us-east-1.amazonaws.com/1trc/measurements 峰值内存使用量:192.27 KiB。 ``` - ## 存储设置 {#storage-settings} - [s3_truncate_on_insert](operations/settings/settings.md#s3_truncate_on_insert) - 允许在插入数据前截断文件。默认禁用。 - [s3_create_new_file_on_insert](operations/settings/settings.md#s3_create_new_file_on_insert) - 如果格式带有后缀,允许在每次插入时创建一个新文件。默认禁用。 - [s3_skip_empty_files](operations/settings/settings.md#s3_skip_empty_files) - 允许在读取时跳过空文件。默认启用。 - - ## 嵌套 Avro 模式 {#nested-avro-schemas} 当读取包含**嵌套记录**且不同文件之间嵌套结构不一致的 Avro 文件时(例如,某些文件在嵌套对象中多了一个字段),ClickHouse 可能会返回如下错误: @@ -447,7 +426,6 @@ FROM s3('https://bucket-name/*.avro', 'Avro') SETTINGS schema_inference_mode='union'; ``` - ## 相关 {#related} - [S3 引擎](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md index 93b6120a361..ffb92e620c5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/s3Cluster.md @@ -7,16 +7,12 @@ title: 's3Cluster' doc_type: 'reference' --- - - # s3Cluster 表函数 {#s3cluster-table-function} 这是对 [s3](sql-reference/table-functions/s3.md) 表函数的扩展。 允许在指定集群中的多个节点上并行处理来自 [Amazon S3](https://aws.amazon.com/s3/) 和 [Google Cloud Storage](https://cloud.google.com/storage/) 的文件。在发起节点上,它会与集群中所有节点建立连接,展开 S3 文件路径中的星号通配符,并动态分发每个文件。在工作节点上,它会向发起节点请求下一个要处理的任务并进行处理。该过程会重复进行,直到所有任务完成。 - - ## 语法 {#syntax} ```sql @@ -24,7 +20,6 @@ s3Cluster(cluster_name, url[, NOSIGN | access_key_id, secret_access_key,[session s3Cluster(cluster_name, named_collection[, option=value [,..]]) ``` - ## 参数 {#arguments} | 参数 | 描述 | @@ -49,14 +44,10 @@ s3Cluster(cluster_name, named_collection[, option=value [,..]]) | `no_sign_request` | 默认禁用。 | | `expiration_window_seconds` | 默认值为 120。 | - - ## 
返回值 {#returned_value} 一个具有指定结构的表,用于对指定文件进行数据读写。 - - ## 示例 {#examples} 使用 `cluster_simple` 集群中的所有节点,查询 `/root/data/clickhouse` 和 `/root/data/database/` 目录中所有文件的数据: @@ -91,19 +82,14 @@ SELECT count(*) FROM s3Cluster( ) ``` - ## 访问私有和公共存储桶 {#accessing-private-and-public-buckets} 用户可以使用与 s3 函数文档中描述的相同方法,详见[此处](/sql-reference/table-functions/s3#accessing-public-buckets)。 - - ## 性能优化 {#optimizing-performance} 有关如何优化 `s3` 函数性能的更多信息,请参阅[详细指南](/integrations/s3/performance)。 - - ## 相关 {#related} - [S3 引擎](../../engines/table-engines/integrations/s3.md) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md index ce9bdd72cf3..0fff19f64ae 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/sqlite.md @@ -7,34 +7,25 @@ title: 'sqlite' doc_type: 'reference' --- - - # sqlite 表函数 {#sqlite-table-function} 用于对存储在 [SQLite](../../engines/database-engines/sqlite.md) 数据库中的数据执行查询。 - - ## 语法 {#syntax} ```sql sqlite('db_path', 'table_name') ``` - ## 参数 {#arguments} - `db_path` — SQLite 数据库文件的路径。[String](../../sql-reference/data-types/string.md)。 - `table_name` — SQLite 数据库中某个表的名称。[String](../../sql-reference/data-types/string.md)。 - - ## 返回值 {#returned_value} - 一个表对象,其列与原始 `SQLite` 表相同。 - - ## 示例 {#example} 查询: @@ -53,7 +44,6 @@ SELECT * FROM sqlite('sqlite.db', 'table1') ORDER BY col2; └───────┴──────┘ ``` - ## 相关 {#related} - [SQLite](../../engines/table-engines/integrations/sqlite.md) 表引擎 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md index 5ef407a9048..fa2d2e89d61 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/timeSeriesSelector.md @@ -7,15 +7,11 @@ title: 'timeSeriesSelector' doc_type: 'reference' --- - - # timeSeriesSelector 表函数 {#timeseriesselector-table-function} 从 TimeSeries 表中读取满足选择器过滤条件且时间戳位于指定时间区间内的时间序列。 此函数类似于 [range selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#range-vector-selectors),但也可用于实现 [instant selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#instant-vector-selectors)。 - - ## 语法 {#syntax} ```sql @@ -24,7 +20,6 @@ timeSeriesSelector(db_name.time_series_table, 'instant_query', min_time, max_tim timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) ``` - ## 参数 {#arguments} - `db_name` - 包含 TimeSeries 表的数据库名称。 @@ -33,8 +28,6 @@ timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) - `min_time` - 起始时间戳(含)。 - `max_time` - 结束时间戳(含)。 - - ## 返回值 {#returned_value} 该函数返回三列: @@ -44,8 +37,6 @@ timeSeriesSelector('time_series_table', 'instant_query', min_time, max_time) 返回的数据不保证特定顺序。 - - ## 示例 {#example} ```sql diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md index d96ed595a4e..158eb537313 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/url.md @@ -10,22 +10,18 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # url 表函数 {#url-table-function} `url` 函数根据给定的 `format` 和 `structure`,基于指定的 `URL` 创建一个表。 `url` 函数可以在对 [URL](../../engines/table-engines/special/url.md) 表数据执行的 `SELECT` 和 `INSERT` 查询中使用。 - - ## 语法 {#syntax} ```sql 
url(URL [,format] [,structure] [,headers]) ``` - ## 参数 {#parameters} | 参数 | 描述 | @@ -35,14 +31,10 @@ url(URL [,format] [,structure] [,headers]) | `structure` | 表结构,格式为 `'UserID UInt64, Name String'`。用于确定列名和列类型。类型: [String](../../sql-reference/data-types/string.md)。 | | `headers` | 请求头,格式为 `'headers('key1'='value1', 'key2'='value2')'`。可用于为 HTTP 调用设置请求头。 | - - ## 返回值 {#returned_value} 一个具有指定格式和结构,并包含来自已定义 `URL` 的数据的表。 - - ## 示例 {#examples} 从以 [CSV](/interfaces/formats/CSV) 格式响应的 HTTP 服务器获取一个包含 `String` 和 [UInt32](../../sql-reference/data-types/int-uint.md) 类型列的表的前 3 行。 @@ -59,14 +51,11 @@ INSERT INTO FUNCTION url('http://127.0.0.1:8123/?query=INSERT+INTO+test_table+FO SELECT * FROM test_table; ``` - ## URL 中的通配模式 {#globs-in-url} 花括号 `{ }` 中的模式用于生成一组分片,或用于指定故障转移地址。受支持的模式类型及示例请参见 [remote](remote.md#globs-in-addresses) 函数的描述。 模式中的字符 `|` 用于指定故障转移地址。故障转移地址会按照在模式中列出的顺序依次迭代。生成地址的数量受 [glob_expansion_max_elements](../../operations/settings/settings.md#glob_expansion_max_elements) 设置的限制。 - - ## 虚拟列 {#virtual-columns} - `_path` — `URL` 的路径。类型:`LowCardinality(String)`。 @@ -75,8 +64,6 @@ SELECT * FROM test_table; - `_time` — 文件的最后修改时间。类型:`Nullable(DateTime)`。如果时间未知,则值为 `NULL`。 - `_headers` - HTTP 响应头部。类型:`Map(LowCardinality(String), LowCardinality(String))`。 - - ## use_hive_partitioning 设置 {#hive-style-partitioning} 当将 `use_hive_partitioning` 设置为 1 时,ClickHouse 会在路径(`/name=value/`)中检测 Hive 风格的分区,并允许在查询中将分区列作为虚拟列使用。这些虚拟列的名称与分区路径中的名称相同,但会以 `_` 作为前缀。 @@ -89,20 +76,15 @@ SELECT * FROM test_table; SELECT * FROM url('http://data/path/date=*/country=*/code=*/*.parquet') WHERE _date > '2020-01-01' AND _country = 'Netherlands' AND _code = 42; ``` - ## 存储设置 {#storage-settings} - [engine_url_skip_empty_files](/operations/settings/settings.md#engine_url_skip_empty_files) - 用于在读取时跳过空文件。默认禁用。 - [enable_url_encoding](/operations/settings/settings.md#enable_url_encoding) - 用于控制是否对 URI 中路径进行解码/编码。默认启用。 - - ## 权限 {#permissions} `url` 函数需要 `CREATE TEMPORARY TABLE` 权限。因此,对于将 
[readonly](/operations/settings/permissions-for-queries#readonly) 设置为 1 的用户,它将无法使用。至少需要 readonly = 2。 - - ## 相关内容 {#related} - [虚拟列](/engines/table-engines/index.md#table_engines-virtual_columns) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md index 2cdcd32a4fb..b6de967e135 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/urlCluster.md @@ -7,21 +7,16 @@ title: 'urlCluster' doc_type: 'reference' --- - - # urlCluster 表函数 {#urlcluster-table-function} 允许在指定集群的多个节点上并行处理通过 URL 访问的文件。在发起端,它会与集群中所有节点建立连接,展开 URL 文件路径中的星号,并动态分发每个文件。在工作节点上,它会向发起端请求下一个要处理的任务并进行处理。该过程会重复进行,直到所有任务完成。 - - ## 语法 {#syntax} ```sql urlCluster(cluster_name, URL, format, structure) ``` - ## 参数 {#arguments} | 参数 | 描述 | @@ -31,14 +26,10 @@ urlCluster(cluster_name, URL, format, structure) | `format` | 数据的[格式](/sql-reference/formats)。类型:[String](../../sql-reference/data-types/string.md)。 | | `structure` | 以 `'UserID UInt64, Name String'` 形式表示的表结构。用于确定列名和数据类型。类型:[String](../../sql-reference/data-types/string.md)。 | - - ## 返回值 {#returned_value} 一个具有指定格式和结构,并包含来自指定 `URL` 中数据的表。 - - ## 示例 {#examples} 从 HTTP 服务器获取一个表的前 3 行,该表包含 `String` 和 [UInt32](../../sql-reference/data-types/int-uint.md) 类型的列,服务器以 [CSV](/interfaces/formats/CSV) 格式返回结果。 @@ -65,14 +56,11 @@ if __name__ == "__main__": SELECT * FROM urlCluster('cluster_simple','http://127.0.0.1:12345', CSV, 'column1 String, column2 UInt32') ``` - ## URL 中的通配符 {#globs-in-url} 花括号 `{ }` 中的模式可用于生成一组分片,或用于指定故障转移地址。支持的模式类型及示例,参见 [remote](remote.md#globs-in-addresses) 函数的描述。 模式内的字符 `|` 用于指定故障转移地址。它们会按照在模式中出现的顺序进行迭代。生成的地址数量受 [glob_expansion_max_elements](../../operations/settings/settings.md#glob_expansion_max_elements) 设置的限制。 - - ## 相关内容 {#related} - [HDFS 
引擎](/engines/table-engines/integrations/hdfs) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md index 256075b212d..939052e3942 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/values.md @@ -8,8 +8,6 @@ title: 'values' doc_type: 'reference' --- - - # Values 表函数 {#values-table-function} `Values` 表函数允许你创建一个临时存储,用于为列填充值。它对于快速测试或生成示例数据非常有用。 @@ -18,8 +16,6 @@ doc_type: 'reference' Values 是不区分大小写的函数。也就是说,`VALUES` 或 `values` 都是有效的写法。 ::: - - ## 语法 {#syntax} `VALUES` 表函数的基本语法如下: @@ -39,7 +35,6 @@ VALUES( ) ``` - ## 参数 {#arguments} - `column1_name Type1, ...`(可选)。[String](/sql-reference/data-types/string) @@ -51,14 +46,10 @@ VALUES( 以逗号分隔的元组也可以用单个值代替。在这种情况下,每个值都被视为一行新数据。详情参见[示例](#examples)部分。 ::: - - ## 返回值 {#returned-value} - 返回一个包含传入值的临时表。 - - ## 示例 {#examples} ```sql title="Query" @@ -195,7 +186,6 @@ FROM VALUES( └──────────┘ ``` - ## 另请参阅 {#see-also} - [Values 格式](/interfaces/formats/Values) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md index c42fa878488..7204c00eaac 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/view.md @@ -7,33 +7,24 @@ title: '视图' doc_type: 'reference' --- - - # view 表函数 {#view-table-function} 将子查询转换为一张表。该函数用于实现视图(参见 [CREATE VIEW](/sql-reference/statements/create/view))。生成的表本身不存储数据,而只保存指定的 `SELECT` 查询。从该表读取时,ClickHouse 会执行该查询,并从结果中丢弃所有不需要的列。 - - ## 语法 {#syntax} ```sql view(subquery) ``` - ## 参数 {#arguments} - `subquery` — `SELECT` 查询。 - - ## 返回值 {#returned_value} - 一张表。 - - ## 示例 {#examples} 输入表: @@ -74,7 +65,6 @@ 
SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name)); SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)); ``` - ## 相关内容 {#related} - [View 表引擎](/engines/table-engines/special/view/) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md index 5cda0a3642b..94bf945f814 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/table-functions/ytsaurus.md @@ -9,15 +9,12 @@ doc_type: 'reference' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; - # ytsaurus 表函数 {#ytsaurus-table-function} 此表函数用于从 YTsaurus 集群读取数据。 - - ## 语法 {#syntax} ```sql @@ -31,7 +28,6 @@ ytsaurus(http_proxy_url, cypress_path, oauth_token, format) 执行命令 `set allow_experimental_ytsaurus_table_function = 1`。 ::: - ## 参数 {#arguments} - `http_proxy_url` — YTsaurus HTTP 代理的 URL。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/transactions.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/transactions.md index d70c9405392..ad237ba9640 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/transactions.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/transactions.md @@ -8,11 +8,8 @@ doc_type: 'guide' import ExperimentalBadge from '@theme/badges/ExperimentalBadge'; import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - # 事务(ACID)支持 {#transactional-acid-support} - - ## 案例 1:对 MergeTree* 系列中某张表的一个分区执行 INSERT {#case-1-insert-into-one-partition-of-one-table-of-the-mergetree-family} 当插入的行被打包并作为单个数据块插入时(参见备注),该操作具备事务(ACID)特性: @@ -22,35 +19,25 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - 持久性(Durable):成功的 INSERT 在响应客户端之前会被写入文件系统,可以只写入单个副本,也可以写入多个副本(由 `insert_quorum` 设置控制),并且 
ClickHouse 可以请求操作系统将文件系统数据同步到存储介质(由 `fsync_after_insert` 设置控制)。 - 如果涉及物化视图,则可以通过一个语句向多个表执行 INSERT(即客户端的 INSERT 目标是一张带有关联物化视图的表)。 - - ## 情况 2:对 MergeTree* 系列中的一个表执行跨多个分区的 INSERT {#case-2-insert-into-multiple-partitions-of-one-table-of-the-mergetree-family} 与上述情况 1 相同,补充细节如下: - 如果表包含多个分区且 INSERT 涵盖多个分区,那么对每个分区的插入在各自分区内单独具备事务性 - - ## 情况 3:向 MergeTree* 系列的一个分布式表执行 INSERT {#case-3-insert-into-one-distributed-table-of-the-mergetree-family} 与上述情况 1 相同,但有以下区别: - 向 Distributed 表执行 INSERT 时,整体操作不具备事务性,而对每个分片的插入则是事务性的 - - ## 案例 4:使用 Buffer 表 {#case-4-using-a-buffer-table} - 对 Buffer 表的插入操作不具备原子性、隔离性、一致性或持久性 - - ## 案例 5:使用 async_insert {#case-5-using-async_insert} 与上面的案例 1 相同,但有以下差异: - 即使启用了 `async_insert` 且 `wait_for_async_insert` 设置为 1(默认值),也可以保证原子性;但如果将 `wait_for_async_insert` 设置为 0,则不再保证原子性。 - - ## 说明 {#notes} - 在以下情况下,客户端以某种数据格式插入的多行会被打包到同一个数据块中: - 插入格式是行式的(例如 CSV、TSV、Values、JSONEachRow 等),并且数据包含的行数少于 `max_insert_block_size`(默认约 1 000 000 行);如果启用了并行解析(默认启用),当数据大小少于 `min_chunk_bytes_for_parallel_parsing` 字节(默认 10 MB)时也会被打包为单个数据块 @@ -63,8 +50,6 @@ import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; - ACID 语境下的“consistency”并不涵盖分布式系统的语义,参见 https://jepsen.io/consistency;这类语义由不同的设置(select_sequential_consistency)控制 - 本说明未涵盖新的事务特性,该特性允许在多张表、物化视图上以及针对多个 SELECT 等执行完整功能的事务(参见下一节 “Transactions, Commit, and Rollback”) - - ## 事务、提交和回滚 {#transactions-commit-and-rollback} @@ -203,7 +188,6 @@ ENGINE = MergeTree ORDER BY n ``` - ```response 确认。 ``` @@ -321,7 +305,6 @@ is_readonly: 1 state: RUNNING ``` - ## 更多详情 {#more-details} 请参阅此 [meta issue](https://github.com/ClickHouse/ClickHouse/issues/48794),以了解更全面的测试内容,并及时跟进最新进展。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md index aa09d39edfa..237b4507de1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/sql-reference/window-functions/index.md @@ -7,15 +7,11 @@ title: '窗口函数' doc_type: 'reference' --- - - # 窗口函数 {#window-functions} 窗口函数可以在与当前行相关的一组行上执行计算。 其中有些计算类似于使用聚合函数所能完成的计算,但窗口函数不会将多行合并为单个结果——每一行仍然会单独返回。 - - ## 标准窗口函数 {#standard-window-functions} ClickHouse 支持用于定义窗口和窗口函数的标准语法。下表说明各功能当前是否受支持。 @@ -36,8 +32,6 @@ ClickHouse 支持用于定义窗口和窗口函数的标准语法。下表说明 | `lag/lead(value, offset)` | ✅
你还可以使用以下任一变通方式:
1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`,对于 `lead` 使用 `following`
2) 使用 `lagInFrame/leadInFrame`,其行为类似,但会遵循窗口帧定义。若要获得与 `lag/lead` 完全相同的行为,请使用 `rows between unbounded preceding and unbounded following` | | `ntile(buckets)` | ✅
以如下方式指定窗口:`partition by x order by y rows between unbounded preceding and unbounded following`。 | - - ## ClickHouse 特定窗口函数 {#clickhouse-specific-window-functions} ClickHouse 还提供以下特定窗口函数: @@ -51,7 +45,6 @@ ClickHouse 还提供以下特定窗口函数: - 第 1 行为 `0`, - 第 $i$ 行为 ${\text{metric}_i - \text{metric}_{i-1} \over \text{timestamp}_i - \text{timestamp}_{i-1}} * \text{interval}$。 - ## 语法 {#syntax} ```text @@ -97,7 +90,6 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column] * [`lagInFrame(x)`](./lagInFrame.md) - 返回在其有序窗口中,相对于当前行之前指定物理偏移量那一行计算得到的值。 * [`leadInFrame(x)`](./leadInFrame.md) - 返回在其有序窗口中,相对于当前行之后指定偏移量那一行计算得到的值。 - ## 示例 {#examples} 我们来看一些使用窗口函数的示例。 @@ -196,7 +188,6 @@ SELECT FROM salaries; ``` - ```text ┌─球员────────────┬─薪水───┬─球队──────────────────────┬─队内最高┬───差额─┐ │ Charles Juarez │ 190000 │ New Coreystad Archdukes │ 190000 │ 0 │ @@ -280,7 +271,6 @@ ORDER BY └──────────┴───────┴───────┴──────────────┘ ``` - ```sql -- 简写形式 - 无边界表达式,无 ORDER BY, -- 等同于 `ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING` @@ -355,7 +345,6 @@ ORDER BY └──────────┴───────┴───────┴────────────────────┴──────────────┘ ``` - ```sql -- 框架范围从分区起始位置到当前行,但排序为倒序 SELECT @@ -451,7 +440,6 @@ ORDER BY value ASC; ``` - ┌─part_key─┬─value─┬─order─┬─frame_values─┬─rn_1─┬─rn_2─┬─rn_3─┬─rn_4─┐ │ 1 │ 1 │ 1 │ [5,4,3,2,1] │ 5 │ 5 │ 5 │ 2 │ │ 1 │ 2 │ 2 │ [5,4,3,2] │ 4 │ 4 │ 4 │ 2 │ @@ -520,7 +508,6 @@ ORDER BY value ASC; ``` - ┌─frame_values_1─┬─second_value─┐ │ [1] │ ᴺᵁᴸᴸ │ │ [1,2] │ 2 │ @@ -532,7 +519,6 @@ ORDER BY ``` ``` - ## 实际案例 {#real-world-examples} 以下示例演示如何解决一些常见的实际问题。 @@ -646,7 +632,6 @@ CREATE TABLE sensors ENGINE = Memory; ``` - insert into sensors values('cpu_temp', '2020-01-01 00:00:00', 87), ('cpu_temp', '2020-01-01 00:00:01', 77), ('cpu_temp', '2020-01-01 00:00:02', 93), @@ -725,7 +710,6 @@ CREATE TABLE sensors ENGINE = Memory; ``` - insert into sensors values('ambient_temp', '2020-01-01 00:00:00', 16), ('ambient_temp', '2020-01-01 12:00:00', 16), ('ambient_temp', 
'2020-01-02 11:00:00', 9), @@ -769,7 +753,6 @@ ORDER BY └──────────────┴─────────────────────┴───────┴─────────────────────────┘ ```` - ## 参考资料 {#references} ### GitHub 议题 {#github-issues} @@ -804,8 +787,6 @@ https://dev.mysql.com/doc/refman/8.0/en/window-functions-usage.html https://dev.mysql.com/doc/refman/8.0/en/window-functions-frames.html - - ## 相关内容 {#related-content} - 博客:[在 ClickHouse 中处理时间序列数据](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md b/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md index 3aef52f7862..f766e721540 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/debugging-insights.md @@ -19,14 +19,10 @@ title: '实践经验 - 调试洞见' description: '查找最常见 ClickHouse 问题的解决方案,包括慢查询、内存错误、连接问题和配置问题。' --- - - # ClickHouse 运维:社区调试洞见 {#clickhouse-operations-community-debugging-insights} *本指南是基于社区活动中总结出的经验与结论的一部分。想获取更多真实场景下的解决方案与洞见,可以[按具体问题浏览](./community-wisdom.md)。* *是否正为高昂的运维成本发愁?请查看[成本优化](./cost-optimization.md)社区洞见指南。* - - ## 关键系统表 {#essential-system-tables} 以下系统表是生产环境调试/排障的基础: @@ -86,7 +82,6 @@ GROUP BY database, table ORDER BY count() DESC; ``` - ## 常见生产环境问题 {#common-production-issues} ### 磁盘空间问题 {#disk-space-problems} @@ -126,7 +121,6 @@ WHERE is_done = 0; 先在较小的数据集上验证模式变更。 - ## 内存与性能 {#memory-and-performance} ### 外部聚合 {#external-aggregation} @@ -170,7 +164,6 @@ SETTINGS max_bytes_before_external_group_by = 1000000000; -- 1GB 阈值 * [自定义分区键](/engines/table-engines/mergetree-family/custom-partitioning-key) - ## 快速参考 {#quick-reference} | 问题 | 检测方式 | 解决方案 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md b/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md index 5e971452d9a..196ac35f994 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/materialized-views.md @@ -21,24 +21,18 @@ title: '物化视图实践经验' description: '物化视图的真实案例、常见问题与解决方案' --- - - # 物化视图:如何让它成为一把双刃剑 {#materialized-views-the-double-edged-sword} *本指南是从社区线下交流活动中总结出的经验的一部分。想获取更多真实场景的解决方案和洞察,可以[按具体问题浏览](./community-wisdom.md)。* *过多的数据分片正在拖慢你的数据库吗?请查看 [Too Many Parts](./too-many-parts.md) 社区洞察指南。* *进一步了解[物化视图](/materialized-views)。* - - ## 10 倍存储反模式 {#storage-antipattern} **真实生产问题:** *“我们有一个物化视图。原始日志表大约是 20GB,但基于这个日志表的视图膨胀到了 190GB,几乎是原始表大小的 10 倍。之所以会这样,是因为我们为每个属性创建了一行,而每条日志可以有 10 个属性。”* **规则:** 如果你的 `GROUP BY` 产生的行数多于它减少的行数,那你构建的是一个代价高昂的索引,而不是物化视图。 - - ## 生产环境物化视图健康状况验证 {#mv-health-validation} 此查询可帮助你在创建物化视图之前预测,它是会压缩数据还是导致数据膨胀。请在实际的表和列上运行此查询,以避免出现类似 “190GB 爆炸” 的情况。 @@ -62,7 +56,6 @@ WHERE your_filter_conditions; -- 如果 aggregation_ratio < 10%,将获得良好的压缩效果 ``` - ## 当物化视图开始带来问题时 {#mv-problems} **需要监控的预警信号:** @@ -73,7 +66,5 @@ WHERE your_filter_conditions; 你可以使用 `system.query_log` 比较添加物化视图前后的写入性能,以跟踪查询耗时趋势。 - - ## 视频来源 {#video-sources} - [ClickHouse at CommonRoom - Kirill Sapchuk](https://www.youtube.com/watch?v=liTgGiTuhJE) - “过度迷恋物化视图”和“20GB→190GB 激增”案例研究的出处 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md b/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md index 5d3b847c2a8..422e6c7dda7 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/performance-optimization.md @@ -21,15 +21,11 @@ title: '实践经验 - 性能优化' description: '性能优化策略的真实案例' --- - - # 性能优化:经过社区验证的策略 {#performance-optimization} *本指南是基于社区 Meetup 活动总结的经验汇总。若想获取更多真实场景中的解决方案与见解,可以[按具体问题浏览](./community-wisdom.md)。* *在使用物化视图时遇到问题?请查看[物化视图](./materialized-views.md)社区见解指南。* 
*如果你遇到查询变慢的问题并希望查看更多示例,我们还提供了[查询优化](/optimize/query-optimization)指南。* - - ## 按基数排序(从低到高) {#cardinality-ordering} 当低基数列排在前面时,ClickHouse 的主索引效果最佳,可以更高效地跳过大块数据。键中后面的高基数列则用于在这些数据块内提供更细粒度的排序。请从具有较少唯一值的列开始(如 status、category、country),最后再放置具有大量唯一值的列(如 user_id、timestamp、session_id)。 @@ -37,8 +33,6 @@ description: '性能优化策略的真实案例' - [选择主键](/best-practices/choosing-a-primary-key) - [主索引](/primary-indexes) - - ## 时间粒度很重要 {#time-granularity} 在 ORDER BY 子句中使用时间戳时,需要权衡基数与精度之间的取舍。微秒级精度的时间戳会产生非常高的基数(几乎每行一个唯一值),从而降低 ClickHouse 稀疏主索引的效率。对时间戳进行取整可以降低基数,从而实现更好的索引跳过,但会在基于时间的查询中损失时间精度。 @@ -68,7 +62,6 @@ FROM github.github_events WHERE created_at >= '2024-01-01'; ``` - ## 聚焦单条查询,而不是平均值 {#focus-on-individual-queries-not-averages} 在排查 ClickHouse 性能问题时,不要依赖平均查询时间或系统整体指标。相反,要找出为什么某些特定查询会变慢。系统在平均意义上可能表现良好,但单条查询可能会因为内存耗尽、过滤不佳或高基数操作而表现很差。 @@ -77,8 +70,6 @@ ClickHouse 的 CTO Alexey 指出:*"正确的做法是问自己,为什么这 当某条查询变慢时,不要只看平均值。要问“为什么偏偏是这条查询慢?”,并检查其实际的资源使用模式。 - - ## 内存与行扫描 {#memory-and-row-scanning} Sentry 是一个面向开发者的错误跟踪平台,每天为 400 多万开发者处理数十亿个事件。他们的一个关键认识是:*“在这种特定情形下,驱动内存使用的是分组键的基数(cardinality)”*——高基数聚合拖垮性能,根本原因在于内存被耗尽,而不是扫描了太多行。 @@ -95,7 +86,6 @@ WHERE cityHash64(user_id) % 10 = 0 -- 始终为相同的 10% 用户 这可以确保相同的用户在每次查询中都会以相同的方式出现,从而在不同时间段内提供一致的结果。关键在于:`cityHash64()` 会对相同输入生成一致的哈希值,因此 `user_id = 12345` 始终会被哈希到同一个值,保证该用户要么始终出现在你的 10% 样本中,要么从不出现——不会在不同查询之间时有时无。 - ## Sentry 的位掩码优化 {#bit-mask-optimization} 当按高基数列(如 URL)进行聚合时,每个唯一值都会在内存中创建一个单独的聚合状态,最终可能导致内存耗尽。Sentry 的解决方案是:不再按实际的 URL 字符串分组,而是按会被归约为位掩码的布尔表达式分组。 @@ -139,7 +129,6 @@ LIMIT 20 来自 Sentry 工程团队的反馈:“这些重量级查询的速度提升了 10 倍以上,而内存使用降低了 100 倍(更重要的是,现在是有上界的)。我们最大的一些客户在搜索回放时不再遇到错误,我们现在也可以在不耗尽内存的情况下支持任意规模的客户。” - ## 视频资源 {#video-sources} - [Lost in the Haystack - Optimizing High Cardinality Aggregations](https://www.youtube.com/watch?v=paK84-EUJCA) - 来自 Sentry 的生产环境内存优化实战经验 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md b/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md index 
d56b64590bb..72648b22cae 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tips-and-tricks/too-many-parts.md @@ -21,14 +21,10 @@ title: '经验总结 - Too Many Parts 问题' description: 'Too Many Parts 问题的解决方案与预防' --- - - # 部分过多问题 {#the-too-many-parts-problem} *本指南属于一系列基于社区线下交流与经验分享整理而成的内容。若想获取更多真实场景下的解决方案和见解,可以[按具体问题浏览](./community-wisdom.md)。* *需要更多性能优化方面的建议?请查看[性能优化](./performance-optimization.md)社区洞见指南。* - - ## 理解问题 {#understanding-the-problem} ClickHouse 会抛出 “Too many parts” 错误,以避免出现严重的性能下降。过多的小 part 会引发多种问题:查询期间需要读取和合并更多文件,导致查询性能下降;内存使用增加,因为每个 part 都需要在内存中保存元数据;压缩效率降低,因为更小的数据块压缩效果更差;由于更多的文件句柄和寻道操作而带来更高的 I/O 开销;以及后台合并变慢,使合并调度器的工作量显著增加。 @@ -38,8 +34,6 @@ ClickHouse 会抛出 “Too many parts” 错误,以避免出现严重的性 - [Parts](/parts) - [Parts System Table](/operations/system-tables/parts) - - ## 及早识别问题 {#recognize-parts-problem} 此查询通过分析所有活动表的分片数量和大小来监控表碎片情况。它会识别出分片数量过多或过小、可能需要合并优化的表。请定期使用此查询,在碎片问题影响查询性能之前将其发现。 @@ -76,7 +70,6 @@ ORDER BY total_parts DESC LIMIT 20; ``` - ## 视频资源 {#video-sources} - [Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse](https://www.youtube.com/watch?v=AsMPEfN5QtM) - 由 ClickHouse 团队成员讲解异步 INSERT 及 “too many parts” 问题 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md b/i18n/zh/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md index f2eb4c4329d..8ed0b0029e1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tools-and-utilities/static-files-disk-uploader.md @@ -6,23 +6,18 @@ description: '提供 clickhouse-static-files-disk-uploader 实用工具的介绍 doc_type: 'guide' --- - - # clickhouse-static-files-disk-uploader {#clickhouse-static-files-disk-uploader} 生成一个数据目录,其中包含指定 ClickHouse 表的元数据。可以使用这些元数据在另一台服务器上创建一个 ClickHouse 表,该表包含由 `web` 磁盘作为后端的只读数据集。 
不要使用此工具迁移数据。请改用 [`BACKUP` 和 `RESTORE` 命令](/operations/backup)。 - - ## 使用方法 {#usage} ```bash $ clickhouse static-files-disk-uploader [args] ``` - ## 命令 {#commands} |命令|说明| @@ -34,8 +29,6 @@ $ clickhouse static-files-disk-uploader [args] |`--url [url]`|`test` 模式下使用的 Web 服务器 URL| |`--output-dir [dir]`|在 `non-test` 模式下输出文件的目录| - - ## 获取指定表的元数据路径 {#retrieve-metadata-path-for-the-specified-table} 使用 `clickhouse-static-files-disk-uploader` 时,必须先获取所需表的元数据路径。 @@ -60,7 +53,6 @@ SELECT data_paths └───────────────────────────────────────────────────────┘ ``` - ## 将表的元数据目录导出到本地文件系统 {#output-table-metadata-directory-to-the-local-filesystem} 使用目标输出目录 `output` 和指定的元数据路径,执行以下命令: @@ -75,7 +67,6 @@ $ clickhouse static-files-disk-uploader --output-dir output --metadata-path ./st 数据路径:"/Users/john/store/bcc/bccc1cfd-d43d-43cf-a5b6-1cda8178f1ee",目标路径:"output" ``` - ## 将表元数据目录输出到外部 URL {#output-table-metadata-directory-to-an-external-url} 此步骤与将数据目录输出到本地文件系统类似,但需要额外添加 `--test-mode` 标志。不同之处在于,你不再指定输出目录,而必须通过 `--url` 标志指定目标 URL。 @@ -86,7 +77,6 @@ $ clickhouse static-files-disk-uploader --output-dir output --metadata-path ./st $ clickhouse static-files-disk-uploader --test-mode --url http://nginx:80/test1 --metadata-path ./store/bcc/bccc1cfd-d43d-43cf-a5b6-1cda8178f1ee/ ``` - ## 使用表元数据目录创建 ClickHouse 表 {#using-the-table-metadata-directory-to-create-a-clickhouse-table} 获得表元数据目录后,你可以使用它在另一台服务器上创建一个 ClickHouse 表。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tutorial.md b/i18n/zh/docusaurus-plugin-content-docs/current/tutorial.md index c4df89d9c4c..9f306111d2c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tutorial.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tutorial.md @@ -9,12 +9,8 @@ show_related_blogs: true doc_type: 'guide' --- - - # 进阶教程 {#advanced-tutorial} - - ## 概述 {#overview} 了解如何使用纽约市出租车示例数据集在 ClickHouse 中摄取和查询数据。 @@ -25,7 +21,6 @@ doc_type: 'guide' - ## 创建新表 {#create-a-new-table} 纽约市出租车数据集包含数百万次出租车行程的详细信息,其中的列包括小费金额、过路费、支付方式等。创建一个表来存储这些数据。 @@ 
-89,8 +84,6 @@ doc_type: 'guide' ORDER BY pickup_datetime; ``` - - ## 添加数据集 {#add-the-dataset} 表已经创建好之后,接下来从 S3 中的 CSV 文件中添加纽约市出租车数据。 @@ -159,8 +152,6 @@ doc_type: 'guide' 此查询应返回 1,999,657 行。 - - ## 分析数据 {#analyze-the-data} 运行查询以分析数据。您可以参考以下示例或尝试编写自己的 SQL 查询。 @@ -267,8 +258,6 @@ doc_type: 'guide' 预期输出

- - ```response ┌──────────────avg_tip─┬───────────avg_fare─┬──────avg_passenger─┬──count─┬─trip_minutes─┐ │ 1.9600000381469727 │ 8 │ 1 │ 1 │ 27511 │ @@ -340,8 +329,6 @@ doc_type: 'guide'

- - 7. 查询前往 LaGuardia 或 JFK 机场的行程: ```sql SELECT @@ -382,8 +369,6 @@ doc_type: 'guide'

- - ## 创建字典 {#create-a-dictionary} 字典是在内存中存储的键值对映射。详情请参见 [Dictionaries](/sql-reference/dictionaries/index.md) @@ -467,7 +452,6 @@ LAYOUT(HASHED_ARRAY()) ORDER BY total DESC ``` - 此查询汇总了各行政区在拉瓜迪亚机场或 JFK 机场结束的出租车行程次数。结果如下所示,可以注意到有相当多行程的上车区域是未知的: ```response @@ -484,7 +468,6 @@ LAYOUT(HASHED_ARRAY()) 7 行数据。耗时:0.019 秒。处理了 2.00 百万行,4.00 MB(105.70 百万行/秒,211.40 MB/秒)。 ``` - ## 执行连接查询 {#perform-a-join} 编写一些查询语句,将 `taxi_zone_dictionary` 与 `trips` 表进行连接。 @@ -537,7 +520,6 @@ LAYOUT(HASHED_ARRAY())
- ## 后续步骤 {#next-steps} 通过以下文档进一步了解 ClickHouse: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md index 28a34892140..c06544d5834 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/AIChat/index.md @@ -21,20 +21,16 @@ import img_history from '@site/static/images/use-cases/AI_ML/AIChat/5_history.pn import img_result_actions from '@site/static/images/use-cases/AI_ML/AIChat/6_result_actions.png'; import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_editor.png'; - # 在 ClickHouse Cloud 中使用 AI Chat {#using-ai-chat-in-clickhouse-cloud} > 本指南介绍如何在 ClickHouse Cloud 控制台中启用并使用 AI Chat 功能。 - ## 前提条件 {#prerequisites} 1. 您必须具备访问已启用 AI 功能的 ClickHouse Cloud 组织的权限(如果尚未开通,请联系您所在组织的管理员或支持团队)。 - - ## 打开 AI Chat 面板 {#open-panel} 1. 进入某个 ClickHouse Cloud 服务。 @@ -43,8 +39,6 @@ import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_ed - - ## 接受数据使用授权(首次运行) {#consent} 1. 首次使用时,会弹出一个同意对话框,说明数据处理方式以及第三方 LLM 子处理方。 @@ -52,8 +46,6 @@ import img_new_tab from '@site/static/images/use-cases/AI_ML/AIChat/7_open_in_ed - - ## 选择聊天模式 {#modes} AI Chat 当前支持: @@ -65,30 +57,22 @@ AI Chat 当前支持: - - ## 撰写并发送消息 {#compose} 1. 输入你的问题(例如:“创建一个物化视图,用于按用户聚合每日事件”)。 2. 按下 Enter 键发送(使用 Shift + Enter 键换行)。 3. 在模型处理过程中,你可以点击“Stop”来中断。 - - ## 了解 Agent 的思考步骤 {#thinking-steps} 在 Agent 模式下,你可能会看到可展开的中间“思考”或规划步骤。这些步骤可以帮助你了解助手是如何生成回答的。你可以根据需要将它们折叠或展开。 - - ## 开始新的对话 {#new-chats} 点击 “New Chat” 按钮以清除现有上下文并开始新的会话。 - - ## 查看聊天历史记录 {#history} 1. 下方区域会列出您最近的聊天记录。 @@ -97,8 +81,6 @@ AI Chat 当前支持: - - ## 使用生成的 SQL {#sql-actions} 当助手返回 SQL 时: @@ -111,8 +93,6 @@ AI Chat 当前支持: - - ## 停止或中断回复 {#interrupt} 如果回复耗时过长或开始偏离预期: @@ -120,8 +100,6 @@ AI Chat 当前支持: 1. 点击“停止”按钮(在生成过程中可见)。 2. 
消息会被标记为已中断;你可以调整提示后重新发送。 - - ## 键盘快捷键 {#shortcuts} | 操作 | 快捷键 | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md index fe4fa5c7ddb..d5a3ac1ab4f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/01_remote_mcp.md @@ -22,14 +22,12 @@ import img5 from '@site/static/images/use-cases/AI_ML/MCP/5connected_mcp_claude. import img6 from '@site/static/images/use-cases/AI_ML/MCP/6slash_mcp_claude.png'; import img7 from '@site/static/images/use-cases/AI_ML/MCP/7usage_mcp.png'; - # 启用 ClickHouse Cloud 远程 MCP 服务器 {#enabling-the-clickhouse-cloud-remote-mcp-server} > 本指南介绍如何启用和使用 ClickHouse Cloud 远程 MCP 服务器。本示例使用 Claude Code 作为 MCP 客户端,但您可以使用任何支持 MCP 的 LLM 客户端。 - ## 为 ClickHouse Cloud 服务启用远程 MCP 服务器 {#enable-remote-mcp-server} 1. 连接到 ClickHouse Cloud 服务,单击 `Connect` 按钮,为该服务启用远程 MCP 服务器 @@ -44,7 +42,6 @@ import img7 from '@site/static/images/use-cases/AI_ML/MCP/7usage_mcp.png'; https://mcp.clickhouse.cloud/mcp ``` - ## 在 Claude Code 中添加 ClickHouse MCP 服务器 {#add-clickhouse-mcp-server-claude-code} 1. 在您的工作目录中运行以下命令,将 ClickHouse Cloud MCP 服务器的配置添加到 Claude Code 中。在此示例中,我们在 Claude Code 配置中将 MCP 服务器命名为 `clickhouse_cloud`。 @@ -71,7 +68,6 @@ claude mcp add --transport http clickhouse_cloud https://mcp.clickhouse.cloud/mc [user@host ~/Documents/repos/mcp_test] $ claude ``` - ## 通过 OAuth 验证 ClickHouse Cloud 身份 {#authenticate-via-oauth} 1. 在首次会话时,Claude Code 会打开一个浏览器窗口。否则,您也可以在 Claude Code 中运行 `/mcp` 命令,并选择 `clickhouse_cloud` MCP 服务器来发起连接 @@ -82,8 +78,6 @@ claude mcp add --transport http clickhouse_cloud https://mcp.clickhouse.cloud/mc - - ## 从 Claude Code 使用 ClickHouse Cloud 远程 MCP 服务器 {#use-rempte-mcp-from-claude-code} 1. 
在 Claude Code 中验证远程 MCP 服务器已连接 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md index 48c88b2cc4b..733fc942faa 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/02_claude-desktop.md @@ -18,7 +18,6 @@ import FindMCPServers from '@site/static/images/use-cases/AI_ML/MCP/find-mcp-ser import MCPPermission from '@site/static/images/use-cases/AI_ML/MCP/mcp-permission.png'; import ClaudeConversation from '@site/static/images/use-cases/AI_ML/MCP/claude-conversation.png'; - # 在 Claude Desktop 中使用 ClickHouse MCP 服务器 {#using-clickhouse-mcp-server-with-claude-desktop} > 本指南介绍如何使用 uv 为 Claude Desktop 设置 ClickHouse MCP 服务器, @@ -37,20 +36,15 @@ import ClaudeConversation from '@site/static/images/use-cases/AI_ML/MCP/claude-c - ## 安装 uv {#install-uv} 你需要先安装 [uv](https://docs.astral.sh/uv/),才能按照本指南中的步骤进行操作。 如果你不打算使用 uv,则需要更新 MCP Server 配置以改用其他包管理器。 - - ## 下载 Claude Desktop {#download-claude-desktop} 你还需要安装 Claude Desktop 应用程序,可以从 [Claude Desktop 网站](https://claude.ai/desktop) 下载。 - - ## 配置 ClickHouse MCP 服务器 {#configure-clickhouse-mcp-server} 在你安装好 Claude Desktop 之后,就可以开始配置 [ClickHouse MCP 服务器](https://github.com/ClickHouse/mcp-clickhouse) 了。 @@ -113,7 +107,6 @@ MCP mcp-clickhouse: spawn uv ENOENT 如果发生这种情况,你需要更新 `command`,将 `uv` 的完整路径填入其中。比如,如果你是通过 Cargo 安装的,路径会是 `/Users/<username>/.cargo/bin/uv` ::: - ## 使用 ClickHouse MCP 服务器 {#using-clickhouse-mcp-server} 重启 Claude Desktop 后,您可以通过点击 `Search and tools` 图标找到 ClickHouse MCP 服务器: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md index 85aa4fdb843..7de1f314a50 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/03_librechat.md @@ -15,7 +15,6 @@ import Link from '@docusaurus/Link'; import Image from '@theme/IdealImage'; import LibreInterface from '@site/static/images/use-cases/AI_ML/MCP/librechat.png'; - # 在 LibreChat 中使用 ClickHouse MCP 服务器 {#using-clickhouse-mcp-server-with-librechat} > 本指南介绍如何使用 Docker 设置 LibreChat 与 ClickHouse MCP 服务器, @@ -23,7 +22,6 @@ import LibreInterface from '@site/static/images/use-cases/AI_ML/MCP/librechat.pn - ## 安装 Docker {#install-docker} 要运行 LibreChat 和 MCP 服务器,需要先安装 Docker。获取 Docker 的步骤如下: @@ -34,8 +32,6 @@ import LibreInterface from '@site/static/images/use-cases/AI_ML/MCP/librechat.pn
更多信息请参见 [Docker 文档](https://docs.docker.com/get-docker/)。 - - ## 克隆 LibreChat 仓库 {#clone-librechat-repo} 打开终端(命令提示符、终端或 PowerShell),然后使用以下命令克隆 LibreChat 仓库: @@ -45,7 +41,6 @@ git clone https://github.com/danny-avila/LibreChat.git cd LibreChat ``` - ## 创建和编辑 .env 文件 {#create-and-edit-env-file} 将示例配置文件 `.env.example` 复制并重命名为 `.env`: @@ -56,7 +51,6 @@ cp .env.example .env 使用你常用的文本编辑器打开 `.env` 文件。你会看到许多主流 LLM 提供商的配置段落,包括 OpenAI、Anthropic、AWS Bedrock 等,例如: - ```text title=".venv" #============# # Anthropic # {#anthropic} @@ -73,7 +67,6 @@ ANTHROPIC_API_KEY=user_provided 如果你没有 API key,可以使用像 Ollama 这样的本地 LLM。你将在后面的步骤 ["Install Ollama"](#add-local-llm-using-ollama) 中看到具体操作方法。暂时先不要修改 .env 文件,继续执行后续步骤。 ::: - ## 创建 librechat.yaml 文件 {#create-librechat-yaml-file} 运行以下命令新建一个 `librechat.yaml` 文件: @@ -84,7 +77,6 @@ cp librechat.example.yaml librechat.yaml 这会创建用于 LibreChat 的[主配置文件](https://www.librechat.ai/docs/configuration/librechat_yaml)。 - ## 将 ClickHouse MCP 服务器添加到 Docker Compose {#add-clickhouse-mcp-server-to-docker-compose} 接下来,我们将把 ClickHouse MCP 服务器添加到 LibreChat 的 Docker Compose 文件中, @@ -137,7 +129,6 @@ services: /> - ## 在 librechat.yaml 中配置 MCP 服务器 {#configure-mcp-server-in-librechat-yaml} 打开 `librechat.yaml`,将以下配置添加到文件末尾: @@ -163,7 +154,6 @@ socialLogins: ['github', 'google', 'discord', 'openid', 'facebook', 'apple', 'sa socialLogins: [] ``` - ## 使用 Ollama 添加本地 LLM(可选) {#add-local-llm-using-ollama} ### 安装 Ollama {#install-ollama} @@ -203,7 +193,6 @@ custom: modelDisplayLabel: "Ollama" ``` - ## 启动所有服务 {#start-all-services} 在 LibreChat 项目的根目录下,运行以下命令来启动这些服务: @@ -214,7 +203,6 @@ docker compose up 等待所有服务全部启动完成。 - ## 在浏览器中打开 LibreChat {#open-librechat-in-browser} 所有服务启动并运行后,打开浏览器并访问 `http://localhost:3080/` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md index ae02284671e..71b31b9b2e7 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/04_anythingllm.md @@ -18,7 +18,6 @@ import Conversation from '@site/static/images/use-cases/AI_ML/MCP/allm_conversat import MCPServers from '@site/static/images/use-cases/AI_ML/MCP/allm_mcp-servers.png'; import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png'; - # 在 AnythingLLM 中使用 ClickHouse MCP 服务器 {#using-clickhouse-mcp-server-with-anythingllm} > 本指南介绍如何使用 Docker 设置 [AnythingLLM](https://anythingllm.com/) 与 ClickHouse MCP 服务器, @@ -26,7 +25,6 @@ import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png' - ## 安装 Docker {#install-docker} 要运行 LibreChat 和 MCP 服务器,需要先安装 Docker。获取 Docker 的步骤如下: @@ -37,8 +35,6 @@ import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png'
更多信息请参阅 [Docker 文档](https://docs.docker.com/get-docker/)。 - - ## 拉取 AnythingLLM Docker 镜像 {#pull-anythingllm-docker-image} 运行以下命令,将 AnythingLLM Docker 镜像拉取到本地机器: @@ -47,7 +43,6 @@ import ToolIcon from '@site/static/images/use-cases/AI_ML/MCP/alm_tool-icon.png' docker pull anythingllm/anythingllm ``` - ## 设置存储位置 {#setup-storage-location} 创建一个用于存储数据的目录,并初始化环境文件: @@ -58,7 +53,6 @@ mkdir -p $STORAGE_LOCATION && \ touch "$STORAGE_LOCATION/.env" ``` - ## 配置 MCP Server 的配置文件 {#configure-mcp-server-config-file} 创建 `plugins` 目录: @@ -95,7 +89,6 @@ mkdir -p "$STORAGE_LOCATION/plugins" 如果你想探索自己的数据,可以使用你自己的 ClickHouse Cloud 服务的 [主机地址、用户名和密码](https://clickhouse.com/docs/getting-started/quick-start/cloud#connect-with-your-app)。 - ## 启动 AnythingLLM Docker 容器 {#start-anythingllm-docker-container} 运行以下命令来启动 AnythingLLM Docker 容器: @@ -112,7 +105,6 @@ mintplexlabs/anythingllm 启动完成后,在浏览器中访问 `http://localhost:3001`。 选择要使用的模型,并提供您的 API 密钥。 - ## 等待 MCP 服务器启动 {#wait-for-mcp-servers-to-start-up} 点击界面左下角的工具图标: @@ -124,8 +116,6 @@ mintplexlabs/anythingllm - - ## 使用 AnythingLLM 与 ClickHouse MCP Server 对话 {#chat-with-clickhouse-mcp-server-with-anythingllm} 现在可以开始对话了。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md index 5b490949042..2fb3944e2d6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/05_open-webui.md @@ -25,7 +25,6 @@ import AddConnection from '@site/static/images/use-cases/AI_ML/MCP/7_add_connect import OpenAIModels from '@site/static/images/use-cases/AI_ML/MCP/8_openai_models_more.png'; import Conversation from '@site/static/images/use-cases/AI_ML/MCP/9_conversation.png'; - # 在 Open WebUI 中使用 ClickHouse MCP 服务器 {#using-clickhouse-mcp-server-with-open-webui} > 本指南介绍如何配置 [Open WebUI](https://github.com/open-webui/open-webui) 与 
ClickHouse MCP 服务器, @@ -33,14 +32,11 @@ import Conversation from '@site/static/images/use-cases/AI_ML/MCP/9_conversation - ## 安装 uv {#install-uv} 要按照本指南的说明进行操作,你需要先安装 [uv](https://docs.astral.sh/uv/)。 如果你不想使用 uv,则需要更新 MCP 服务器配置以使用其他包管理器。 - - ## 启动 Open WebUI {#launch-open-webui} 要启动 Open WebUI,可以运行以下命令: @@ -51,7 +47,6 @@ uv run --with open-webui open-webui serve 访问 [http://localhost:8080/](http://localhost:8080/) 查看 UI。 - ## 配置 ClickHouse MCP Server {#configure-clickhouse-mcp-server} 要配置 ClickHouse MCP Server,我们需要将 MCP Server 暴露为一组 OpenAPI 端点。 @@ -93,7 +88,6 @@ uvx mcpo --port 8000 -- uv run --with mcp-clickhouse --python 3.10 mcp-clickhous - ## 配置 OpenAI {#configure-openai} 默认情况下,Open WebUI 使用 Ollama 模型,但我们也可以添加兼容 OpenAI 的端点。 @@ -109,8 +103,6 @@ uvx mcpo --port 8000 -- uv run --with mcp-clickhouse --python 3.10 mcp-clickhous - - ## 使用 Open WebUI 与 ClickHouse MCP 服务器对话 {#chat-to-clickhouse-mcp-server} 然后我们可以进行对话,Open WebUI 会在必要时调用 MCP 服务器: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md index cf30ce3d3f6..097088a61e8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/06_ollama.md @@ -14,14 +14,12 @@ import {CardHorizontal} from '@clickhouse/click-ui/bundled' import Link from '@docusaurus/Link'; import Image from '@theme/IdealImage'; - # 使用 ClickHouse MCP 服务器与 Ollama {#using-clickhouse-mcp-server-with-ollama} > 本指南介绍如何使用 ClickHouse MCP 服务器与 Ollama。 - ## 安装 Ollama {#install-ollama} Ollama 是一个用于在本机运行大语言模型(LLM)的库。 @@ -93,7 +91,6 @@ ollama show qwen3 从该输出可以看出,默认的 qwen3 模型拥有稍多于 80 亿个参数。 - ## 安装 MCPHost {#install-mcphost} 截至撰写本文时(2025 年 7 月),还没有将 Ollama 与 MCP Servers 一起使用的原生支持。 @@ -108,7 +105,6 @@ go install github.com/mark3labs/mcphost@latest 该可执行文件会安装在 `~/go/bin` 目录下,因此我们需要确保该目录已添加到我们的 `PATH` 中。 - ## 配置 ClickHouse MCP 服务器 
{#configure-clickhouse-mcp-server} 我们可以在 YAML 或 JSON 配置文件中通过 MCPHost 配置 MCP 服务器。 @@ -157,7 +153,6 @@ export CLICKHOUSE_PASSWORD="" 原则上,可以在 MCP 配置文件的 `environment` 字段下提供这些变量,但我们发现这样做并不起作用。 ::: - ## 运行 MCPHost {#running-mcphost} 配置好 ClickHouse MCP 服务器后,可以通过运行以下命令来运行 MCPHost: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md index 6ed65e7a44b..3aea79ea4a8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/07_janai.md @@ -26,14 +26,12 @@ import ToolsCalled from '@site/static/images/use-cases/AI_ML/MCP/8_janai_tools_c import ToolsCalledExpanded from '@site/static/images/use-cases/AI_ML/MCP/9_janai_tools_called_expanded.png'; import Result from '@site/static/images/use-cases/AI_ML/MCP/10_janai_result.png'; - # 在 Jan.ai 中使用 ClickHouse MCP 服务器 {#using-clickhouse-mcp-server-with-janai} > 本指南介绍如何在 [Jan.ai](https://jan.ai/docs) 中使用 ClickHouse MCP 服务器。 - ## 安装 Jan.ai {#install-janai} Jan.ai 是一个开源的 ChatGPT 替代品,可以100% 离线运行。 @@ -41,8 +39,6 @@ Jan.ai 是一个开源的 ChatGPT 替代品,可以100% 离线运行。 这是一个原生应用,下载完成后即可启动使用。 - - ## 将 LLM 添加到 Jan.ai {#add-llm-to-janai} 我们可以在设置菜单中启用模型。 @@ -51,8 +47,6 @@ Jan.ai 是一个开源的 ChatGPT 替代品,可以100% 离线运行。 - - ## 启用 MCP 服务器 {#enable-mcp-servers} 在撰写本文时,MCP 服务器仍然是 Jan.ai 中的一项实验性功能。 @@ -62,8 +56,6 @@ Jan.ai 是一个开源的 ChatGPT 替代品,可以100% 离线运行。 启用该开关后,我们会在左侧菜单中看到 `MCP Servers`。 - - ## 配置 ClickHouse MCP Server {#configure-clickhouse-mcp-server} 点击 `MCP Servers` 菜单后,我们会看到可连接的 MCP 服务器列表: @@ -84,8 +76,6 @@ Jan.ai 是一个开源的 ChatGPT 替代品,可以100% 离线运行。 - - ## 使用 Jan.ai 与 ClickHouse MCP 服务器对话 {#chat-to-clickhouse-mcp-server} 现在可以开始查询存储在 ClickHouse 中的数据了! 
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md index a22e836907a..ef3ba53bae1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/agno.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 Agno 和 ClickHouse MCP Server 构建 AI Agent {#how-to-build-an-ai-agent-with-agno-and-the-clickhouse-mcp-server} 在本指南中,你将学习如何构建一个 [Agno](https://github.com/agno-agi/agno) AI agent,使其能够通过 [ClickHouse 的 MCP Server](https://github.com/ClickHouse/mcp-clickhouse) 与 [ClickHouse 的 SQL playground](https://sql.clickhouse.com/) 进行交互。 @@ -20,8 +18,6 @@ doc_type: 'guide' 该示例可以在 [示例仓库](https://github.com/ClickHouse/examples/blob/main/ai/mcp/agno/agno.ipynb) 中以 Notebook 的形式找到。 ::: - - ## 前置条件 {#prerequisites} - 您需要在系统上安装 Python。 @@ -32,7 +28,6 @@ doc_type: 'guide' - ## 安装库 {#install-libraries} 运行以下命令来安装 Agno 库: @@ -43,7 +38,6 @@ pip install -q agno pip install -q ipywidgets ``` - ## 配置凭证 {#setup-credentials} 接下来,您需要提供 Anthropic API 密钥: @@ -74,7 +68,6 @@ env = { } ``` - ## 初始化 MCP 服务器和 Agno 代理 {#initialize-mcp-and-agent} 现在配置 ClickHouse MCP 服务器指向 ClickHouse SQL 演练场, diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md index 4ab1c503f3c..484c5408c29 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/chainlit.md @@ -10,20 +10,14 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 Chainlit 和 ClickHouse MCP Server 构建 AI 智能体 
{#how-to-build-an-ai-agent-with-chainlit-and-the-clickhouse-mcp-server} 本指南介绍如何将功能强大的 Chainlit 聊天界面框架与 ClickHouse Model Context Protocol (MCP) Server 相结合,以构建交互式数据应用程序。Chainlit 使你可以用最少的代码为 AI 应用构建对话式界面,而 ClickHouse MCP Server 则提供与 ClickHouse 高性能列式数据库的无缝集成。 - - ## 前提条件 {#prerequisites} - 需要一个 Anthropic API 密钥 - 需要已安装 [`uv`](https://docs.astral.sh/uv/getting-started/installation/) - - ## 基本 Chainlit 应用 {#basic-chainlit-app} 你可以通过运行以下命令来查看一个简单聊天应用的示例: @@ -34,7 +28,6 @@ uv run --with anthropic --with chainlit chainlit run chat_basic.py -w -h 然后访问 `http://localhost:8000` - ## 添加 ClickHouse MCP Server {#adding-clickhouse-mcp-server} 在添加 ClickHouse MCP Server 后,事情会变得更有趣。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md index 7e294fa10a3..2d16cb61c0e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/claude-agent-sdk.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 Claude Agent SDK 和 ClickHouse MCP Server 构建 AI Agent {#how-to-build-an-ai-agent-with-claude-agent-sdk-and-the-clickhouse-mcp-server} 在本指南中,您将学习如何使用 [ClickHouse 的 MCP Server](https://github.com/ClickHouse/mcp-clickhouse),构建一个可以与 [ClickHouse 的 SQL Playground](https://sql.clickhouse.com/) 交互的 [Claude Agent SDK](https://docs.claude.com/en/api/agent-sdk/overview) AI Agent。 @@ -20,8 +18,6 @@ doc_type: 'guide' 该示例可以在 [examples 仓库](https://github.com/ClickHouse/examples/blob/main/ai/mcp/claude-agent/claude-agent.ipynb) 中找到对应的笔记本。 ::: - - ## 前提条件 {#prerequisites} - 您需要在系统上安装 Python。 @@ -32,7 +28,6 @@ doc_type: 'guide' - ## 安装库 {#install-libraries} 运行以下命令安装 Claude Agent SDK 库: @@ -43,7 +38,6 @@ pip install -q claude-agent-sdk pip install -q ipywidgets ``` - ## 
设置凭据 {#setup-credentials} 接下来,您需要提供 Anthropic API 密钥: @@ -69,7 +63,6 @@ env = { } ``` - ## 初始化 MCP 服务器和 Claude Agent SDK 代理 {#initialize-mcp-and-agent} 现在将 ClickHouse MCP 服务器配置为指向 ClickHouse SQL playground,然后初始化我们的代理并向它提出一个问题: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md index 8a4ca241769..5527761c900 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/copilotkit.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 CopilotKit 和 ClickHouse MCP Server 构建 AI 智能体 {#how-to-build-an-ai-agent-with-copilotkit-and-the-clickhouse-mcp-server} 这是一个示例,演示如何使用存储在 ClickHouse 中的数据构建智能体应用。它使用 [ClickHouse MCP Server](https://github.com/ClickHouse/mcp-clickhouse) @@ -24,15 +22,11 @@ doc_type: 'guide' 此示例的代码可以在 [examples 仓库](https://github.com/ClickHouse/examples/edit/main/ai/mcp/copilotkit) 中找到。 ::: - - ## 前提条件 {#prerequisites} - `Node.js >= 20.14.0` - `uv >= 0.1.0` - - ## 安装依赖 {#install-dependencies} 在本地克隆项目:`git clone https://github.com/ClickHouse/examples`,然后 @@ -40,8 +34,6 @@ doc_type: 'guide' 可以跳过本节,直接运行脚本 `./install.sh` 来安装依赖。若要手动安装依赖,请按照下文说明进行操作。 - - ## 手动安装依赖 {#install-dependencies-manually} 1. 
安装依赖: @@ -65,21 +57,16 @@ uv sync uv add fastmcp ``` - ## 配置应用程序 {#configure-the-application} 将 `env.example` 文件复制到 `.env`,并在其中填入您的 `ANTHROPIC_API_KEY`。 - - ## 使用你自己的 LLM {#use-your-own-llm} 如果你希望使用 Anthropic 以外的其他 LLM 提供商,可以修改 Copilotkit 运行时以使用不同的 LLM 适配器。 受支持的提供商列表见[这里](https://docs.copilotkit.ai/guides/bring-your-own-llm)。 - - ## 使用您自己的 ClickHouse 集群 {#use-your-own-clickhouse-cluster} 默认情况下,本示例默认配置为连接到 @@ -91,8 +78,6 @@ Copilotkit 运行时以使用不同的 LLM 适配器。 - `CLICKHOUSE_PASSWORD` - `CLICKHOUSE_SECURE` - - # 运行应用 {#run-the-application} 运行 `npm run dev` 来启动开发服务器。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md index f97cf6fcb4c..a7346f419b7 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/llamaindex.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 ClickHouse MCP Server 构建 LlamaIndex AI Agent {#how-to-build-a-llamaindex-ai-agent-using-clickhouse-mcp-server} 在本指南中,你将学习如何构建一个 [LlamaIndex](https://docs.llamaindex.ai) AI Agent,使其能够通过 [ClickHouse 的 MCP Server](https://github.com/ClickHouse/mcp-clickhouse) 与 [ClickHouse 的 SQL playground](https://sql.clickhouse.com/) 进行交互。 @@ -20,8 +18,6 @@ doc_type: 'guide' 该示例可以在 [examples 仓库](https://github.com/ClickHouse/examples/blob/main/ai/mcp/llamaindex/llamaindex.ipynb)中以 Notebook 形式查看。 ::: - - ## 前置条件 {#prerequisites} - 您需要在系统上安装 Python。 @@ -32,7 +28,6 @@ doc_type: 'guide' - ## 安装依赖库 {#install-libraries} 运行以下命令来安装所需的依赖库: @@ -42,7 +37,6 @@ pip install -q --upgrade pip pip install -q llama-index clickhouse-connect llama-index-llms-anthropic llama-index-tools-mcp ``` - ## 设置凭据 {#setup-credentials} 接下来,您需要提供 Anthropic API 密钥: @@ -61,7 +55,6 @@ os.environ["ANTHROPIC_API_KEY"] = 
getpass.getpass("Enter Anthropic API Key:") 可以在 [LlamaIndex「LLMs」文档](https://docs.llamaindex.ai/en/stable/examples/) 中找到配置凭据的说明。 ::: - ## 初始化 MCP Server {#initialize-mcp-and-agent} 现在将 ClickHouse MCP Server 配置为指向 ClickHouse SQL playground。 @@ -92,7 +85,6 @@ mcp_tool_spec = McpToolSpec( ) ``` - tools = await mcp_tool_spec.to_tool_list_async() ```` @@ -110,7 +102,6 @@ agent_worker = FunctionCallingAgentWorker.from_tools( agent = AgentRunner(agent_worker) ```` - ## 初始化 LLM {#initialize-llm} 使用以下代码初始化 Claude Sonnet 4.0 模型: @@ -120,7 +111,6 @@ from llama_index.llms.anthropic import Anthropic llm = Anthropic(model="claude-sonnet-4-0") ``` - ## 运行代理 {#run-agent} 最后,您可以向代理提问: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md index 255505f9843..48c78251402 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/openai-agents.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 ClickHouse MCP Server 构建 OpenAI Agent {#how-to-build-an-openai-agent-using-clickhouse-mcp-server} 在本指南中,你将学习如何构建一个 [OpenAI](https://github.com/openai/openai-agents-python) agent,使其可以通过 [ClickHouse 的 MCP Server](https://github.com/ClickHouse/mcp-clickhouse) 与 [ClickHouse 的 SQL playground](https://sql.clickhouse.com/) 交互。 @@ -20,8 +18,6 @@ doc_type: 'guide' 该示例可以在 [示例仓库](https://github.com/ClickHouse/examples/blob/main/ai/mcp/openai-agents/openai-agents.ipynb) 中找到对应的笔记本。 ::: - - ## 前置条件 {#prerequisites} - 系统需已安装 Python。 @@ -32,7 +28,6 @@ doc_type: 'guide' - ## 安装库 {#install-libraries} 运行以下命令安装所需库: @@ -42,7 +37,6 @@ pip install -q --upgrade pip pip install -q openai-agents ``` - ## 设置凭据 {#setup-credentials} 接下来,您需要提供 OpenAI API 密钥: @@ -56,7 +50,6 @@ 
os.environ["OPENAI_API_KEY"] = getpass.getpass("输入 OpenAI API 密钥:") 输入 OpenAI API 密钥:········ ``` - ## 初始化 MCP Server 和 OpenAI 代理 {#initialize-mcp-and-agent} 现在将 ClickHouse MCP Server 配置为连接到 ClickHouse SQL playground, @@ -155,7 +148,6 @@ async with MCPServerStdio( simple_render_chunk(chunk) ``` - ```response title="响应" 运行中:2025 年迄今为止最大的 GitHub 项目是什么? 🔧 工具:list_databases({}) diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md index 200335f3a8b..94fe26c5dfb 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/pydantic-ai.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 ClickHouse MCP Server 构建 PydanticAI 代理 {#how-to-build-a-pydanticai-agent-using-clickhouse-mcp-server} 在本指南中,您将学习如何构建一个 [PydanticAI](https://ai.pydantic.dev/mcp/client/#__tabbed_1_1) 代理, @@ -21,8 +19,6 @@ doc_type: 'guide' 该示例可以在 [示例仓库](https://github.com/ClickHouse/examples/blob/main/ai/mcp/pydanticai/pydantic.ipynb) 中以 notebook 形式查阅。 ::: - - ## 前提条件 {#prerequisites} - 需要在系统上安装 Python。 @@ -33,7 +29,6 @@ doc_type: 'guide' - ## 安装库 {#install-libraries} 通过运行以下命令来安装所需的库: @@ -44,7 +39,6 @@ pip install -q "pydantic-ai-slim[mcp]" pip install -q "pydantic-ai-slim[anthropic]" # 如果使用其他 LLM 提供商,请替换为对应的包 ``` - ## 设置凭据 {#setup-credentials} 接下来,您需要提供 Anthropic API 密钥: @@ -75,7 +69,6 @@ env = { } ``` - ## 初始化 MCP Server 和 PydanticAI 代理 {#initialize-mcp} 现在将 ClickHouse MCP Server 配置为连接到 ClickHouse SQL Playground: @@ -98,7 +91,6 @@ server = MCPServerStdio( agent = Agent('anthropic:claude-sonnet-4-0', mcp_servers=[server]) ``` - ## 向智能体提问 {#ask-agent} 最后,你可以向智能体提出一个问题: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md index f08647df5a0..dfa9309819a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/slackbot.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 ClickHouse MCP Server 构建 SlackBot 代理 {#how-to-build-a-slackbot-agent-using-clickhouse-mcp-server} 在本指南中,您将学习如何构建一个 [SlackBot](https://slack.com/intl/en-gb/help/articles/202026038-An-introduction-to-Slackbot) 代理。 @@ -22,8 +20,6 @@ doc_type: 'guide' 此示例的代码可以在 [examples 仓库](https://github.com/ClickHouse/examples/blob/main/ai/mcp/slackbot/README.md) 中找到。 ::: - - ## 前置条件 {#prerequisites} - 您需要安装 [`uv`](https://docs.astral.sh/uv/getting-started/installation/) @@ -32,22 +28,17 @@ doc_type: 'guide' - ## 创建 Slack 应用 {#create-a-slack-app} 1. 前往 [slack.com/apps](https://slack.com/apps) 并点击 `Create New App`。 2. 选择 `From scratch` 选项,并为您的应用命名。 3. 选择您的 Slack 工作区。 - - ## 将应用安装到你的工作区 {#install-the-app-to-your-workspace} 接下来,你需要将上一步创建的应用添加到你的工作区。 你可以参考 Slack 文档中的说明:[“Add apps to your Slack workspace”](https://slack.com/intl/en-gb/help/articles/202035138-Add-apps-to-your-Slack-workspace)。 - - ## 配置 Slack 应用设置 {#configure-slack-app-settings} - 前往 `App Home` @@ -73,8 +64,6 @@ doc_type: 'guide' - `message:im` - 保存更改 - - ## 添加环境变量 (`.env`) {#add-env-vars} 在项目根目录创建一个 `.env` 文件,并加入以下环境变量, @@ -93,7 +82,6 @@ CLICKHOUSE_SECURE=true 如果您愿意,可以调整这些 ClickHouse 变量,以使用您自己的 ClickHouse 服务器或 ClickHouse Cloud 实例。 - ## 使用机器人 {#using-the-bot} 1. 
**启动机器人:** diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md index 6f4f50d73c7..c27deb1678b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/MCP/ai_agent_libraries/streamlit.md @@ -10,8 +10,6 @@ show_related_blogs: true doc_type: 'guide' --- - - # 如何使用 Streamlit 构建基于 ClickHouse 的 AI 代理 {#how-to-build-a-clickhouse-backed-ai-agent-with-streamlit} 在本指南中,您将学习如何使用 [Streamlit](https://streamlit.io/) 构建一个基于 Web 的 AI 代理,它可以通过 [ClickHouse 的 MCP Server](https://github.com/ClickHouse/mcp-clickhouse) 和 [Agno](https://github.com/agno-agi/agno) 与 [ClickHouse 的 SQL playground](https://sql.clickhouse.com/) 进行交互。 @@ -21,8 +19,6 @@ doc_type: 'guide' 您可以在 [示例仓库](https://github.com/ClickHouse/examples/tree/main/ai/mcp/streamlit) 中找到该示例的源代码。 ::: - - ## 前置条件 {#prerequisites} - 您需要在系统上安装 Python。 @@ -33,7 +29,6 @@ doc_type: 'guide' - ## 安装库 {#install-libraries} 通过运行以下命令来安装所需的库: @@ -42,7 +37,6 @@ doc_type: 'guide' pip install streamlit agno ipywidgets ``` - ## 创建工具文件 {#create-utilities} 创建一个名为 `utils.py` 的文件,其中包含两个工具函数。第一个是一个用于处理来自 Agno 代理的流式响应的异步函数生成器,第二个是一个用于为 Streamlit 应用程序设置样式的函数: @@ -68,7 +62,6 @@ def apply_styles():
""", unsafe_allow_html=True) ``` - ## 设置凭证 {#setup-credentials} 将 Anthropic API 密钥设置为环境变量: @@ -82,7 +75,6 @@ export ANTHROPIC_API_KEY="your_api_key_here" 可以在 [Agno「Integrations(集成)」文档](https://docs.agentops.ai/v2/integrations/ag2) 中找到配置凭据的相关说明。 ::: - ## 导入所需的库 {#import-libraries} 首先创建主 Streamlit 应用程序文件(例如 `app.py`),并添加如下导入: @@ -107,7 +99,6 @@ import threading from queue import Queue ``` - ## 定义代理的流式函数 {#define-agent-function} 添加主代理函数,该函数连接到 [ClickHouse 的 SQL Playground](https://sql.clickhouse.com/),并以流式方式输出响应: @@ -158,7 +149,6 @@ async def stream_clickhouse_agent(message): yield chunk.content ``` - ## 添加同步包装函数 {#add-wrapper-functions} 在 Streamlit 中添加用于处理异步流式传输的帮助函数: @@ -181,7 +171,6 @@ async def _agent_stream_to_queue(message, queue): queue.put(chunk) ``` - ## 创建 Streamlit 界面 {#create-interface} 添加 Streamlit 界面组件和聊天功能: @@ -211,7 +200,6 @@ if prompt := st.chat_input("有什么可以帮您?"): st.session_state.messages.append({"role": "assistant", "content": response}) ``` - ## 运行应用程序 {#run-application} 要启动您的 ClickHouse AI 代理 Web 应用程序,请在终端中运行以下命令: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/data-exploration/jupyter-notebook.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/data-exploration/jupyter-notebook.md index 47ceaae8b8a..d3f645d8c50 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/data-exploration/jupyter-notebook.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/AI_ML/data-exploration/jupyter-notebook.md @@ -18,7 +18,6 @@ import image_7 from '@site/static/images/use-cases/AI_ML/jupyter/7.png'; import image_8 from '@site/static/images/use-cases/AI_ML/jupyter/8.png'; import image_9 from '@site/static/images/use-cases/AI_ML/jupyter/9.png'; - # 使用 Jupyter Notebook 和 chDB 探索数据 {#exploring-data-with-jupyter-notebooks-and-chdb} 在本指南中,您将学习如何借助 [chDB](/chdb)(一个由 ClickHouse 驱动的快速进程内 SQL OLAP 引擎),在 Jupyter Notebook 中探索 ClickHouse Cloud 上的数据集。 @@ -112,7 +111,6 @@ result = chdb.query("SELECT 
'你好,ClickHouse!' as message") print(result) ``` - ## 探索数据 {#exploring-the-data} 在已经完成 UK price paid 数据集的配置,并在 Jupyter notebook 中成功运行 chDB 之后,我们现在可以开始探索这些数据。 @@ -237,7 +235,6 @@ df_2 = chdb.query(query, "DataFrame") df_2.head() ``` -
在一步中从多个数据源读取 你也可以在一步中从多个数据源读取。可以使用下面带有 `JOIN` 的查询来实现: @@ -323,7 +320,6 @@ plt.show() 2012 年之后增速显著加快,从约 £400,000 急剧上升,到 2019 年超过 £1,000,000。 与销售量不同,价格几乎未受到 2008 年危机的影响,并一直保持上升趋势。真是惊人! - ## 总结 {#summary} 本指南演示了如何通过 chDB 将 ClickHouse Cloud 与本地数据源连接起来,在 Jupyter Notebook 中实现无缝的数据探索。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md index b6b00ecc138..88bb24a6c2e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/glue_catalog.md @@ -20,7 +20,6 @@ ClickHouse 支持与多个目录集成(Unity、Glue、Polaris 等)。在本 Glue 支持多种不同的表格式,但此集成仅支持 Iceberg 表。 ::: - ## 在 AWS 中配置 Glue {#configuring} 要连接到 Glue 数据目录,您需要确定目录所在的区域,并提供访问密钥和秘密访问密钥。 @@ -43,7 +42,6 @@ SETTINGS aws_secret_access_key = '' ``` - ## 使用 ClickHouse 查询 Glue 数据目录 {#query-glue-catalog} 现在连接已经建立,可以开始查询 Glue 了: @@ -81,7 +79,6 @@ SELECT count(*) FROM `iceberg-benchmark.hitsiceberg`; SHOW CREATE TABLE `iceberg-benchmark.hitsiceberg`; ``` - ```sql title="Response" ┌─statement───────────────────────────────────────────────┐ 1.│ CREATE TABLE glue.`iceberg-benchmark.hitsiceberg` │ diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md index 65cfe18c4a7..0e2626b5069 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/lakekeeper_catalog.md @@ -32,7 +32,6 @@ Lakekeeper 是一个面向 Apache Iceberg 的开源 REST catalog 实现,提供 `SET allow_experimental_database_iceberg = 1;` ::: - ## 本地开发环境设置 {#local-development-setup} 在进行本地开发和测试时,你可以使用容器化的 Lakekeeper 环境。此方式非常适合用于学习、原型验证和开发环境。 @@ -230,7 +229,6 @@ docker-compose logs -f Lakekeeper 的部署要求必须先将样例数据加载到 Iceberg 表中。请确保在通过 
ClickHouse 查询这些表之前,环境中已经创建并填充好这些表。表是否可用取决于具体的 docker-compose 配置和样例数据加载脚本。 ::: - ### 连接到本地 Lakekeeper 目录 {#connecting-to-local-lakekeeper-catalog} 连接到 ClickHouse 容器: @@ -249,7 +247,6 @@ ENGINE = DataLakeCatalog('http://lakekeeper:8181/catalog', 'minio', 'ClickHouse_ SETTINGS catalog_type = 'rest', storage_endpoint = 'http://minio:9002/warehouse-rest', warehouse = 'demo' ``` - ## 使用 ClickHouse 查询 Lakekeeper 目录表 {#querying-lakekeeper-catalog-tables-using-clickhouse} 现在连接已经建立,你可以开始通过 Lakekeeper 目录来查询数据。例如: @@ -333,7 +330,6 @@ SHOW CREATE TABLE `default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## 将数据湖中的数据加载到 ClickHouse {#loading-data-from-your-data-lake-into-clickhouse} 如需将 Lakekeeper 目录中的数据加载到 ClickHouse,请先创建一个本地 ClickHouse 表: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md index 6e9432ea5a1..e1ab5a7a96a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/nessie_catalog.md @@ -35,7 +35,6 @@ Nessie 是一个面向数据湖的开源事务型 catalog,提供: `SET allow_experimental_database_iceberg = 1;` ::: - ## 本地开发环境设置 {#local-development-setup} 在进行本地开发和测试时,你可以使用容器化的 Nessie 环境。此方式非常适合用于学习、原型验证以及开发环境。 @@ -149,7 +148,6 @@ docker-compose logs -f Nessie 设置使用基于内存的版本存储,并要求先将示例数据加载到 Iceberg 表中。请确保在通过 ClickHouse 查询这些表之前,环境中已经创建并填充好这些表。 ::: - ### 连接到本地 Nessie Catalog {#connecting-to-local-nessie-catalog} 连接到 ClickHouse 容器: @@ -168,7 +166,6 @@ ENGINE = DataLakeCatalog('http://nessie:19120/iceberg', 'admin', 'password') SETTINGS catalog_type = 'rest', storage_endpoint = 'http://minio:9002/my-bucket', warehouse = 'warehouse' ``` - ## 使用 ClickHouse 查询 Nessie 目录表 {#querying-nessie-catalog-tables-using-clickhouse} 现在连接已就绪,您可以开始通过 Nessie 目录执行查询。例如: @@ -252,7 +249,6 @@ SHOW CREATE TABLE 
`default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## 将数据湖中的数据加载到 ClickHouse {#loading-data-from-your-data-lake-into-clickhouse} 如果需要将 Nessie 目录中的数据加载到 ClickHouse,请首先创建一个本地 ClickHouse 表: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md index 646c7b13f65..81bc6033879 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/onelake_catalog.md @@ -23,7 +23,6 @@ Microsoft OneLake 的 lakehouse 支持多种表格式。借助 ClickHouse,你 `SET allow_database_iceberg = 1;` ::: - ## 收集 OneLake 所需信息 {#gathering-requirements} 在 Microsoft Fabric 中查询数据表之前,你需要收集以下信息: @@ -43,7 +42,6 @@ Microsoft OneLake 的 lakehouse 支持多种表格式。借助 ClickHouse,你 SET allow_database_iceberg=1 ``` - ### 连接 OneLake {#connect-onelake} ```sql @@ -59,7 +57,6 @@ onelake_client_id = '', onelake_client_secret = '' ``` - ## 使用 ClickHouse 查询 OneLake {#querying-onelake-using-clickhouse} 连接已建立后,您就可以开始查询 OneLake 了: @@ -120,7 +117,6 @@ source_file: green_tripdata_2017-05.parquet 要查看该表的 DDL: - ```sql SHOW CREATE TABLE onelake_catalog.`year_2017.green_tripdata_2017` @@ -155,7 +151,6 @@ Query id: 8bd5bd8e-83be-453e-9a88-32de12ba7f24 └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## 将数据湖中的数据导入 ClickHouse {#loading-data-from-onelake-into-clickhouse} 如果您需要从 OneLake 向 ClickHouse 导入数据: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md index 6aa788fccf8..67a80012179 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/rest_catalog.md @@ -32,7 +32,6 @@ REST Catalog 是针对 Iceberg catalog 的标准化 API 规范,已被多种平 `SET allow_experimental_database_iceberg = 1;` ::: - ## 本地开发环境设置 {#local-development-setup} 在本地开发和测试场景中,你可以使用容器化的 REST 目录(REST catalog)来进行部署配置。此方式非常适合用于学习、原型验证和开发环境。 @@ -89,7 +88,6 @@ docker-compose logs -f REST catalog 的配置要求必须先将示例数据加载到 Iceberg 表中。请确保 Spark 环境已经创建并填充好这些表,然后再通过 ClickHouse 尝试查询它们。表是否可用取决于所使用的具体 docker-compose 配置以及示例数据加载脚本。 ::: - ### 连接到本地 REST Catalog {#connecting-to-local-rest-catalog} 连接到 ClickHouse 容器: @@ -111,7 +109,6 @@ SETTINGS warehouse = 'demo' ``` - ## 使用 ClickHouse 查询 REST 目录表 {#querying-rest-catalog-tables-using-clickhouse} 连接建立完成后,就可以通过 REST 目录开始查询。例如: @@ -195,7 +192,6 @@ SHOW CREATE TABLE `default.taxis`; └───────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ## 将数据湖(Data Lake)中的数据加载到 ClickHouse {#loading-data-from-your-data-lake-into-clickhouse} 如果需要将 REST 目录中的数据加载到 ClickHouse,请先创建一个本地 ClickHouse 表: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md index 7a50f5eb0a3..f87b3cce72b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/data_lake/unity_catalog.md @@ -28,7 +28,6 @@ Databricks 为其湖仓(lakehouse)支持多种数据格式。借助 ClickHou `SET allow_experimental_database_unity_catalog = 1;` ::: - ## 在 Databricks 中配置 Unity {#configuring-unity-in-databricks} 为了允许 ClickHouse 与 Unity Catalog 交互,需要确保已将 Unity Catalog 配置为允许与外部读取方交互。可按照[“Enable external data access to Unity Catalog”](https://docs.databricks.com/aws/en/external-access/admin) 指南进行配置。 @@ -53,7 +52,6 @@ ENGINE = DataLakeCatalog('https://.cloud.databricks.com/api/2.1/un SETTINGS warehouse = 'CATALOG_NAME', catalog_credential = '', catalog_type = 'unity' ``` - ### 读取 
Iceberg 表 {#read-iceberg} ```sql @@ -63,7 +61,6 @@ SETTINGS catalog_type = 'rest', catalog_credential = ':); ``` - ## 从数据湖将数据加载到 ClickHouse {#loading-data-from-your-data-lake-into-clickhouse} 如果需要将 Databricks 中的数据加载到 ClickHouse,请先创建一个本地 ClickHouse 表: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md index 74f6f32b2da..6e09038aed4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/grafana.md @@ -19,7 +19,6 @@ import observability_23 from '@site/static/images/use-cases/observability/observ import observability_24 from '@site/static/images/use-cases/observability/observability-24.png'; import Image from '@theme/IdealImage'; - # 使用 Grafana 和 ClickHouse 构建可观测性 {#using-grafana-and-clickhouse-for-observability} Grafana 是 ClickHouse 可观测性数据的首选可视化工具。这是通过 Grafana 官方的 ClickHouse 插件实现的。用户可以按照[此处](/integrations/grafana)的安装说明进行操作。 @@ -55,7 +54,6 @@ SELECT Timestamp as timestamp, Body as body, SeverityText as level, TraceId as t 查询构建器提供了一种简便方式来修改查询,从而避免用户手写 SQL。包括关键词搜索在内的筛选操作都可以在查询构建器中完成。希望编写更复杂查询的用户可以切换到 SQL 编辑器。只要返回了合适的列,并且在 Query Type 中选择了 `logs`,结果就会以日志的形式呈现。用于日志渲染的必需列列在[此处](https://grafana.com/developers/plugin-tools/tutorials/build-a-logs-data-source-plugin#logs-data-frame-format)。 - ### 从日志跳转到 Trace {#logs-to-traces} 如果日志中包含 trace ID,用户可以从特定的日志行导航到对应的 trace 进行查看。 @@ -85,7 +83,6 @@ WHERE ( Timestamp >= $__fromTime AND Timestamp <= $__toTime ) 如需编写更复杂的查询,用户可以切换到 `SQL 编辑器`。 - ### 查看 Trace 详情 {#view-trace-details} 如上所示,Trace ID 会显示为可点击的链接。点击某个 Trace ID 后,用户可以通过 `View Trace` 链接查看关联的 span。系统会执行如下查询(假设使用 OTel 列),以所需结构检索这些 span,并将结果以瀑布图形式呈现。 @@ -120,7 +117,6 @@ LIMIT 1000 - ### Trace 到日志 {#traces-to-logs} 如果日志中包含 trace ID,用户可以从某个 trace 跳转到其关联的日志。要查看日志,单击某个 trace ID 并选择 `View 
Logs`。在使用默认 OTel 列的情况下,将会执行如下查询。 @@ -135,7 +131,6 @@ ORDER BY timestamp ASC LIMIT 1000 - ## 仪表盘 {#dashboards} 用户可以在 Grafana 中使用 ClickHouse 数据源构建仪表盘。建议参考 Grafana 与 ClickHouse 的[数据源文档](https://github.com/grafana/clickhouse-datasource)以获取更多详细信息,尤其是其中关于[宏](https://github.com/grafana/clickhouse-datasource?tab=readme-ov-file#macros)和[变量](https://grafana.com/docs/grafana/latest/dashboards/variables/)的内容。 @@ -165,7 +160,6 @@ LIMIT 100000 - ### 多折线图 {#multi-line-charts} 当查询满足以下条件时,会自动渲染为多折线图: @@ -191,7 +185,6 @@ LIMIT 100000 - ### 可视化地理数据 {#visualizing-geo-data} 在前文中,我们已经探讨了如何使用 IP 字典为可观测性数据补充地理坐标。假设已经有 `latitude` 和 `longitude` 列,就可以使用 `geohashEncode` 函数来对可观测性数据进行可视化。该函数生成的地理哈希与 Grafana Geo Map 图表兼容。下面展示了一个示例查询和可视化结果: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md index 3ba6fea2a7c..7d306e445d3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/integrating-opentelemetry.md @@ -16,7 +16,6 @@ import observability_8 from '@site/static/images/use-cases/observability/observa import observability_9 from '@site/static/images/use-cases/observability/observability-9.png'; import Image from '@theme/IdealImage'; - # 集成 OpenTelemetry 进行数据采集 {#integrating-opentelemetry-for-data-collection} 任何可观测性解决方案都需要一种方式来采集并导出日志和追踪(traces)。为此,ClickHouse 推荐使用 [OpenTelemetry(OTel)项目](https://opentelemetry.io/)。 @@ -112,7 +111,6 @@ Collector 提供了两个用于收集日志的主要 receiver: 我们建议用户尽可能采用结构化日志,并使用 JSON 格式记录日志(例如 ndjson)。这将简化后续对日志所需的处理,无论是在发送到 ClickHouse 之前使用 [Collector 处理器](https://opentelemetry.io/docs/collector/configuration/#processors),还是在写入时通过物化视图进行处理。结构化日志最终将节省后续处理所需的资源,从而降低 ClickHouse 部署中的 CPU 需求。 - ### 示例 {#example} 
作为示例,我们提供了一个结构化(JSON)和一个非结构化的日志数据集,每个大约包含 1,000 万行,可通过以下链接获取: @@ -166,7 +164,6 @@ service: 如果使用结构化日志,输出的消息将具有如下形式: - ```response LogRecord #98 ObservedTimestamp: 2024-06-19 13:21:16.414259 +0000 UTC @@ -206,7 +203,6 @@ Operators 是日志处理的最基本单元。每个 operator 只负责一项职 对于需要采集本地或 Kubernetes 日志文件的用户,我们建议熟悉 [filelog receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/filelogreceiver/README.md#configuration) 提供的配置选项,以及[偏移量(offset)](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver#offset-tracking)管理和[多行日志解析的处理方式](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver#example---multiline-logs-parsing)。 - ## 收集 Kubernetes 日志 {#collecting-kubernetes-logs} 对于收集 Kubernetes 日志,我们建议参考 [OpenTelemetry 的 Kubernetes 指南](https://opentelemetry.io/docs/kubernetes/)。建议使用 [Kubernetes Attributes Processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) 来为日志和指标补充 pod(容器组)元数据。这样可以生成动态元数据(例如标签),并将其存储在 `ResourceAttributes` 列中。ClickHouse 当前为该列使用 `Map(String, String)` 类型。关于如何处理和优化此类型,请参阅 [使用 Map](/use-cases/observability/schema-design#using-maps) 和 [从 Map 中提取](/use-cases/observability/schema-design#extracting-from-maps)。 @@ -279,7 +275,6 @@ Attributes: trace 消息的完整 schema 可以在[这里](https://opentelemetry.io/docs/concepts/signals/traces/)找到。我们强烈建议用户充分熟悉这一 schema。 - ## 处理:过滤、转换和丰富 {#processing---filtering-transforming-and-enriching} 如前面设置日志事件时间戳的示例所示,用户通常需要对事件消息进行过滤、转换和丰富。这可以通过 OpenTelemetry 中的多种功能来实现: @@ -340,7 +335,6 @@ service: ./otelcol-contrib --config config-unstructured-logs-with-processor.yaml ``` - ## 导出到 ClickHouse {#exporting-to-clickhouse} Exporter 会将数据发送到一个或多个后端或目标。Exporter 可以是拉取式或推送式。要将事件发送到 ClickHouse,用户需要使用推送式的 [ClickHouse exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md)。 @@ -403,7 +397,6 @@ service: 请注意以下关键设置: - * **pipelines** - 
上述配置重点展示了对 [pipelines](https://opentelemetry.io/docs/collector/configuration/#pipelines) 的使用,它由一组 receivers、processors 和 exporters 组成,并分别为 logs 和 traces 定义了一个 pipeline。 * **endpoint** - 与 ClickHouse 的通信通过 `endpoint` 参数进行配置。连接字符串 `tcp://localhost:9000?dial_timeout=10s&compress=lz4&async_insert=1` 指定通过 TCP 进行通信。如果用户出于流量切换等原因更偏好使用 HTTP,请按照[此处](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md#configuration-options)的说明修改该连接字符串。完整的连接细节(包括在连接字符串中指定用户名和密码的功能)在[这里](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/clickhouseexporter/README.md#configuration-options)有详细描述。 @@ -431,7 +424,6 @@ $GOBIN/telemetrygen traces --otlp-insecure --traces 300 运行后,使用一个简单查询确认已存在日志事件: - ```sql SELECT * FROM otel_logs @@ -492,7 +484,6 @@ Links.TraceState: [] Links.Attributes: [] ``` - ## 开箱即用的 schema {#out-of-the-box-schema} 默认情况下,ClickHouse exporter 会为 logs 和 traces 分别创建目标表。可以通过设置 `create_schema` 来禁用此行为。此外,可以通过上述设置,将 logs 和 traces 表的名称从默认的 `otel_logs` 和 `otel_traces` 修改为其他名称。 @@ -541,7 +532,6 @@ SETTINGS ttl_only_drop_parts = 1 关于此 schema,有几点重要说明: - - 默认情况下,表通过 `PARTITION BY toDate(Timestamp)` 按日期进行分区。这样可以高效地删除过期数据。 - TTL 通过 `TTL toDateTime(Timestamp) + toIntervalDay(3)` 设置,并与在 collector 配置中设置的值相对应。[`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts) 表示仅在某个数据分片内所有行都已过期时才会删除整个分片。相比在分片内部逐行删除(会触发代价高昂的删除操作),这种方式更加高效。我们建议始终启用该设置。更多细节请参见 [Data management with TTL](/observability/managing-data#data-management-with-ttl-time-to-live)。 - 表使用经典的 [`MergeTree` engine](/engines/table-engines/mergetree-family/mergetree)。这对于日志和 trace 是推荐的选择,通常不需要更改。 @@ -594,7 +584,6 @@ SETTINGS ttl_only_drop_parts = 1 我们建议用户禁用自动创建 schema 的功能,改为手动创建表。这样可以修改主键和辅助键,并可根据需要增加额外列以优化查询性能。更多详情参见 [Schema design](/use-cases/observability/schema-design)。 - ## 优化写入 {#optimizing-inserts} 为了在获得强一致性保证的同时实现高写入性能,用户在通过 OTel collector 向 ClickHouse 插入可观测性数据时,应遵循一些简单的规则。只要正确配置 OTel 
collector,遵循以下规则就会变得很简单。这也有助于避免用户在首次使用 ClickHouse 时遇到的一些[常见问题](https://clickhouse.com/blog/common-getting-started-issues-with-clickhouse)。 @@ -693,7 +682,6 @@ service: exporters: [otlp] ``` - [clickhouse-gateway-config.yaml](https://www.otelbin.io/#config=receivers%3A*N__otlp%3A*N____protocols%3A*N____grpc%3A*N____endpoint%3A_0.0.0.0%3A4317*N*Nprocessors%3A*N__batch%3A*N____timeout%3A_5s*N____send*_batch*_size%3A_10000*N*Nexporters%3A*N__clickhouse%3A*N____endpoint%3A_tcp%3A%2F%2Flocalhost%3A9000*Qdial*_timeout*E10s*Acompress*Elz4*N____ttl%3A_96h*N____traces*_table*_name%3A_otel*_traces*N____logs*_table*_name%3A_otel*_logs*N____create*_schema%3A_true*N____timeout%3A_10s*N____database%3A_default*N____sending*_queue%3A*N____queue*_size%3A_10000*N____retry*_on*_failure%3A*N____enabled%3A_true*N____initial*_interval%3A_5s*N____max*_interval%3A_30s*N____max*_elapsed*_time%3A_300s*N*Nservice%3A*N__pipelines%3A*N____logs%3A*N______receivers%3A_%5Botlp%5D*N______processors%3A_%5Bbatch%5D*N______exporters%3A_%5Bclickhouse%5D%7E\&distro=otelcol-contrib%7E\&distroVersion=v0.103.1%7E) ```yaml @@ -741,7 +729,6 @@ service: 关于如何管理更大规模的网关型架构及其相关经验总结,我们推荐阅读这篇[博客文章](https://clickhouse.com/blog/building-a-logging-platform-with-clickhouse-and-saving-millions-over-datadog)。 - ### 添加 Kafka {#adding-kafka} 读者可能已经注意到,上述架构并未使用 Kafka 作为消息队列。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md index c05a16d6448..07cbb76e5c6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/introduction.md @@ -11,7 +11,6 @@ import observability_1 from '@site/static/images/use-cases/observability/observa import observability_2 from 
'@site/static/images/use-cases/observability/observability-2.png'; import Image from '@theme/IdealImage'; - # 使用 ClickHouse 实现可观测性 {#using-clickhouse-for-observability} ## 引言 {#introduction} @@ -86,7 +85,6 @@ import Image from '@theme/IdealImage'; 虽然 ClickHouse 可以用于存储 metrics 数据,但在 ClickHouse 中,这一支柱目前尚不够成熟,对 Prometheus 数据格式和 PromQL 等特性的支持仍在完善中。 ::: - ### 分布式追踪 {#distributed-tracing} 分布式追踪是可观测性中的关键特性。分布式追踪(通常简称为 trace)用于描绘请求在系统中的完整路径。请求源自终端用户或应用程序,并在系统中扩散,通常表现为在各个微服务之间的一系列操作流转。通过记录这一序列,并支持对后续事件进行关联,可观测性平台用户或 SRE 能够在不受架构复杂度或是否采用无服务器架构影响的情况下,对应用流程中的问题进行诊断。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md index 55d675061f6..89c7e773a38 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/managing-data.md @@ -10,7 +10,6 @@ doc_type: 'guide' import observability_14 from '@site/static/images/use-cases/observability/observability-14.png'; import Image from '@theme/IdealImage'; - # 管理数据 {#managing-data} 为可观测性部署 ClickHouse 通常会涉及大规模数据集,而这些数据集需要妥善管理。ClickHouse 提供了多种功能来协助进行数据管理。 @@ -78,7 +77,6 @@ WHERE `table` = 'otel_logs' 我们可能还会有一张名为 `otel_logs_archive` 的表,用于存储较旧的数据。可以通过分区将数据高效地移动到该表(这只是元数据层面的变更)。 - ```sql CREATE TABLE otel_logs_archive AS otel_logs --将数据移至归档表 @@ -145,7 +143,6 @@ ORDER BY c DESC 当使用设置 [`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts) 时,TTL 会使用此特性。有关更多详细信息,请参阅 [基于 TTL(生存时间)的数据管理](#data-management-with-ttl-time-to-live)。 ::: - ### 应用场景 {#applications} 上文展示了如何按分区高效地迁移和处理数据。在实际使用中,在可观测性场景下,用户最常利用分区操作的两种场景是: @@ -193,7 +190,6 @@ TTL 不是立即应用,而是按计划执行,如上所述。MergeTree 表设 **重要:我们推荐使用设置 
[`ttl_only_drop_parts=1`](/operations/settings/merge-tree-settings#ttl_only_drop_parts)**(默认模式中已应用)。启用该设置后,当一个数据分片中的所有行都已过期时,ClickHouse 会直接删除整个分片。相比于在 `ttl_only_drop_parts=0` 时通过资源消耗较大的 mutation 对 TTL 过期的行进行部分清理,删除整个分片可以缩短 `merge_with_ttl_timeout` 的时间,并降低对系统性能的影响。如果数据按与执行 TTL 过期相同的时间单位进行分区,例如按天分区,那么各个分片自然只会包含该时间区间的数据。这将确保可以高效地应用 `ttl_only_drop_parts=1`。 - ### 列级 TTL {#column-level-ttl} 上面的示例是在表级别设置数据过期。用户也可以在列级别设置数据过期。随着数据变旧,可以用这种方式删除那些在排障或分析中价值不足以抵消其保留成本的列。例如,我们建议保留 `Body` 列,以防新增的动态元数据在插入时尚未被提取出来,比如一个新的 Kubernetes 标签。在经过一段时间(例如 1 个月)后,如果显然这些附加元数据并没有带来实际价值,那么继续保留 `Body` 列的意义就有限了。 @@ -215,7 +211,6 @@ ORDER BY (ServiceName, Timestamp) 指定列级 TTL 时,用户需要自行定义表结构(schema)。这一点无法通过 OTel collector 配置。 ::: - ## 重新压缩数据 {#recompressing-data} 虽然我们通常建议对可观测性数据集使用 `ZSTD(1)`,但用户可以尝试不同的压缩算法或更高的压缩级别,例如 `ZSTD(3)`。除了在创建 schema 时进行指定外,还可以配置在经过一段预设时间后更改压缩方式。如果某种编解码器或压缩算法能带来更好的压缩率,但会导致较差的查询性能,那么这种配置可能是合适的。对于较旧的数据,由于查询不那么频繁,这种权衡是可以接受的;但对于近期数据,由于更频繁地在排障和调查中被使用,则通常不可接受。 @@ -254,7 +249,6 @@ TTL Timestamp + INTERVAL 4 DAY RECOMPRESS CODEC(ZSTD(3)) 有关配置 TTL 的更多详细信息和示例,请参见[此处](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes)。关于如何为表和列添加和修改 TTL 的示例,请参见[此处](/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl)。关于 TTL 如何支持诸如冷热分层架构等存储层级,请参见[存储层级](#storage-tiers)。 - ## 存储分层 {#storage-tiers} 在 ClickHouse 中,用户可以在不同磁盘上创建存储分层,例如将热数据或近期数据存放在 SSD 上,而将较旧的数据存放在由 S3 支持的存储上。此架构可以让旧数据使用成本更低的存储,而这些数据由于在排障调查中使用频率较低,其查询 SLA 通常可以更宽松。 @@ -351,7 +345,6 @@ LIMIT 5 为了确保今后所有数据都会插入该值,我们可以按下面所示使用 `ALTER TABLE` 语法来修改我们的物化视图: - ```sql ALTER TABLE otel_logs_mv MODIFY QUERY @@ -378,7 +371,6 @@ FROM otel_logs 之后插入的行会在写入时自动填充 `Size` 列。 - ### 创建新表 {#create-new-tables} 作为上述流程的另一种选择,用户可以直接使用新的 schema 创建一个新的目标表。然后,可以通过前面介绍的 `ALTER TABLE MODIFY QUERY` 修改任意物化视图以使用该新表。采用这种方式,用户可以为表进行版本管理,例如 `otel_logs_v3`。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md 
b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md index f9f84d6b79a..0ffd7848bd2 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/build-your-own/schema-design.md @@ -13,7 +13,6 @@ import observability_12 from '@site/static/images/use-cases/observability/observ import observability_13 from '@site/static/images/use-cases/observability/observability-13.png'; import Image from '@theme/IdealImage'; - # 为可观测性设计 schema {#designing-a-schema-for-observability} 我们建议用户始终为日志和追踪数据创建自己的 schema,原因如下: @@ -82,7 +81,6 @@ LIMIT 5 我们通常建议用户在 ClickHouse 中对结构化日志进行 JSON 解析。我们有信心 ClickHouse 提供了最快的 JSON 解析实现。不过,我们也意识到,有些用户可能希望将日志发送到其他系统,而不希望将这部分逻辑写在 SQL 中。 ::: - ```sql SELECT path(JSONExtractString(Body, 'request_path')) AS path, count() AS c FROM otel_logs @@ -156,7 +154,6 @@ LIMIT 5 用户也可以按[此处](/observability/integrating-opentelemetry#processing---filtering-transforming-and-enriching)所述,使用 OTel Collector 的 processors 和 operators 进行处理。在大多数情况下,用户会发现 ClickHouse 在资源利用率和速度方面都明显优于 collector 的 processors。使用 SQL 执行所有事件处理的主要缺点是会将你的解决方案与 ClickHouse 紧密耦合。例如,用户可能希望从 OTel collector 将已处理日志发送到其他目的地,例如 S3。 ::: - ### 物化列 {#materialized-columns} 物化列是从其他列中提取结构化信息的最简单方案。这类列的值始终在插入时计算,且不能在 INSERT 查询中显式指定。 @@ -224,7 +221,6 @@ Peak memory usage: 3.16 MiB. 
默认情况下,物化列不会包含在 `SELECT *` 的返回结果中。这样可以保持这样一个不变量:`SELECT *` 的结果始终可以通过 INSERT 语句原样插回同一张表。可以通过将 `asterisk_include_materialized_columns` 设置为 1 来关闭此行为,也可以在 Grafana 中启用该设置(参见数据源配置中的 `Additional Settings -> Custom Settings`)。 ::: - ## 物化视图 {#materialized-views} [物化视图](/materialized-views) 提供了一种更强大的方式,用于对日志和追踪应用 SQL 过滤和转换。 @@ -268,7 +264,6 @@ Null 表引擎是一种强大的优化手段——可以把它类比为 `/dev/nu 来看下面的查询。它将我们的行转换为希望保留的格式,从 `LogAttributes` 中提取所有列(我们假设这是由采集器使用 `json_parser` 算子设置的),并设置 `SeverityText` 和 `SeverityNumber`(基于一些简单条件以及对[这些列](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext)的定义)。在这个例子中,我们还仅选择那些我们确定会被填充的列——忽略诸如 `TraceId`、`SpanId` 和 `TraceFlags` 等列。 - ```sql SELECT Body, @@ -354,7 +349,6 @@ ORDER BY (ServiceName, Timestamp) 请注意,我们在这里对 schema(模式)进行了大幅调整。实际场景中,用户很可能还会有希望保留的 Trace 列,以及 `ResourceAttributes` 列(通常包含 Kubernetes 元数据)。Grafana 可以利用这些 Trace 列在日志与 Trace 之间提供跳转/关联功能——参见 ["Using Grafana"](/observability/grafana)。 ::: - 下面,我们创建一个物化视图 `otel_logs_mv`,它对 `otel_logs` 表执行上述 SELECT 查询,并将结果写入 `otel_logs_v2`。 ```sql @@ -417,7 +411,6 @@ SeverityNumber: 9 下面展示了一个等效的物化视图,它通过使用 JSON 函数从 `Body` 列中解析并提取各个字段: - ```sql CREATE MATERIALIZED VIEW otel_logs_mv TO otel_logs_v2 AS SELECT Body, @@ -440,7 +433,6 @@ SELECT Body, FROM otel_logs ``` - ### 注意类型 {#beware-types} 上面的物化视图依赖于隐式类型转换——尤其是在使用 `LogAttributes` 映射时。ClickHouse 通常会透明地将提取的值转换为目标表的类型,从而简化语法。不过,我们建议用户始终通过将视图中的 `SELECT` 语句与一个使用相同 schema 的目标表上的 [`INSERT INTO`](/sql-reference/statements/insert-into) 语句配合使用来测试视图。这样可以确认类型是否被正确处理。以下情况需要特别注意: @@ -495,7 +487,6 @@ groupArrayDistinctArray(mapKeys(LogAttributes)): ['remote_user','run_time','requ 我们不建议在 Map 列名中使用点号,将来可能会取消对此的支持。请改用 `_`。 ::: - ## 使用别名 {#using-aliases} 查询 `map` 类型列比查询普通列要慢——参见 ["加速查询"](#accelerating-queries)。此外,其语法更为复杂,用户书写起来也较为繁琐。为了解决这一问题,我们建议使用别名(ALIAS)列。 @@ -573,7 +564,6 @@ LIMIT 5 默认情况下,`SELECT *` 会排除 ALIAS 列。可以通过将 `asterisk_include_alias_columns` 设置为 `1` 来关闭此行为。 ::: - ## 优化类型 {#optimizing-types} 关于类型优化的 [ClickHouse 
通用最佳实践](/data-modeling/schema-design#optimizing-types) 同样适用于本 ClickHouse 场景。 @@ -694,7 +684,6 @@ LIMIT 4; 4 行数据。耗时: 0.259 秒。 ``` - :::note 上面的查询内容较多。感兴趣的读者可以查看这篇精彩的[说明](https://clickhouse.com/blog/geolocating-ips-in-clickhouse-and-grafana#using-bit-functions-to-convert-ip-ranges-to-cidr-notation)。否则,只需知道上述查询会为一个 IP 范围计算出 CIDR 即可。 ::: @@ -775,7 +764,6 @@ SELECT dictGet('ip_trie', ('country_code', 'latitude', 'longitude'), CAST('85.24 回到我们原始的日志数据集,我们可以利用上述结果按国家对日志进行聚合。下面的示例假设我们使用的是之前物化视图生成的 schema,其中包含提取出的 `RemoteAddress` 列。 - ```sql SELECT dictGet('ip_trie', 'country_code', tuple(RemoteAddress)) AS country, formatReadableQuantity(count()) AS num_requests @@ -833,7 +821,6 @@ ORDER BY (ServiceName, Timestamp) 上述国家和坐标信息不仅支持按国家进行分组和过滤,还提供了更丰富的可视化能力。可参考 ["可视化地理数据"](/observability/grafana#visualizing-geo-data) 获取灵感。 - ### 使用正则表达式字典(User-Agent 解析) {#using-regex-dictionaries-user-agent-parsing} [User agent 字符串](https://en.wikipedia.org/wiki/User_agent) 的解析是一个经典的正则表达式问题,也是基于日志和追踪数据集中的常见需求。ClickHouse 通过正则表达式树字典(Regular Expression Tree Dictionaries)高效解析 user agent。 @@ -929,7 +916,6 @@ LAYOUT(regexp_tree); 在加载了这些字典之后,我们可以提供一个示例 user-agent 字符串,并测试我们新的字典提取能力: - ```sql WITH 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:127.0) Gecko/20100101 Firefox/127.0' AS user_agent SELECT @@ -1006,7 +992,6 @@ ORDER BY (ServiceName, Timestamp, Status) 在重启 collector 并按照前文所述步骤摄取结构化日志之后,我们就可以对新提取出的 Device、Browser 和 Os 列进行查询了。 - ```sql SELECT Device, Browser, Os FROM otel_logs_v2 @@ -1024,7 +1009,6 @@ Os: ('Other','0','0','0') 请注意这些 user agent 列中对 Tuple 的使用。对于层级结构在预先已知的复杂结构,推荐使用 Tuple。子列在提供对异构类型支持的同时,能够保持与常规列相同的性能(不同于 Map 的键)。 ::: - ### 延伸阅读 {#further-reading} 如需查看更多字典示例和详细信息,我们推荐阅读以下文章: @@ -1111,7 +1095,6 @@ FINAL 1 row in set. Elapsed: 0.039 sec. 
``` - 通过存储查询结果,我们实际上将这里的行数从 1000 万(`otel_logs` 中)减少到了 113。关键在于,当有新的日志插入到 `otel_logs` 表时,其所属小时的最新数值会写入 `bytes_per_hour`,并在后台自动异步合并——由于我们每小时只保留一行,因此 `bytes_per_hour` 始终既数据量小又保持最新。 由于行合并是异步进行的,当用户查询时,每小时可能会存在多行。为了确保在查询时将所有尚未合并的行合并,我们有两个选项: @@ -1165,7 +1148,6 @@ LIMIT 5 在更大的数据集上执行更复杂的查询时,这种收益会更加显著。示例请参见[这里](https://github.com/ClickHouse/clickpy)。 ::: - #### 更复杂的示例 {#a-more-complex-example} 上面的示例使用 [SummingMergeTree](/engines/table-engines/mergetree-family/summingmergetree) 对每小时的简单计数进行聚合。若要获取超出简单求和的统计信息,则需要使用不同的目标表引擎:[AggregatingMergeTree](/engines/table-engines/mergetree-family/aggregatingmergetree)。 @@ -1244,7 +1226,6 @@ ORDER BY Hour DESC 请注意,这里我们使用 `GROUP BY` 而不是 `FINAL`。 - ### 使用物化视图(增量)实现快速查询 {#using-materialized-views-incremental--for-fast-lookups} 在选择 ClickHouse 排序键时,用户应结合自身的访问模式,将在过滤和聚合子句中经常使用的列包含在排序键中。但在可观测性场景下,这种方式可能会受到限制,因为用户的访问模式更加多样,无法用单一的一组列来概括。默认 OTel schema 中内置的一个示例最能说明这一点。以 traces 的默认 schema 为例: @@ -1316,7 +1297,6 @@ WHERE TraceId != '' GROUP BY TraceId ``` - 该视图可以有效确保表 `otel_traces_trace_id_ts` 记录了每个 trace 的最小和最大时间戳。该表按 `TraceId` 排序,使得这些时间戳可以被高效检索。随后,在查询主表 `otel_traces` 时可以利用这些时间戳范围。更具体地说,当通过 id 检索某个 trace 时,Grafana 会使用以下查询: ```sql @@ -1350,7 +1330,6 @@ LIMIT 1000 同样的方法也可以应用于类似的访问模式。我们在数据建模章节中[这里](/materialized-view/incremental-materialized-view#lookup-table)探讨了一个类似的示例。 - ### 使用投影 {#using-projections} ClickHouse 投影允许用户为表指定多个 `ORDER BY` 子句。 @@ -1460,7 +1439,6 @@ FORMAT `Null` 在上面的示例中,我们在投影中指定了先前查询中使用的列。这意味着只有这些指定的列会作为投影的一部分存储在磁盘上,并按照 Status 排序。或者,如果我们在这里使用 `SELECT *`,则所有列都会被包含在投影中并存储。这样虽然可以让更多查询(使用任意列子集)从该投影中获益,但会带来额外的存储开销。有关磁盘空间占用和压缩情况的衡量,请参见 ["Measuring table size & compression"](#measuring-table-size--compression)。 - ### Secondary/data skipping indices {#secondarydata-skipping-indices} 无论在 ClickHouse 中多么精细地调优主键,某些查询最终仍然仍需要对整张表进行全表扫描。虽然可以通过使用物化视图(以及对某些查询使用 projections)来缓解这一问题,但这需要额外的维护工作,并且要求用户了解这些对象的存在,才能在查询中真正加以利用。传统关系型数据库通常通过二级索引来解决这一问题,但在像 ClickHouse 这样的列式数据库中,这类索引并不高效。取而代之的是,ClickHouse 使用“跳过(Skip)索引”,通过允许数据库跳过包含无匹配值的大块数据,从而显著提升查询性能。 @@ -1616,7 
+1594,6 @@ WHERE Referer LIKE '%ultra%' 通常只有当布隆过滤器本身比该列更小时,它才会更快。如果它更大,那么性能收益很可能可以忽略不计。使用以下查询将过滤器的大小与该列进行比较: - ```sql SELECT name, @@ -1654,7 +1631,6 @@ Bloom filter 可能需要进行较为细致的调优。我们建议参考[此处 关于二级跳过索引的更多细节可以在[此处](/optimize/skipping-indexes#skip-index-functions)找到。 - ### 从 Map 中提取数据 {#extracting-from-maps} `Map` 类型在 OTel schema 中非常常见。该类型要求键和值使用相同的数据类型,这对于存储 Kubernetes label 等元数据已经足够。请注意,当查询 `Map` 类型的某个子键时,会加载整个父列。如果该 map 拥有大量键,那么相比将该键单独建成一列,这会导致需要从磁盘读取更多数据,从而带来显著的查询开销。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md index 94866b94a7c..a9ea85b7c8c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/config.md @@ -14,15 +14,14 @@ import hyperdx_26 from '@site/static/images/use-cases/observability/hyperdx-26.p ClickStack 的每个组件都提供如下配置选项: - ## 修改设置 {#modifying-settings} ### Docker {#docker} -如果使用 [All in One](/use-cases/observability/clickstack/deployment/all-in-one)、[HyperDX Only](/use-cases/observability/clickstack/deployment/hyperdx-only) 或 [Local Mode](/use-cases/observability/clickstack/deployment/local-mode-only),只需通过环境变量传递所需配置,例如: +如果使用 [All in One](/use-cases/observability/clickstack/deployment/all-in-one)、[HyperDX Only](/use-cases/observability/clickstack/deployment/hyperdx-only) 或 [Local Mode](/use-cases/observability/clickstack/deployment/local-mode-only),只需通过环境变量传递所需设置,例如: ```shell -docker run -e HYPERDX_LOG_LEVEL='debug' -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e HYPERDX_LOG_LEVEL='debug' -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` @@ -43,7 +42,6 @@ services: # ... 
其他配置 ``` - ### Helm {#helm} #### 自定义配置(可选) {#customizing-values} @@ -97,7 +95,6 @@ ingress: value: abc ``` - ## HyperDX {#hyperdx} ### 数据源设置 {#datasource-settings} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md index 1ac3388ac3b..a3820e7ec0a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/dashboards.md @@ -31,7 +31,6 @@ ClickStack 支持对事件进行可视化,并在 HyperDX 中内置了图表功 可视化可以基于 traces、metrics、logs,或任意用户自定义的宽表事件 schema 创建。 - ## 创建可视化图表 {#creating-visualizations} HyperDX 中的 **Chart Explorer** 界面允许用户在时间维度上可视化指标、追踪和日志,从而轻松创建用于数据分析的快速可视化图表。该界面在创建仪表板时也会复用。下文将演示如何使用 Chart Explorer 创建一个可视化图表的完整流程。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md index 5e7788b7b79..5029ed6aa82 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/all-in-one.md @@ -23,7 +23,6 @@ import hyperdx_logs from '@site/static/images/use-cases/observability/hyperdx-lo 此选项集成了身份验证功能,支持在不同会话和用户之间持久化仪表板、告警和已保存的搜索。 - ### 适用场景 {#suitable-for} * 演示 @@ -37,38 +36,42 @@ import hyperdx_logs from '@site/static/images/use-cases/observability/hyperdx-lo ### 使用 Docker 部署 {#deploy-with-docker} -以下命令将在端口 4317 和 4318 上运行一个 OpenTelemetry collector,并在端口 8080 上运行 HyperDX UI。 +以下命令将运行一个 OpenTelemetry collector(监听 4317 和 4318 端口)以及 HyperDX UI(监听 8080 端口)。 ```shell -docker run -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -p 8080:8080 -p 
4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` +:::note 镜像名称更新 +ClickStack 镜像现在发布为 `clickhouse/clickstack-*`(此前为 `docker.hyperdx.io/hyperdx/*`)。 +::: + ### 访问 HyperDX UI {#navigate-to-hyperdx-ui} -访问 [http://localhost:8080](http://localhost:8080) 以打开 HyperDX UI。 +访问 [http://localhost:8080](http://localhost:8080) 即可打开 HyperDX UI。 -创建一个用户,并提供满足要求的用户名和密码。 +创建一个用户,并提供符合要求的用户名和密码。 点击 `Create` 后,将为集成的 ClickHouse 实例创建数据源。 -有关使用其他 ClickHouse 实例的示例,请参阅 [“创建 ClickHouse Cloud 连接”](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection)。 +有关使用替代 ClickHouse 实例的示例,请参阅 ["创建 ClickHouse Cloud 连接"](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection)。 ### 摄取数据 {#ingest-data} -要摄取数据,请参阅[“摄取数据”](/use-cases/observability/clickstack/ingesting-data)。 +要摄取数据,请参阅 ["摄取数据"](/use-cases/observability/clickstack/ingesting-data)。 ## 持久化数据和设置 {#persisting-data-and-settings} -要在容器重启后保留数据和设置,可以修改上面的 docker 命令,将路径 `/data/db`、`/var/lib/clickhouse` 和 `/var/log/clickhouse-server` 挂载到宿主机上。例如: +为了在容器重启后仍然保留数据和设置,用户可以修改上面的 docker 命令,将路径 `/data/db`、`/var/lib/clickhouse` 和 `/var/log/clickhouse-server` 挂载为卷。例如: ```shell -# 确保目录存在 {#ensure-directories-exist} +# 确保目录存在 mkdir -p .volumes/db .volumes/ch_data .volumes/ch_logs -# 修改命令以挂载路径 {#modify-command-to-mount-paths} +# 修改命令以挂载路径 docker run \ -p 8080:8080 \ -p 4317:4317 \ @@ -76,7 +79,7 @@ docker run \ -v "$(pwd)/.volumes/db:/data/db" \ -v "$(pwd)/.volumes/ch_data:/var/lib/clickhouse" \ -v "$(pwd)/.volumes/ch_logs:/var/log/clickhouse-server" \ - docker.hyperdx.io/hyperdx/hyperdx-all-in-one + clickhouse/clickstack-all-in-one:latest ``` @@ -89,18 +92,18 @@ docker run \ ## 自定义端口 {#customizing-ports-deploy} -如果需要自定义 HyperDX Local 运行时使用的应用程序端口(8080)或 API 端口(8000),需要修改 `docker run` 命令来映射相应端口,并设置一些环境变量。 +如果需要自定义 HyperDX Local 运行时使用的应用端口(8080)或 API 端口(8000),则需要修改 `docker run` 命令以转发相应端口,并设置若干环境变量。 -自定义 OpenTelemetry 端口时,只需修改端口映射参数即可。例如,将 `-p 4318:4318` 替换为 `-p 4999:4318`,即可将 OpenTelemetry HTTP 端口修改为 
4999。 +自定义 OpenTelemetry 端口只需修改端口转发参数即可。例如,将 `-p 4318:4318` 替换为 `-p 4999:4318`,即可将 OpenTelemetry 的 HTTP 端口更改为 4999。 ```shell -docker run -p 8080:8080 -p 4317:4317 -p 4999:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -p 8080:8080 -p 4317:4317 -p 4999:4318 clickhouse/clickstack-all-in-one:latest ``` ## 使用 ClickHouse Cloud {#using-clickhouse-cloud} -此发行版可以配合 ClickHouse Cloud 使用。虽然本地 ClickHouse 实例仍然会被部署(但不会被使用),但可以通过设置环境变量 `CLICKHOUSE_ENDPOINT`、`CLICKHOUSE_USER` 和 `CLICKHOUSE_PASSWORD` 来将 OTel collector 配置为使用 ClickHouse Cloud 实例。 +此发行版可以与 ClickHouse Cloud 搭配使用。虽然本地 ClickHouse 实例仍会被部署(但不会被使用),但可以通过设置环境变量 `CLICKHOUSE_ENDPOINT`、`CLICKHOUSE_USER` 和 `CLICKHOUSE_PASSWORD`,将 OTel collector 配置为连接到 ClickHouse Cloud 实例。 例如: @@ -109,22 +112,22 @@ export CLICKHOUSE_ENDPOINT= export CLICKHOUSE_USER= export CLICKHOUSE_PASSWORD= -docker run -e CLICKHOUSE_ENDPOINT=${CLICKHOUSE_ENDPOINT} -e CLICKHOUSE_USER=default -e CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD} -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e CLICKHOUSE_ENDPOINT=${CLICKHOUSE_ENDPOINT} -e CLICKHOUSE_USER=default -e CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD} -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` -`CLICKHOUSE_ENDPOINT` 应为 ClickHouse Cloud 的 HTTPS 端点,并包含端口 `8443`,例如:`https://mxl4k3ul6a.us-east-2.aws.clickhouse.com:8443` +`CLICKHOUSE_ENDPOINT` 应设置为 ClickHouse Cloud 的 HTTPS 端点,并包含端口 `8443`,例如 `https://mxl4k3ul6a.us-east-2.aws.clickhouse.com:8443` -连接到 HyperDX UI 后,转到 [`Team Settings`](http://localhost:8080/team),为你的 ClickHouse Cloud 服务创建一个连接,然后再创建所需的数据源。有关示例流程,请参见[此处](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection)。 +连接到 HyperDX UI 后,进入 [`Team Settings`](http://localhost:8080/team),创建到 ClickHouse Cloud 服务的连接,随后添加所需的数据源。示例流程参见[此处](/use-cases/observability/clickstack/getting-started#create-a-cloud-connection)。 ## 配置 OpenTelemetry collector {#configuring-collector} -如有需要,可以修改 OTel 
collector 的配置;参见[「修改配置」](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration)。 +如有需要,你可以修改 OTel collector 的配置;详情请参阅[《Modifying configuration》](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration)。 例如: ```shell -docker run -e OTEL_AGENT_FEATURE_GATE_ARG='--feature-gates=clickhouse.json' -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 -p 4317:4317 -p 4318:4318 docker.hyperdx.io/hyperdx/hyperdx-all-in-one +docker run -e OTEL_AGENT_FEATURE_GATE_ARG='--feature-gates=clickhouse.json' -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 -p 4317:4317 -p 4318:4318 clickhouse/clickstack-all-in-one:latest ``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md index 0f22edf8522..d0b531f16e0 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/docker-compose.md @@ -33,7 +33,6 @@ import JSONSupport from '@site/i18n/zh/docusaurus-plugin-content-docs/current/us 这些端口支持与多种遥测源集成,使 OpenTelemetry collector 能够在生产环境中满足多样化的摄取需求。 - ### 适用场景 {#suitable-for} * 本地测试 @@ -117,7 +116,6 @@ HYPERDX_OPAMP_PORT=4320 HYPERDX_OTEL_EXPORTER_CLICKHOUSE_DATABASE=default ``` - ### 配置 OpenTelemetry collector {#configuring-collector} 如有需要,可以修改 OTel collector 的配置——请参阅[“修改配置”](/use-cases/observability/clickstack/ingesting-data/otel-collector#modifying-otel-collector-configuration)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md index 1f5f4707401..7231c03315b 
100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-cloud.md @@ -34,7 +34,6 @@ helm install my-clickstack clickstack/clickstack \ --set otel.opampServerUrl="http://my-clickstack-clickstack-app.default.svc.cluster.local:4320" ``` - ### 其他 GKE 注意事项 {#other-gke-considerations} ```yaml @@ -53,7 +52,6 @@ clickhouse: - "10.0.0.0/8" # 其他配置的后备选项 ``` - ## Amazon EKS {#amazon-eks} 在 EKS 上进行部署时,可以考虑以下常见配置: @@ -79,7 +77,6 @@ hyperdx: enabled: true ``` - ## Azure AKS {#azure-aks} 适用于 AKS 部署: @@ -97,7 +94,6 @@ clickhouse: - "10.0.0.0/8" ``` - ## 生产环境云部署检查清单 {#production-cloud-deployment-checklist} 在任何云服务商上将 ClickStack 部署到生产环境之前: @@ -127,7 +123,6 @@ hyperdx: memory: 4Gi ``` - ### 高可用性 {#high-availability} ```yaml @@ -148,7 +143,6 @@ hyperdx: topologyKey: kubernetes.io/hostname ``` - ### 持久化存储 {#persistent-storage} 确保持久卷已配置好用于数据保留: @@ -167,7 +161,6 @@ clickhouse: * **EKS**: `gp3` 或 `io2` * **AKS**: `managed-premium` 或 `managed-csi` - ### 浏览器兼容性注意事项 {#browser-compatibility-notes} 对于仅使用 HTTP 的部署(开发/测试环境),某些浏览器可能会因为安全上下文要求而在使用加密 API 时出现错误。对于生产环境部署,请务必通过入口配置使用启用正确 TLS 证书的 HTTPS。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md index 2e0eb9cf19f..41519997862 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-configuration.md @@ -34,14 +34,12 @@ hyperdx: helm upgrade my-clickstack clickstack/clickstack -f values.yaml ``` - ### 方法 2:通过带有 --set 参数的 helm upgrade 命令进行更新 {#api-key-set-flag} ```shell helm upgrade 
my-clickstack clickstack/clickstack --set hyperdx.apiKey="your-api-key-here" ``` - ### 重启 Pod(容器组)以应用更改 {#restart-pods} 更新 API 密钥后,重启 Pod(容器组),使其加载新配置: @@ -54,7 +52,6 @@ kubectl rollout restart deployment my-clickstack-clickstack-app my-clickstack-cl 该 chart 会使用你的 API key 自动创建一个名为 `-app-secrets` 的 Kubernetes Secret。除非你打算使用外部 Secret,否则无需进行额外的 Secret 配置。 ::: - ## Secret 管理 {#secret-management} 对于 API 密钥或数据库凭据等敏感数据,请使用 Kubernetes Secret 资源进行管理。 @@ -83,7 +80,6 @@ data: kubectl apply -f secrets.yaml ``` - ### 创建自定义 Secret {#creating-a-custom-secret} 手动创建一个自定义的 Kubernetes Secret: @@ -93,7 +89,6 @@ kubectl create secret generic hyperdx-secret \ --from-literal=API_KEY=我的密钥 ``` - ### 在 values.yaml 中引用 Secret {#referencing-a-secret} ```yaml @@ -105,7 +100,6 @@ hyperdx: key: API_KEY ``` - ## 入口配置 {#ingress-setup} 要通过域名对外暴露 HyperDX 的 UI 和 API,请在 `values.yaml` 中启用入口配置。 @@ -124,7 +118,6 @@ hyperdx: `hyperdx.frontendUrl` 应当与入口(Ingress)的主机名匹配,并且包含协议(例如:`https://hyperdx.yourdomain.com`)。这样可以确保所有生成的链接、cookie 和重定向都能正常工作。 ::: - ### 启用 TLS(HTTPS) {#enabling-tls} 要通过 HTTPS 保护你的部署: @@ -149,7 +142,6 @@ hyperdx: tlsSecretName: "hyperdx-tls" ``` - ### 入口配置示例 {#example-ingress-configuration} 供参考,下面是生成的入口资源: @@ -181,7 +173,6 @@ spec: secretName: hyperdx-tls ``` - ### 常见入口问题 {#common-ingress-pitfalls} **路径与重写配置:** @@ -207,7 +198,6 @@ spec: kubectl -n ingress-nginx get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath="{.items[0].spec.containers[0].image}" ``` - ## OTel collector 入口 {#otel-collector-ingress} 如果需要通过入口将 OTel collector 的端点(用于 traces、metrics、logs)暴露出去,请使用 `additionalIngresses` 配置。这在需要从集群外部发送遥测数据,或为 collector 使用自定义域名时非常有用。 @@ -244,7 +234,6 @@ hyperdx: 如果不需要将 OTel collector 暴露到集群外部,可以跳过此配置。对于大多数用户,通用的入口配置已经足够。 ::: - ## 入口故障排查 {#troubleshooting-ingress} **检查入口资源:** @@ -282,7 +271,6 @@ curl -I https://hyperdx.yourdomain.com/_next/static/chunks/main-xxxx.js * 变更之后,清理浏览器缓存以及任何 CDN/代理缓存,以避免使用陈旧的资源 - ## 自定义配置 {#customizing-values} 可以使用 `--set` 标志来自定义配置: @@ -322,7 +310,6 @@ hyperdx: 
helm install my-clickstack clickstack/clickstack -f values.yaml ``` - ## 后续步骤 {#next-steps} - [部署选项](/docs/use-cases/observability/clickstack/deployment/helm-deployment-options) - 外部系统和最小部署方案 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md index 1aa6cc3522e..80d2da7a315 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm-deployment-options.md @@ -56,7 +56,6 @@ hyperdx: helm install my-clickstack clickstack/clickstack -f values-external-clickhouse.yaml ``` - ### 选项 2:外部 Secret(推荐用于生产环境) {#external-clickhouse-secret} 对于生产环境部署,如果希望将凭证与 Helm 配置分离: @@ -174,7 +173,6 @@ hyperdx: 有关连接 ClickHouse Cloud 的完整示例,请参阅[《创建 ClickHouse Cloud 连接》](/docs/use-cases/observability/clickstack/getting-started#create-a-cloud-connection)。 - ## 外部 OTel collector {#external-otel-collector} 如果你已经有现成的 OTel collector 基础设施: @@ -194,7 +192,6 @@ helm install my-clickstack clickstack/clickstack -f values-external-otel.yaml 有关如何通过入口暴露 OTel collector 端点的说明,请参阅 [入口配置](/docs/use-cases/observability/clickstack/deployment/helm-configuration#otel-collector-ingress)。 - ## 最小部署 {#minimal-deployment} 对于已有基础设施的组织,只需部署 HyperDX 即可: @@ -233,7 +230,6 @@ hyperdx: helm install my-clickstack clickstack/clickstack -f values-minimal.yaml ``` - ## 后续步骤 {#next-steps} - [配置指南](/docs/use-cases/observability/clickstack/deployment/helm-configuration) - API 密钥、机密信息和入口配置 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md index 0390d539821..6007ca337e5 
100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/helm/helm.md @@ -36,7 +36,6 @@ HyperDX 的 Helm 图表可以在 [此处](https://github.com/hyperdxio/helm-char * TLS 和入口配置 * Secrets 管理和认证设置 - ### 适用场景 {#suitable-for} * 概念验证(PoC) @@ -262,7 +261,6 @@ helm install my-clickstack clickstack/clickstack -f values.yaml 对于在生产环境中使用基于 Secret 的配置、外部 OTel collector,或精简部署方案的场景,请参阅 [Deployment Options 指南](/docs/use-cases/observability/clickstack/deployment/helm-deployment-options)。 ::: - ## 生产环境说明 默认情况下,该 chart 还会安装 ClickHouse 和 OTel collector。不过,在生产环境中,建议分别单独管理 ClickHouse 和 OTel collector。 @@ -283,7 +281,6 @@ helm install my-clickstack clickstack/clickstack \ * [云上部署](/docs/use-cases/observability/clickstack/deployment/helm-cloud) - 云平台特定设置与生产环境检查清单 ::: - ## 任务配置 {#task-configuration} 默认情况下,chart 中配置了一个以 CronJob 形式运行的任务,用于检查是否需要触发告警。以下是其配置选项: @@ -308,7 +305,6 @@ helm upgrade my-clickstack clickstack/clickstack -f values.yaml helm search repo clickstack ``` - ## 卸载 ClickStack 若要移除该部署: @@ -319,23 +315,20 @@ helm uninstall my-clickstack 这将删除与该发布关联的所有资源,但持久化数据(如有)可能会保留。 - ## 故障排查 {#troubleshooting} -### 查看日志 +### 查看日志 {#customizing-values} ```shell kubectl logs -l app.kubernetes.io/name=clickstack ``` - -### 排查安装失败 +### 排查安装失败 {#using-secrets} ```shell helm install my-clickstack clickstack/clickstack --debug --dry-run ``` - ### 验证部署 ```shell @@ -379,7 +372,6 @@ helm install my-clickstack clickstack/clickstack \ --set "otel.env[0].value=--feature-gates=clickhouse.json" ``` - ## 相关文档 {#related-documentation} ### 部署指南 {#deployment-guides} diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md index 003b9c6e6a3..d786ab3da44 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-clickhouse-cloud.md @@ -38,7 +38,6 @@ import JSONSupport from '@site/i18n/zh/docusaurus-plugin-content-docs/current/us 在此模式下,数据摄取完全由用户负责。您可以使用自托管的 OpenTelemetry collector、通过客户端库直接摄取、ClickHouse 原生表引擎(例如 Kafka 或 S3)、ETL 流水线,或 ClickPipes(ClickHouse Cloud 的托管摄取服务)将数据摄取到 ClickHouse Cloud 中。这种方式是运行 ClickStack 最简单且性能最佳的方案。 - ### 适用场景 {#suitable-for} 此部署模式在以下场景中尤为适用: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md index aa53e404053..62ab67c971e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/hyperdx-only.md @@ -23,7 +23,6 @@ HyperDX 可以独立于其余组件使用,并且兼容任意数据 schema— 在此模式下,数据摄取完全由用户自行负责。你可以使用自己部署的 OpenTelemetry collector、从客户端库直接摄取、ClickHouse 原生表引擎(例如 Kafka 或 S3)、ETL 管道,或诸如 ClickPipes 等托管摄取服务,将数据摄取到 ClickHouse 中。这种方式提供了最大的灵活性,适合已经在运行 ClickHouse、并希望在其之上引入 HyperDX 以实现可视化、搜索和告警的团队。 - ### 适用对象 {#suitable-for} - 现有 ClickHouse 用户 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md index ff8ad4cce79..046137c968e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/deployment/local-mode-only.md @@ -23,7 +23,6 @@ import JSONSupport from 
'@site/i18n/zh/docusaurus-plugin-content-docs/current/us **但是,此版本的 HyperDX 未启用用户认证功能** - ### 适用场景 {#suitable-for} * 演示 @@ -37,31 +36,31 @@ import JSONSupport from '@site/i18n/zh/docusaurus-plugin-content-docs/current/us ### 使用 Docker 部署 - 本地模式会在 8080 端口上运行 HyperDX UI。 + 本地模式会在端口 8080 上部署 HyperDX UI。 ```shell - docker run -p 8080:8080 docker.hyperdx.io/hyperdx/hyperdx-local + docker run -p 8080:8080 clickhouse/clickstack-local:latest ``` ### 访问 HyperDX UI - 访问 [http://localhost:8080](http://localhost:8080) 以打开 HyperDX UI。 + 访问 [http://localhost:8080](http://localhost:8080) 即可打开 HyperDX UI。 - **在此部署模式下未启用身份验证,因此不会提示你创建用户。** + **系统不会提示您创建用户,因为在此部署模式下未启用身份验证。** - 连接到你自己的外部 ClickHouse 集群,例如 ClickHouse Cloud。 + 将其连接到您自己的外部 ClickHouse 集群,例如 ClickHouse Cloud。 - 创建一个数据源,保留所有默认值,并将 `Table` 字段设置为 `otel_logs`。其他设置会自动检测完成,然后你就可以点击 `Save New Source`。 + 创建一个 Source,保留所有默认值,并将 `Table` 字段设置为 `otel_logs`。其他设置应会自动检测完成,此时您可以点击 `Save New Source`。 - + -对于仅用于本地模式的镜像,用户只需要设置 `BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true` 参数,例如: +对于仅本地模式镜像,用户只需要设置参数 `BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true`,例如: ```shell -docker run -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 docker.hyperdx.io/hyperdx/hyperdx-local +docker run -e BETA_CH_OTEL_JSON_SCHEMA_ENABLED=true -p 8080:8080 clickhouse/clickstack-local:latest ``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md index 0f7aac918df..c022c54c851 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/use-cases/observability/clickstack/example-datasets/kubernetes.md @@ -23,7 +23,6 @@ import dashboard_kubernetes from '@site/static/images/use-cases/observability/hy