/
language_entities_gcs.py
91 lines (73 loc) · 3.51 KB
/
language_entities_gcs.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
#
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
# sample-metadata
# title: Analyzing Entities (GCS)
# description: Analyzing Entities in text file stored in Cloud Storage
# [START language_entities_gcs]
from google.cloud import language_v2
def sample_analyze_entities(
    gcs_content_uri: str = "gs://cloud-samples-data/language/entity.txt",
) -> None:
    """Analyze entities in a text file stored in Cloud Storage.

    Results (entity names, types, metadata, mentions, and the detected
    language) are printed to stdout.

    Args:
        gcs_content_uri: Google Cloud Storage URI where the file content is located.
            e.g. gs://[Your Bucket]/[Path to File]
    """
    client = language_v2.LanguageServiceClient()

    # Build the request document. Available types: PLAIN_TEXT, HTML.
    # language_code is optional — when omitted the API auto-detects it.
    # Supported languages: https://cloud.google.com/natural-language/docs/languages
    document = {
        "gcs_content_uri": gcs_content_uri,
        "type_": language_v2.Document.Type.PLAIN_TEXT,
        "language_code": "en",
    }

    # Encoding used for offset calculations: NONE, UTF8, UTF16, or UTF32.
    # See https://cloud.google.com/natural-language/docs/reference/rest/v2/EncodingType.
    response = client.analyze_entities(
        request={
            "document": document,
            "encoding_type": language_v2.EncodingType.UTF8,
        }
    )

    for entity in response.entities:
        print(f"Representative name for the entity: {entity.name}")

        # Entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al.
        # See https://cloud.google.com/natural-language/docs/reference/rest/v2/Entity#type.
        print(f"Entity type: {language_v2.Entity.Type(entity.type_).name}")

        # Some entity types carry additional metadata, e.g. ADDRESS entities
        # may have metadata for the address street_name, postal_code, et al.
        for key, value in entity.metadata.items():
            print(f"{key}: {value}")

        # Mentions of this entity in the input document.
        # The API currently supports proper noun mentions.
        for mention in entity.mentions:
            print(f"Mention text: {mention.text.content}")
            # Mention type, e.g. PROPER for proper noun.
            print(f"Mention type: {language_v2.EntityMention.Type(mention.type_).name}")
            # Probability score for this mention, in the (0, 1.0] range.
            print(f"Probability score: {mention.probability}")

    # The language of the text: same as specified in the request or, if not
    # specified, the automatically-detected language.
    print(f"Language of the text: {response.language_code}")
# [END language_entities_gcs]