This repository has been archived by the owner on Nov 14, 2023. It is now read-only.
/
display-evaluation.v1beta1.js
112 lines (105 loc) · 3.84 KB
/
display-evaluation.v1beta1.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
/**
* Copyright 2019, Google LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';

/**
 * Displays the overall evaluation metrics (precision, recall, and F1 score at
 * a 0.5 confidence threshold) for an AutoML Natural Language entity-extraction
 * model.
 *
 * @param {string} projectId GCP project ID, e.g. "my-gcloud-project".
 * @param {string} computeRegion Compute region name, e.g. "us-central1".
 * @param {string} modelId Model ID, e.g. "TEN5200971474357190656".
 * @param {string} filter Evaluation filter expression,
 *     e.g. "textExtractionModelMetadata:*".
 */
async function main(
  projectId = 'YOUR_PROJECT_ID',
  computeRegion = 'YOUR_REGION_NAME',
  modelId = 'YOUR_MODEL_ID',
  filter = 'YOUR_FILTER_EXPRESSION'
) {
  // [START automl_natural_language_entity_display_evaluation]
  const automl = require('@google-cloud/automl');
  const math = require('mathjs');
  const client = new automl.v1beta1.AutoMlClient();

  /**
   * Demonstrates using the AutoML client to display model evaluation.
   * TODO(developer): Uncomment the following lines before running the sample.
   */
  // const projectId = '[PROJECT_ID]' e.g., "my-gcloud-project";
  // const computeRegion = '[REGION_NAME]' e.g., "us-central1";
  // const modelId = '[MODEL_ID]' e.g., "TEN5200971474357190656";
  // const filter = '[FILTER_EXPRESSIONS]'
  // e.g., "textExtractionModelMetadata:*";

  // Get the full path of the model.
  const modelFullId = client.modelPath(projectId, computeRegion, modelId);

  try {
    // List all the model evaluations in the model by applying filter.
    // NOTE: awaiting here (instead of a floating .then chain) means the work
    // completes before main() resolves.
    const [evaluations] = await client.listModelEvaluations({
      parent: modelFullId,
      filter: filter,
    });

    // There is one evaluation per class in the model plus one for the overall
    // model; the overall evaluation is the entry with no annotationSpecId.
    let modelEvaluationId = '';
    for (const element of evaluations) {
      if (!element.annotationSpecId) {
        // The evaluation ID is the last segment of the resource name.
        modelEvaluationId = element.name.split('/').pop();
      }
    }
    console.log(`Model Evaluation ID: ${modelEvaluationId}`);

    // Resource name for the model evaluation.
    const modelEvaluationFullId = client.modelEvaluationPath(
      projectId,
      computeRegion,
      modelId,
      modelEvaluationId
    );

    // Get the overall model evaluation.
    const [modelEvaluation] = await client.getModelEvaluation({
      name: modelEvaluationFullId,
    });
    const extractMetrics = modelEvaluation.textExtractionEvaluationMetrics;
    const confidenceMetricsEntries = extractMetrics.confidenceMetricsEntries;

    // Showing model score based on threshold of 0.5
    for (const confidenceMetricsEntry of confidenceMetricsEntries) {
      if (confidenceMetricsEntry.confidenceThreshold === 0.5) {
        console.log(
          `Precision and recall are based ` + `on a score threshold of 0.5 `
        );
        console.log(
          `Model precision: ${math.round(
            confidenceMetricsEntry.precision * 100,
            2
          )} %`
        );
        console.log(
          `Model recall: ${math.round(
            confidenceMetricsEntry.recall * 100,
            2
          )} %`
        );
        console.log(
          `Model f1 score: ${math.round(
            confidenceMetricsEntry.f1Score * 100,
            2
          )} %`
        );
      }
    }
  } catch (err) {
    // Preserve the original sample's behavior: log API failures rather than
    // rejecting main()'s promise.
    console.error(err);
  }
  // [END automl_natural_language_entity_display_evaluation]
}
main(...process.argv.slice(2)).catch(console.error());