// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";

package google.cloud.automl.v1beta1;

import "google/api/annotations.proto";
import "google/cloud/automl/v1beta1/temporal.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
option java_outer_classname = "ClassificationProto";
option java_package = "com.google.cloud.automl.v1beta1";
option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
// Contains annotation details specific to classification.
message ClassificationAnnotation {
  // Output only. A confidence estimate between 0.0 and 1.0. A higher value
  // means greater confidence that the annotation is positive. If a user
  // approves an annotation as negative or positive, the score value remains
  // unchanged. If a user creates an annotation, the score is 0 for negative or
  // 1 for positive.
  float score = 1;
}
// Contains annotation details specific to video classification.
message VideoClassificationAnnotation {
  // Output only. Expresses the type of video classification. Possible values:
  //
  // * `segment` - Classification done on a specified by user
  //   time segment of a video. AnnotationSpec is answered to be present
  //   in that time segment, if it is present in any part of it. The video
  //   ML model evaluations are done only for this type of classification.
  //
  // * `shot` - Shot-level classification.
  //   AutoML Video Intelligence determines the boundaries
  //   for each camera shot in the entire segment of the video that user
  //   specified in the request configuration. AutoML Video Intelligence
  //   then returns labels and their confidence scores for each detected
  //   shot, along with the start and end time of the shot.
  //   WARNING: Model evaluation is not done for this classification type,
  //   the quality of it depends on training data, but there are no
  //   metrics provided to describe that quality.
  //
  // * `1s_interval` - AutoML Video Intelligence returns labels and their
  //   confidence scores for each second of the entire segment of the video
  //   that user specified in the request configuration.
  //   WARNING: Model evaluation is not done for this classification type,
  //   the quality of it depends on training data, but there are no
  //   metrics provided to describe that quality.
  string type = 1;

  // Output only. The classification details of this annotation.
  ClassificationAnnotation classification_annotation = 2;

  // Output only. The time segment of the video to which the
  // annotation applies.
  TimeSegment time_segment = 3;
}
// Model evaluation metrics for classification problems.
// Note: For Video Classification this metrics only describe quality of the
// Video Classification predictions of "segment_classification" type.
message ClassificationEvaluationMetrics {
  // Metrics for a single confidence threshold.
  message ConfidenceMetricsEntry {
    // Output only. Metrics are computed with an assumption that the model
    // never returns predictions with score lower than this value.
    float confidence_threshold = 1;

    // Output only. Metrics are computed with an assumption that the model
    // always returns at most this many predictions (ordered by their score,
    // descendingly), but they all still need to meet the confidence_threshold.
    int32 position_threshold = 14;

    // Output only. Recall (True Positive Rate) for the given confidence
    // threshold.
    float recall = 2;

    // Output only. Precision for the given confidence threshold.
    float precision = 3;

    // Output only. False Positive Rate for the given confidence threshold.
    float false_positive_rate = 8;

    // Output only. The harmonic mean of recall and precision.
    float f1_score = 4;

    // Output only. The Recall (True Positive Rate) when only considering the
    // label that has the highest prediction score and not below the confidence
    // threshold for each example.
    float recall_at1 = 5;

    // Output only. The precision when only considering the label that has the
    // highest prediction score and not below the confidence threshold for each
    // example.
    float precision_at1 = 6;

    // Output only. The False Positive Rate when only considering the label that
    // has the highest prediction score and not below the confidence threshold
    // for each example.
    float false_positive_rate_at1 = 9;

    // Output only. The harmonic mean of [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] and [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
    float f1_score_at1 = 7;

    // Output only. The number of model created labels that match a ground truth
    // label.
    int64 true_positive_count = 10;

    // Output only. The number of model created labels that do not match a
    // ground truth label.
    int64 false_positive_count = 11;

    // Output only. The number of ground truth labels that are not matched
    // by a model created label.
    int64 false_negative_count = 12;

    // Output only. The number of labels that were not created by the model,
    // but if they would, they would not match a ground truth label.
    int64 true_negative_count = 13;
  }

  // Confusion matrix of the model running the classification.
  message ConfusionMatrix {
    // Output only. A row in the confusion matrix.
    message Row {
      // Output only. Value of the specific cell in the confusion matrix.
      // The number of values each row has (i.e. the length of the row) is equal
      // to the length of the annotation_spec_id field.
      repeated int32 example_count = 1;
    }

    // Output only. IDs of the annotation specs used in the confusion matrix.
    repeated string annotation_spec_id = 1;

    // Output only. Rows in the confusion matrix. The number of rows is equal to
    // the size of `annotation_spec_id`.
    // `row[i].value[j]` is the number of examples that have ground truth of the
    // `annotation_spec_id[i]` and are predicted as `annotation_spec_id[j]` by
    // the model being evaluated.
    repeated Row row = 2;
  }

  // Output only. The Area Under Precision-Recall Curve metric. Micro-averaged
  // for the overall evaluation.
  float au_prc = 1;

  // Output only. The Area Under Precision-Recall Curve metric based on priors.
  // Micro-averaged for the overall evaluation.
  // Deprecated.
  float base_au_prc = 2 [deprecated = true];

  // Output only. The Area Under Receiver Operating Characteristic curve metric.
  // Micro-averaged for the overall evaluation.
  float au_roc = 6;

  // Output only. The Log Loss metric.
  float log_loss = 7;

  // Output only. Metrics for each confidence_threshold in
  // 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and
  // position_threshold = INT32_MAX_VALUE.
  // Precision-recall curve is derived from them.
  // The above metrics may also be supplied for additional values of
  // position_threshold.
  repeated ConfidenceMetricsEntry confidence_metrics_entry = 3;

  // Output only. Confusion matrix of the evaluation.
  // Only set for MULTICLASS classification problems where number
  // of labels is no more than 10.
  // Only set for model level evaluation, not for evaluation per label.
  ConfusionMatrix confusion_matrix = 4;

  // Output only. The annotation spec ids used for this evaluation.
  repeated string annotation_spec_id = 5;
}
// Type of the classification problem.
enum ClassificationType {
  // Should not be used, an un-set enum has this value by default.
  CLASSIFICATION_TYPE_UNSPECIFIED = 0;

  // At most one label is allowed per example.
  MULTICLASS = 1;

  // Multiple labels are allowed for one example.
  MULTILABEL = 2;
}