kratos/third_party/google/bigtable/v1/bigtable_service.proto

// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.v1;

import "google/api/annotations.proto";
import "google/bigtable/v1/bigtable_data.proto";
import "google/bigtable/v1/bigtable_service_messages.proto";
import "google/protobuf/empty.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
option java_generic_services = true;
option java_multiple_files = true;
option java_outer_classname = "BigtableServicesProto";
option java_package = "com.google.bigtable.v1";

// Service for reading from and writing to existing Bigtables.
service BigtableService {
  // Streams back the contents of all requested rows, optionally applying
  // the same Reader filter to each. Depending on their size, rows may be
  // broken up across multiple responses, but atomicity of each row will still
  // be preserved.
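  //
  // Illustration only (hypothetical resource names): the HTTP binding below
  // maps this call to
  //   POST /v1/projects/my-project/zones/my-zone/clusters/my-cluster/tables/my-table/rows:read
  // where the URL path populates table_name and the remaining request
  // fields travel as the JSON body.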
  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read"
      body: "*"
    };
  }

  // Returns a sample of row keys in the table. The returned row keys will
  // delimit contiguous sections of the table of approximately equal size,
  // which can be used to break up the data for distributed tasks like
  // mapreduces.
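  //
  // Illustration only (hypothetical resource names): the GET binding below
  // expands to
  //   GET /v1/projects/my-project/zones/my-zone/clusters/my-cluster/tables/my-table/rows:sampleKeys
  // and carries no request body; the sampled keys stream back in the
  // responses.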
  rpc SampleRowKeys(SampleRowKeysRequest)
      returns (stream SampleRowKeysResponse) {
    option (google.api.http) = {
      get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys"
    };
  }

  // Mutates a row atomically. Cells already present in the row are left
  // unchanged unless explicitly changed by 'mutation'.
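  //
  // Illustration only (hypothetical resource names): both table_name and
  // row_key are bound from the URL, e.g.
  //   POST /v1/projects/my-project/zones/my-zone/clusters/my-cluster/tables/my-table/rows/my-row:mutate
  // with the mutations themselves sent as the JSON body.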
  rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate"
      body: "*"
    };
  }

  // Mutates multiple rows in a batch. Each individual row is mutated
  // atomically as in MutateRow, but the entire batch is not executed
  // atomically.
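  //
  // Illustration only (hypothetical resource names): the batch is posted to
  // the table resource, e.g.
  //   POST /v1/projects/my-project/zones/my-zone/clusters/my-cluster/tables/my-table:mutateRows
  // with the per-row mutations carried in the JSON body.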
  rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows"
      body: "*"
    };
  }

  // Mutates a row atomically based on the output of a predicate Reader filter.
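  //
  // Illustration only (hypothetical resource names):
  //   POST /v1/projects/my-project/zones/my-zone/clusters/my-cluster/tables/my-table/rows/my-row:checkAndMutate
  // binds table_name and row_key from the URL; the predicate filter and
  // the conditional mutations travel in the JSON body.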
  rpc CheckAndMutateRow(CheckAndMutateRowRequest)
      returns (CheckAndMutateRowResponse) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate"
      body: "*"
    };
  }

  // Modifies a row atomically, reading the latest existing timestamp/value from
  // the specified columns and writing a new value at
  // max(existing timestamp, current server time) based on pre-defined
  // read/modify/write rules. Returns the new contents of all modified cells.
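  //
  // Illustration only (hypothetical resource names):
  //   POST /v1/projects/my-project/zones/my-zone/clusters/my-cluster/tables/my-table/rows/my-row:readModifyWrite
  // binds table_name and row_key from the URL; the read/modify/write rules
  // are sent as the JSON body, and the updated Row is returned.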
  rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite"
      body: "*"
    };
  }
}
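
For context, here is a minimal sketch of how the generated Go stubs for this service might be called, assuming the standard protoc-gen-go/gRPC output for the go_package declared above (google.golang.org/genproto/googleapis/bigtable/v1). The endpoint, dial options, and resource names are hypothetical placeholders, and per-RPC OAuth credentials are omitted for brevity.

// read_rows_example.go
//
// Sketch of streaming rows back via BigtableService.ReadRows. Stub names
// (NewBigtableServiceClient, ReadRowsRequest) follow standard gRPC codegen
// for this file; endpoint and resource names below are hypothetical.
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	btpb "google.golang.org/genproto/googleapis/bigtable/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	ctx := context.Background()

	// Dial the (assumed) Bigtable endpoint over TLS using system roots.
	conn, err := grpc.Dial("bigtable.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := btpb.NewBigtableServiceClient(conn)

	// Stream the contents of a hypothetical table; a single row may be
	// split across several responses, but each row is returned atomically.
	stream, err := client.ReadRows(ctx, &btpb.ReadRowsRequest{
		TableName: "projects/my-project/zones/my-zone/clusters/my-cluster/tables/my-table",
	})
	if err != nil {
		log.Fatalf("ReadRows: %v", err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("recv: %v", err)
		}
		fmt.Println(resp) // one chunk of row data per response
	}
}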