1 // Copyright 2018 Google LLC.
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
18 package google.spanner.v1;
20 import "google/api/annotations.proto";
21 import "google/protobuf/empty.proto";
22 import "google/protobuf/struct.proto";
23 import "google/protobuf/timestamp.proto";
24 import "google/rpc/status.proto";
25 import "google/spanner/v1/keys.proto";
26 import "google/spanner/v1/mutation.proto";
27 import "google/spanner/v1/result_set.proto";
28 import "google/spanner/v1/transaction.proto";
29 import "google/spanner/v1/type.proto";
31 option csharp_namespace = "Google.Cloud.Spanner.V1";
32 option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner";
33 option java_multiple_files = true;
34 option java_outer_classname = "SpannerProto";
35 option java_package = "com.google.spanner.v1";
36 option php_namespace = "Google\\Cloud\\Spanner\\V1";
40 // The Cloud Spanner API can be used to manage sessions and execute
41 // transactions on data stored in Cloud Spanner databases.
// NOTE(review): the `service Spanner {` declaration (original line 42) and the
// closing `};` / `}` lines after each google.api.http binding were dropped by
// this extract (gaps in the retained original line numbering). Restore them
// from the upstream google/spanner/v1/spanner.proto before compiling; the
// file-level `syntax = "proto3";` line (original ~line 16) is missing too.
43 // Creates a new session. A session can be used to perform
44 // transactions that read and/or modify data in a Cloud Spanner database.
45 // Sessions are meant to be reused for many consecutive
48 // Sessions can only execute one transaction at a time. To execute
49 // multiple concurrent read-write/write-only transactions, create
50 // multiple sessions. Note that standalone reads and queries use a
51 // transaction internally, and count toward the one transaction
54 // Cloud Spanner limits the number of sessions that can exist at any given
55 // time; thus, it is a good idea to delete idle and/or unneeded sessions.
56 // Aside from explicit deletes, Cloud Spanner can delete sessions for which no
57 // operations are sent for more than an hour. If a session is deleted,
58 // requests to it return `NOT_FOUND`.
60 // Idle sessions can be kept alive by sending a trivial SQL query
61 // periodically, e.g., `"SELECT 1"`.
62 rpc CreateSession(CreateSessionRequest) returns (Session) {
63 option (google.api.http) = {
64 post: "/v1/{database=projects/*/instances/*/databases/*}/sessions"
69 // Gets a session. Returns `NOT_FOUND` if the session does not exist.
70 // This is mainly useful for determining whether a session is still
72 rpc GetSession(GetSessionRequest) returns (Session) {
73 option (google.api.http) = {
74 get: "/v1/{name=projects/*/instances/*/databases/*/sessions/*}"
78 // Lists all sessions in a given database.
79 rpc ListSessions(ListSessionsRequest) returns (ListSessionsResponse) {
80 option (google.api.http) = {
81 get: "/v1/{database=projects/*/instances/*/databases/*}/sessions"
85 // Ends a session, releasing server resources associated with it. This will
86 // asynchronously trigger cancellation of any operations that are running with
88 rpc DeleteSession(DeleteSessionRequest) returns (google.protobuf.Empty) {
89 option (google.api.http) = {
90 delete: "/v1/{name=projects/*/instances/*/databases/*/sessions/*}"
94 // Executes an SQL statement, returning all results in a single reply. This
95 // method cannot be used to return a result set larger than 10 MiB;
96 // if the query yields more data than that, the query fails with
97 // a `FAILED_PRECONDITION` error.
99 // Operations inside read-write transactions might return `ABORTED`. If
100 // this occurs, the application should restart the transaction from
101 // the beginning. See [Transaction][google.spanner.v1.Transaction] for more
104 // Larger result sets can be fetched in streaming fashion by calling
105 // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
107 rpc ExecuteSql(ExecuteSqlRequest) returns (ResultSet) {
108 option (google.api.http) = {
109 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql"
114 // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
115 // result set as a stream. Unlike
116 // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
117 // the size of the returned result set. However, no individual row in the
118 // result set can exceed 100 MiB, and no column value can exceed 10 MiB.
119 rpc ExecuteStreamingSql(ExecuteSqlRequest) returns (stream PartialResultSet) {
120 option (google.api.http) = {
121 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql"
126 // Executes a batch of SQL DML statements. This method allows many statements
127 // to be run with lower latency than submitting them sequentially with
128 // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
130 // Statements are executed in order, sequentially.
131 // [ExecuteBatchDmlResponse][google.spanner.v1.ExecuteBatchDmlResponse] will contain a
132 // [ResultSet][google.spanner.v1.ResultSet] for each DML statement that has successfully executed. If a
133 // statement fails, its error status will be returned as part of the
134 // [ExecuteBatchDmlResponse][google.spanner.v1.ExecuteBatchDmlResponse]. Execution will
135 // stop at the first failed statement; the remaining statements will not run.
137 // ExecuteBatchDml is expected to return an OK status with a response even if
138 // there was an error while processing one of the DML statements. Clients must
139 // inspect response.status to determine if there were any errors while
140 // processing the request.
142 // See more details in
143 // [ExecuteBatchDmlRequest][google.spanner.v1.ExecuteBatchDmlRequest] and
144 // [ExecuteBatchDmlResponse][google.spanner.v1.ExecuteBatchDmlResponse].
145 rpc ExecuteBatchDml(ExecuteBatchDmlRequest) returns (ExecuteBatchDmlResponse) {
146 option (google.api.http) = {
147 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml"
152 // Reads rows from the database using key lookups and scans, as a
153 // simple key/value style alternative to
154 // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
155 // used to return a result set larger than 10 MiB; if the read matches more
156 // data than that, the read fails with a `FAILED_PRECONDITION`
159 // Reads inside read-write transactions might return `ABORTED`. If
160 // this occurs, the application should restart the transaction from
161 // the beginning. See [Transaction][google.spanner.v1.Transaction] for more
164 // Larger result sets can be yielded in streaming fashion by calling
165 // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
166 rpc Read(ReadRequest) returns (ResultSet) {
167 option (google.api.http) = {
168 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read"
173 // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
174 // as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
175 // limit on the size of the returned result set. However, no individual row in
176 // the result set can exceed 100 MiB, and no column value can exceed
178 rpc StreamingRead(ReadRequest) returns (stream PartialResultSet) {
179 option (google.api.http) = {
180 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead"
185 // Begins a new transaction. This step can often be skipped:
186 // [Read][google.spanner.v1.Spanner.Read],
187 // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
188 // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
190 rpc BeginTransaction(BeginTransactionRequest) returns (Transaction) {
191 option (google.api.http) = {
192 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction"
197 // Commits a transaction. The request includes the mutations to be
198 // applied to rows in the database.
200 // `Commit` might return an `ABORTED` error. This can occur at any time;
201 // commonly, the cause is conflicts with concurrent
202 // transactions. However, it can also happen for a variety of other
203 // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
204 // the transaction from the beginning, re-using the same session.
205 rpc Commit(CommitRequest) returns (CommitResponse) {
206 option (google.api.http) = {
207 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit"
212 // Rolls back a transaction, releasing any locks it holds. It is a good
213 // idea to call this for any transaction that includes one or more
214 // [Read][google.spanner.v1.Spanner.Read] or
215 // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
216 // decides not to commit.
218 // `Rollback` returns `OK` if it successfully aborts the transaction, the
219 // transaction was already aborted, or the transaction is not
220 // found. `Rollback` never returns `ABORTED`.
221 rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) {
222 option (google.api.http) = {
223 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback"
228 // Creates a set of partition tokens that can be used to execute a query
229 // operation in parallel. Each of the returned partition tokens can be used
230 // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
231 // specify a subset of the query result to read. The same session and
232 // read-only transaction must be used by the PartitionQueryRequest used to
233 // create the partition tokens and the ExecuteSqlRequests that use the
236 // Partition tokens become invalid when the session used to create them
237 // is deleted, is idle for too long, begins a new transaction, or becomes too
238 // old. When any of these happen, it is not possible to resume the query, and
239 // the whole operation must be restarted from the beginning.
240 rpc PartitionQuery(PartitionQueryRequest) returns (PartitionResponse) {
241 option (google.api.http) = {
242 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery"
247 // Creates a set of partition tokens that can be used to execute a read
248 // operation in parallel. Each of the returned partition tokens can be used
249 // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
250 // subset of the read result to read. The same session and read-only
251 // transaction must be used by the PartitionReadRequest used to create the
252 // partition tokens and the ReadRequests that use the partition tokens. There
253 // are no ordering guarantees on rows returned among the returned partition
254 // tokens, or even within each individual StreamingRead call issued with a
257 // Partition tokens become invalid when the session used to create them
258 // is deleted, is idle for too long, begins a new transaction, or becomes too
259 // old. When any of these happen, it is not possible to resume the read, and
260 // the whole operation must be restarted from the beginning.
261 rpc PartitionRead(PartitionReadRequest) returns (PartitionResponse) {
262 option (google.api.http) = {
263 post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead"
269 // The request for [CreateSession][google.spanner.v1.Spanner.CreateSession].
270 message CreateSessionRequest {
271 // Required. The database in which the new session is created.
// NOTE(review): the field declaration for this comment (original line 272) is
// missing from this extract — restore from the upstream file.
274 // The session to create.
// NOTE(review): the field declaration and the message's closing brace
// (original lines 275-276) are also missing from this extract.
278 // A session in the Cloud Spanner API.
// NOTE(review): the `message Session {` declaration (original line 279), the
// `name` field declaration (original line 282), and the closing brace are
// missing from this extract — gaps are visible in the retained numbering.
280 // The name of the session. This is always system-assigned; values provided
281 // when creating a session are ignored.
284 // The labels for the session.
286 // * Label keys must be between 1 and 63 characters long and must conform to
287 // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
288 // * Label values must be between 0 and 63 characters long and must conform
289 // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
290 // * No more than 64 labels can be associated with a given session.
292 // See https://goo.gl/xmQnxf for more information on and examples of labels.
293 map<string, string> labels = 2;
295 // Output only. The timestamp when the session is created.
296 google.protobuf.Timestamp create_time = 3;
298 // Output only. The approximate timestamp when the session is last used. It is
299 // typically earlier than the actual last use time.
300 google.protobuf.Timestamp approximate_last_use_time = 4;
303 // The request for [GetSession][google.spanner.v1.Spanner.GetSession].
304 message GetSessionRequest {
305 // Required. The name of the session to retrieve.
// NOTE(review): the field declaration and closing brace (original lines
// 306-307) are missing from this extract — restore from the upstream file.
309 // The request for [ListSessions][google.spanner.v1.Spanner.ListSessions].
310 message ListSessionsRequest {
311 // Required. The database in which to list sessions.
// NOTE(review): several field declarations (original lines 312, 316) and the
// trailing filter field plus closing brace (original lines 334-335) are
// missing from this extract — restore from the upstream file.
314 // Number of sessions to be returned in the response. If 0 or less, defaults
315 // to the server's maximum allowed page size.
318 // If non-empty, `page_token` should contain a
319 // [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
321 // [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
322 string page_token = 3;
324 // An expression for filtering the results of the request. Filter rules are
325 // case insensitive. The fields eligible for filtering are:
327 // * `labels.key` where key is the name of a label
329 // Some examples of using filters are:
331 // * `labels.env:*` --> The session has the label "env".
332 // * `labels.env:dev` --> The session has the label "env" and the value of
333 // the label contains the string "dev".
337 // The response for [ListSessions][google.spanner.v1.Spanner.ListSessions].
338 message ListSessionsResponse {
339 // The list of requested sessions.
340 repeated Session sessions = 1;
342 // `next_page_token` can be sent in a subsequent
343 // [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
344 // of the matching sessions.
345 string next_page_token = 2;
// NOTE(review): the message's closing brace (original line 346) is missing
// from this extract.
348 // The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession].
349 message DeleteSessionRequest {
350 // Required. The name of the session to delete.
// NOTE(review): the field declaration and closing brace (original lines
// 351-352) are missing from this extract — restore from the upstream file.
354 // The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
355 // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
356 message ExecuteSqlRequest {
357 // Mode in which the statement must be processed.
// NOTE(review): the `enum QueryMode {` declaration and its value lines
// (original lines 358-369), several field declarations (e.g. original lines
// 372, 390), the trailing seqno field (original line 451), and the closing
// brace are missing from this extract — restore from the upstream file.
359 // The default mode. Only the statement results are returned.
362 // This mode returns only the query plan, without any results or
363 // execution statistics information.
366 // This mode returns both the query plan and the execution statistics along
371 // Required. The session in which the SQL query should be performed.
374 // The transaction to use. If none is provided, the default is a
375 // temporary read-only transaction with strong concurrency.
377 // The transaction to use.
379 // For queries, if none is provided, the default is a temporary read-only
380 // transaction with strong concurrency.
382 // Standard DML statements require a ReadWrite transaction. Single-use
383 // transactions are not supported (to avoid replay). The caller must
384 // either supply an existing transaction ID or begin a new transaction.
386 // Partitioned DML requires an existing PartitionedDml transaction ID.
387 TransactionSelector transaction = 2;
389 // Required. The SQL string.
392 // The SQL string can contain parameter placeholders. A parameter
393 // placeholder consists of `'@'` followed by the parameter
394 // name. Parameter names consist of any combination of letters,
395 // numbers, and underscores.
397 // Parameters can appear anywhere that a literal value is expected. The same
398 // parameter name can be used more than once, for example:
399 // `"WHERE id > @msg_id AND id < @msg_id + 100"`
401 // It is an error to execute an SQL statement with unbound parameters.
403 // Parameter values are specified using `params`, which is a JSON
404 // object whose keys are parameter names, and whose values are the
405 // corresponding parameter values.
406 google.protobuf.Struct params = 4;
408 // It is not always possible for Cloud Spanner to infer the right SQL type
409 // from a JSON value. For example, values of type `BYTES` and values
410 // of type `STRING` both appear in
411 // [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
413 // In these cases, `param_types` can be used to specify the exact
414 // SQL type for some or all of the SQL statement parameters. See the
415 // definition of [Type][google.spanner.v1.Type] for more information
417 map<string, Type> param_types = 5;
419 // If this request is resuming a previously interrupted SQL statement
420 // execution, `resume_token` should be copied from the last
421 // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
422 // interruption. Doing this enables the new SQL statement execution to resume
423 // where the last one left off. The rest of the request parameters must
424 // exactly match the request that yielded this token.
425 bytes resume_token = 6;
427 // Used to control the amount of debugging information returned in
428 // [ResultSetStats][google.spanner.v1.ResultSetStats]. If
429 // [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
430 // set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
432 // [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
433 QueryMode query_mode = 7;
435 // If present, results will be restricted to the specified partition
436 // previously created using PartitionQuery(). There must be an exact
437 // match for the values of fields common to this message and the
438 // PartitionQueryRequest message used to create this partition_token.
439 bytes partition_token = 8;
441 // A per-transaction sequence number used to identify this request. This
442 // makes each request idempotent such that if the request is received multiple
443 // times, at most one will succeed.
445 // The sequence number must be monotonically increasing within the
446 // transaction. If a request arrives for the first time with an out-of-order
447 // sequence number, the transaction may be aborted. Replays of previously
448 // handled requests will yield the same response as the first execution.
450 // Required for DML statements. Ignored for queries.
454 // The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]
455 message ExecuteBatchDmlRequest {
456 // A single DML statement.
// NOTE(review): the nested `message Statement {` declaration (original line
// 457), several field declarations (original lines 459-460, 489), the
// Statement closing brace, the trailing seqno field (original line 508), and
// the message's closing brace are missing from this extract.
458 // Required. The DML string.
461 // The DML string can contain parameter placeholders. A parameter
462 // placeholder consists of `'@'` followed by the parameter
463 // name. Parameter names consist of any combination of letters,
464 // numbers, and underscores.
466 // Parameters can appear anywhere that a literal value is expected. The
467 // same parameter name can be used more than once, for example:
468 // `"WHERE id > @msg_id AND id < @msg_id + 100"`
470 // It is an error to execute an SQL statement with unbound parameters.
472 // Parameter values are specified using `params`, which is a JSON
473 // object whose keys are parameter names, and whose values are the
474 // corresponding parameter values.
475 google.protobuf.Struct params = 2;
477 // It is not always possible for Cloud Spanner to infer the right SQL type
478 // from a JSON value. For example, values of type `BYTES` and values
479 // of type `STRING` both appear in [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as JSON strings.
481 // In these cases, `param_types` can be used to specify the exact
482 // SQL type for some or all of the SQL statement parameters. See the
483 // definition of [Type][google.spanner.v1.Type] for more information
485 map<string, Type> param_types = 3;
488 // Required. The session in which the DML statements should be performed.
491 // The transaction to use. A ReadWrite transaction is required. Single-use
492 // transactions are not supported (to avoid replay). The caller must either
493 // supply an existing transaction ID or begin a new transaction.
494 TransactionSelector transaction = 2;
496 // The list of statements to execute in this batch. Statements are executed
497 // serially, such that the effects of statement i are visible to statement
498 // i+1. Each statement must be a DML statement. Execution will stop at the
499 // first failed statement; the remaining statements will not run.
501 // REQUIRES: statements_size() > 0.
502 repeated Statement statements = 3;
504 // A per-transaction sequence number used to identify this request. This is
505 // used in the same space as the seqno in
506 // [ExecuteSqlRequest][google.spanner.v1.ExecuteSqlRequest]. See more details
507 // in [ExecuteSqlRequest][google.spanner.v1.ExecuteSqlRequest].
511 // The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list
512 // of [ResultSet][google.spanner.v1.ResultSet], one for each DML statement that has successfully executed.
513 // If a statement fails, the error is returned as part of the response payload.
514 // Clients can determine whether all DML statements have run successfully, or if
515 // a statement failed, using one of the following approaches:
517 // 1. Check if 'status' field is OkStatus.
518 // 2. Check if result_sets_size() equals the number of statements in
519 // [ExecuteBatchDmlRequest][google.spanner.v1.ExecuteBatchDmlRequest].
521 // Example 1: A request with 5 DML statements, all executed successfully.
522 // Result: A response with 5 ResultSets, one for each statement in the same
523 // order, and an OK status.
525 // Example 2: A request with 5 DML statements. The 3rd statement has a syntax
527 // Result: A response with 2 ResultSets, for the first 2 statements that
528 // run successfully, and a syntax error (INVALID_ARGUMENT) status. From
529 // result_set_size() client can determine that the 3rd statement has failed.
530 message ExecuteBatchDmlResponse {
531 // ResultSets, one for each statement in the request that ran successfully, in
532 // the same order as the statements in the request. Each [ResultSet][google.spanner.v1.ResultSet] will
533 // not contain any rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each [ResultSet][google.spanner.v1.ResultSet] will
534 // contain the number of rows modified by the statement.
536 // Only the first ResultSet in the response contains a valid
537 // [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
538 repeated ResultSet result_sets = 1;
540 // If all DML statements are executed successfully, status will be OK.
541 // Otherwise, the error status of the first failed statement.
542 google.rpc.Status status = 2;
// NOTE(review): the message's closing brace (original line 543) is missing
// from this extract.
545 // Options for a PartitionQueryRequest and
546 // PartitionReadRequest.
547 message PartitionOptions {
548 // **Note:** This hint is currently ignored by PartitionQuery and
549 // PartitionRead requests.
551 // The desired data size for each partition generated. The default for this
552 // option is currently 1 GiB. This is only a hint. The actual size of each
553 // partition may be smaller or larger than this size request.
554 int64 partition_size_bytes = 1;
556 // **Note:** This hint is currently ignored by PartitionQuery and
557 // PartitionRead requests.
559 // The desired maximum number of partitions to return. For example, this may
560 // be set to the number of workers available. The default for this option
561 // is currently 10,000. The maximum value is currently 200,000. This is only
562 // a hint. The actual number of partitions returned may be smaller or larger
563 // than this maximum count request.
564 int64 max_partitions = 2;
// NOTE(review): the message's closing brace (original line 565) is missing
// from this extract.
567 // The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
568 message PartitionQueryRequest {
569 // Required. The session used to create the partitions.
// NOTE(review): several lines are missing from this extract (original lines
// 570, 584, 587-588, and the closing brace at 618) — restore the session and
// sql field declarations and the closing brace from the upstream file.
572 // Read only snapshot transactions are supported, read/write and single use
573 // transactions are not.
574 TransactionSelector transaction = 2;
576 // The query request to generate partitions for. The request will fail if
577 // the query is not root partitionable. The query plan of a root
578 // partitionable query has a single distributed union operator. A distributed
579 // union operator conceptually divides one or more tables into multiple
580 // splits, remotely evaluates a subquery independently on each split, and
581 // then unions all results.
583 // This must not contain DML commands, such as INSERT, UPDATE, or
585 // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a
586 // PartitionedDml transaction for large, partition-friendly DML operations.
589 // The SQL query string can contain parameter placeholders. A parameter
590 // placeholder consists of `'@'` followed by the parameter
591 // name. Parameter names consist of any combination of letters,
592 // numbers, and underscores.
594 // Parameters can appear anywhere that a literal value is expected. The same
595 // parameter name can be used more than once, for example:
596 // `"WHERE id > @msg_id AND id < @msg_id + 100"`
598 // It is an error to execute an SQL query with unbound parameters.
600 // Parameter values are specified using `params`, which is a JSON
601 // object whose keys are parameter names, and whose values are the
602 // corresponding parameter values.
603 google.protobuf.Struct params = 4;
605 // It is not always possible for Cloud Spanner to infer the right SQL type
606 // from a JSON value. For example, values of type `BYTES` and values
607 // of type `STRING` both appear in
608 // [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
610 // In these cases, `param_types` can be used to specify the exact
611 // SQL type for some or all of the SQL query parameters. See the
612 // definition of [Type][google.spanner.v1.Type] for more information
614 map<string, Type> param_types = 5;
616 // Additional options that affect how many partitions are created.
617 PartitionOptions partition_options = 6;
620 // The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
621 message PartitionReadRequest {
622 // Required. The session used to create the partitions.
// NOTE(review): several field declarations (original lines 623, 630, 638-639,
// 654-655) and the closing brace (original line 658) are missing from this
// extract — restore from the upstream file.
625 // Read only snapshot transactions are supported, read/write and single use
626 // transactions are not.
627 TransactionSelector transaction = 2;
629 // Required. The name of the table in the database to be read.
632 // If non-empty, the name of an index on
633 // [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
634 // instead of the table primary key when interpreting
635 // [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
636 // result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
637 // for further information.
640 // The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
641 // returned for each row matching this request.
642 repeated string columns = 5;
644 // Required. `key_set` identifies the rows to be yielded. `key_set` names the
645 // primary keys of the rows in
646 // [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
647 // [index][google.spanner.v1.PartitionReadRequest.index] is present. If
648 // [index][google.spanner.v1.PartitionReadRequest.index] is present, then
649 // [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
650 // index keys in [index][google.spanner.v1.PartitionReadRequest.index].
652 // It is not an error for the `key_set` to name rows that do not
653 // exist in the database. Read yields nothing for nonexistent rows.
656 // Additional options that affect how many partitions are created.
657 PartitionOptions partition_options = 9;
660 // Information returned for each partition returned in a
661 // PartitionResponse.
// NOTE(review): the `message Partition {` declaration (original line 662) and
// the closing brace (original line 667) are missing from this extract.
663 // This token can be passed to Read, StreamingRead, ExecuteSql, or
664 // ExecuteStreamingSql requests to restrict the results to those identified by
665 // this partition token.
666 bytes partition_token = 1;
669 // The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
670 // or [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
671 message PartitionResponse {
672 // Partitions created by this request.
673 repeated Partition partitions = 1;
675 // Transaction created by this request.
676 Transaction transaction = 2;
// NOTE(review): the message's closing brace (original line 677) is missing
// from this extract.
679 // The request for [Read][google.spanner.v1.Spanner.Read] and
680 // [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
681 message ReadRequest {
682 // Required. The session in which the read should be performed.
// NOTE(review): several field declarations (original lines 683, 690, 697-699,
// 720-721, 725-726) and the closing brace are missing from this extract —
// restore from the upstream file.
685 // The transaction to use. If none is provided, the default is a
686 // temporary read-only transaction with strong concurrency.
687 TransactionSelector transaction = 2;
689 // Required. The name of the table in the database to be read.
692 // If non-empty, the name of an index on
693 // [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
694 // the table primary key when interpreting
695 // [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
696 // See [key_set][google.spanner.v1.ReadRequest.key_set] for further
700 // The columns of [table][google.spanner.v1.ReadRequest.table] to be returned
701 // for each row matching this request.
702 repeated string columns = 5;
704 // Required. `key_set` identifies the rows to be yielded. `key_set` names the
705 // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
706 // be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
707 // If [index][google.spanner.v1.ReadRequest.index] is present, then
708 // [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
709 // in [index][google.spanner.v1.ReadRequest.index].
711 // If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
712 // field is empty, rows are yielded in table primary key order (if
713 // [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
714 // (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
715 // [partition_token][google.spanner.v1.ReadRequest.partition_token] field is
716 // not empty, rows will be yielded in an unspecified order.
718 // It is not an error for the `key_set` to name rows that do not
719 // exist in the database. Read yields nothing for nonexistent rows.
722 // If greater than zero, only the first `limit` rows are yielded. If `limit`
723 // is zero, the default is no limit. A limit cannot be specified if
724 // `partition_token` is set.
727 // If this request is resuming a previously interrupted read,
728 // `resume_token` should be copied from the last
729 // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
730 // interruption. Doing this enables the new read to resume where the last read
731 // left off. The rest of the request parameters must exactly match the request
732 // that yielded this token.
733 bytes resume_token = 9;
735 // If present, results will be restricted to the specified partition
736 // previously created using PartitionRead(). There must be an exact
737 // match for the values of fields common to this message and the
738 // PartitionReadRequest message used to create this partition_token.
739 bytes partition_token = 10;
// The request for
743 // [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
744 message BeginTransactionRequest {
745 // Required. The session in which the transaction runs.
// NOTE(review): the session field declaration (original line 746) and the
// closing brace (original line 750) are missing from this extract.
748 // Required. Options for the new transaction.
749 TransactionOptions options = 2;
752 // The request for [Commit][google.spanner.v1.Spanner.Commit].
753 message CommitRequest {
754 // Required. The session in which the transaction to be committed is running.
// NOTE(review): the session field declaration (original line 755), the
// `oneof transaction {` opener (original line 758), the oneof's closing brace
// (original line 772), and the message's closing brace (original line 778)
// are missing from this extract — restore from the upstream file.
757 // Required. The transaction in which to commit.
759 // Commit a previously-started transaction.
760 bytes transaction_id = 2;
762 // Execute mutations in a temporary transaction. Note that unlike
763 // commit of a previously-started transaction, commit with a
764 // temporary transaction is non-idempotent. That is, if the
765 // `CommitRequest` is sent to Cloud Spanner more than once (for
766 // instance, due to retries in the application, or in the
767 // transport library), it is possible that the mutations are
768 // executed more than once. If this is undesirable, use
769 // [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
770 // [Commit][google.spanner.v1.Spanner.Commit] instead.
771 TransactionOptions single_use_transaction = 3;
774 // The mutations to be executed when this transaction commits. All
775 // mutations are applied atomically, in the order they appear in
// this list.
777 repeated Mutation mutations = 4;
780 // The response for [Commit][google.spanner.v1.Spanner.Commit].
781 message CommitResponse {
782 // The Cloud Spanner timestamp at which the transaction committed.
783 google.protobuf.Timestamp commit_timestamp = 1;
// NOTE(review): the message's closing brace (original line 784) is missing
// from this extract.
786 // The request for [Rollback][google.spanner.v1.Spanner.Rollback].
787 message RollbackRequest {
788 // Required. The session in which the transaction to roll back is running.
// NOTE(review): the session field declaration (original line 789) is missing
// from this extract; the message's closing brace is also not visible here.
791 // Required. The transaction to roll back.
792 bytes transaction_id = 2;