From 0518ee7edd5a7df5d4ac8e00e4eb3998f84678d8 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Thu, 10 Dec 2020 15:12:14 -0800 Subject: [PATCH 1/2] changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. --- .gitignore | 1 + docs/spanner_admin_database_v1/types.rst | 1 + docs/spanner_admin_instance_v1/types.rst | 1 + docs/spanner_v1/types.rst | 1 + .../proto/spanner_database_admin.proto | 5 + .../database_admin/transports/grpc.py | 4 + .../database_admin/transports/grpc_asyncio.py | 4 + .../types/spanner_database_admin.py | 8 + .../instance_admin/transports/grpc.py | 4 + .../instance_admin/transports/grpc_asyncio.py | 4 + .../cloud/spanner_v1/proto/transaction.proto | 278 +++++++++++++++- google/cloud/spanner_v1/proto/type.proto | 2 +- .../services/spanner/transports/grpc.py | 4 + .../spanner/transports/grpc_asyncio.py | 4 + google/cloud/spanner_v1/types/transaction.py | 301 +++++++++++++++++- google/cloud/spanner_v1/types/type.py | 2 +- ...ixup_spanner_admin_database_v1_keywords.py | 1 + ...ixup_spanner_admin_instance_v1_keywords.py | 1 + scripts/fixup_spanner_v1_keywords.py | 1 + synth.metadata | 151 ++++++++- .../test_database_admin.py | 3 + .../test_instance_admin.py | 3 + tests/unit/gapic/spanner_v1/test_spanner.py | 3 + 23 files changed, 778 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index ac787a3b95..b9daa52f11 100644 --- a/.gitignore +++ b/.gitignore @@ -45,6 +45,7 @@ pip-log.txt # Built documentation docs/_build +bigquery/docs/generated docs.metadata # Virtual environment diff --git a/docs/spanner_admin_database_v1/types.rst b/docs/spanner_admin_database_v1/types.rst index da44c33458..fe6c27778b 100644 --- a/docs/spanner_admin_database_v1/types.rst +++ b/docs/spanner_admin_database_v1/types.rst @@ -3,3 +3,4 @@ Types for Google Cloud Spanner Admin Database v1 API .. automodule:: google.cloud.spanner_admin_database_v1.types :members: + :show-inheritance: diff --git a/docs/spanner_admin_instance_v1/types.rst b/docs/spanner_admin_instance_v1/types.rst index b496dfc681..250cf6bf9b 100644 --- a/docs/spanner_admin_instance_v1/types.rst +++ b/docs/spanner_admin_instance_v1/types.rst @@ -3,3 +3,4 @@ Types for Google Cloud Spanner Admin Instance v1 API .. automodule:: google.cloud.spanner_admin_instance_v1.types :members: + :show-inheritance: diff --git a/docs/spanner_v1/types.rst b/docs/spanner_v1/types.rst index 15b938d7f3..c7ff7e6c71 100644 --- a/docs/spanner_v1/types.rst +++ b/docs/spanner_v1/types.rst @@ -3,3 +3,4 @@ Types for Google Cloud Spanner v1 API .. automodule:: google.cloud.spanner_v1.types :members: + :show-inheritance: diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto index af440c1a36..db6192bc02 100644 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto +++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto @@ -514,6 +514,11 @@ message UpdateDatabaseDdlMetadata { // succeeded so far, where `commit_timestamps[i]` is the commit // timestamp for the statement `statements[i]`. repeated google.protobuf.Timestamp commit_timestamps = 3; + + // Output only. When true, indicates that the operation is throttled e.g + // due to resource constraints. When resources become available the operation + // will resume and this field will be false again. 
+ bool throttled = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; } // The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py index 0f8d56f05a..6e49fadc2d 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py @@ -113,6 +113,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. @@ -120,6 +122,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -156,6 +159,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py index 45f2e2d9e6..0c652f165e 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py @@ -158,6 +158,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. @@ -165,6 +167,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -201,6 +204,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py index b2b5939f5b..e99d200906 100644 --- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py +++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py @@ -287,6 +287,12 @@ class UpdateDatabaseDdlMetadata(proto.Message): Reports the commit timestamps of all statements that have succeeded so far, where ``commit_timestamps[i]`` is the commit timestamp for the statement ``statements[i]``. + throttled (bool): + Output only. When true, indicates that the + operation is throttled e.g due to resource + constraints. When resources become available the + operation will resume and this field will be + false again. 
""" database = proto.Field(proto.STRING, number=1) @@ -297,6 +303,8 @@ class UpdateDatabaseDdlMetadata(proto.Message): proto.MESSAGE, number=3, message=timestamp.Timestamp, ) + throttled = proto.Field(proto.BOOL, number=4) + class DropDatabaseRequest(proto.Message): r"""The request for diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py index a758bb6ad4..8315956a64 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py @@ -126,6 +126,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. @@ -133,6 +135,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -169,6 +172,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py index 91fb40d1e7..2ff6bbac7f 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py @@ -171,6 +171,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. @@ -178,6 +180,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -214,6 +217,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" diff --git a/google/cloud/spanner_v1/proto/transaction.proto b/google/cloud/spanner_v1/proto/transaction.proto index 5c6f494474..0bcbfcf900 100644 --- a/google/cloud/spanner_v1/proto/transaction.proto +++ b/google/cloud/spanner_v1/proto/transaction.proto @@ -28,9 +28,283 @@ option java_package = "com.google.spanner.v1"; option php_namespace = "Google\\Cloud\\Spanner\\V1"; option ruby_package = "Google::Cloud::Spanner::V1"; -// TransactionOptions are used to specify different types of transactions. +// # Transactions // -// For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction +// +// Each session can have at most one active transaction at a time. After the +// active transaction is completed, the session can immediately be +// re-used for the next transaction. It is not necessary to create a +// new session for each transaction. 
+// +// # Transaction Modes +// +// Cloud Spanner supports three transaction modes: +// +// 1. Locking read-write. This type of transaction is the only way +// to write data into Cloud Spanner. These transactions rely on +// pessimistic locking and, if necessary, two-phase commit. +// Locking read-write transactions may abort, requiring the +// application to retry. +// +// 2. Snapshot read-only. This transaction type provides guaranteed +// consistency across several reads, but does not allow +// writes. Snapshot read-only transactions can be configured to +// read at timestamps in the past. Snapshot read-only +// transactions do not need to be committed. +// +// 3. Partitioned DML. This type of transaction is used to execute +// a single Partitioned DML statement. Partitioned DML partitions +// the key space and runs the DML statement over each partition +// in parallel using separate, internal transactions that commit +// independently. Partitioned DML transactions do not need to be +// committed. +// +// For transactions that only read, snapshot read-only transactions +// provide simpler semantics and are almost always faster. In +// particular, read-only transactions do not take locks, so they do +// not conflict with read-write transactions. As a consequence of not +// taking locks, they also do not abort, so retry loops are not needed. +// +// Transactions may only read/write data in a single database. They +// may, however, read/write data in different tables within that +// database. +// +// ## Locking Read-Write Transactions +// +// Locking transactions may be used to atomically read-modify-write +// data anywhere in a database. This type of transaction is externally +// consistent. +// +// Clients should attempt to minimize the amount of time a transaction +// is active. Faster transactions commit with higher probability +// and cause less contention. Cloud Spanner attempts to keep read locks +// active as long as the transaction continues to do reads, and the +// transaction has not been terminated by +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of +// inactivity at the client may cause Cloud Spanner to release a +// transaction's locks and abort it. +// +// Conceptually, a read-write transaction consists of zero or more +// reads or SQL statements followed by +// [Commit][google.spanner.v1.Spanner.Commit]. At any time before +// [Commit][google.spanner.v1.Spanner.Commit], the client can send a +// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the +// transaction. +// +// ### Semantics +// +// Cloud Spanner can commit the transaction if all read locks it acquired +// are still valid at commit time, and it is able to acquire write +// locks for all writes. Cloud Spanner can abort the transaction for any +// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees +// that the transaction has not modified any user data in Cloud Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees about +// how long the transaction's locks were held for. It is an error to +// use Cloud Spanner locks for any sort of mutual exclusion other than +// between Cloud Spanner transactions themselves. +// +// ### Retrying Aborted Transactions +// +// When a transaction aborts, the application can choose to retry the +// whole transaction again. 
To maximize the chances of successfully +// committing the retry, the client should execute the retry in the +// same session as the original attempt. The original session's lock +// priority increases with each consecutive abort, meaning that each +// attempt has a slightly better chance of success than the previous. +// +// Under some circumstances (e.g., many transactions attempting to +// modify the same row(s)), a transaction can abort many times in a +// short period before successfully committing. Thus, it is not a good +// idea to cap the number of retries a transaction can attempt; +// instead, it is better to limit the total amount of wall time spent +// retrying. +// +// ### Idle Transactions +// +// A transaction is considered idle if it has no outstanding reads or +// SQL queries and has not started a read or SQL query within the last 10 +// seconds. Idle transactions can be aborted by Cloud Spanner so that they +// don't hold on to locks indefinitely. In that case, the commit will +// fail with error `ABORTED`. +// +// If this behavior is undesirable, periodically executing a simple +// SQL query in the transaction (e.g., `SELECT 1`) prevents the +// transaction from becoming idle. +// +// ## Snapshot Read-Only Transactions +// +// Snapshot read-only transactions provides a simpler method than +// locking read-write transactions for doing several consistent +// reads. However, this type of transaction does not support writes. +// +// Snapshot transactions do not take locks. Instead, they work by +// choosing a Cloud Spanner timestamp, then executing all reads at that +// timestamp. Since they do not acquire locks, they do not block +// concurrent read-write transactions. +// +// Unlike locking read-write transactions, snapshot read-only +// transactions never abort. They can fail if the chosen read +// timestamp is garbage collected; however, the default garbage +// collection policy is generous enough that most applications do not +// need to worry about this in practice. +// +// Snapshot read-only transactions do not need to call +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not +// permitted to do so). +// +// To execute a snapshot transaction, the client specifies a timestamp +// bound, which tells Cloud Spanner how to choose a read timestamp. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically distributed, +// stale read-only transactions can execute more quickly than strong +// or read-write transaction, because they are able to execute far +// from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. +// +// ### Strong +// +// Strong reads are guaranteed to see the effects of all transactions +// that have committed before the start of the read. Furthermore, all +// rows yielded by a single read are consistent with each other -- if +// any part of the read observes a transaction, all parts of the read +// see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are +// concurrent writes. If consistency across reads is required, the +// reads should be executed within a transaction or at an exact read +// timestamp. +// +// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. 
+// +// ### Exact Staleness +// +// These timestamp bounds execute reads at a user-specified +// timestamp. Reads at a timestamp are guaranteed to see a consistent +// prefix of the global transaction history: they observe +// modifications done by all transactions with a commit timestamp <= +// the read timestamp, and observe none of the modifications done by +// transactions with a larger commit timestamp. They will block until +// all conflicting transactions that may be assigned commit timestamps +// <= the read timestamp have finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. +// +// These modes do not require a "negotiation phase" to pick a +// timestamp. As a result, they execute slightly faster than the +// equivalent boundedly stale concurrency modes. On the other hand, +// boundedly stale reads usually return fresher results. +// +// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and +// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. +// +// ### Bounded Staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, +// subject to a user-provided staleness bound. Cloud Spanner chooses the +// newest timestamp within the staleness bound that allows execution +// of the reads at the closest available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of +// the read observes a transaction, all parts of the read see the +// transaction. Boundedly stale reads are not repeatable: two stale +// reads, even if they use the same staleness bound, can execute at +// different timestamps and thus return inconsistent results. +// +// Boundedly stale reads execute in two phases: the first phase +// negotiates a timestamp among all replicas needed to serve the +// read. In the second phase, reads are executed at the negotiated +// timestamp. +// +// As a result of the two phase execution, bounded staleness reads are +// usually a little slower than comparable exact staleness +// reads. However, they are typically able to return fresher +// results, and are more likely to execute at the closest replica. +// +// Because the timestamp negotiation requires up-front knowledge of +// which rows will be read, it can only be used with single-use +// read-only transactions. +// +// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and +// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. +// +// ### Old Read Timestamps and Garbage Collection +// +// Cloud Spanner continuously garbage collects deleted and overwritten data +// in the background to reclaim storage space. This process is known +// as "version GC". By default, version GC reclaims versions after they +// are one hour old. Because of this, Cloud Spanner cannot perform reads +// at read timestamps more than one hour in the past. This +// restriction also applies to in-progress reads and/or SQL queries whose +// timestamp become too old while executing. Reads and SQL queries with +// too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
+// +// ## Partitioned DML Transactions +// +// Partitioned DML transactions are used to execute DML statements with a +// different execution strategy that provides different, and often better, +// scalability properties for large, table-wide operations than DML in a +// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, +// should prefer using ReadWrite transactions. +// +// Partitioned DML partitions the keyspace and runs the DML statement on each +// partition in separate, internal transactions. These transactions commit +// automatically when complete, and run independently from one another. +// +// To reduce lock contention, this execution strategy only acquires read locks +// on rows that match the WHERE clause of the statement. Additionally, the +// smaller per-partition transactions hold locks for less time. +// +// That said, Partitioned DML is not a drop-in replacement for standard DML used +// in ReadWrite transactions. +// +// - The DML statement must be fully-partitionable. Specifically, the statement +// must be expressible as the union of many statements which each access only +// a single row of the table. +// +// - The statement is not applied atomically to all rows of the table. Rather, +// the statement is applied atomically to partitions of the table, in +// independent transactions. Secondary index rows are updated atomically +// with the base table rows. +// +// - Partitioned DML does not guarantee exactly-once execution semantics +// against a partition. The statement will be applied at least once to each +// partition. It is strongly recommended that the DML statement should be +// idempotent to avoid unexpected results. For instance, it is potentially +// dangerous to run a statement such as +// `UPDATE table SET column = column + 1` as it could be run multiple times +// against some rows. +// +// - The partitions are committed automatically - there is no support for +// Commit or Rollback. If the call returns an error, or if the client issuing +// the ExecuteSql call dies, it is possible that some rows had the statement +// executed on them successfully. It is also possible that statement was +// never executed against other rows. +// +// - Partitioned DML transactions may only contain the execution of a single +// DML statement via ExecuteSql or ExecuteStreamingSql. +// +// - If any error is encountered during the execution of the partitioned DML +// operation (for instance, a UNIQUE INDEX violation, division by zero, or a +// value that cannot be stored due to schema constraints), then the +// operation is stopped at that point and an error is returned. It is +// possible that at this point, some partitions have been committed (or even +// committed multiple times), and other partitions have not been run at all. +// +// Given the above, Partitioned DML is good fit for large, database-wide, +// operations that are idempotent, such as deleting old rows from a very large +// table. message TransactionOptions { // Message type to initiate a read-write transaction. Currently this // transaction type has no options. 
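The transaction modes documented in the comment block above map onto this repository's Python client roughly as sketched below. This is a minimal, illustrative sketch only: the instance and database IDs, the `Accounts` table, its columns, and the account ID are placeholder assumptions, not part of this change.

    import datetime

    from google.cloud import spanner

    # Placeholder identifiers -- substitute a real instance and database.
    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")


    def debit_account(transaction):
        # Locking read-write work: reads and DML issued here hold locks until
        # commit; run_in_transaction re-runs this callback in the same session
        # if Cloud Spanner aborts the attempt, per "Retrying Aborted
        # Transactions" above.
        (balance,) = transaction.execute_sql(
            "SELECT Balance FROM Accounts WHERE AccountId = @id",
            params={"id": 1},
            param_types={"id": spanner.param_types.INT64},
        ).one()
        transaction.execute_update(
            "UPDATE Accounts SET Balance = @balance WHERE AccountId = @id",
            params={"balance": balance - 10, "id": 1},
            param_types={
                "balance": spanner.param_types.INT64,
                "id": spanner.param_types.INT64,
            },
        )


    database.run_in_transaction(debit_account)

    # Snapshot read-only with a timestamp bound: takes no locks, never aborts,
    # and needs no Commit or Rollback.
    with database.snapshot(exact_staleness=datetime.timedelta(seconds=15)) as snapshot:
        for account_id, balance in snapshot.execute_sql(
            "SELECT AccountId, Balance FROM Accounts"
        ):
            print(account_id, balance)

The read-write callback and the retry behaviour come from the client's `run_in_transaction` helper, while the snapshot block corresponds to the bounded/exact staleness modes described above.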
diff --git a/google/cloud/spanner_v1/proto/type.proto b/google/cloud/spanner_v1/proto/type.proto index 1b863c0fdf..1e5e5ff313 100644 --- a/google/cloud/spanner_v1/proto/type.proto +++ b/google/cloud/spanner_v1/proto/type.proto @@ -50,7 +50,7 @@ message StructType { // SQL queries, it is the column alias (e.g., `"Word"` in the // query `"SELECT 'hello' AS Word"`), or the column name (e.g., // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some - // columns might have an empty name (e.g., `"SELECT + // columns might have an empty name (e.g., !"SELECT // UPPER(ColName)"`). Note that a query result can contain // multiple fields with the same name. string name = 1; diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc.py b/google/cloud/spanner_v1/services/spanner/transports/grpc.py index 620a971775..49cabd3896 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc.py @@ -106,6 +106,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. @@ -113,6 +115,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -149,6 +152,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py index 79ab4a1f94..22b5b4c4f6 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py @@ -151,6 +151,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. @@ -158,6 +160,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -194,6 +197,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index 7b50f228e5..305990ddb6 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -29,9 +29,304 @@ class TransactionOptions(proto.Message): - r"""TransactionOptions are used to specify different types of transactions. - - For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction + r"""Transactions + ============ + + Each session can have at most one active transaction at a time. + After the active transaction is completed, the session can + immediately be re-used for the next transaction. 
It is not necessary + to create a new session for each transaction. + + Transaction Modes + ================= + + Cloud Spanner supports three transaction modes: + + 1. Locking read-write. This type of transaction is the only way to + write data into Cloud Spanner. These transactions rely on + pessimistic locking and, if necessary, two-phase commit. Locking + read-write transactions may abort, requiring the application to + retry. + + 2. Snapshot read-only. This transaction type provides guaranteed + consistency across several reads, but does not allow writes. + Snapshot read-only transactions can be configured to read at + timestamps in the past. Snapshot read-only transactions do not + need to be committed. + + 3. Partitioned DML. This type of transaction is used to execute a + single Partitioned DML statement. Partitioned DML partitions the + key space and runs the DML statement over each partition in + parallel using separate, internal transactions that commit + independently. Partitioned DML transactions do not need to be + committed. + + For transactions that only read, snapshot read-only transactions + provide simpler semantics and are almost always faster. In + particular, read-only transactions do not take locks, so they do not + conflict with read-write transactions. As a consequence of not + taking locks, they also do not abort, so retry loops are not needed. + + Transactions may only read/write data in a single database. They + may, however, read/write data in different tables within that + database. + + Locking Read-Write Transactions + ------------------------------- + + Locking transactions may be used to atomically read-modify-write + data anywhere in a database. This type of transaction is externally + consistent. + + Clients should attempt to minimize the amount of time a transaction + is active. Faster transactions commit with higher probability and + cause less contention. Cloud Spanner attempts to keep read locks + active as long as the transaction continues to do reads, and the + transaction has not been terminated by + [Commit][google.spanner.v1.Spanner.Commit] or + [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of + inactivity at the client may cause Cloud Spanner to release a + transaction's locks and abort it. + + Conceptually, a read-write transaction consists of zero or more + reads or SQL statements followed by + [Commit][google.spanner.v1.Spanner.Commit]. At any time before + [Commit][google.spanner.v1.Spanner.Commit], the client can send a + [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the + transaction. + + Semantics + ~~~~~~~~~ + + Cloud Spanner can commit the transaction if all read locks it + acquired are still valid at commit time, and it is able to acquire + write locks for all writes. Cloud Spanner can abort the transaction + for any reason. If a commit attempt returns ``ABORTED``, Cloud + Spanner guarantees that the transaction has not modified any user + data in Cloud Spanner. + + Unless the transaction commits, Cloud Spanner makes no guarantees + about how long the transaction's locks were held for. It is an error + to use Cloud Spanner locks for any sort of mutual exclusion other + than between Cloud Spanner transactions themselves. + + Retrying Aborted Transactions + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + When a transaction aborts, the application can choose to retry the + whole transaction again. 
To maximize the chances of successfully + committing the retry, the client should execute the retry in the + same session as the original attempt. The original session's lock + priority increases with each consecutive abort, meaning that each + attempt has a slightly better chance of success than the previous. + + Under some circumstances (e.g., many transactions attempting to + modify the same row(s)), a transaction can abort many times in a + short period before successfully committing. Thus, it is not a good + idea to cap the number of retries a transaction can attempt; + instead, it is better to limit the total amount of wall time spent + retrying. + + Idle Transactions + ~~~~~~~~~~~~~~~~~ + + A transaction is considered idle if it has no outstanding reads or + SQL queries and has not started a read or SQL query within the last + 10 seconds. Idle transactions can be aborted by Cloud Spanner so + that they don't hold on to locks indefinitely. In that case, the + commit will fail with error ``ABORTED``. + + If this behavior is undesirable, periodically executing a simple SQL + query in the transaction (e.g., ``SELECT 1``) prevents the + transaction from becoming idle. + + Snapshot Read-Only Transactions + ------------------------------- + + Snapshot read-only transactions provides a simpler method than + locking read-write transactions for doing several consistent reads. + However, this type of transaction does not support writes. + + Snapshot transactions do not take locks. Instead, they work by + choosing a Cloud Spanner timestamp, then executing all reads at that + timestamp. Since they do not acquire locks, they do not block + concurrent read-write transactions. + + Unlike locking read-write transactions, snapshot read-only + transactions never abort. They can fail if the chosen read timestamp + is garbage collected; however, the default garbage collection policy + is generous enough that most applications do not need to worry about + this in practice. + + Snapshot read-only transactions do not need to call + [Commit][google.spanner.v1.Spanner.Commit] or + [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not + permitted to do so). + + To execute a snapshot transaction, the client specifies a timestamp + bound, which tells Cloud Spanner how to choose a read timestamp. + + The types of timestamp bound are: + + - Strong (the default). + - Bounded staleness. + - Exact staleness. + + If the Cloud Spanner database to be read is geographically + distributed, stale read-only transactions can execute more quickly + than strong or read-write transaction, because they are able to + execute far from the leader replica. + + Each type of timestamp bound is discussed in detail below. + + Strong + ~~~~~~ + + Strong reads are guaranteed to see the effects of all transactions + that have committed before the start of the read. Furthermore, all + rows yielded by a single read are consistent with each other -- if + any part of the read observes a transaction, all parts of the read + see the transaction. + + Strong reads are not repeatable: two consecutive strong read-only + transactions might return inconsistent results if there are + concurrent writes. If consistency across reads is required, the + reads should be executed within a transaction or at an exact read + timestamp. + + See + [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. + + Exact Staleness + ~~~~~~~~~~~~~~~ + + These timestamp bounds execute reads at a user-specified timestamp. 
+ Reads at a timestamp are guaranteed to see a consistent prefix of + the global transaction history: they observe modifications done by + all transactions with a commit timestamp <= the read timestamp, and + observe none of the modifications done by transactions with a larger + commit timestamp. They will block until all conflicting transactions + that may be assigned commit timestamps <= the read timestamp have + finished. + + The timestamp can either be expressed as an absolute Cloud Spanner + commit timestamp or a staleness relative to the current time. + + These modes do not require a "negotiation phase" to pick a + timestamp. As a result, they execute slightly faster than the + equivalent boundedly stale concurrency modes. On the other hand, + boundedly stale reads usually return fresher results. + + See + [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] + and + [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. + + Bounded Staleness + ~~~~~~~~~~~~~~~~~ + + Bounded staleness modes allow Cloud Spanner to pick the read + timestamp, subject to a user-provided staleness bound. Cloud Spanner + chooses the newest timestamp within the staleness bound that allows + execution of the reads at the closest available replica without + blocking. + + All rows yielded are consistent with each other -- if any part of + the read observes a transaction, all parts of the read see the + transaction. Boundedly stale reads are not repeatable: two stale + reads, even if they use the same staleness bound, can execute at + different timestamps and thus return inconsistent results. + + Boundedly stale reads execute in two phases: the first phase + negotiates a timestamp among all replicas needed to serve the read. + In the second phase, reads are executed at the negotiated timestamp. + + As a result of the two phase execution, bounded staleness reads are + usually a little slower than comparable exact staleness reads. + However, they are typically able to return fresher results, and are + more likely to execute at the closest replica. + + Because the timestamp negotiation requires up-front knowledge of + which rows will be read, it can only be used with single-use + read-only transactions. + + See + [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] + and + [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. + + Old Read Timestamps and Garbage Collection + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Cloud Spanner continuously garbage collects deleted and overwritten + data in the background to reclaim storage space. This process is + known as "version GC". By default, version GC reclaims versions + after they are one hour old. Because of this, Cloud Spanner cannot + perform reads at read timestamps more than one hour in the past. + This restriction also applies to in-progress reads and/or SQL + queries whose timestamp become too old while executing. Reads and + SQL queries with too-old read timestamps fail with the error + ``FAILED_PRECONDITION``. + + Partitioned DML Transactions + ---------------------------- + + Partitioned DML transactions are used to execute DML statements with + a different execution strategy that provides different, and often + better, scalability properties for large, table-wide operations than + DML in a ReadWrite transaction. 
Smaller scoped statements, such as + an OLTP workload, should prefer using ReadWrite transactions. + + Partitioned DML partitions the keyspace and runs the DML statement + on each partition in separate, internal transactions. These + transactions commit automatically when complete, and run + independently from one another. + + To reduce lock contention, this execution strategy only acquires + read locks on rows that match the WHERE clause of the statement. + Additionally, the smaller per-partition transactions hold locks for + less time. + + That said, Partitioned DML is not a drop-in replacement for standard + DML used in ReadWrite transactions. + + - The DML statement must be fully-partitionable. Specifically, the + statement must be expressible as the union of many statements + which each access only a single row of the table. + + - The statement is not applied atomically to all rows of the table. + Rather, the statement is applied atomically to partitions of the + table, in independent transactions. Secondary index rows are + updated atomically with the base table rows. + + - Partitioned DML does not guarantee exactly-once execution + semantics against a partition. The statement will be applied at + least once to each partition. It is strongly recommended that the + DML statement should be idempotent to avoid unexpected results. + For instance, it is potentially dangerous to run a statement such + as ``UPDATE table SET column = column + 1`` as it could be run + multiple times against some rows. + + - The partitions are committed automatically - there is no support + for Commit or Rollback. If the call returns an error, or if the + client issuing the ExecuteSql call dies, it is possible that some + rows had the statement executed on them successfully. It is also + possible that statement was never executed against other rows. + + - Partitioned DML transactions may only contain the execution of a + single DML statement via ExecuteSql or ExecuteStreamingSql. + + - If any error is encountered during the execution of the + partitioned DML operation (for instance, a UNIQUE INDEX + violation, division by zero, or a value that cannot be stored due + to schema constraints), then the operation is stopped at that + point and an error is returned. It is possible that at this + point, some partitions have been committed (or even committed + multiple times), and other partitions have not been run at all. + + Given the above, Partitioned DML is good fit for large, + database-wide, operations that are idempotent, such as deleting old + rows from a very large table. Attributes: read_write (~.transaction.TransactionOptions.ReadWrite): diff --git a/google/cloud/spanner_v1/types/type.py b/google/cloud/spanner_v1/types/type.py index 19a0ffe5be..16f79ea8ef 100644 --- a/google/cloud/spanner_v1/types/type.py +++ b/google/cloud/spanner_v1/types/type.py @@ -97,7 +97,7 @@ class Field(proto.Message): the query ``"SELECT 'hello' AS Word"``), or the column name (e.g., ``"ColName"`` in the query ``"SELECT ColName FROM Table"``). Some columns might have an - empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a + empty name (e.g., !"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. type_ (~.gs_type.Type): The type of the field. 
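The Partitioned DML semantics in the docstring above (at-least-once execution per partition, automatic commit, no Rollback) favour statements that are safe to repeat. A minimal sketch with this repository's Python client follows; the instance, database, table, and column names are placeholder assumptions.

    from google.cloud import spanner

    # Placeholder identifiers -- substitute a real instance, database, and table.
    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    # The statement runs at least once against each partition, so it should be
    # idempotent: setting a column to a constant is safe to repeat, whereas
    # "SET MarketingBudget = MarketingBudget + 1" would not be.
    row_count = database.execute_partitioned_dml(
        "UPDATE Albums SET MarketingBudget = 100000 WHERE SingerId > 1"
    )
    print("At least {} row(s) updated.".format(row_count))

The returned count is a lower bound on the number of rows modified, since partitions commit independently of one another.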
diff --git a/scripts/fixup_spanner_admin_database_v1_keywords.py b/scripts/fixup_spanner_admin_database_v1_keywords.py index 9f1a9bb9f1..96334a9f32 100644 --- a/scripts/fixup_spanner_admin_database_v1_keywords.py +++ b/scripts/fixup_spanner_admin_database_v1_keywords.py @@ -1,3 +1,4 @@ +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2020 Google LLC diff --git a/scripts/fixup_spanner_admin_instance_v1_keywords.py b/scripts/fixup_spanner_admin_instance_v1_keywords.py index 0871592c96..eb5507ec97 100644 --- a/scripts/fixup_spanner_admin_instance_v1_keywords.py +++ b/scripts/fixup_spanner_admin_instance_v1_keywords.py @@ -1,3 +1,4 @@ +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2020 Google LLC diff --git a/scripts/fixup_spanner_v1_keywords.py b/scripts/fixup_spanner_v1_keywords.py index 7c83aaf33d..bb76ae0e8c 100644 --- a/scripts/fixup_spanner_v1_keywords.py +++ b/scripts/fixup_spanner_v1_keywords.py @@ -1,3 +1,4 @@ +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2020 Google LLC diff --git a/synth.metadata b/synth.metadata index bba4518649..e7a5def667 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,8 +3,16 @@ { "git": { "name": ".", - "remote": "git@github.com:larkee/python-spanner.git", - "sha": "1d3e65af688c31937b0110223679607c19c328e9" + "remote": "https://github.com/googleapis/python-spanner.git", + "sha": "af5a3c65fbf81a93c1b4d4a8a9f65f06e96df325" + } + }, + { + "git": { + "name": "googleapis", + "remote": "https://github.com/googleapis/googleapis.git", + "sha": "53eb2512a55caabcbad1898225080a2a3dfcb6aa", + "internalRef": "346818879" } }, { @@ -50,5 +58,144 @@ "generator": "bazel" } } + ], + "generatedFiles": [ + ".flake8", + ".github/CONTRIBUTING.md", + ".github/ISSUE_TEMPLATE/bug_report.md", + ".github/ISSUE_TEMPLATE/feature_request.md", + ".github/ISSUE_TEMPLATE/support_request.md", + ".github/PULL_REQUEST_TEMPLATE.md", + ".github/release-please.yml", + ".github/snippet-bot.yml", + ".gitignore", + ".kokoro/build.sh", + ".kokoro/continuous/common.cfg", + ".kokoro/continuous/continuous.cfg", + ".kokoro/docker/docs/Dockerfile", + ".kokoro/docker/docs/fetch_gpg_keys.sh", + ".kokoro/docs/common.cfg", + ".kokoro/docs/docs-presubmit.cfg", + ".kokoro/docs/docs.cfg", + ".kokoro/populate-secrets.sh", + ".kokoro/presubmit/common.cfg", + ".kokoro/presubmit/presubmit.cfg", + ".kokoro/publish-docs.sh", + ".kokoro/release.sh", + ".kokoro/release/common.cfg", + ".kokoro/release/release.cfg", + ".kokoro/samples/lint/common.cfg", + ".kokoro/samples/lint/continuous.cfg", + ".kokoro/samples/lint/periodic.cfg", + ".kokoro/samples/lint/presubmit.cfg", + ".kokoro/samples/python3.6/common.cfg", + ".kokoro/samples/python3.6/continuous.cfg", + ".kokoro/samples/python3.6/periodic.cfg", + ".kokoro/samples/python3.6/presubmit.cfg", + ".kokoro/samples/python3.7/common.cfg", + ".kokoro/samples/python3.7/continuous.cfg", + ".kokoro/samples/python3.7/periodic.cfg", + ".kokoro/samples/python3.7/presubmit.cfg", + ".kokoro/samples/python3.8/common.cfg", + ".kokoro/samples/python3.8/continuous.cfg", + ".kokoro/samples/python3.8/periodic.cfg", + ".kokoro/samples/python3.8/presubmit.cfg", + ".kokoro/test-samples.sh", + ".kokoro/trampoline.sh", + ".kokoro/trampoline_v2.sh", + ".trampolinerc", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.rst", + "LICENSE", + "MANIFEST.in", + "docs/_static/custom.css", + "docs/_templates/layout.html", + "docs/conf.py", + "docs/multiprocessing.rst", + "docs/spanner_admin_database_v1/services.rst", + "docs/spanner_admin_database_v1/types.rst", + 
"docs/spanner_admin_instance_v1/services.rst", + "docs/spanner_admin_instance_v1/types.rst", + "docs/spanner_v1/services.rst", + "docs/spanner_v1/types.rst", + "google/cloud/spanner_admin_database_v1/__init__.py", + "google/cloud/spanner_admin_database_v1/proto/backup.proto", + "google/cloud/spanner_admin_database_v1/proto/common.proto", + "google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto", + "google/cloud/spanner_admin_database_v1/py.typed", + "google/cloud/spanner_admin_database_v1/services/__init__.py", + "google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py", + "google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py", + "google/cloud/spanner_admin_database_v1/services/database_admin/client.py", + "google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py", + "google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py", + "google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py", + "google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py", + "google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py", + "google/cloud/spanner_admin_database_v1/types/__init__.py", + "google/cloud/spanner_admin_database_v1/types/backup.py", + "google/cloud/spanner_admin_database_v1/types/common.py", + "google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py", + "google/cloud/spanner_admin_instance_v1/__init__.py", + "google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto", + "google/cloud/spanner_admin_instance_v1/py.typed", + "google/cloud/spanner_admin_instance_v1/services/__init__.py", + "google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py", + "google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py", + "google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py", + "google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py", + "google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/__init__.py", + "google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py", + "google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py", + "google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py", + "google/cloud/spanner_admin_instance_v1/types/__init__.py", + "google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py", + "google/cloud/spanner_v1/proto/keys.proto", + "google/cloud/spanner_v1/proto/mutation.proto", + "google/cloud/spanner_v1/proto/query_plan.proto", + "google/cloud/spanner_v1/proto/result_set.proto", + "google/cloud/spanner_v1/proto/spanner.proto", + "google/cloud/spanner_v1/proto/transaction.proto", + "google/cloud/spanner_v1/proto/type.proto", + "google/cloud/spanner_v1/py.typed", + "google/cloud/spanner_v1/services/__init__.py", + "google/cloud/spanner_v1/services/spanner/__init__.py", + "google/cloud/spanner_v1/services/spanner/async_client.py", + "google/cloud/spanner_v1/services/spanner/client.py", + "google/cloud/spanner_v1/services/spanner/pagers.py", + "google/cloud/spanner_v1/services/spanner/transports/__init__.py", + "google/cloud/spanner_v1/services/spanner/transports/base.py", + "google/cloud/spanner_v1/services/spanner/transports/grpc.py", + "google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py", + "google/cloud/spanner_v1/types/__init__.py", + 
"google/cloud/spanner_v1/types/keys.py", + "google/cloud/spanner_v1/types/mutation.py", + "google/cloud/spanner_v1/types/query_plan.py", + "google/cloud/spanner_v1/types/result_set.py", + "google/cloud/spanner_v1/types/spanner.py", + "google/cloud/spanner_v1/types/transaction.py", + "google/cloud/spanner_v1/types/type.py", + "renovate.json", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/samples/noxfile.py", + "scripts/decrypt-secrets.sh", + "scripts/fixup_spanner_admin_database_v1_keywords.py", + "scripts/fixup_spanner_admin_instance_v1_keywords.py", + "scripts/fixup_spanner_v1_keywords.py", + "scripts/readme-gen/readme_gen.py", + "scripts/readme-gen/templates/README.tmpl.rst", + "scripts/readme-gen/templates/auth.tmpl.rst", + "scripts/readme-gen/templates/auth_api_key.tmpl.rst", + "scripts/readme-gen/templates/install_deps.tmpl.rst", + "scripts/readme-gen/templates/install_portaudio.tmpl.rst", + "setup.cfg", + "testing/.gitignore", + "tests/unit/gapic/spanner_admin_database_v1/__init__.py", + "tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py", + "tests/unit/gapic/spanner_admin_instance_v1/__init__.py", + "tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py", + "tests/unit/gapic/spanner_v1/__init__.py", + "tests/unit/gapic/spanner_v1/test_spanner.py" ] } \ No newline at end of file diff --git a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py index ea79f63e86..753e8f330e 100644 --- a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py +++ b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py @@ -4725,6 +4725,7 @@ def test_database_admin_grpc_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None def test_database_admin_grpc_asyncio_transport_channel(): @@ -4736,6 +4737,7 @@ def test_database_admin_grpc_asyncio_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None @pytest.mark.parametrize( @@ -4784,6 +4786,7 @@ def test_database_admin_transport_channel_mtls_with_client_cert_source(transport quota_project_id=None, ) assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred @pytest.mark.parametrize( diff --git a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py index 0db8185b79..cca2e25400 100644 --- a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py +++ b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py @@ -3082,6 +3082,7 @@ def test_instance_admin_grpc_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None def test_instance_admin_grpc_asyncio_transport_channel(): @@ -3093,6 +3094,7 @@ def test_instance_admin_grpc_asyncio_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None @pytest.mark.parametrize( @@ -3141,6 +3143,7 @@ def test_instance_admin_transport_channel_mtls_with_client_cert_source(transport quota_project_id=None, ) assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == 
mock_ssl_cred @pytest.mark.parametrize( diff --git a/tests/unit/gapic/spanner_v1/test_spanner.py b/tests/unit/gapic/spanner_v1/test_spanner.py index d891f27d94..7767ae5141 100644 --- a/tests/unit/gapic/spanner_v1/test_spanner.py +++ b/tests/unit/gapic/spanner_v1/test_spanner.py @@ -3190,6 +3190,7 @@ def test_spanner_grpc_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None def test_spanner_grpc_asyncio_transport_channel(): @@ -3201,6 +3202,7 @@ def test_spanner_grpc_asyncio_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None @pytest.mark.parametrize( @@ -3246,6 +3248,7 @@ def test_spanner_transport_channel_mtls_with_client_cert_source(transport_class) quota_project_id=None, ) assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred @pytest.mark.parametrize( From 8c442e282d14fdfd8d8993386f959bae5ba8c9b8 Mon Sep 17 00:00:00 2001 From: larkee Date: Fri, 11 Dec 2020 13:16:10 +1100 Subject: [PATCH 2/2] chore: revert breaking docstrings and unneeded bigquery doc ignore --- .gitignore | 1 - .../cloud/spanner_v1/proto/transaction.proto | 278 +--------------- google/cloud/spanner_v1/proto/type.proto | 2 +- google/cloud/spanner_v1/types/transaction.py | 301 +----------------- google/cloud/spanner_v1/types/type.py | 2 +- 5 files changed, 7 insertions(+), 577 deletions(-) diff --git a/.gitignore b/.gitignore index b9daa52f11..ac787a3b95 100644 --- a/.gitignore +++ b/.gitignore @@ -45,7 +45,6 @@ pip-log.txt # Built documentation docs/_build -bigquery/docs/generated docs.metadata # Virtual environment diff --git a/google/cloud/spanner_v1/proto/transaction.proto b/google/cloud/spanner_v1/proto/transaction.proto index 0bcbfcf900..5c6f494474 100644 --- a/google/cloud/spanner_v1/proto/transaction.proto +++ b/google/cloud/spanner_v1/proto/transaction.proto @@ -28,283 +28,9 @@ option java_package = "com.google.spanner.v1"; option php_namespace = "Google\\Cloud\\Spanner\\V1"; option ruby_package = "Google::Cloud::Spanner::V1"; -// # Transactions +// TransactionOptions are used to specify different types of transactions. // -// -// Each session can have at most one active transaction at a time. After the -// active transaction is completed, the session can immediately be -// re-used for the next transaction. It is not necessary to create a -// new session for each transaction. -// -// # Transaction Modes -// -// Cloud Spanner supports three transaction modes: -// -// 1. Locking read-write. This type of transaction is the only way -// to write data into Cloud Spanner. These transactions rely on -// pessimistic locking and, if necessary, two-phase commit. -// Locking read-write transactions may abort, requiring the -// application to retry. -// -// 2. Snapshot read-only. This transaction type provides guaranteed -// consistency across several reads, but does not allow -// writes. Snapshot read-only transactions can be configured to -// read at timestamps in the past. Snapshot read-only -// transactions do not need to be committed. -// -// 3. Partitioned DML. This type of transaction is used to execute -// a single Partitioned DML statement. Partitioned DML partitions -// the key space and runs the DML statement over each partition -// in parallel using separate, internal transactions that commit -// independently. 
Partitioned DML transactions do not need to be -// committed. -// -// For transactions that only read, snapshot read-only transactions -// provide simpler semantics and are almost always faster. In -// particular, read-only transactions do not take locks, so they do -// not conflict with read-write transactions. As a consequence of not -// taking locks, they also do not abort, so retry loops are not needed. -// -// Transactions may only read/write data in a single database. They -// may, however, read/write data in different tables within that -// database. -// -// ## Locking Read-Write Transactions -// -// Locking transactions may be used to atomically read-modify-write -// data anywhere in a database. This type of transaction is externally -// consistent. -// -// Clients should attempt to minimize the amount of time a transaction -// is active. Faster transactions commit with higher probability -// and cause less contention. Cloud Spanner attempts to keep read locks -// active as long as the transaction continues to do reads, and the -// transaction has not been terminated by -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of -// inactivity at the client may cause Cloud Spanner to release a -// transaction's locks and abort it. -// -// Conceptually, a read-write transaction consists of zero or more -// reads or SQL statements followed by -// [Commit][google.spanner.v1.Spanner.Commit]. At any time before -// [Commit][google.spanner.v1.Spanner.Commit], the client can send a -// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the -// transaction. -// -// ### Semantics -// -// Cloud Spanner can commit the transaction if all read locks it acquired -// are still valid at commit time, and it is able to acquire write -// locks for all writes. Cloud Spanner can abort the transaction for any -// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees -// that the transaction has not modified any user data in Cloud Spanner. -// -// Unless the transaction commits, Cloud Spanner makes no guarantees about -// how long the transaction's locks were held for. It is an error to -// use Cloud Spanner locks for any sort of mutual exclusion other than -// between Cloud Spanner transactions themselves. -// -// ### Retrying Aborted Transactions -// -// When a transaction aborts, the application can choose to retry the -// whole transaction again. To maximize the chances of successfully -// committing the retry, the client should execute the retry in the -// same session as the original attempt. The original session's lock -// priority increases with each consecutive abort, meaning that each -// attempt has a slightly better chance of success than the previous. -// -// Under some circumstances (e.g., many transactions attempting to -// modify the same row(s)), a transaction can abort many times in a -// short period before successfully committing. Thus, it is not a good -// idea to cap the number of retries a transaction can attempt; -// instead, it is better to limit the total amount of wall time spent -// retrying. -// -// ### Idle Transactions -// -// A transaction is considered idle if it has no outstanding reads or -// SQL queries and has not started a read or SQL query within the last 10 -// seconds. Idle transactions can be aborted by Cloud Spanner so that they -// don't hold on to locks indefinitely. In that case, the commit will -// fail with error `ABORTED`. 
-// -// If this behavior is undesirable, periodically executing a simple -// SQL query in the transaction (e.g., `SELECT 1`) prevents the -// transaction from becoming idle. -// -// ## Snapshot Read-Only Transactions -// -// Snapshot read-only transactions provides a simpler method than -// locking read-write transactions for doing several consistent -// reads. However, this type of transaction does not support writes. -// -// Snapshot transactions do not take locks. Instead, they work by -// choosing a Cloud Spanner timestamp, then executing all reads at that -// timestamp. Since they do not acquire locks, they do not block -// concurrent read-write transactions. -// -// Unlike locking read-write transactions, snapshot read-only -// transactions never abort. They can fail if the chosen read -// timestamp is garbage collected; however, the default garbage -// collection policy is generous enough that most applications do not -// need to worry about this in practice. -// -// Snapshot read-only transactions do not need to call -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not -// permitted to do so). -// -// To execute a snapshot transaction, the client specifies a timestamp -// bound, which tells Cloud Spanner how to choose a read timestamp. -// -// The types of timestamp bound are: -// -// - Strong (the default). -// - Bounded staleness. -// - Exact staleness. -// -// If the Cloud Spanner database to be read is geographically distributed, -// stale read-only transactions can execute more quickly than strong -// or read-write transaction, because they are able to execute far -// from the leader replica. -// -// Each type of timestamp bound is discussed in detail below. -// -// ### Strong -// -// Strong reads are guaranteed to see the effects of all transactions -// that have committed before the start of the read. Furthermore, all -// rows yielded by a single read are consistent with each other -- if -// any part of the read observes a transaction, all parts of the read -// see the transaction. -// -// Strong reads are not repeatable: two consecutive strong read-only -// transactions might return inconsistent results if there are -// concurrent writes. If consistency across reads is required, the -// reads should be executed within a transaction or at an exact read -// timestamp. -// -// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. -// -// ### Exact Staleness -// -// These timestamp bounds execute reads at a user-specified -// timestamp. Reads at a timestamp are guaranteed to see a consistent -// prefix of the global transaction history: they observe -// modifications done by all transactions with a commit timestamp <= -// the read timestamp, and observe none of the modifications done by -// transactions with a larger commit timestamp. They will block until -// all conflicting transactions that may be assigned commit timestamps -// <= the read timestamp have finished. -// -// The timestamp can either be expressed as an absolute Cloud Spanner commit -// timestamp or a staleness relative to the current time. -// -// These modes do not require a "negotiation phase" to pick a -// timestamp. As a result, they execute slightly faster than the -// equivalent boundedly stale concurrency modes. On the other hand, -// boundedly stale reads usually return fresher results. 
-// -// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and -// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. -// -// ### Bounded Staleness -// -// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, -// subject to a user-provided staleness bound. Cloud Spanner chooses the -// newest timestamp within the staleness bound that allows execution -// of the reads at the closest available replica without blocking. -// -// All rows yielded are consistent with each other -- if any part of -// the read observes a transaction, all parts of the read see the -// transaction. Boundedly stale reads are not repeatable: two stale -// reads, even if they use the same staleness bound, can execute at -// different timestamps and thus return inconsistent results. -// -// Boundedly stale reads execute in two phases: the first phase -// negotiates a timestamp among all replicas needed to serve the -// read. In the second phase, reads are executed at the negotiated -// timestamp. -// -// As a result of the two phase execution, bounded staleness reads are -// usually a little slower than comparable exact staleness -// reads. However, they are typically able to return fresher -// results, and are more likely to execute at the closest replica. -// -// Because the timestamp negotiation requires up-front knowledge of -// which rows will be read, it can only be used with single-use -// read-only transactions. -// -// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and -// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. -// -// ### Old Read Timestamps and Garbage Collection -// -// Cloud Spanner continuously garbage collects deleted and overwritten data -// in the background to reclaim storage space. This process is known -// as "version GC". By default, version GC reclaims versions after they -// are one hour old. Because of this, Cloud Spanner cannot perform reads -// at read timestamps more than one hour in the past. This -// restriction also applies to in-progress reads and/or SQL queries whose -// timestamp become too old while executing. Reads and SQL queries with -// too-old read timestamps fail with the error `FAILED_PRECONDITION`. -// -// ## Partitioned DML Transactions -// -// Partitioned DML transactions are used to execute DML statements with a -// different execution strategy that provides different, and often better, -// scalability properties for large, table-wide operations than DML in a -// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, -// should prefer using ReadWrite transactions. -// -// Partitioned DML partitions the keyspace and runs the DML statement on each -// partition in separate, internal transactions. These transactions commit -// automatically when complete, and run independently from one another. -// -// To reduce lock contention, this execution strategy only acquires read locks -// on rows that match the WHERE clause of the statement. Additionally, the -// smaller per-partition transactions hold locks for less time. -// -// That said, Partitioned DML is not a drop-in replacement for standard DML used -// in ReadWrite transactions. -// -// - The DML statement must be fully-partitionable. 
Specifically, the statement -// must be expressible as the union of many statements which each access only -// a single row of the table. -// -// - The statement is not applied atomically to all rows of the table. Rather, -// the statement is applied atomically to partitions of the table, in -// independent transactions. Secondary index rows are updated atomically -// with the base table rows. -// -// - Partitioned DML does not guarantee exactly-once execution semantics -// against a partition. The statement will be applied at least once to each -// partition. It is strongly recommended that the DML statement should be -// idempotent to avoid unexpected results. For instance, it is potentially -// dangerous to run a statement such as -// `UPDATE table SET column = column + 1` as it could be run multiple times -// against some rows. -// -// - The partitions are committed automatically - there is no support for -// Commit or Rollback. If the call returns an error, or if the client issuing -// the ExecuteSql call dies, it is possible that some rows had the statement -// executed on them successfully. It is also possible that statement was -// never executed against other rows. -// -// - Partitioned DML transactions may only contain the execution of a single -// DML statement via ExecuteSql or ExecuteStreamingSql. -// -// - If any error is encountered during the execution of the partitioned DML -// operation (for instance, a UNIQUE INDEX violation, division by zero, or a -// value that cannot be stored due to schema constraints), then the -// operation is stopped at that point and an error is returned. It is -// possible that at this point, some partitions have been committed (or even -// committed multiple times), and other partitions have not been run at all. -// -// Given the above, Partitioned DML is good fit for large, database-wide, -// operations that are idempotent, such as deleting old rows from a very large -// table. +// For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction message TransactionOptions { // Message type to initiate a read-write transaction. Currently this // transaction type has no options. diff --git a/google/cloud/spanner_v1/proto/type.proto b/google/cloud/spanner_v1/proto/type.proto index 1e5e5ff313..1b863c0fdf 100644 --- a/google/cloud/spanner_v1/proto/type.proto +++ b/google/cloud/spanner_v1/proto/type.proto @@ -50,7 +50,7 @@ message StructType { // SQL queries, it is the column alias (e.g., `"Word"` in the // query `"SELECT 'hello' AS Word"`), or the column name (e.g., // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some - // columns might have an empty name (e.g., !"SELECT + // columns might have an empty name (e.g., `"SELECT // UPPER(ColName)"`). Note that a query result can contain // multiple fields with the same name. string name = 1; diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index 305990ddb6..7b50f228e5 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -29,304 +29,9 @@ class TransactionOptions(proto.Message): - r"""Transactions - ============ - - Each session can have at most one active transaction at a time. - After the active transaction is completed, the session can - immediately be re-used for the next transaction. It is not necessary - to create a new session for each transaction. - - Transaction Modes - ================= - - Cloud Spanner supports three transaction modes: - - 1. 
Locking read-write. This type of transaction is the only way to - write data into Cloud Spanner. These transactions rely on - pessimistic locking and, if necessary, two-phase commit. Locking - read-write transactions may abort, requiring the application to - retry. - - 2. Snapshot read-only. This transaction type provides guaranteed - consistency across several reads, but does not allow writes. - Snapshot read-only transactions can be configured to read at - timestamps in the past. Snapshot read-only transactions do not - need to be committed. - - 3. Partitioned DML. This type of transaction is used to execute a - single Partitioned DML statement. Partitioned DML partitions the - key space and runs the DML statement over each partition in - parallel using separate, internal transactions that commit - independently. Partitioned DML transactions do not need to be - committed. - - For transactions that only read, snapshot read-only transactions - provide simpler semantics and are almost always faster. In - particular, read-only transactions do not take locks, so they do not - conflict with read-write transactions. As a consequence of not - taking locks, they also do not abort, so retry loops are not needed. - - Transactions may only read/write data in a single database. They - may, however, read/write data in different tables within that - database. - - Locking Read-Write Transactions - ------------------------------- - - Locking transactions may be used to atomically read-modify-write - data anywhere in a database. This type of transaction is externally - consistent. - - Clients should attempt to minimize the amount of time a transaction - is active. Faster transactions commit with higher probability and - cause less contention. Cloud Spanner attempts to keep read locks - active as long as the transaction continues to do reads, and the - transaction has not been terminated by - [Commit][google.spanner.v1.Spanner.Commit] or - [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of - inactivity at the client may cause Cloud Spanner to release a - transaction's locks and abort it. - - Conceptually, a read-write transaction consists of zero or more - reads or SQL statements followed by - [Commit][google.spanner.v1.Spanner.Commit]. At any time before - [Commit][google.spanner.v1.Spanner.Commit], the client can send a - [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the - transaction. - - Semantics - ~~~~~~~~~ - - Cloud Spanner can commit the transaction if all read locks it - acquired are still valid at commit time, and it is able to acquire - write locks for all writes. Cloud Spanner can abort the transaction - for any reason. If a commit attempt returns ``ABORTED``, Cloud - Spanner guarantees that the transaction has not modified any user - data in Cloud Spanner. - - Unless the transaction commits, Cloud Spanner makes no guarantees - about how long the transaction's locks were held for. It is an error - to use Cloud Spanner locks for any sort of mutual exclusion other - than between Cloud Spanner transactions themselves. - - Retrying Aborted Transactions - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - When a transaction aborts, the application can choose to retry the - whole transaction again. To maximize the chances of successfully - committing the retry, the client should execute the retry in the - same session as the original attempt. 
The original session's lock - priority increases with each consecutive abort, meaning that each - attempt has a slightly better chance of success than the previous. - - Under some circumstances (e.g., many transactions attempting to - modify the same row(s)), a transaction can abort many times in a - short period before successfully committing. Thus, it is not a good - idea to cap the number of retries a transaction can attempt; - instead, it is better to limit the total amount of wall time spent - retrying. - - Idle Transactions - ~~~~~~~~~~~~~~~~~ - - A transaction is considered idle if it has no outstanding reads or - SQL queries and has not started a read or SQL query within the last - 10 seconds. Idle transactions can be aborted by Cloud Spanner so - that they don't hold on to locks indefinitely. In that case, the - commit will fail with error ``ABORTED``. - - If this behavior is undesirable, periodically executing a simple SQL - query in the transaction (e.g., ``SELECT 1``) prevents the - transaction from becoming idle. - - Snapshot Read-Only Transactions - ------------------------------- - - Snapshot read-only transactions provides a simpler method than - locking read-write transactions for doing several consistent reads. - However, this type of transaction does not support writes. - - Snapshot transactions do not take locks. Instead, they work by - choosing a Cloud Spanner timestamp, then executing all reads at that - timestamp. Since they do not acquire locks, they do not block - concurrent read-write transactions. - - Unlike locking read-write transactions, snapshot read-only - transactions never abort. They can fail if the chosen read timestamp - is garbage collected; however, the default garbage collection policy - is generous enough that most applications do not need to worry about - this in practice. - - Snapshot read-only transactions do not need to call - [Commit][google.spanner.v1.Spanner.Commit] or - [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not - permitted to do so). - - To execute a snapshot transaction, the client specifies a timestamp - bound, which tells Cloud Spanner how to choose a read timestamp. - - The types of timestamp bound are: - - - Strong (the default). - - Bounded staleness. - - Exact staleness. - - If the Cloud Spanner database to be read is geographically - distributed, stale read-only transactions can execute more quickly - than strong or read-write transaction, because they are able to - execute far from the leader replica. - - Each type of timestamp bound is discussed in detail below. - - Strong - ~~~~~~ - - Strong reads are guaranteed to see the effects of all transactions - that have committed before the start of the read. Furthermore, all - rows yielded by a single read are consistent with each other -- if - any part of the read observes a transaction, all parts of the read - see the transaction. - - Strong reads are not repeatable: two consecutive strong read-only - transactions might return inconsistent results if there are - concurrent writes. If consistency across reads is required, the - reads should be executed within a transaction or at an exact read - timestamp. - - See - [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. - - Exact Staleness - ~~~~~~~~~~~~~~~ - - These timestamp bounds execute reads at a user-specified timestamp. 
- Reads at a timestamp are guaranteed to see a consistent prefix of - the global transaction history: they observe modifications done by - all transactions with a commit timestamp <= the read timestamp, and - observe none of the modifications done by transactions with a larger - commit timestamp. They will block until all conflicting transactions - that may be assigned commit timestamps <= the read timestamp have - finished. - - The timestamp can either be expressed as an absolute Cloud Spanner - commit timestamp or a staleness relative to the current time. - - These modes do not require a "negotiation phase" to pick a - timestamp. As a result, they execute slightly faster than the - equivalent boundedly stale concurrency modes. On the other hand, - boundedly stale reads usually return fresher results. - - See - [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] - and - [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. - - Bounded Staleness - ~~~~~~~~~~~~~~~~~ - - Bounded staleness modes allow Cloud Spanner to pick the read - timestamp, subject to a user-provided staleness bound. Cloud Spanner - chooses the newest timestamp within the staleness bound that allows - execution of the reads at the closest available replica without - blocking. - - All rows yielded are consistent with each other -- if any part of - the read observes a transaction, all parts of the read see the - transaction. Boundedly stale reads are not repeatable: two stale - reads, even if they use the same staleness bound, can execute at - different timestamps and thus return inconsistent results. - - Boundedly stale reads execute in two phases: the first phase - negotiates a timestamp among all replicas needed to serve the read. - In the second phase, reads are executed at the negotiated timestamp. - - As a result of the two phase execution, bounded staleness reads are - usually a little slower than comparable exact staleness reads. - However, they are typically able to return fresher results, and are - more likely to execute at the closest replica. - - Because the timestamp negotiation requires up-front knowledge of - which rows will be read, it can only be used with single-use - read-only transactions. - - See - [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] - and - [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. - - Old Read Timestamps and Garbage Collection - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Cloud Spanner continuously garbage collects deleted and overwritten - data in the background to reclaim storage space. This process is - known as "version GC". By default, version GC reclaims versions - after they are one hour old. Because of this, Cloud Spanner cannot - perform reads at read timestamps more than one hour in the past. - This restriction also applies to in-progress reads and/or SQL - queries whose timestamp become too old while executing. Reads and - SQL queries with too-old read timestamps fail with the error - ``FAILED_PRECONDITION``. - - Partitioned DML Transactions - ---------------------------- - - Partitioned DML transactions are used to execute DML statements with - a different execution strategy that provides different, and often - better, scalability properties for large, table-wide operations than - DML in a ReadWrite transaction. 
Smaller scoped statements, such as - an OLTP workload, should prefer using ReadWrite transactions. - - Partitioned DML partitions the keyspace and runs the DML statement - on each partition in separate, internal transactions. These - transactions commit automatically when complete, and run - independently from one another. - - To reduce lock contention, this execution strategy only acquires - read locks on rows that match the WHERE clause of the statement. - Additionally, the smaller per-partition transactions hold locks for - less time. - - That said, Partitioned DML is not a drop-in replacement for standard - DML used in ReadWrite transactions. - - - The DML statement must be fully-partitionable. Specifically, the - statement must be expressible as the union of many statements - which each access only a single row of the table. - - - The statement is not applied atomically to all rows of the table. - Rather, the statement is applied atomically to partitions of the - table, in independent transactions. Secondary index rows are - updated atomically with the base table rows. - - - Partitioned DML does not guarantee exactly-once execution - semantics against a partition. The statement will be applied at - least once to each partition. It is strongly recommended that the - DML statement should be idempotent to avoid unexpected results. - For instance, it is potentially dangerous to run a statement such - as ``UPDATE table SET column = column + 1`` as it could be run - multiple times against some rows. - - - The partitions are committed automatically - there is no support - for Commit or Rollback. If the call returns an error, or if the - client issuing the ExecuteSql call dies, it is possible that some - rows had the statement executed on them successfully. It is also - possible that statement was never executed against other rows. - - - Partitioned DML transactions may only contain the execution of a - single DML statement via ExecuteSql or ExecuteStreamingSql. - - - If any error is encountered during the execution of the - partitioned DML operation (for instance, a UNIQUE INDEX - violation, division by zero, or a value that cannot be stored due - to schema constraints), then the operation is stopped at that - point and an error is returned. It is possible that at this - point, some partitions have been committed (or even committed - multiple times), and other partitions have not been run at all. - - Given the above, Partitioned DML is good fit for large, - database-wide, operations that are idempotent, such as deleting old - rows from a very large table. + r"""TransactionOptions are used to specify different types of transactions. + + For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction Attributes: read_write (~.transaction.TransactionOptions.ReadWrite): diff --git a/google/cloud/spanner_v1/types/type.py b/google/cloud/spanner_v1/types/type.py index 16f79ea8ef..19a0ffe5be 100644 --- a/google/cloud/spanner_v1/types/type.py +++ b/google/cloud/spanner_v1/types/type.py @@ -97,7 +97,7 @@ class Field(proto.Message): the query ``"SELECT 'hello' AS Word"``), or the column name (e.g., ``"ColName"`` in the query ``"SELECT ColName FROM Table"``). Some columns might have an - empty name (e.g., !"SELECT UPPER(ColName)"`). Note that a + empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. type_ (~.gs_type.Type): The type of the field.
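
Not part of the patch itself — a minimal illustrative sketch of how the `TransactionOptions` proto-plus message whose docstring is reverted above can be constructed from `google.cloud.spanner_v1.types`. The specific option values below (a strong read-only bound, an empty read-write mode) are arbitrary examples chosen for illustration, not values taken from this change.

    # Illustrative only: building TransactionOptions with the proto-plus
    # types shipped in google-cloud-spanner.
    from google.cloud.spanner_v1.types import TransactionOptions

    # A strong snapshot read-only transaction that also asks Cloud Spanner
    # to return the read timestamp it chose.
    read_only_opts = TransactionOptions(
        read_only=TransactionOptions.ReadOnly(
            strong=True,
            return_read_timestamp=True,
        )
    )

    # A locking read-write transaction; ReadWrite currently has no options.
    read_write_opts = TransactionOptions(read_write=TransactionOptions.ReadWrite())

These messages are normally assembled for you by the handwritten client layer (e.g. `Database.snapshot()` / `Database.run_in_transaction()`), so most applications never construct them directly; the sketch only shows the shape of the type the reverted docstring describes.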