feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper library for this command group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

View File

@@ -0,0 +1,355 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable app-profiles API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import exceptions
def Describe(app_profile_ref):
  """Fetch a single app profile by resource reference.

  Args:
    app_profile_ref: A resource reference to the app profile to describe.

  Returns:
    App profile resource object.
  """
  messages = util.GetAdminMessages()
  get_request = messages.BigtableadminProjectsInstancesAppProfilesGetRequest(
      name=app_profile_ref.RelativeName()
  )
  return util.GetAdminClient().projects_instances_appProfiles.Get(get_request)
def List(instance_ref):
  """List all app profiles that belong to an instance.

  Args:
    instance_ref: A resource reference of the instance to list app profiles
      for.

  Returns:
    Generator of app profile resource objects.
  """
  messages = util.GetAdminMessages()
  list_request = messages.BigtableadminProjectsInstancesAppProfilesListRequest(
      parent=instance_ref.RelativeName()
  )
  # batch_size_attribute=None: the backend paginates purely via page tokens.
  return list_pager.YieldFromList(
      util.GetAdminClient().projects_instances_appProfiles,
      list_request,
      field='appProfiles',
      batch_size_attribute=None,
  )
def Delete(app_profile_ref, force=False):
  """Delete an app profile.

  Args:
    app_profile_ref: A resource reference to the app profile to delete.
    force: bool, Whether to ignore API warnings and delete forcibly.

  Returns:
    Empty response.
  """
  messages = util.GetAdminMessages()
  delete_request = (
      messages.BigtableadminProjectsInstancesAppProfilesDeleteRequest(
          name=app_profile_ref.RelativeName(), ignoreWarnings=force
      )
  )
  return util.GetAdminClient().projects_instances_appProfiles.Delete(
      delete_request
  )
def _AppProfileChecks(
    cluster=None,
    multi_cluster=False,
    restrict_to=None,
    transactional_writes=None,
    row_affinity=False,
    data_boost=False,
):
  """Validate that the given app profile flag combination is allowed.

  Args:
    cluster: string, The cluster id for the app profile to route to using
      single cluster routing.
    multi_cluster: bool, Whether this app profile should route to multiple
      clusters, instead of single cluster.
    restrict_to: list[string] The list of cluster ids for the app profile to
      route to using multi cluster routing.
    transactional_writes: bool, Whether this app profile has transactional
      writes enabled. This is only possible when using single cluster routing.
    row_affinity: bool, Whether to use row affinity sticky routing.
    data_boost: bool, If the app profile should use Data Boost Read-only
      Isolation.

  Raises:
    ConflictingArgumentsException:
      If both cluster and multi_cluster are present.
      If both multi_cluster and transactional_writes are present.
      If both cluster and row_affinity are present.
      If both cluster and restrict_to are present.
      If both multi_cluster and data_boost are present.
      If both transactional_writes and data_boost are present.
    OneOfArgumentsRequiredException: If neither cluster or multi_cluster are
      present.
  """
  # Routing mode must be exactly one of single-cluster / multi-cluster.
  if multi_cluster and cluster:
    raise exceptions.ConflictingArgumentsException('--route-to', '--route-any')
  if not (multi_cluster or cluster):
    raise exceptions.OneOfArgumentsRequiredException(
        ['--route-to', '--route-any'],
        'Either --route-to or --route-any must be specified.',
    )
  # Remaining checks are pairwise conflicts; order matches the historic
  # check order so the same error surfaces first.
  conflicts = (
      (multi_cluster and transactional_writes,
       '--route-any', '--transactional-writes'),
      (cluster and row_affinity, '--route-to', '--row-affinity'),
      (cluster and restrict_to, '--route-to', '--restrict-to'),
      (multi_cluster and data_boost, '--route-any', '--data-boost'),
      (transactional_writes and data_boost,
       '--transactional-writes', '--data-boost'),
  )
  for is_conflict, first_flag, second_flag in conflicts:
    if is_conflict:
      raise exceptions.ConflictingArgumentsException(first_flag, second_flag)
def Create(
    app_profile_ref,
    cluster=None,
    description='',
    multi_cluster=False,
    restrict_to=None,
    transactional_writes=None,
    row_affinity=False,
    priority=None,
    data_boost=False,
    data_boost_compute_billing_owner=None,
    force=False,
):
  """Create an app profile.

  Args:
    app_profile_ref: A resource reference of the new app profile.
    cluster: string, The cluster id for the new app profile to route to using
      single cluster routing.
    description: string, A description of the new app profile.
    multi_cluster: bool, Whether this app profile should route to multiple
      clusters, instead of single cluster.
    restrict_to: list[string] The list of cluster ids for the new app profile
      to route to using multi cluster routing.
    transactional_writes: bool, Whether this app profile has transactional
      writes enabled. This is only possible when using single cluster routing.
    row_affinity: bool, Whether to use row affinity sticky routing.
    priority: string, The request priority of the new app profile.
    data_boost: bool, If the app profile should use Data Boost Read-only
      Isolation.
    data_boost_compute_billing_owner: string, The compute billing owner for
      Data Boost.
    force: bool, Whether to ignore API warnings and create forcibly.

  Raises:
    ConflictingArgumentsException,
    OneOfArgumentsRequiredException:
      See _AppProfileChecks(...)

  Returns:
    Created app profile resource object.
  """
  _AppProfileChecks(
      cluster=cluster,
      multi_cluster=multi_cluster,
      restrict_to=restrict_to,
      transactional_writes=transactional_writes,
      row_affinity=row_affinity,
      data_boost=data_boost,
  )
  msgs = util.GetAdminMessages()

  # Exactly one routing policy survives the checks above.
  routing_any = None
  routing_single = None
  if multi_cluster:
    routing_any = msgs.MultiClusterRoutingUseAny(
        clusterIds=restrict_to or [],
        rowAffinity=msgs.RowAffinity() if row_affinity else None,
    )
  elif cluster:
    routing_single = msgs.SingleClusterRouting(
        clusterId=cluster,
        allowTransactionalWrites=transactional_writes,
    )

  # Standard and Data Boost isolation are mutually exclusive; priority wins.
  standard_isolation = None
  data_boost_isolation = None
  if priority:
    standard_isolation = msgs.StandardIsolation(
        priority=msgs.StandardIsolation.PriorityValueValuesEnum(priority)
    )
  elif data_boost:
    billing_owner_enum = (
        msgs.DataBoostIsolationReadOnly.ComputeBillingOwnerValueValuesEnum(
            data_boost_compute_billing_owner
        )
    )
    data_boost_isolation = msgs.DataBoostIsolationReadOnly(
        computeBillingOwner=billing_owner_enum
    )

  create_request = msgs.BigtableadminProjectsInstancesAppProfilesCreateRequest(
      appProfile=msgs.AppProfile(
          description=description,
          multiClusterRoutingUseAny=routing_any,
          singleClusterRouting=routing_single,
          standardIsolation=standard_isolation,
          dataBoostIsolationReadOnly=data_boost_isolation,
      ),
      appProfileId=app_profile_ref.Name(),
      parent=app_profile_ref.Parent().RelativeName(),
      ignoreWarnings=force,
  )
  return util.GetAdminClient().projects_instances_appProfiles.Create(
      create_request
  )
def Update(
    app_profile_ref,
    cluster=None,
    description='',
    multi_cluster=False,
    restrict_to=None,
    transactional_writes=None,
    row_affinity=None,
    priority=None,
    data_boost=False,
    data_boost_compute_billing_owner=None,
    force=False,
):
  """Update an app profile.

  Args:
    app_profile_ref: A resource reference of the app profile to update.
    cluster: string, The cluster id for the app profile to route to using single
      cluster routing.
    description: string, A description of the app profile.
    multi_cluster: bool, Whether this app profile should route to multiple
      clusters, instead of single cluster.
    restrict_to: list[string] The list of cluster IDs for the app profile to
      route to using multi cluster routing.
    transactional_writes: bool, Whether this app profile has transactional
      writes enabled. This is only possible when using single cluster routing.
    row_affinity: bool, Whether to use row affinity sticky routing. If None,
      then no change should be made.
    priority: string, The request priority of the app profile.
    data_boost: bool, If the app profile should use Data Boost Read-only
      Isolation.
    data_boost_compute_billing_owner: string, The compute billing owner for Data
      Boost.
    force: bool, Whether to ignore API warnings and update forcibly.

  Raises:
    ConflictingArgumentsException,
    OneOfArgumentsRequiredException:
      See _AppProfileChecks(...)

  Returns:
    Long running operation.
  """
  _AppProfileChecks(
      cluster=cluster,
      multi_cluster=multi_cluster,
      restrict_to=restrict_to,
      transactional_writes=transactional_writes,
      row_affinity=row_affinity,
      data_boost=data_boost,
  )
  client = util.GetAdminClient()
  msgs = util.GetAdminMessages()
  # Only the fields named in changed_fields are patched by the backend.
  changed_fields = []
  app_profile = msgs.AppProfile()
  if cluster:
    changed_fields.append('singleClusterRouting.clusterId')
    if transactional_writes is not None:
      changed_fields.append('singleClusterRouting.allowTransactionalWrites')
    app_profile.singleClusterRouting = msgs.SingleClusterRouting(
        clusterId=cluster,
        allowTransactionalWrites=transactional_writes,
    )
  elif multi_cluster:
    if restrict_to:
      changed_fields.append('multiClusterRoutingUseAny.clusterIds')
    if row_affinity is not None:
      changed_fields.append('multiClusterRoutingUseAny.rowAffinity')
    app_profile.multiClusterRoutingUseAny = msgs.MultiClusterRoutingUseAny(
        clusterIds=restrict_to or [],
        rowAffinity=msgs.RowAffinity() if row_affinity else None,
    )
    # If the only update is from single cluster to default multi cluster config,
    # then set the field mask to be the entire proto message.
    if (
        app_profile.multiClusterRoutingUseAny
        == msgs.MultiClusterRoutingUseAny()
    ):
      changed_fields.append('multiClusterRoutingUseAny')
  if description:
    changed_fields.append('description')
    app_profile.description = description
  if priority:
    # Standard and Data Boost isolation are mutually exclusive; the argument
    # checks above guarantee at most one of priority / data_boost applies.
    priority_enum = msgs.StandardIsolation.PriorityValueValuesEnum(priority)
    changed_fields.append('standardIsolation.priority')
    app_profile.standardIsolation = msgs.StandardIsolation(
        priority=priority_enum
    )
  elif data_boost:
    data_boost_enum = (
        msgs.DataBoostIsolationReadOnly.ComputeBillingOwnerValueValuesEnum(
            data_boost_compute_billing_owner
        )
    )
    changed_fields.append('dataBoostIsolationReadOnly')
    app_profile.dataBoostIsolationReadOnly = msgs.DataBoostIsolationReadOnly(
        computeBillingOwner=data_boost_enum
    )
  msg = msgs.BigtableadminProjectsInstancesAppProfilesPatchRequest(
      appProfile=app_profile,
      name=app_profile_ref.RelativeName(),
      updateMask=','.join(changed_fields),
      ignoreWarnings=force,
  )
  return client.projects_instances_appProfiles.Patch(msg)

View File

@@ -0,0 +1,502 @@
# -*- coding: utf-8 -*- #
# Copyright 2024 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable authorized views API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
import binascii
import copy
import io
import json
import textwrap
from apitools.base.py import encoding
from apitools.base.py import exceptions as api_exceptions
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.api_lib.util import exceptions
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core import yaml
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_diff
from googlecloudsdk.core.util import edit
import six
# Commented template shown in the user's EDITOR when an authorized view is
# created without --definition-file.
CREATE_HELP = textwrap.dedent("""\
To create an authorized view, specify a JSON or YAML formatted
representation of a valid authorized view protobuf.
Lines beginning with "#" are ignored.
Example:
{
  "subsetView":
  {
    "rowPrefixes": ["store1#"],
    "familySubsets":
    {
      "column_family_name":
      {
        "qualifiers":["address"],
        "qualifierPrefixes":["tel"]
      }
    }
  },
  "deletionProtection": true
}
""")
# Commented template shown in the user's EDITOR when an authorized view is
# updated without --definition-file; the current definition is appended below
# this text. Fixed typo: "pecify" -> "specify".
UPDATE_HELP = textwrap.dedent("""\
Please specify a JSON or YAML formatted representation of the new authorized
view. Lines beginning with "#" are ignored.
Example:
{
  "subsetView":
  {
    "rowPrefixes": ["store1#"],
    "familySubsets":
    {
      "column_family_name":
      {
        "qualifiers":["address"],
        "qualifierPrefixes":["tel"]
      }
    }
  },
  "deletionProtection": true
}
Current authorized view:
""")
def ModifyCreateAuthorizedViewRequest(unused_ref, args, req):
  """Parse argument and construct create authorized view request.

  Args:
    unused_ref: the gcloud resource (unused).
    args: input arguments.
    req: the real request to be sent to backend service.

  Returns:
    The real request to be sent to backend service.
  """
  if args.definition_file:
    authorized_view = ParseAuthorizedViewFromYamlOrJsonDefinitionFile(
        args.definition_file, args.pre_encoded
    )
  else:
    # No definition file: launch EDITOR with a commented template for the
    # user to fill out interactively.
    authorized_view = PromptForAuthorizedViewDefinition(
        is_create=True, pre_encoded=args.pre_encoded
    )
  # The name comes from the command line, so drop anything the user supplied
  # in the definition itself.
  authorized_view.name = None
  req.authorizedView = authorized_view
  # req.authorizedViewId and req.parent are mapped automatically via the
  # request_id_field in the declarative yaml definition.
  return req
def PromptForAuthorizedViewDefinition(
    is_create, pre_encoded, current_authorized_view=None
):
  """Prompt user to fill out a JSON/YAML format representation of an authorized view.

  Returns the parsed authorized view proto message from user's response.

  Args:
    is_create: True if the prompt is for creating an authorized view. False if
      the prompt is for updating an authorized view.
    pre_encoded: True if all binary fields in the authorized view definition
      are already Base64-encoded, in which case no extra encoding is applied.
    current_authorized_view: The current authorized view definition. Only used
      in the update case, included in the initial commented prompt.

  Returns:
    an authorized view proto message with fields filled accordingly.

  Raises:
    ChildProcessError: if the user did not save the temporary file, or if
      there is a problem running the editor.
    ValueError: if the user's response does not follow YAML or JSON format, or
      if the YAML/JSON object cannot be parsed as a valid authorized view.
  """
  if is_create:
    initial_text = BuildCreateAuthorizedViewFileContents()
  else:
    initial_text = BuildUpdateAuthorizedViewFileContents(
        current_authorized_view, pre_encoded
    )

  try:
    user_response = edit.OnlineEdit(initial_text)
  except edit.NoSaveException:
    raise ChildProcessError("Edit aborted by user.")
  except edit.EditorException as e:
    raise ChildProcessError(
        "There was a problem applying your changes. [{0}].".format(e)
    )

  try:
    parsed_view = yaml.load(user_response)
    if not pre_encoded:
      Base64EncodingYamlAuthorizedViewDefinition(parsed_view)
    return encoding.PyValueToMessage(
        util.GetAdminMessages().AuthorizedView, parsed_view
    )
  except yaml.YAMLParseError as e:
    raise ValueError(
        "Provided response is not a properly formatted YAML or JSON file."
        " [{0}].".format(e)
    )
  except AttributeError as e:
    raise ValueError(
        "Provided response cannot be parsed as a valid authorized view. [{0}]."
        .format(e)
    )
def ParseAuthorizedViewFromYamlOrJsonDefinitionFile(file_path, pre_encoded):
  """Create an authorized view proto message from a YAML or JSON formatted definition file.

  Args:
    file_path: Path to the YAML or JSON definition file.
    pre_encoded: True if all binary fields in the authorized view definition
      are already Base64-encoded, in which case no extra encoding is applied.

  Returns:
    an authorized view proto message with fields filled accordingly.

  Raises:
    BadArgumentException: if the file cannot be read or does not follow YAML
      or JSON format.
    ValueError: if the YAML/JSON object cannot be parsed as a valid
      authorized view.
  """
  try:
    parsed_view = yaml.load_path(file_path)
    if not pre_encoded:
      Base64EncodingYamlAuthorizedViewDefinition(parsed_view)
    return encoding.PyValueToMessage(
        util.GetAdminMessages().AuthorizedView, parsed_view
    )
  except (yaml.FileLoadError, yaml.YAMLParseError) as e:
    raise calliope_exceptions.BadArgumentException("--definition-file", e)
  except AttributeError as e:
    raise ValueError(
        "File [{0}] cannot be parsed as a valid authorized view. [{1}].".format(
            file_path, e
        )
    )
def Base64EncodingYamlAuthorizedViewDefinition(yaml_authorized_view):
  """Apply base64 encoding to all binary fields in the authorized view definition in YAML format."""
  # Only subsetView carries binary fields; anything else passes through.
  if not yaml_authorized_view or "subsetView" not in yaml_authorized_view:
    return yaml_authorized_view
  subset_view = yaml_authorized_view["subsetView"]
  if "rowPrefixes" in subset_view:
    subset_view["rowPrefixes"] = [
        Utf8ToBase64(prefix) for prefix in subset_view.get("rowPrefixes", [])
    ]
  # Family subset dicts are mutated in place.
  for family_subset in subset_view.get("familySubsets", {}).values():
    if "qualifiers" in family_subset:
      family_subset["qualifiers"] = [
          Utf8ToBase64(q) for q in family_subset.get("qualifiers", [])
      ]
    if "qualifierPrefixes" in family_subset:
      family_subset["qualifierPrefixes"] = [
          Utf8ToBase64(q) for q in family_subset.get("qualifierPrefixes", [])
      ]
  return yaml_authorized_view
def Base64DecodingYamlAuthorizedViewDefinition(yaml_authorized_view):
  """Apply base64 decoding to all binary fields in the authorized view definition in YAML format."""
  # Only subsetView carries binary fields; anything else passes through.
  if not yaml_authorized_view or "subsetView" not in yaml_authorized_view:
    return yaml_authorized_view
  subset_view = yaml_authorized_view["subsetView"]
  if "rowPrefixes" in subset_view:
    subset_view["rowPrefixes"] = [
        Base64ToUtf8(prefix) for prefix in subset_view.get("rowPrefixes", [])
    ]
  # Family subset dicts are mutated in place.
  for family_subset in subset_view.get("familySubsets", {}).values():
    if "qualifiers" in family_subset:
      family_subset["qualifiers"] = [
          Base64ToUtf8(q) for q in family_subset.get("qualifiers", [])
      ]
    if "qualifierPrefixes" in family_subset:
      family_subset["qualifierPrefixes"] = [
          Base64ToUtf8(q) for q in family_subset.get("qualifierPrefixes", [])
      ]
  return yaml_authorized_view
def Utf8ToBase64(s):
  """Encode a utf-8 string as a base64 string."""
  raw_bytes = six.ensure_binary(s)
  return six.ensure_text(base64.b64encode(raw_bytes))
def Base64ToUtf8(s):
  """Decode a base64 string as a utf-8 string."""
  try:
    decoded = base64.b64decode(s)
  except (TypeError, binascii.Error) as error:
    raise ValueError(
        "Error decoding base64 string [{0}] in the current authorized view"
        " definition into utf-8. [{1}].".format(s, error)
    )
  return six.ensure_text(decoded)
def CheckOnlyAsciiCharactersInAuthorizedView(authorized_view):
  """Raises a ValueError if the view contains non-ascii characters."""
  if authorized_view is None:
    return
  subset_view = authorized_view.subsetView
  if subset_view is None:
    return
  # Check every binary field of the subset view.
  if subset_view.rowPrefixes is not None:
    for row_prefix in subset_view.rowPrefixes:
      CheckAscii(row_prefix)
  if subset_view.familySubsets is not None:
    for prop in subset_view.familySubsets.additionalProperties:
      subset = prop.value
      for qualifier in subset.qualifiers:
        CheckAscii(qualifier)
      for qualifier_prefix in subset.qualifierPrefixes:
        CheckAscii(qualifier_prefix)
def CheckAscii(s):
  """Raise ValueError if the byte string cannot be decoded as ascii."""
  try:
    s.decode("ascii")
  except UnicodeError as error:
    message = (
        "Non-ascii characters [{0}] found in the current authorized view"
        " definition, please use --pre-encoded instead. [{1}].".format(s, error)
    )
    raise ValueError(message)
def BuildCreateAuthorizedViewFileContents():
  """Builds the help text for creating an authorized view as the initial file content."""
  # Comment out every help line; blank lines become a bare "#".
  commented = [
      "# " + line if line else "#" for line in CREATE_HELP.splitlines()
  ]
  # Trailing empty entry yields the final blank line after joining.
  commented.append("")
  return "\n".join(commented) + "\n"
def ModifyUpdateAuthorizedViewRequest(original_ref, args, req):
  """Parse argument and construct update authorized view request.

  Args:
    original_ref: the gcloud resource.
    args: input arguments.
    req: the real request to be sent to backend service.

  Returns:
    The real request to be sent to backend service.
  """
  current_authorized_view = None
  if args.definition_file:
    req.authorizedView = ParseAuthorizedViewFromYamlOrJsonDefinitionFile(
        args.definition_file, args.pre_encoded
    )
  else:
    # If no definition_file is provided, EDITOR will be executed with a
    # commented prompt for the user to fill out the authorized view definition.
    current_authorized_view = GetCurrentAuthorizedView(
        original_ref.RelativeName(), not args.pre_encoded
    )
    req.authorizedView = PromptForAuthorizedViewDefinition(
        is_create=False,
        pre_encoded=args.pre_encoded,
        current_authorized_view=current_authorized_view,
    )
  # Only fields present in the new definition are added to the update mask.
  if req.authorizedView.subsetView is not None:
    req = AddFieldToUpdateMask("subset_view", req)
  if req.authorizedView.deletionProtection is not None:
    req = AddFieldToUpdateMask("deletion_protection", req)
  if args.interactive:
    # Interactive mode shows the user a diff and asks for confirmation before
    # sending the update.
    if current_authorized_view is None:
      current_authorized_view = GetCurrentAuthorizedView(
          original_ref.RelativeName(), check_ascii=False
      )
    # This essentially merges the requested authorized view to the original
    # authorized view based on the update mask.
    new_authorized_view = copy.deepcopy(current_authorized_view)
    if req.authorizedView.subsetView is not None:
      new_authorized_view.subsetView = req.authorizedView.subsetView
    if req.authorizedView.deletionProtection is not None:
      new_authorized_view.deletionProtection = (
          req.authorizedView.deletionProtection
      )
    # Get the diff between the original authorized view and the new authorized
    # view.
    buf = io.StringIO()
    differ = resource_diff.ResourceDiff(
        original=current_authorized_view, changed=new_authorized_view
    )
    differ.Print("default", out=buf)
    if buf.getvalue():
      console_io.PromptContinue(
          message=(
              "Difference between the current authorized view and the new"
              " authorized view:\n"
          )
          + buf.getvalue(),
          cancel_on_no=True,
      )
    else:
      console_io.PromptContinue(
          message="The authorized view will NOT change with this update.",
          cancel_on_no=True,
      )
  # The name field should be ignored and omitted from the request as it is
  # taken from the command line.
  req.authorizedView.name = None
  if args.ignore_warnings:
    req.ignoreWarnings = True
  return req
def GetCurrentAuthorizedView(authorized_view_name, check_ascii):
  """Get the current authorized view resource object given the authorized view name.

  Args:
    authorized_view_name: The name of the authorized view.
    check_ascii: True if we should check to make sure that the returned
      authorized view contains only ascii characters.

  Returns:
    The view resource object.

  Raises:
    ValueError: if check_ascii is true and the current authorized view
      definition contains invalid non-ascii characters.
  """
  messages = util.GetAdminMessages()
  get_request = (
      messages.BigtableadminProjectsInstancesTablesAuthorizedViewsGetRequest(
          name=authorized_view_name
      )
  )
  try:
    current_view = (
        util.GetAdminClient().projects_instances_tables_authorizedViews.Get(
            get_request
        )
    )
  except api_exceptions.HttpError as error:
    # Surface backend errors as the user-friendly gcloud HTTP exception.
    raise exceptions.HttpException(error)
  if check_ascii:
    CheckOnlyAsciiCharactersInAuthorizedView(current_view)
  return current_view
def SerializeToJsonOrYaml(
    authorized_view, pre_encoded, serialized_format="json"
):
  """Serializes an authorized view protobuf to either JSON or YAML.

  Args:
    authorized_view: The authorized view proto message to serialize.
    pre_encoded: If False, binary fields are base64-decoded back to utf-8
      before serializing so the user sees readable values.
    serialized_format: "json" or "yaml".

  Returns:
    The serialized text, or None for an unrecognized format (matches the
    historic fall-through behavior).
  """
  view_dict = encoding.MessageToDict(authorized_view)
  if not pre_encoded:
    view_dict = Base64DecodingYamlAuthorizedViewDefinition(view_dict)
  if serialized_format == "json":
    return six.text_type(json.dumps(view_dict, indent=2))
  if serialized_format == "yaml":
    return six.text_type(yaml.dump(view_dict))
def BuildUpdateAuthorizedViewFileContents(current_authorized_view, pre_encoded):
  """Builds the help text for updating an existing authorized view.

  Args:
    current_authorized_view: The current authorized view resource object.
    pre_encoded: When pre_encoded is False, user is passing utf-8 values for
      binary fields in the authorized view definition and expecting us to apply
      base64 encoding. Thus, we should also display utf-8 values in the help
      text, which requires base64 decoding the binary fields in the
      `current_authorized_view`.

  Returns:
    A string containing the help text for update authorized view.
  """

  def _WriteCommented(text, out):
    # Prefix every line with "#"; blank lines stay a bare "#".
    for line in text.splitlines():
      out.write("# " + line + "\n" if line else "#\n")

  buf = io.StringIO()
  _WriteCommented(UPDATE_HELP, buf)
  _WriteCommented(
      SerializeToJsonOrYaml(current_authorized_view, pre_encoded), buf
  )
  buf.write("\n")
  return buf.getvalue()
def AddFieldToUpdateMask(field, req):
  """Adding a new field to the update mask of the UpdateAuthorizedViewRequest.

  Args:
    field: the field to be updated.
    req: the original UpdateAuthorizedViewRequest.

  Returns:
    req: the UpdateAuthorizedViewRequest with update mask refreshed.
  """
  update_mask = req.updateMask
  if update_mask:
    # Compare against whole comma-separated entries. The previous substring
    # test (update_mask.count(field)) would wrongly skip a field like "view"
    # when the mask already contained "subset_view".
    if field not in update_mask.split(","):
      req.updateMask = update_mask + "," + field
  else:
    req.updateMask = field
  return req

View File

@@ -0,0 +1,139 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable backups API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core.util import times
# General Utils
class NoFieldSpecified(core_exceptions.Error):
  """Error for calling update command with no args that represent fields."""
def ParseExpireTime(expiration_value):
  """Parse flag value into Datetime format for expireTime.

  Args:
    expiration_value: string, a relative duration (e.g. '3d') understood by
      times.ParseDuration.

  Returns:
    string, the absolute expiry time formatted as an RFC 3339 UTC timestamp
    with microsecond precision.
  """
  # The duration is resolved relative to the current time; the backend
  # timezone is UTC.
  datetime = times.ParseDuration(expiration_value).GetRelativeDateTime(
      times.Now(times.UTC)
  )
  parsed_datetime = times.FormatDateTime(
      datetime, '%Y-%m-%dT%H:%M:%S.%6f%Ez', tzinfo=times.UTC
  )
  return parsed_datetime
def FormatDatetime(datetime_value: str) -> str:
  """Parse a string datetime value and reformat it as an RFC 3339 UTC string."""
  return arg_parsers.Datetime.ParseUtcTime(datetime_value).strftime(
      '%Y-%m-%dT%H:%M:%SZ'
  )
# TODO: b/353357876 - We can replace both of these flags since we can represent
# both formats using gcloud's arg_parser.
def GetExpireTime(args):
  """Parse flags for expire time.

  Returns the absolute expiration date if given, else an expiry derived from
  the relative retention period, else None.
  """
  if args.expiration_date:
    return args.expiration_date
  if args.retention_period:
    return ParseExpireTime(args.retention_period)
  return None
def GetHotToStandardTime(args):
  """Parse flags for hot to standard time.

  Falsy values (None or empty string) pass through unchanged; anything else
  is normalized via FormatDatetime.
  """
  hot_to_standard_time = args.hot_to_standard_time
  if hot_to_standard_time:
    return FormatDatetime(hot_to_standard_time)
  return hot_to_standard_time
# Create Command Utils
def ModifyCreateRequest(backup_ref, args, req):
  """Parse argument and construct create backup request."""
  # The independent request fields are populated from the resource ref and
  # the parsed flags.
  req.parent = backup_ref.Parent().RelativeName()
  req.backupId = args.backup
  req.backup.sourceTable = f'projects/{backup_ref.projectsId}/instances/{backup_ref.instancesId}/tables/{args.table}'
  req.backup.expireTime = GetExpireTime(args)
  req.backup.hotToStandardTime = GetHotToStandardTime(args)
  return req
# Update Command Utils
def ResetDefaultMaskField(unused_instance_ref, unused_args, req):
  """Clear the request's update mask so only explicitly added fields remain."""
  req.updateMask = ''
  return req
def AddFieldToUpdateMask(field, req):
  """Add a field to the request's update mask, avoiding duplicates.

  Args:
    field: str, the proto field name to include in the update mask.
    req: the patch request whose updateMask should include the field.

  Returns:
    The request with its updateMask refreshed.
  """
  update_mask = req.updateMask
  if update_mask:
    # Compare against whole comma-separated entries. The previous substring
    # test (update_mask.count(field)) would wrongly skip a field like "time"
    # when the mask already contained "expire_time".
    if field not in update_mask.split(','):
      req.updateMask = update_mask + ',' + field
  else:
    req.updateMask = field
  return req
def AddBackupFieldsToUpdateMask(unused_backup_ref, args, req):
  """Add backup fields to updateMask in the patch request."""
  expire_time = GetExpireTime(args)
  if expire_time is not None:
    req.backup.expireTime = expire_time
    req = AddFieldToUpdateMask('expire_time', req)

  hot_to_standard_time = GetHotToStandardTime(args)
  if hot_to_standard_time is not None:
    req = AddFieldToUpdateMask('hot_to_standard_time', req)
    # `hot_to_standard_time` is a string flag and we know it is not None here,
    # so a falsy value can only be the empty string, which means the user
    # wants to clear the `hot_to_standard_time` field on the backup.
    req.backup.hotToStandardTime = hot_to_standard_time or None
  return req
def CopyBackup(source_backup_ref, destination_backup_ref, args):
  """Copy a backup to a destination cluster.

  Args:
    source_backup_ref: A resource reference to the backup being copied.
    destination_backup_ref: A resource reference naming the new backup.
    args: parsed command-line arguments (supplies the expire-time flags).

  Returns:
    Long running operation.
  """
  msgs = util.GetAdminMessages()
  copy_request = msgs.CopyBackupRequest(
      backupId=destination_backup_ref.Name(),
      sourceBackup=source_backup_ref.RelativeName(),
      expireTime=GetExpireTime(args),
  )
  wrapped_request = msgs.BigtableadminProjectsInstancesClustersBackupsCopyRequest(
      parent=destination_backup_ref.Parent().RelativeName(),
      copyBackupRequest=copy_request,
  )
  return util.GetAdminClient().projects_instances_clusters_backups.Copy(
      wrapped_request
  )

View File

@@ -0,0 +1,183 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable clusters API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.bigtable import util
def Delete(cluster_ref):
  """Delete a cluster.

  Args:
    cluster_ref: A resource reference to the cluster to delete.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  delete_req = admin_msgs.BigtableadminProjectsInstancesClustersDeleteRequest(
      name=cluster_ref.RelativeName()
  )
  admin_client.projects_instances_clusters.Delete(delete_req)
def Create(cluster_ref, cluster):
  """Create a cluster.

  Args:
    cluster_ref: A resource reference to the cluster to create.
    cluster: A Cluster msg object to create.

  Returns:
    Long running operation.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  create_req = admin_msgs.BigtableadminProjectsInstancesClustersCreateRequest(
      cluster=cluster,
      clusterId=cluster_ref.Name(),
      parent=cluster_ref.Parent().RelativeName(),
  )
  return admin_client.projects_instances_clusters.Create(create_req)
def BuildClusterAutoscalingConfig(min_nodes=None,
                                  max_nodes=None,
                                  cpu_target=None,
                                  storage_target=None):
  """Build a ClusterAutoscalingConfig field.

  Args:
    min_nodes: int, minimum number of serve nodes.
    max_nodes: int, maximum number of serve nodes.
    cpu_target: int, target CPU utilization percent.
    storage_target: int, target storage utilization in GiB per node.

  Returns:
    A ClusterAutoscalingConfig message object.
  """
  msgs = util.GetAdminMessages()
  return msgs.ClusterAutoscalingConfig(
      autoscalingLimits=msgs.AutoscalingLimits(
          minServeNodes=min_nodes, maxServeNodes=max_nodes),
      autoscalingTargets=msgs.AutoscalingTargets(
          cpuUtilizationPercent=cpu_target,
          storageUtilizationGibPerNode=storage_target))
def BuildClusterConfig(autoscaling_min=None,
                       autoscaling_max=None,
                       autoscaling_cpu_target=None,
                       autoscaling_storage_target=None):
  """Build a ClusterConfig field wrapping an autoscaling config.

  Args:
    autoscaling_min: int, minimum number of nodes for autoscaling.
    autoscaling_max: int, maximum number of nodes for autoscaling.
    autoscaling_cpu_target: int, target CPU utilization percent.
    autoscaling_storage_target: int, target storage utilization GiB per node.

  Returns:
    A ClusterConfig message object.
  """
  autoscaling_config = BuildClusterAutoscalingConfig(
      min_nodes=autoscaling_min,
      max_nodes=autoscaling_max,
      cpu_target=autoscaling_cpu_target,
      storage_target=autoscaling_storage_target)
  return util.GetAdminMessages().ClusterConfig(
      clusterAutoscalingConfig=autoscaling_config)
def BuildPartialUpdateClusterRequest(msgs,
                                     name=None,
                                     nodes=None,
                                     autoscaling_min=None,
                                     autoscaling_max=None,
                                     autoscaling_cpu_target=None,
                                     autoscaling_storage_target=None,
                                     update_mask=None):
  """Build a PartialUpdateClusterRequest.

  Args:
    msgs: The API messages module.
    name: str, the fully-qualified cluster name.
    nodes: int, the number of serve nodes.
    autoscaling_min: int, minimum number of nodes for autoscaling.
    autoscaling_max: int, maximum number of nodes for autoscaling.
    autoscaling_cpu_target: int, target CPU utilization percent.
    autoscaling_storage_target: int, target storage utilization GiB per node.
    update_mask: str, comma-separated list of fields being updated.

  Returns:
    A BigtableadminProjectsInstancesClustersPartialUpdateClusterRequest.
  """
  cluster = msgs.Cluster(name=name, serveNodes=nodes)
  autoscaling_args = (autoscaling_min, autoscaling_max,
                      autoscaling_cpu_target, autoscaling_storage_target)
  # Only attach a cluster config when at least one autoscaling knob is set.
  if any(arg is not None for arg in autoscaling_args):
    cluster.clusterConfig = BuildClusterConfig(
        autoscaling_min=autoscaling_min,
        autoscaling_max=autoscaling_max,
        autoscaling_cpu_target=autoscaling_cpu_target,
        autoscaling_storage_target=autoscaling_storage_target)
  return msgs.BigtableadminProjectsInstancesClustersPartialUpdateClusterRequest(
      cluster=cluster, name=name, updateMask=update_mask)
def PartialUpdate(cluster_ref,
                  nodes=None,
                  autoscaling_min=None,
                  autoscaling_max=None,
                  autoscaling_cpu_target=None,
                  autoscaling_storage_target=None,
                  disable_autoscaling=False):
  """Partially update a cluster.

  Args:
    cluster_ref: A resource reference to the cluster to update.
    nodes: int, the number of nodes in this cluster.
    autoscaling_min: int, the minimum number of nodes for autoscaling.
    autoscaling_max: int, the maximum number of nodes for autoscaling.
    autoscaling_cpu_target: int, the target CPU utilization percent for
      autoscaling.
    autoscaling_storage_target: int, the target storage utilization gibibytes
      per node for autoscaling.
    disable_autoscaling: bool, True means disable autoscaling if it is currently
      enabled. False means change nothing whether it is currently enabled or
      not.

  Returns:
    Long running operation.

  Raises:
    ValueError: if disable_autoscaling is True while any autoscaling argument
      is also set.
  """
  client = util.GetAdminClient()
  msgs = util.GetAdminMessages()
  if disable_autoscaling:
    # Disabling autoscaling while also configuring it is contradictory input;
    # reject it before issuing any RPC.
    if (autoscaling_min is not None or autoscaling_max is not None or
        autoscaling_cpu_target is not None or
        autoscaling_storage_target is not None):
      raise ValueError('autoscaling arguments cannot be set together with '
                       'disable_autoscaling')
    return client.projects_instances_clusters.PartialUpdateCluster(
        # To disable autoscaling, set clusterConfig to empty, but include it in
        # update_mask.
        BuildPartialUpdateClusterRequest(
            msgs=msgs,
            name=cluster_ref.RelativeName(),
            nodes=nodes,
            update_mask='serve_nodes,cluster_config.cluster_autoscaling_config'
        ))
  # Build the update mask from exactly the arguments that were supplied so
  # fields left as None are untouched on the server.
  changed_fields = []
  if nodes is not None:
    changed_fields.append('serve_nodes')
  if autoscaling_min is not None:
    changed_fields.append(
        'cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes'
    )
  if autoscaling_max is not None:
    changed_fields.append(
        'cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes'
    )
  if autoscaling_cpu_target is not None:
    changed_fields.append(
        'cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent'
    )
  if autoscaling_storage_target is not None:
    changed_fields.append(
        'cluster_config.cluster_autoscaling_config.autoscaling_targets.storage_utilization_gib_per_node'
    )
  update_mask = ','.join(changed_fields)
  return client.projects_instances_clusters.PartialUpdateCluster(
      BuildPartialUpdateClusterRequest(
          msgs=msgs,
          name=cluster_ref.RelativeName(),
          nodes=nodes,
          autoscaling_min=autoscaling_min,
          autoscaling_max=autoscaling_max,
          autoscaling_cpu_target=autoscaling_cpu_target,
          autoscaling_storage_target=autoscaling_storage_target,
          update_mask=update_mask))

View File

@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable instance API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.command_lib.iam import iam_util
def Upgrade(instance):
  """Upgrade development instance to production.

  Args:
    instance: str instance id to upgrade.

  Returns:
    Operation: the partial update's LRO response.
  """
  client = util.GetAdminClient()
  msgs = util.GetAdminMessages()
  instance_ref = util.GetInstanceRef(instance)
  production_instance = msgs.Instance(
      type=msgs.Instance.TypeValueValuesEnum.PRODUCTION)
  request = msgs.BigtableadminProjectsInstancesPartialUpdateInstanceRequest(
      instance=production_instance,
      name=instance_ref.RelativeName(),
      updateMask='type')
  return client.projects_instances.PartialUpdateInstance(request)
def GetIamPolicy(instance_ref):
  """Get IAM policy for a given instance."""
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  # Request the highest policy version the client library understands.
  policy_options = admin_msgs.GetPolicyOptions(
      requestedPolicyVersion=iam_util.MAX_LIBRARY_IAM_SUPPORTED_VERSION)
  request = admin_msgs.BigtableadminProjectsInstancesGetIamPolicyRequest(
      resource=instance_ref.RelativeName(),
      getIamPolicyRequest=admin_msgs.GetIamPolicyRequest(
          options=policy_options))
  return admin_client.projects_instances.GetIamPolicy(request)
def SetIamPolicy(instance_ref, policy):
  """Sets the given policy on the instance, overwriting what exists."""
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  # Pin the policy to the max version the client library supports.
  policy.version = iam_util.MAX_LIBRARY_IAM_SUPPORTED_VERSION
  request = admin_msgs.BigtableadminProjectsInstancesSetIamPolicyRequest(
      resource=instance_ref.RelativeName(),
      setIamPolicyRequest=admin_msgs.SetIamPolicyRequest(policy=policy))
  return admin_client.projects_instances.SetIamPolicy(request)

View File

@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable logical-views API helper."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.bigtable import util
def Create(logical_view_ref, query, deletion_protection):
  """Create a logical view.

  Args:
    logical_view_ref: A resource reference to the logical view to create.
    query: The query of the logical view.
    deletion_protection: The deletion protection of the logical view.

  Returns:
    Created logical view resource object.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  new_view = admin_msgs.LogicalView(query=query)
  if deletion_protection is not None:
    new_view.deletionProtection = deletion_protection
  return admin_client.projects_instances_logicalViews.Create(
      admin_msgs.BigtableadminProjectsInstancesLogicalViewsCreateRequest(
          logicalView=new_view,
          logicalViewId=logical_view_ref.Name(),
          parent=logical_view_ref.Parent().RelativeName(),
      ))
def Delete(logical_view_ref):
  """Delete a logical view.

  Args:
    logical_view_ref: A resource reference to the logical view to delete.

  Returns:
    Empty response.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  delete_req = (
      admin_msgs.BigtableadminProjectsInstancesLogicalViewsDeleteRequest(
          name=logical_view_ref.RelativeName()))
  return admin_client.projects_instances_logicalViews.Delete(delete_req)
def Describe(logical_view_ref):
  """Describe a logical view.

  Args:
    logical_view_ref: A resource reference to the logical view to describe.

  Returns:
    Logical view resource object.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  get_req = admin_msgs.BigtableadminProjectsInstancesLogicalViewsGetRequest(
      name=logical_view_ref.RelativeName())
  return admin_client.projects_instances_logicalViews.Get(get_req)
def List(instance_ref):
  """List logical views.

  Args:
    instance_ref: A resource reference of the instance to list logical views
      for.

  Returns:
    Generator of logical view resource objects.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  list_req = admin_msgs.BigtableadminProjectsInstancesLogicalViewsListRequest(
      parent=instance_ref.RelativeName())
  # Logical views are paginated by page token only; no batch-size attribute.
  return list_pager.YieldFromList(
      admin_client.projects_instances_logicalViews,
      list_req,
      field='logicalViews',
      batch_size_attribute=None,
  )
def Update(logical_view_ref, query, deletion_protection):
  """Update a logical view.

  Args:
    logical_view_ref: A resource reference to the logical view to update.
    query: The new query of the logical view.
    deletion_protection: The new deletion protection of the logical view.

  Returns:
    Long running operation.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  updated_view = admin_msgs.LogicalView()
  mask_fields = []
  if query:
    updated_view.query = query
    mask_fields.append('query')
  if deletion_protection is not None:
    updated_view.deletionProtection = deletion_protection
    mask_fields.append('deletion_protection')
  return admin_client.projects_instances_logicalViews.Patch(
      admin_msgs.BigtableadminProjectsInstancesLogicalViewsPatchRequest(
          logicalView=updated_view,
          name=logical_view_ref.RelativeName(),
          updateMask=','.join(mask_fields),
      ))

View File

@@ -0,0 +1,194 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable materialized-views API helper."""
from typing import Generator
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.util.apis import arg_utils
from googlecloudsdk.core import resources
from googlecloudsdk.generated_clients.apis.bigtableadmin.v2 import bigtableadmin_v2_messages
def Describe(
    materialized_view_ref: resources.Resource, view: str
) -> bigtableadmin_v2_messages.MaterializedView:
  """Describe a materialized view.

  Args:
    materialized_view_ref: A resource reference to the materialized view to
      describe.
    view: Specifies what type of information to return about the view. Valid
      values are 'schema', 'replication', and 'full'.

  Returns:
    Materialized view resource object.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  request_type = (
      admin_msgs.BigtableadminProjectsInstancesMaterializedViewsGetRequest
  )
  view_enum_type = request_type.ViewValueValuesEnum
  choice_mapper = arg_utils.ChoiceEnumMapper(
      '--view',
      view_enum_type,
      custom_mappings={
          'SCHEMA_VIEW': 'schema',
          'REPLICATION_VIEW': 'replication',
          'FULL': 'full',
      },
  )
  view_enum = choice_mapper.GetEnumForChoice(view)
  if view_enum is None:
    # Default to the schema view when the choice does not map to an enum.
    view_enum = view_enum_type.SCHEMA_VIEW
  return admin_client.projects_instances_materializedViews.Get(
      request_type(name=materialized_view_ref.RelativeName(), view=view_enum)
  )
def Create(
    materialized_view_ref: resources.Resource,
    query: str,
    deletion_protection: bool,
) -> bigtableadmin_v2_messages.MaterializedView:
  """Create a materialized view.

  Args:
    materialized_view_ref: A resource reference to the materialized view to
      create.
    query: The query of the materialized view.
    deletion_protection: Whether the materialized view is protected from
      deletion.

  Returns:
    Created materialized view resource object.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  new_view = admin_msgs.MaterializedView(query=query)
  if deletion_protection is not None:
    new_view.deletionProtection = deletion_protection
  return admin_client.projects_instances_materializedViews.Create(
      admin_msgs.BigtableadminProjectsInstancesMaterializedViewsCreateRequest(
          materializedView=new_view,
          materializedViewId=materialized_view_ref.Name(),
          parent=materialized_view_ref.Parent().RelativeName(),
      ))
def Delete(
    materialized_view_ref: resources.Resource,
) -> None:
  """Delete a materialized view.

  Args:
    materialized_view_ref: A resource reference to the materialized view to
      delete.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  delete_req = (
      admin_msgs.BigtableadminProjectsInstancesMaterializedViewsDeleteRequest(
          name=materialized_view_ref.RelativeName()))
  admin_client.projects_instances_materializedViews.Delete(delete_req)
def List(
    instance_ref: resources.Resource,
    view: str,
) -> Generator[bigtableadmin_v2_messages.MaterializedView, None, None]:
  """List materialized views.

  Args:
    instance_ref: A resource reference of the instance to list materialized
      views for.
    view: Specifies what type of information to return about the views. The
      only supported value at this time is 'schema'.

  Returns:
    Generator of materialized view resource objects.

  Raises:
    exceptions.BadArgumentException: if `view` maps to anything other than the
      schema view.
  """
  client = util.GetAdminClient()
  msgs = util.GetAdminMessages()
  list_materialized_view_request = (
      msgs.BigtableadminProjectsInstancesMaterializedViewsListRequest
  )
  view_enum_type = list_materialized_view_request.ViewValueValuesEnum
  mapper = arg_utils.ChoiceEnumMapper(
      '--view',
      view_enum_type,
      custom_mappings={
          'SCHEMA_VIEW': 'schema',
          'REPLICATION_VIEW': 'replication',
          'FULL': 'full',
      },
  )
  view_enum = mapper.GetEnumForChoice(view)
  # Only the schema view is supported for list materialized views at this time.
  if view_enum != view_enum_type.SCHEMA_VIEW:
    # BadArgumentException takes the *name* of the offending argument first,
    # not its value; passing the value produced a confusing error message.
    raise exceptions.BadArgumentException(
        '--view',
        "Only 'schema' view is supported for list materialized views.",
    )
  msg = list_materialized_view_request(
      parent=instance_ref.RelativeName(),
      view=view_enum,
  )
  return list_pager.YieldFromList(
      client.projects_instances_materializedViews,
      msg,
      field='materializedViews',
      batch_size_attribute=None,
  )
def Update(
    materialized_view_ref: resources.Resource, deletion_protection: bool
) -> bigtableadmin_v2_messages.MaterializedView:
  """Update a materialized view.

  Args:
    materialized_view_ref: A resource reference to the materialized view to
      update.
    deletion_protection: Whether the materialized view is protected from
      deletion.

  Returns:
    Updated materialized view resource object.
  """
  admin_client = util.GetAdminClient()
  admin_msgs = util.GetAdminMessages()
  # Deletion protection is the only mutable field here, so the mask is fixed.
  patch_req = (
      admin_msgs.BigtableadminProjectsInstancesMaterializedViewsPatchRequest(
          materializedView=admin_msgs.MaterializedView(
              deletionProtection=deletion_protection),
          name=materialized_view_ref.RelativeName(),
          updateMask='deletion_protection',
      ))
  return admin_client.projects_instances_materializedViews.Patch(patch_req)

View File

@@ -0,0 +1,121 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable memory layers API helper."""
from googlecloudsdk.api_lib.bigtable import util
# Resource-name suffix appended to a cluster name to address its memory layer.
MEMORY_LAYER_SUFFIX = '/memoryLayer'
def Describe(cluster_ref, client=None, msgs=None):
  """Describe a memory layer.

  Args:
    cluster_ref: A resource reference to the cluster of the memory layer to
      describe.
    client: The API client.
    msgs: The API messages.

  Returns:
    Memory layer resource object.
  """
  client = util.GetAdminClient() if client is None else client
  msgs = util.GetAdminMessages() if msgs is None else msgs
  request = msgs.BigtableadminProjectsInstancesClustersGetMemoryLayerRequest(
      name=cluster_ref.RelativeName() + MEMORY_LAYER_SUFFIX
  )
  return client.projects_instances_clusters.GetMemoryLayer(request)
def Update(
    cluster_ref,
    client=None,
    msgs=None,
    *,
    storage_size_gib=None,
    max_request_units_per_second=None,
):
  """Update a memory layer.

  Args:
    cluster_ref: A resource reference to the cluster to update.
    client: The API client.
    msgs: The API messages.
    storage_size_gib: The storage size of the memory layer in gibibytes.
    max_request_units_per_second: The maximum number of request units per
      second that can be used by the memory layer.

  Returns:
    Long running operation.
  """
  client = util.GetAdminClient() if client is None else client
  msgs = util.GetAdminMessages() if msgs is None else msgs
  capacity = msgs.FixedCapacity()
  mask_fields = []
  if storage_size_gib is not None:
    capacity.storageSizeGib = storage_size_gib
    mask_fields.append('memory_config.fixed_capacity.storage_size_gib')
  if max_request_units_per_second is not None:
    capacity.maxRequestUnitsPerSecond = max_request_units_per_second
    mask_fields.append(
        'memory_config.fixed_capacity.max_request_units_per_second'
    )
  layer = msgs.MemoryLayer(
      memoryConfig=msgs.MemoryConfig(fixedCapacity=capacity))
  request = msgs.BigtableadminProjectsInstancesClustersUpdateMemoryLayerRequest(
      memoryLayer=layer,
      name=cluster_ref.RelativeName() + MEMORY_LAYER_SUFFIX,
      updateMask=','.join(mask_fields),
  )
  return client.projects_instances_clusters.UpdateMemoryLayer(request)
def Disable(cluster_ref, client=None, msgs=None):
  """Disable a memory layer.

  Args:
    cluster_ref: A resource reference to the cluster to disable memory layer
      on.
    client: The API client.
    msgs: The API messages.

  Returns:
    Long running operation.
  """
  client = util.GetAdminClient() if client is None else client
  msgs = util.GetAdminMessages() if msgs is None else msgs
  # Sending an empty MemoryLayer with `memory_config` in the update mask
  # clears the config, which disables the memory layer.
  request = msgs.BigtableadminProjectsInstancesClustersUpdateMemoryLayerRequest(
      memoryLayer=msgs.MemoryLayer(),
      name=cluster_ref.RelativeName() + MEMORY_LAYER_SUFFIX,
      updateMask='memory_config',
  )
  return client.projects_instances_clusters.UpdateMemoryLayer(request)

View File

@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable operations API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import properties
def ModifyDescribeRequest(unused_operation_ref, args, req):
  """Check input and construct describe request if needed.

  Args:
    unused_operation_ref: the gcloud resource (unused).
    args: input arguments; `args.operation` holds the operation name.
    req: the describe request to be sent to the backend service.

  Returns:
    The request, with `name` fully qualified if it was not already.
  """
  operation_name = args.operation
  if operation_name.startswith('operations/projects'):
    # Already fully qualified; nothing to do.
    return req
  # Assuming that if the operation name is not complete, it's only missing
  # the operations/projects/{project}/ prefix; fill it in from the default
  # project. (Only look up the project when it is actually needed.)
  project_id = properties.VALUES.core.project.Get()
  req.name = 'operations/projects/{0}/{1}'.format(project_id, operation_name)
  return req

View File

@@ -0,0 +1,122 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable schema bundles API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from cloudsdk.google.protobuf import descriptor_pb2
from cloudsdk.google.protobuf import text_format
from googlecloudsdk.calliope import parser_extensions
from googlecloudsdk.core import log
from googlecloudsdk.core import resources
from googlecloudsdk.core.util import files
from googlecloudsdk.generated_clients.apis.bigtableadmin.v2 import bigtableadmin_v2_messages
def ModifyCreateSchemaBundleRequest(
    unused_ref: resources.Resource,
    args: parser_extensions.Namespace,
    req: bigtableadmin_v2_messages.BigtableadminProjectsInstancesTablesSchemaBundlesCreateRequest,
) -> (
    bigtableadmin_v2_messages.BigtableadminProjectsInstancesTablesSchemaBundlesCreateRequest
):
  """Parse argument and construct create schema bundle request.

  Attaches the proto descriptors file contents, if provided, to the create
  schema bundle request.

  Args:
    unused_ref: the gcloud resource (unused).
    args: input arguments.
    req: the real request to be sent to backend service.

  Returns:
    The modified request to be sent to backend service.

  Raises:
    ValueError: if the proto descriptors file is invalid.
  """
  proto_file = args.proto_descriptors_file
  if proto_file:
    contents = files.ReadBinaryFileContents(proto_file)
    # Raises if the file is not a valid/parsable FileDescriptorSet.
    descriptor_pb2.FileDescriptorSet.FromString(contents)
    req.schemaBundle.protoSchema.protoDescriptors = contents
  # The declarative yaml file's request_id_field maps req.schemaBundleId and
  # req.parent automatically, so no change regarding them is needed here.
  return req
def ModifyUpdateSchemaBundleRequest(
    unused_ref: resources.Resource,
    args: parser_extensions.Namespace,
    req: bigtableadmin_v2_messages.BigtableadminProjectsInstancesTablesSchemaBundlesPatchRequest,
) -> (
    bigtableadmin_v2_messages.BigtableadminProjectsInstancesTablesSchemaBundlesPatchRequest
):
  """Parse argument and construct update schema bundle request.

  Attaches the proto descriptors file contents, if provided, to the update
  schema bundle request, and propagates the ignore-warnings flag.

  Args:
    unused_ref: the gcloud resource (unused).
    args: input arguments.
    req: the real request to be sent to backend service.

  Returns:
    The modified request to be sent to backend service.

  Raises:
    ValueError: if the proto descriptors file is invalid.
  """
  proto_file = args.proto_descriptors_file
  if proto_file:
    contents = files.ReadBinaryFileContents(proto_file)
    # Raises if the file is not a valid/parsable FileDescriptorSet.
    descriptor_pb2.FileDescriptorSet.FromString(contents)
    req.schemaBundle.protoSchema.protoDescriptors = contents
  if args.ignore_warnings:
    req.ignoreWarnings = True
  return req
def PrintParsedProtoDescriptorsInGetResponse(response, _):
  """Parse the proto descriptors in the Get response and print it.

  Args:
    response: the response from the backend service.
    _: unused.

  Returns:
    The original response.
  """
  proto_schema = response.protoSchema
  if proto_schema is not None and proto_schema.protoDescriptors is not None:
    parsed = descriptor_pb2.FileDescriptorSet.FromString(
        proto_schema.protoDescriptors)
    # Human-readable text-proto dump of the descriptor set.
    log.status.Print(text_format.MessageToString(parsed))
  return response

View File

@@ -0,0 +1,689 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable tables API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
from apitools.base.py import encoding
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import yaml
from googlecloudsdk.core.util import times
import six
def ParseSingleRule(rule):
  """Parses GC rules from a rule string.

  Args:
    rule: A string representing a GC rule, e.g. `maxage=10d`

  Returns:
    A GcRule object.

  Raises:
    BadArgumentException: the input is mal-formatted.
  """
  rule_parts = rule.split('=')
  if len(rule_parts) != 2 or not rule_parts[1]:
    raise exceptions.BadArgumentException(
        '--column-families',
        'Invalid union or intersection rule: {0}'.format(rule),
    )
  if rule_parts[0] == 'maxage':
    return util.GetAdminMessages().GcRule(
        maxAge=ConvertDurationToSeconds(rule_parts[1], '--column-families')
    )
  elif rule_parts[0] == 'maxversions':
    try:
      max_versions = int(rule_parts[1])
    except ValueError:
      # Previously e.g. `maxversions=abc` escaped as an uncaught ValueError;
      # report it as a user input error like every other malformed rule.
      raise exceptions.BadArgumentException(
          '--column-families',
          'Invalid union or intersection rule: {0}'.format(rule),
      )
    return util.GetAdminMessages().GcRule(maxNumVersions=max_versions)
  else:
    raise exceptions.BadArgumentException(
        '--column-families',
        'Invalid union or intersection rule: {0}'.format(rule),
    )
def ParseBinaryRule(rule_list):
  """Parses GC rules from a rule list of 2 elements.

  Args:
    rule_list: A string list containing 2 elements.

  Returns:
    A list of GcRule objects.

  Raises:
    BadArgumentException: the input list is mal-formatted.
  """
  if len(rule_list) != 2:
    # Only binary (two-operand) union/intersection rules are supported.
    raise exceptions.BadArgumentException(
        '--column-families',
        'Invalid union or intersection rule: ' + ' '.join(rule_list),
    )
  return [ParseSingleRule(part) for part in rule_list]
def ParseExpr(expr):
  """Parses family name and GC rules from the string expression.

  Args:
    expr: A string expression contains family name and optional GC rules in the
      format of `family_name[:gc_rule]`, such as `my_family:maxage=10d`.

  Returns:
    A family name and a GcRule object defined in the Bigtable admin API.

  Raises:
    BadArgumentException: the input string is mal-formatted.
  """
  expr_list = expr.split(':')
  family = expr_list[0]
  expr_list_len = len(expr_list)
  if expr_list_len > 2 or family != family.strip():
    raise exceptions.BadArgumentException(
        '--column-families',
        'Input column family ({0}) is mal-formatted.'.format(expr),
    )
  # Without GC rules
  if expr_list_len == 1:
    # No GC rule is allowed
    return family, None
  # With GC rules
  if not expr_list[1]:
    raise exceptions.BadArgumentException(
        '--column-families',
        'Input column family ({0}) is mal-formatted.'.format(expr),
    )
  gc_rule = expr_list[1]
  union_list = gc_rule.split('||')
  intersection_list = gc_rule.split('&&')
  # Only do 1-level of parsing, don't support nested expression.
  if len(union_list) == 2 and len(intersection_list) == 1:
    # Union rule
    return family, util.GetAdminMessages().GcRule(
        union=util.GetAdminMessages().Union(rules=ParseBinaryRule(union_list))
    )
  if len(union_list) == 1 and len(intersection_list) == 2:
    # Intersection rule
    return family, util.GetAdminMessages().GcRule(
        intersection=util.GetAdminMessages().Intersection(
            rules=ParseBinaryRule(intersection_list)
        )
    )
  if len(union_list) == 1 and len(intersection_list) == 1:
    # A simple rule (gc_rule is non-empty; the empty case was rejected above).
    return family, ParseSingleRule(gc_rule)
  # Mixed `||`/`&&` or more than two operands: previously this fell through
  # every branch and implicitly returned None; raise like every other
  # malformed input instead.
  raise exceptions.BadArgumentException(
      '--column-families',
      'Input column family ({0}) is mal-formatted.'.format(expr),
  )
def UpdateRequestWithInput(original_ref, args, req):
  """Parse argument and construct create table request.

  Args:
    original_ref: the gcloud resource.
    args: input arguments.
    req: the real request to be sent to backend service.

  Returns:
    req: the real request to be sent to backend service.
  """
  # The table id comes from the user; the parent is the enclosing instance.
  req.parent = original_ref.Parent().RelativeName()
  req.createTableRequest.tableId = args.table
  return req
def MakeSplits(split_list):
  """Convert a string list to a list of Split objects.

  Args:
    split_list: A list that contains strings representing splitting points.

  Returns:
    A list of Split objects.
  """
  split_msg = util.GetAdminMessages().Split
  return [split_msg(key=point.encode('utf-8')) for point in split_list]
def ConvertDurationToSeconds(duration, arg_name):
  """Convert a string of duration in any form to seconds.

  Args:
    duration: A string of any valid form of duration, such as `10d`, `1w`,
      `36h`.
    arg_name: The name of the argument that the duration is passed in.

  Returns:
    A string of duration counted in seconds, such as `1000s`

  Raises:
    BadArgumentException: the input duration is mal-formatted.
  """
  try:
    parsed = times.ParseDuration(duration)
  except (times.DurationSyntaxError, times.DurationValueError) as error:
    # Surface both syntax and value problems as user input errors.
    raise exceptions.BadArgumentException(arg_name, str(error))
  return times.FormatDurationForJson(parsed)
def ParseColumnFamilies(family_list):
  """Parses column families value object from the string list.

  Args:
    family_list: A list that contains one or more strings representing family
      name and optional GC rules in the format of `family_name[:gc_rule]`, such
      as `my_family_1,my_family_2:maxage=10d`.

  Returns:
    A column families value object.
  """
  msgs = util.GetAdminMessages()
  props = [
      msgs.Table.ColumnFamiliesValue.AdditionalProperty(
          key=family, value=msgs.ColumnFamily(gcRule=gc_rule))
      for family, gc_rule in (ParseExpr(expr) for expr in family_list)
  ]
  return msgs.Table.ColumnFamiliesValue(additionalProperties=props)
def ParseChangeStreamRetentionPeriod(retention_period):
  """Parses change stream retention period from the string.

  Args:
    retention_period: Change stream retention period in the format of `3d` for
      3 days.

  Returns:
    A string of duration counted in seconds, such as `259200s`
  """
  arg_name = '--change-stream-retention-period'
  return ConvertDurationToSeconds(retention_period, arg_name)
def ParseTieredStorageConfigDuration(duration):
  """Parses tiered storage config duration from the string.

  Args:
    duration: Tiered storage config duration in the format of a valid gcloud
      datetime duration string, such as `10d`, `1w`, `36h`.

  Returns:
    A string of duration counted in seconds, such as `259200s`
  """
  flag_name = '--tiered-storage-infrequent-access-older-than'
  return ConvertDurationToSeconds(duration, flag_name)
def AddFieldToUpdateMask(field, req):
  """Adding a new field to the update mask of the updateTableRequest.

  Args:
    field: the field to be updated.
    req: the original updateTableRequest.

  Returns:
    req: the updateTableRequest with update mask refreshed.
  """
  update_mask = req.updateMask
  if update_mask:
    # Compare against whole comma-separated entries. The previous substring
    # check (update_mask.count(field)) would wrongly skip adding a field that
    # is a prefix of an already-present entry, e.g. 'changeStreamConfig' when
    # 'changeStreamConfig.retentionPeriod' is in the mask.
    if field not in update_mask.split(','):
      req.updateMask = update_mask + ',' + field
  else:
    req.updateMask = field
  return req
def RefreshUpdateMask(unused_ref, args, req):
  """Refresh the update mask of the updateTableRequest according to the input arguments.

  Args:
    unused_ref: the gcloud resource (unused).
    args: the input arguments.
    req: the original updateTableRequest.

  Returns:
    req: the updateTableRequest with update mask refreshed.
  """
  # (flag value, mask field) pairs, evaluated in the original order so the
  # resulting mask string is identical.
  requested_fields = [
      (args.clear_change_stream_retention_period, 'changeStreamConfig'),
      (
          args.change_stream_retention_period,
          'changeStreamConfig.retentionPeriod',
      ),
      (
          args.enable_automated_backup or args.disable_automated_backup,
          'automatedBackupPolicy',
      ),
      (
          args.automated_backup_retention_period,
          'automatedBackupPolicy.retentionPeriod',
      ),
  ]
  # TODO: b/418228423 - Remove this check once the flag is released in GA.
  #
  # We need this to verify that the flag exists in this release track.
  if hasattr(args, 'clear_tiered_storage_config'):
    requested_fields.append(
        (args.clear_tiered_storage_config, 'tieredStorageConfig')
    )
    requested_fields.append((
        args.tiered_storage_infrequent_access_older_than,
        'tieredStorageConfig.infrequentAccess.includeIfOlderThan',
    ))
  requested_fields.append((
      args.row_key_schema_definition_file or args.clear_row_key_schema,
      'rowKeySchema',
  ))
  for flag_value, mask_field in requested_fields:
    if flag_value:
      req = AddFieldToUpdateMask(mask_field, req)
  return req
def AddAdditionalArgs():
  """Adds additional flags."""
  extra_args = list(AddChangeStreamConfigUpdateTableArgs())
  extra_args.extend(AddAutomatedBackupPolicyUpdateTableArgs())
  return extra_args
def AddAdditionalArgsAlphaBeta():
  """Adds additional flags for alpha and beta."""
  extra_args = list(AddAdditionalArgs())
  extra_args.extend(AddTieredStorageConfigUpdateTableArgs())
  return extra_args
def AddChangeStreamConfigUpdateTableArgs():
  """Adds the change stream commands to update table CLI.

  This can't be defined in the yaml because that automatically generates the
  inverse for any boolean args and we don't want the nonsensical
  `no-clear-change-stream-retention-period`. We use store_const to only allow
  `clear-change-stream-retention-period` or `change-stream-retention-period`
  arguments

  Returns:
    Argument group containing change stream args
  """
  clear_flag = base.Argument(
      '--clear-change-stream-retention-period',
      help=(
          'This disables the change stream and eventually removes the'
          ' change stream data.'
      ),
      action='store_const',
      const=True,
  )
  retention_flag = base.Argument(
      '--change-stream-retention-period',
      help=(
          'The length of time to retain change stream data for the table, '
          'in the range of [1 day, 7 days]. Acceptable units are days (d), '
          'hours (h), minutes (m), and seconds (s). If not already '
          'specified, enables a change stream for the table. Examples: `5d`'
          ' or `48h`.'
      ),
  )
  group = base.ArgumentGroup(mutex=True)
  for flag in (clear_flag, retention_flag):
    group.AddArgument(flag)
  return [group]
def AddTieredStorageConfigUpdateTableArgs():
  """Adds the tiered storage config commands to update table CLI.

  This can't be defined in the yaml because that automatically generates the
  inverse for any boolean args and we don't want the nonsensical
  `no-clear-tiered-storage-config`. We use store_const to only allow
  `clear-tiered-storage-config`.

  Returns:
    Argument group containing tiered storage config args
  """
  clear_flag = base.Argument(
      '--clear-tiered-storage-config',
      help='Disables the tiered storage config.',
      action='store_const',
      const=True,
  )
  age_flag = base.Argument(
      '--tiered-storage-infrequent-access-older-than',
      help=(
          'The age at which data should be moved to infrequent access'
          ' storage.\n\nSee `$ gcloud topic datetimes` for information on'
          ' absolute duration formats.'
      ),
  )
  group = base.ArgumentGroup(mutex=True)
  for flag in (clear_flag, age_flag):
    group.AddArgument(flag)
  return [group]
def AddAutomatedBackupPolicyCreateTableArgs():
  """Adds automated backup policy commands to create table CLI.

  This can't be defined in the yaml because that automatically generates the
  inverse for any boolean args and we don't want the nonsensical
  `no-enable-automated-backup`. We use store_const to only allow
  `enable-automated-backup` argument.

  Returns:
    Argument group containing automated backup args.
  """
  enable_flag = base.Argument(
      '--enable-automated-backup',
      help=(
          'Once set, enables the default automated backup policy'
          ' (retention_period=7d, frequency=1d) for the table.'
      ),
      action='store_const',
      const=True,
  )
  retention_flag = base.Argument(
      '--automated-backup-retention-period',
      help=(
          'The retention period of automated backup in the format of `30d`'
          ' for 30 days. Min retention period is `3d` and max is `90d`.'
          ' Setting this flag will enable automated backup for the table.'
      ),
  )
  group = base.ArgumentGroup(mutex=True)
  for flag in (enable_flag, retention_flag):
    group.AddArgument(flag)
  return [group]
def AddAutomatedBackupPolicyUpdateTableArgs():
  """Adds automated backup policy commands to update table CLI.

  Returns:
    Argument group containing the mutually exclusive automated backup flags.
  """
  enable_flag = base.Argument(
      '--enable-automated-backup',
      help=(
          'Once set, enables the default automated backup policy'
          ' (retention_period=7d, frequency=1d) for the table. Note: If a'
          ' table has automated backup enabled, this flag resets it to the'
          ' default policy.'
      ),
      action='store_const',
      const=True,
  )
  disable_flag = base.Argument(
      '--disable-automated-backup',
      help='Once set, disables automated backup policy for the table.',
      action='store_const',
      const=True,
  )
  retention_flag = base.Argument(
      '--automated-backup-retention-period',
      help=(
          'The retention period of automated backup in the format of `30d`'
          ' for 30 days. Min retention period is `3d` and max is `90d`.'
          ' Setting this flag will enable automated backup for the table.'
      ),
  )
  group = base.ArgumentGroup(mutex=True)
  for flag in (enable_flag, disable_flag, retention_flag):
    group.AddArgument(flag)
  return [group]
def HandleChangeStreamArgs(unused_ref, args, req):
  """Sets the change stream config on the request when the flag is present."""
  retention = args.change_stream_retention_period
  if retention:
    req.table.changeStreamConfig = CreateChangeStreamConfig(retention)
  return req
def HandleAutomatedBackupPolicyCreateTableArgs(unused_ref, args, req):
  """Handles automated backup policy args for create table CLI."""
  if args.enable_automated_backup:
    req.createTableRequest.table.automatedBackupPolicy = (
        CreateDefaultAutomatedBackupPolicy()
    )
  retention = args.automated_backup_retention_period
  if retention:
    # Frequency stays None to be consistent with the UpdateTable command.
    req.createTableRequest.table.automatedBackupPolicy = (
        CreateAutomatedBackupPolicy(retention, None)
    )
  return req
def HandleAutomatedBackupPolicyUpdateTableArgs(unused_ref, args, req):
  """Handle automated backup policy args for update table CLI.

  If `enable_automated_backup` flag is set, add default policy to table. If
  `disable_automated_backup` flag is set, keep table.automatedBackupPolicy as
  empty, together with the update_mask, it will clear automated backup policy.
  If `automated_backup_retention_period` flag is set, add policy with given
  retention period to table.

  Args:
    unused_ref: the gcloud resource (unused).
    args: the input arguments.
    req: the original updateTableRequest.

  Returns:
    req: the updateTableRequest with automated backup policy handled.
  """
  if args.enable_automated_backup:
    req.table.automatedBackupPolicy = CreateDefaultAutomatedBackupPolicy()
  retention = args.automated_backup_retention_period
  if retention:
    req.table.automatedBackupPolicy = CreateAutomatedBackupPolicy(
        retention, None
    )
  return req
def HandleTieredStorageArgs(unused_ref, args, req):
  """Handle tiered storage args for update table CLI.

  Args:
    unused_ref: the gcloud resource (unused).
    args: the input arguments.
    req: the original updateTableRequest.

  Returns:
    req: the updateTableRequest with tiered storage config handled.
  """
  # TODO: b/418228423 - Remove this check once the flag is released in GA.
  #
  # We need this to verify that the flag exists in this release track.
  if not hasattr(args, 'clear_tiered_storage_config'):
    return req
  if args.clear_tiered_storage_config:
    req.table.tieredStorageConfig = None
  age_threshold = args.tiered_storage_infrequent_access_older_than
  if age_threshold:
    messages = util.GetAdminMessages()
    req.table.tieredStorageConfig = messages.TieredStorageConfig(
        infrequentAccess=messages.TieredStorageRule(
            includeIfOlderThan=ParseTieredStorageConfigDuration(age_threshold)
        )
    )
  return req
def CreateChangeStreamConfig(duration):
  """Builds a ChangeStreamConfig message from a duration string."""
  retention_seconds = ConvertDurationToSeconds(
      duration, '--change-stream-retention-period'
  )
  return util.GetAdminMessages().ChangeStreamConfig(
      retentionPeriod=retention_seconds
  )
def CreateAutomatedBackupPolicy(retention_period, frequency):
  """Constructs AutomatedBackupPolicy message with given values.

  Args:
    retention_period: The retention period of the automated backup policy.
    frequency: The frequency of the automated backup policy.

  Returns:
    AutomatedBackupPolicy with the specified policy config.
  """
  backup_policy = util.GetAdminMessages().AutomatedBackupPolicy()
  # Fields are left unset (not zeroed) when the corresponding value is falsy.
  if retention_period:
    backup_policy.retentionPeriod = ConvertDurationToSeconds(
        retention_period, '--automated-backup-retention-period'
    )
  if frequency:
    backup_policy.frequency = ConvertDurationToSeconds(
        frequency, '--automated-backup-frequency'
    )
  return backup_policy
def CreateDefaultAutomatedBackupPolicy():
  """Constructs AutomatedBackupPolicy message with default values.

  The default values are: retention_period=7d, frequency=1d

  Returns:
    AutomatedBackupPolicy with default policy config.
  """
  default_retention, default_frequency = '7d', '1d'
  return CreateAutomatedBackupPolicy(default_retention, default_frequency)
def Utf8ToBase64(s):
  """Encode a utf-8 string as a base64 string.

  Args:
    s: Text (or already-encoded bytes) to encode.

  Returns:
    The Base64 encoding as a text string.
  """
  # Pure-stdlib Python 3 equivalent of the old
  # six.ensure_text(base64.b64encode(six.ensure_binary(s))) compat shim.
  data = s.encode('utf-8') if isinstance(s, str) else s
  return base64.b64encode(data).decode('utf-8')
def HandleRowKeySchemaCreateTableArgs(unused_ref, args, req):
  """Handles row key schema create table args."""
  definition_file = args.row_key_schema_definition_file
  if definition_file:
    req.createTableRequest.table.rowKeySchema = (
        ParseRowKeySchemaFromDefinitionFile(
            definition_file, args.row_key_schema_pre_encoded_bytes
        )
    )
  return req
def HandleRowKeySchemaUpdateTableArgs(unused_ref, args, req):
  """Handles row key schema update table args."""
  definition_file = args.row_key_schema_definition_file
  if definition_file:
    req.table.rowKeySchema = ParseRowKeySchemaFromDefinitionFile(
        definition_file, args.row_key_schema_pre_encoded_bytes
    )
  if args.clear_row_key_schema:
    # NOTE(review): presumably the API warns when clearing the schema and
    # ignoreWarnings lets the clear proceed — confirm against the API.
    req.ignoreWarnings = True
  return req
def Base64EncodeBinaryFieldsInRowKeySchema(row_key_schema):
  """Encodes binary fields in the row key schema in Base64."""
  # We don't need to check for missing encoding here, as the admin API will
  # return an error if the encoding is missing.
  if not row_key_schema or 'encoding' not in row_key_schema:
    return row_key_schema
  schema_encoding = row_key_schema['encoding']
  if 'delimitedBytes' not in schema_encoding:
    return row_key_schema
  delimited_bytes = schema_encoding['delimitedBytes']
  if 'delimiter' not in delimited_bytes or not delimited_bytes['delimiter']:
    return row_key_schema
  delimited_bytes['delimiter'] = Utf8ToBase64(delimited_bytes['delimiter'])
  return row_key_schema
def ParseRowKeySchemaFromDefinitionFile(definition_file, pre_encoded):
  """Parses row key schema from the definition file.

  Args:
    definition_file: The path to the definition file. File must be in YAML or
      JSON format.
    pre_encoded: Whether all the binary fields in the row key schema (e.g.
      encoding.delimited_bytes.delimiter) are pre-encoded in Base64.

  Returns:
    A struct type object representing the row key schema.

  Raises:
    BadArgumentException if the definition file is not found, can't be
      read, or is not a valid YAML or JSON file.
    ValueError if the YAML/JSON object cannot be parsed as a valid row key
      schema.
  """
  struct_msg_type = util.GetAdminMessages().GoogleBigtableAdminV2TypeStruct
  try:
    loaded_schema = yaml.load_path(definition_file)
    if not pre_encoded:
      # Binary fields arrive as plain text and must be Base64-encoded before
      # the message conversion below.
      Base64EncodeBinaryFieldsInRowKeySchema(loaded_schema)
    parsed_schema = encoding.PyValueToMessage(struct_msg_type, loaded_schema)
  except (yaml.FileLoadError, yaml.YAMLParseError) as e:
    raise exceptions.BadArgumentException('--row-key-schema-definition-file', e)
  except AttributeError as e:
    raise ValueError(
        'File [{0}] cannot be parsed as a valid row key schema. [{1}]'.format(
            definition_file, e
        )
    )
  return parsed_schema

View File

@@ -0,0 +1,215 @@
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that is used to support our commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
def GetAdminClient():
  """Shortcut to get the latest Bigtable Admin client."""
  api_name, api_version = 'bigtableadmin', 'v2'
  return apis.GetClientInstance(api_name, api_version)
def GetAdminMessages():
  """Shortcut to get the latest Bigtable Admin messages."""
  api_name, api_version = 'bigtableadmin', 'v2'
  return apis.GetMessagesModule(api_name, api_version)
def ProjectUrl():
  """Returns the relative resource path for the current project."""
  return 'projects/{}'.format(properties.VALUES.core.project.Get())
def LocationUrl(location):
  """Returns the relative resource path for a location in the project."""
  return '{}/locations/{}'.format(ProjectUrl(), location)
def _Await(result_service, operation_ref, message):
  """Polls a long running operation until done, displaying `message`."""
  operations_service = GetAdminClient().operations
  poller = waiter.CloudOperationPoller(result_service, operations_service)
  return waiter.WaitFor(poller, operation_ref, message)
def AwaitCluster(operation_ref, message):
  """Waits for cluster long running operation to complete."""
  return _Await(
      GetAdminClient().projects_instances_clusters, operation_ref, message
  )
def AwaitInstance(operation_ref, message):
  """Waits for instance long running operation to complete."""
  return _Await(GetAdminClient().projects_instances, operation_ref, message)
def AwaitAppProfile(operation_ref, message):
  """Waits for app profile long running operation to complete."""
  profiles_service = GetAdminClient().projects_instances_appProfiles
  return _Await(profiles_service, operation_ref, message)
def AwaitTable(operation_ref, message):
  """Waits for table long running operation to complete."""
  tables_service = GetAdminClient().projects_instances_tables
  return _Await(tables_service, operation_ref, message)
def AwaitBackup(operation_ref, message):
  """Waits for backup long running operation to complete."""
  backups_service = GetAdminClient().projects_instances_clusters_backups
  return _Await(backups_service, operation_ref, message)
def AwaitLogicalView(operation_ref, message):
  """Waits for logical view long running operation to complete."""
  views_service = GetAdminClient().projects_instances_logicalViews
  return _Await(views_service, operation_ref, message)
def AwaitMaterializedView(operation_ref, message):
  """Waits for materialized view long running operation to complete."""
  views_service = GetAdminClient().projects_instances_materializedViews
  return _Await(views_service, operation_ref, message)
def AwaitMemoryLayer(operation_ref, message):
  """Waits for memory layer long running operation to complete."""
  # NOTE(review): memory layer operations are polled through the clusters
  # service, same as AwaitCluster — confirm this is intentional.
  return _Await(
      GetAdminClient().projects_instances_clusters, operation_ref, message
  )
def GetAppProfileRef(instance, app_profile):
  """Get a resource reference to an app profile."""
  parse_params = {
      'projectsId': properties.VALUES.core.project.GetOrFail,
      'instancesId': instance,
  }
  return resources.REGISTRY.Parse(
      app_profile,
      params=parse_params,
      collection='bigtableadmin.projects.instances.appProfiles',
  )
def GetClusterRef(instance, cluster):
  """Get a resource reference to a cluster."""
  parse_params = {
      'projectsId': properties.VALUES.core.project.GetOrFail,
      'instancesId': instance,
  }
  return resources.REGISTRY.Parse(
      cluster,
      params=parse_params,
      collection='bigtableadmin.projects.instances.clusters',
  )
def GetLogicalViewRef(instance, logical_view):
  """Get a resource reference to a logical view."""
  parse_params = {
      'projectsId': properties.VALUES.core.project.GetOrFail,
      'instancesId': instance,
  }
  return resources.REGISTRY.Parse(
      logical_view,
      params=parse_params,
      collection='bigtableadmin.projects.instances.logicalViews',
  )
def GetOperationRef(operation):
  """Get a resource reference to a long running operation."""
  return resources.REGISTRY.ParseRelativeName(
      operation.name, collection='bigtableadmin.operations'
  )
def GetInstanceRef(instance):
  """Get a resource reference to an instance."""
  return resources.REGISTRY.Parse(
      instance,
      params={'projectsId': properties.VALUES.core.project.GetOrFail},
      collection='bigtableadmin.projects.instances',
  )
def GetTableRef(instance, table):
  """Get a resource reference to a table."""
  parse_params = {
      'projectsId': properties.VALUES.core.project.GetOrFail,
      'instancesId': instance,
  }
  return resources.REGISTRY.Parse(
      table,
      params=parse_params,
      collection='bigtableadmin.projects.instances.tables',
  )
# Prefix of the violation `type` the backend uses to tag app-profile warnings
# (as opposed to hard errors) in error details; used by FormatErrorMessages
# below to split violations into warnings and errors.
WARNING_TYPE_PREFIX = 'CLOUD_BIGTABLE_APP_PROFILE_WARNING'
def FormatErrorMessages(exception):
  """Format app profile error message from API and raise new exception.

  The error messages returned from the backend API are not formatted well when
  using the default format. This raises a new generic exception with a well
  formatted error message built from the original response.

  Args:
    exception: HttpError raised by API.

  Raises:
    exceptions.HttpException: Reformatted error raised by API.
  """
  response = json.loads(exception.content)
  error = response.get('error')
  if error is None or error.get('details') is None:
    # Nothing structured to reformat; propagate the original failure.
    raise exception
  error_lines = ['Errors:']
  warning_lines = ['Warnings (use --force to ignore):']
  for detail in error['details']:
    for violation in detail.get('violations', []):
      description = violation.get('description')
      if violation.get('type').startswith(WARNING_TYPE_PREFIX):
        warning_lines.append(description)
      else:
        error_lines.append(description)
  formatted = ''
  if len(warning_lines) > 1:
    formatted += '\n\t'.join(warning_lines)
  if len(error_lines) > 1:
    formatted += '\n\t'.join(error_lines)
  if not formatted:
    raise exception
  raise exceptions.HttpException(
      exception, '{}\n{}'.format(error['message'], formatted)
  )