feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions for Cloud Run v2 conditions."""
from typing import Sequence
from googlecloudsdk.generated_clients.gapic_clients.run_v2.types import condition as condition_objects
# Condition type that reports the overall readiness of a Cloud Run resource.
_ready_condition_type = 'Ready'
def IsConditionReady(condition: condition_objects.Condition):
  """Returns True iff the condition's state is CONDITION_SUCCEEDED."""
  succeeded = condition_objects.Condition.State.CONDITION_SUCCEEDED
  return condition.state == succeeded
def IsConditionFailed(condition: condition_objects.Condition):
  """Returns True iff the condition's state is CONDITION_FAILED."""
  failed = condition_objects.Condition.State.CONDITION_FAILED
  return condition.state == failed
def _GetReadyCondition(conditions: Sequence[condition_objects.Condition]):
  """Returns the first condition of type 'Ready', or None if absent."""
  ready_conditions = (
      c for c in conditions if c.type == _ready_condition_type
  )
  return next(ready_conditions, None)
def GetTerminalCondition(resource):
  """Returns the terminal condition of a resource.

  Args:
    resource: A Cloud Run v2 resource to get the terminal condition of.

  Returns:
    A condition object representing the terminal condition of the resource, or
    None if the resource does not have a terminal condition.
  """
  if hasattr(resource, 'terminal_condition'):
    return resource.terminal_condition
  # Resources without a dedicated terminal_condition field expose readiness
  # through their 'Ready' condition instead.
  return _GetReadyCondition(resource.conditions)

View File

@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper around serverless_operations DeleteFoo for surfaces."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as api_exceptions
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.command_lib.run import exceptions as serverless_exceptions
from googlecloudsdk.command_lib.run.v2 import conditions
from googlecloudsdk.core.console import progress_tracker
class DeletionPoller(waiter.OperationPoller):
  """Polls for deletion of a resource.

  Polling is considered done once the resource can no longer be fetched
  (the getter returns None / raises 404) or its terminal condition reports
  failure.
  """

  def __init__(self, getter):
    """Supply getter as the resource getter.

    Args:
      getter: Callable taking a resource ref and returning the live resource;
        expected to return None or raise HttpNotFoundError once deleted.
    """
    self._getter = getter
    # Most recently polled resource (None before first poll / after 404).
    self._ret = None

  def IsDone(self, obj):
    """Returns True if polling should stop for the given polled object."""
    if obj is None:
      # Resource no longer exists: deletion succeeded.
      return True
    # Evaluate the object that was passed in rather than the cached
    # self._ret, so the answer always corresponds to the argument. (Under
    # waiter.PollUntilDone the two are the same object.)
    terminal_condition = conditions.GetTerminalCondition(obj)
    return terminal_condition is None or conditions.IsConditionFailed(
        terminal_condition
    )

  def Poll(self, ref):
    """Fetches the resource, treating a 404 as successful deletion."""
    try:
      self._ret = self._getter(ref)
    except api_exceptions.HttpNotFoundError:  # Some getters let this through.
      self._ret = None
    return self._ret

  def GetMessage(self):
    """Returns the terminal condition's message for progress display, if any."""
    if not self._ret:
      return ''
    terminal_condition = conditions.GetTerminalCondition(self._ret)
    if terminal_condition and not conditions.IsConditionReady(
        terminal_condition
    ):
      return terminal_condition.message or ''
    return ''

  def GetResult(self, obj):
    """Returns the polled object unchanged."""
    return obj
def Delete(ref, getter, deleter, async_):
  """Deletes a resource for a surface, including a pretty progress tracker.

  Args:
    ref: Resource reference to delete.
    getter: Callable fetching the live resource, used for polling.
    deleter: Callable that issues the delete request.
    async_: If True, issue the delete and return without waiting.

  Raises:
    serverless_exceptions.DeletionFailedError: If the resource still exists
      once polling finishes.
  """
  if async_:
    deleter(ref)
    return
  poller = DeletionPoller(getter)
  with progress_tracker.ProgressTracker(
      message='Deleting [{}]'.format(ref.Name()),
      detail_message_callback=poller.GetMessage,
  ):
    deleter(ref)
    res = waiter.PollUntilDone(poller, ref)
  if not res:
    return
  # A truthy result means the resource survived polling: the delete failed.
  failure_detail = poller.GetMessage()
  if failure_detail:
    raise serverless_exceptions.DeletionFailedError(
        'Failed to delete [{}]: {}.'.format(ref.Name(), failure_detail)
    )
  raise serverless_exceptions.DeletionFailedError(
      'Failed to delete [{}].'.format(ref.Name())
  )

View File

@@ -0,0 +1,668 @@
# -*- coding: utf-8 -*- #
# Copyright 2024 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsers given command arguments for the Cloud Run V2 command surface into configuration changes."""
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.run import exceptions
from googlecloudsdk.command_lib.run import flags
from googlecloudsdk.command_lib.run.config_changes import GenerateVolumeName
from googlecloudsdk.command_lib.run.v2 import config_changes
from googlecloudsdk.command_lib.util.args import repeated
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
def SecretsFlags():
  """Creates flags for creating, updating, and deleting secrets."""
  group_help_text = (
      'Specify secrets to provide as environment variables. '
      "For example: '--set-secrets=ENV=mysecret:latest,"
      "OTHER_ENV=othersecret:1' "
      'will create an environment variable named ENV whose value is the '
      "latest version of secret 'mysecret' and an environment variable "
      "OTHER_ENV whose value is version of 1 of secret 'othersecret'."
  )
  return flags.MapFlagsNoFile(group_help=group_help_text, flag_name='secrets')
def AddSecretsFlags(parser):
  """Adds flags for creating, updating, and deleting secrets."""
  secrets_flags = SecretsFlags()
  secrets_flags.AddToParser(parser)
def AddCloudSQLFlags(parser):
  """Add flags for setting CloudSQL stuff.

  Args:
    parser: The argparse parser to add the repeated
      --[add-|remove-|set-|clear-]cloudsql-instances flags to.
  """
  repeated.AddPrimitiveArgs(
      parser,
      'WorkerPool',
      'cloudsql-instances',
      'Cloud SQL instances',
      auto_group_help=False,
      additional_help="""\
          These flags modify the Cloud SQL instances this WorkerPool connects to.
          You can specify a name of a Cloud SQL instance if it's in the same
          project and region as your Cloud Run worker pool; otherwise specify
          <project>:<region>:<instance> for the instance.""",
  )
def _PrependClientNameAndVersionChange(args, changes):
  """Set client name and version regardless of whether or not it was specified."""
  if 'client_name' not in args:
    return
  user_supplied = args.IsSpecified('client_name') or args.IsSpecified(
      'client_version'
  )
  if user_supplied:
    name, version = args.client_name, args.client_version
  else:
    # Fall back to gcloud's own identity when neither flag was given.
    name, version = 'gcloud', config.CLOUD_SDK_VERSION
  changes.insert(
      0,
      config_changes.SetClientNameAndVersionChange(
          client_name=name, client_version=version
      ),
  )
def _GetResourceLimitsChanges(args, non_ingress_type=False):
  """Returns the resource limits changes for the given args.

  Args:
    args: Parsed command-line namespace.
    non_ingress_type: Whether the resource is a non-ingress type (e.g. a
      worker pool) rather than an ingress-serving one.

  Returns:
    A list of ResourceLimitsChange (and possibly GpuTypeChange) objects.
  """
  changes = []
  # Each limit is only changed when the corresponding flag exists and is set.
  if 'memory' in args and args.memory:
    changes.append(
        config_changes.ResourceLimitsChange(
            memory=args.memory, non_ingress_type=non_ingress_type
        )
    )
  if 'cpu' in args and args.cpu:
    changes.append(
        config_changes.ResourceLimitsChange(
            cpu=args.cpu, non_ingress_type=non_ingress_type
        )
    )
  if 'gpu' in args and args.gpu:
    changes.append(
        config_changes.ResourceLimitsChange(
            gpu=args.gpu, non_ingress_type=non_ingress_type
        )
    )
    # Requesting 0 GPUs also clears any previously-configured GPU type.
    if args.gpu == '0':
      changes.append(config_changes.GpuTypeChange(gpu_type=''))
  return changes
def _GetLabelChanges(args):
  """Returns the label changes for the given args."""
  # --labels wins over --update-labels if both could be present.
  if flags.FlagIsExplicitlySet(args, 'labels'):
    additions = args.labels
  elif flags.FlagIsExplicitlySet(args, 'update_labels'):
    additions = args.update_labels
  else:
    additions = {}
  if flags.FlagIsExplicitlySet(args, 'remove_labels'):
    subtractions = args.remove_labels
  else:
    subtractions = []
  return config_changes.LabelChange(
      additions=additions,
      subtractions=subtractions,
      clear_labels=args.clear_labels if 'clear_labels' in args else False,
  )
def _HasNetworkChanges(args):
  """Returns true iff any network changes are specified."""
  return flags.HasChanges(
      args,
      [
          'vpc_egress',
          'network',
          'subnet',
          'network_tags',
          'clear_network',
          'clear_network_tags',
      ],
  )
def _GetNetworkChange(args):
  """Builds a VpcAccessChanges from the network-related flags."""
  if flags.FlagIsExplicitlySet(args, 'network_tags'):
    network_tags = args.network_tags
  else:
    network_tags = []
  clear_network = args.clear_network if 'clear_network' in args else False
  clear_network_tags = (
      args.clear_network_tags if 'clear_network_tags' in args else False
  )
  return config_changes.VpcAccessChanges(
      vpc_egress=args.vpc_egress,
      network=args.network,
      subnet=args.subnet,
      network_tags=network_tags,
      clear_network=clear_network,
      clear_network_tags=clear_network_tags,
  )
def _HasCmekKeyChanges(args):
  """Returns true iff any CMEK key changes are specified."""
  return flags.HasChanges(
      args,
      [
          'key',
          'post_key_revocation_action_type',
          'encryption_key_shutdown_hours',
          'clear_key',
          'clear_post_key_revocation_action_type',
          'clear_encryption_key_shutdown_hours',
      ],
  )
def _GetCmekKeyChange(args):
  """Builds a CmekKeyChanges from the CMEK-related flags."""

  def _ValueIfSet(flag_name):
    # The flag's value when explicitly set, else None.
    if flags.FlagIsExplicitlySet(args, flag_name):
      return getattr(args, flag_name)
    return None

  if flags.FlagIsExplicitlySet(args, 'encryption_key_shutdown_hours'):
    shutdown_hours = int(args.encryption_key_shutdown_hours)
  else:
    shutdown_hours = None
  return config_changes.CmekKeyChanges(
      key=_ValueIfSet('key'),
      post_key_revocation_action_type=_ValueIfSet(
          'post_key_revocation_action_type'
      ),
      encryption_key_shutdown_hours=shutdown_hours,
      clear_key=flags.FlagIsExplicitlySet(args, 'clear_key'),
      clear_post_key_revocation_action_type=flags.FlagIsExplicitlySet(
          args, 'clear_post_key_revocation_action_type'
      ),
      clear_encryption_key_shutdown_hours=flags.FlagIsExplicitlySet(
          args, 'clear_encryption_key_shutdown_hours'
      ),
  )
def _GetSecretsChanges(args, non_ingress_type=False, container_name=None):
  """Returns the secrets changes for the given args."""
  updates = flags.StripKeys(
      getattr(args, 'update_secrets', None) or args.set_secrets or {}
  )
  removes = flags.MapLStrip(getattr(args, 'remove_secrets', None) or [])
  # Keys beginning with '/' denote secret volume mounts, which worker pools
  # do not support yet. Updates are validated before removes.
  for key in list(updates) + list(removes):
    if key.startswith('/'):
      raise exceptions.ConfigurationError(
          'Secrets volume mount is not supported for Worker Pools yet.'
      )
  clear_others = bool(args.set_secrets or args.clear_secrets)
  if not (updates or removes or clear_others):
    return []
  return [
      config_changes.SecretsEnvVarChanges(
          updates=updates,
          removes=removes,
          clear_others=clear_others,
          container_name=container_name,
          non_ingress_type=non_ingress_type,
      )
  ]
def _GetCloudSQLChanges(args):
  """Returns the Cloud SQL changes for the given args.

  Args:
    args: Parsed command-line namespace.

  Returns:
    A list with at most one Cloud SQL config change object.
  """
  region = flags.GetRegion(args)
  # Prefer an explicit --project flag, else fall back to the configured one.
  project = getattr(
      args, 'project', None
  ) or properties.VALUES.core.project.Get(required=True)
  if flags.EnabledCloudSqlApiRequired(args):
    flags.CheckCloudSQLApiEnablement()
  # At most one of the cloud sql flags can be set.
  change = []
  if (
      flags.FlagIsExplicitlySet(args, 'add_cloudsql_instances')
      and args.add_cloudsql_instances
  ):
    change.append(
        config_changes.AddCloudSQLChanges(
            project=project,
            region=region,
            add_cloudsql_instances=args.add_cloudsql_instances,
        )
    )
  elif (
      flags.FlagIsExplicitlySet(args, 'remove_cloudsql_instances')
      and args.remove_cloudsql_instances
  ):
    change.append(
        config_changes.RemoveCloudSQLChanges(
            project=project,
            region=region,
            remove_cloudsql_instances=args.remove_cloudsql_instances,
        )
    )
  elif (
      flags.FlagIsExplicitlySet(args, 'clear_cloudsql_instances')
      and args.clear_cloudsql_instances
  ):
    change.append(config_changes.ClearCloudSQLChanges())
  elif (
      flags.FlagIsExplicitlySet(args, 'set_cloudsql_instances')
      and args.set_cloudsql_instances
  ):
    change.append(
        config_changes.SetCloudSQLChanges(
            project=project,
            region=region,
            set_cloudsql_instances=args.set_cloudsql_instances,
        )
    )
  return change
def _GetContainerConfigurationChanges(
    container_args, container_name=None, non_ingress_type=True
):
  """Returns per-container configuration changes.

  Args:
    container_args: Parsed flags scoped to one --container block.
    container_name: Name of the container these changes apply to, or None.
    non_ingress_type: Whether the resource is a non-ingress type (e.g. a
      worker pool).

  Returns:
    A list of config change objects for this container.
  """
  changes = []
  # FlagIsExplicitlySet can't be used here because args.image is also set from
  # code in deploy.py.
  if hasattr(container_args, 'image') and container_args.image is not None:
    changes.append(
        config_changes.ImageChange(
            container_args.image,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  if flags.HasEnvChanges(container_args):
    changes.append(
        _GetEnvChanges(
            container_args,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  if container_args.IsSpecified('cpu'):
    changes.append(
        config_changes.ResourceLimitsChange(
            cpu=container_args.cpu,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  if container_args.IsSpecified('memory'):
    changes.append(
        config_changes.ResourceLimitsChange(
            memory=container_args.memory,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  if container_args.IsSpecified('command'):
    # Allow passing an empty string here to reset the field
    changes.append(
        config_changes.ContainerCommandChange(
            container_args.command,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  if container_args.IsSpecified('args'):
    # Allow passing an empty string here to reset the field
    changes.append(
        config_changes.ContainerArgsChange(
            container_args.args,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  if flags.FlagIsExplicitlySet(
      container_args, 'remove_volume_mount'
  ) or flags.FlagIsExplicitlySet(container_args, 'clear_volume_mounts'):
    changes.append(
        config_changes.RemoveVolumeMountChange(
            removed_mounts=container_args.remove_volume_mount,
            clear_mounts=container_args.clear_volume_mounts,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  if flags.HasSecretsChanges(container_args):
    changes.extend(
        _GetSecretsChanges(
            container_args,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  if flags.FlagIsExplicitlySet(container_args, 'add_volume_mount'):
    changes.append(
        config_changes.AddVolumeMountChange(
            new_mounts=container_args.add_volume_mount,
            container_name=container_name,
            non_ingress_type=non_ingress_type,
        )
    )
  return changes
def _GetTemplateConfigurationChanges(
    args, release_track, non_ingress_type=False
):
  """Returns a list of changes shared by multiple resources, based on the flags set.

  Args:
    args: Parsed command-line namespace.
    release_track: The current release track (e.g., base.ReleaseTrack.ALPHA).
    non_ingress_type: Whether the resource is a non-ingress type (e.g. a
      worker pool).

  Returns:
    A list of config change objects for the resource template.
  """
  changes = []
  # Revision name suffix
  if flags.FlagIsExplicitlySet(args, 'revision_suffix'):
    changes.append(config_changes.RevisionNameChange(args.revision_suffix))
  if flags.FlagIsExplicitlySet(args, 'mesh'):
    changes.append(
        config_changes.MeshChange(
            project=properties.VALUES.core.project.Get(required=True),
            mesh_name=args.mesh,
        )
    )
  if _HasNetworkChanges(args):
    changes.append(_GetNetworkChange(args))
  if _HasCmekKeyChanges(args):
    changes.append(_GetCmekKeyChange(args))
  # Service account
  if 'service_account' in args and args.service_account:
    changes.append(
        config_changes.ServiceAccountChange(
            service_account=args.service_account
        )
    )
  # FlagIsExplicitlySet can't be used here because args.image is also set from
  # code in deploy.py once deploy from source is supported.
  if hasattr(args, 'image') and args.image is not None:
    changes.append(
        config_changes.ImageChange(
            args.image, non_ingress_type=non_ingress_type
        )
    )
  if 'command' in args and args.command is not None:
    changes.append(
        config_changes.ContainerCommandChange(
            args.command, non_ingress_type=non_ingress_type
        )
    )
  if 'args' in args and args.args is not None:
    changes.append(
        config_changes.ContainerArgsChange(
            args.args, non_ingress_type=non_ingress_type
        )
    )
  if flags.HasEnvChanges(args):
    changes.append(_GetEnvChanges(args, non_ingress_type=non_ingress_type))
  # Add cpu, memory and gpu limits changes.
  changes.extend(
      _GetResourceLimitsChanges(args, non_ingress_type=non_ingress_type)
  )
  if 'gpu_type' in args and args.gpu_type:
    changes.append(config_changes.GpuTypeChange(gpu_type=args.gpu_type))
  if flags.FlagIsExplicitlySet(args, 'gpu_zonal_redundancy'):
    changes.append(
        config_changes.GpuZonalRedundancyChange(
            gpu_zonal_redundancy=args.gpu_zonal_redundancy
        )
    )
  # Cloud SQL changes
  if flags.HasCloudSQLChanges(args):
    changes.extend(_GetCloudSQLChanges(args))
  # Volumes / Volume Mounts / Secrets changes
  if flags.FlagIsExplicitlySet(
      args, 'remove_volume_mount'
  ) or flags.FlagIsExplicitlySet(args, 'clear_volume_mounts'):
    changes.append(
        config_changes.RemoveVolumeMountChange(
            removed_mounts=args.remove_volume_mount,
            clear_mounts=args.clear_volume_mounts,
            non_ingress_type=non_ingress_type,
        )
    )
  if (
      flags.FlagIsExplicitlySet(args, 'remove_volume') and args.remove_volume
  ) or (
      flags.FlagIsExplicitlySet(args, 'clear_volumes') and args.clear_volumes
  ):
    changes.append(
        config_changes.RemoveVolumeChange(
            args.remove_volume, args.clear_volumes
        )
    )
  if flags.HasSecretsChanges(args):
    changes.extend(_GetSecretsChanges(args, non_ingress_type=non_ingress_type))
  if flags.FlagIsExplicitlySet(args, 'add_volume') and args.add_volume:
    # Volume names must be generated before calling AddVolumeChange
    _ValidateAndMaybeGenerateVolumeNames(args, release_track)
    changes.append(
        config_changes.AddVolumeChange(args.add_volume, release_track)
    )
    _MaybeAddVolumeMountChange(args, changes, release_track)
  if (
      flags.FlagIsExplicitlySet(args, 'add_volume_mount')
      and args.add_volume_mount
  ):
    changes.append(
        config_changes.AddVolumeMountChange(
            new_mounts=args.add_volume_mount,
            non_ingress_type=non_ingress_type,
        )
    )
  if flags.FlagIsExplicitlySet(args, 'remove_containers'):
    changes.append(
        config_changes.RemoveContainersChange(args.remove_containers)
    )
    # Add an empty ContainerDependenciesChange to update dependencies.
    changes.append(config_changes.ContainerDependenciesChange())
  # Per container changes
  if flags.FlagIsExplicitlySet(args, 'containers'):
    for container_name, container_args in args.containers.items():
      changes.extend(
          _GetContainerConfigurationChanges(
              container_args, container_name=container_name
          )
      )
  # Dependencies
  if flags.FlagIsExplicitlySet(args, 'containers'):
    # TODO: b/393482156 - Add support for per container config changes.
    dependency_changes = {
        container_name: container_args.depends_on
        for container_name, container_args in args.containers.items()
        if container_args.IsSpecified('depends_on')
    }
    if dependency_changes:
      changes.append(
          config_changes.ContainerDependenciesChange(dependency_changes)
      )
  return changes
def _GetEnvChanges(args, **kwargs):
  """Returns the env var literal changes for the given args."""
  raw_updates = (
      getattr(args, 'update_env_vars', None)
      or args.set_env_vars
      or args.env_vars_file
      or {}
  )
  raw_removes = getattr(args, 'remove_env_vars', None) or []
  # --set-env-vars / --env-vars-file / --clear-env-vars all replace the
  # full set, so everything not updated must be cleared.
  clear_others = bool(
      args.set_env_vars or args.env_vars_file or args.clear_env_vars
  )
  return config_changes.EnvVarLiteralChanges(
      updates=flags.StripKeys(raw_updates),
      removes=flags.MapLStrip(raw_removes),
      clear_others=clear_others,
      **kwargs,
  )
def _HasWorkerPoolScalingChanges(args):
  """Returns true iff any worker pool scaling changes are specified."""
  return flags.HasChanges(args, ['min', 'max', 'scaling'])
def _GetWorkerPoolScalingChange(args, release_track):
  """Return the changes for engine-level scaling for Worker Pools for the given args."""
  # Catch the case where user sets scaling mode to auto for BETA.
  if release_track == base.ReleaseTrack.BETA:
    if args.scaling and args.scaling.auto_scaling:
      raise exceptions.ConfigurationError(
          'Automatic scaling is not supported in BETA.'
      )

  def _FlagValueOrNone(flag_name):
    # The flag's value when present and not None, else None.
    if flag_name in args and getattr(args, flag_name) is not None:
      return getattr(args, flag_name)
    return None

  return config_changes.WorkerPoolScalingChange(
      min_instance_count=_FlagValueOrNone('min'),
      max_instance_count=_FlagValueOrNone('max'),
      scaling=_FlagValueOrNone('scaling'),
  )
def _HasBinaryAuthorizationChanges(args):
  """Returns true iff any binary authorization changes are specified."""
  return flags.HasChanges(
      args,
      ['binary_authorization', 'clear_binary_authorization', 'breakglass'],
  )
def _GetBinaryAuthorizationChanges(args):
  """Returns the binary authorization changes for the given args."""
  changes = []

  def _IsSet(flag_name):
    return flags.FlagIsExplicitlySet(args, flag_name)

  if _IsSet('binary_authorization'):
    changes.append(
        config_changes.BinaryAuthorizationChange(
            policy=args.binary_authorization
        )
    )
  if _IsSet('clear_binary_authorization'):
    changes.append(
        config_changes.BinaryAuthorizationChange(
            clear_binary_authorization=True
        )
    )
  if _IsSet('breakglass'):
    changes.append(
        config_changes.BinaryAuthorizationChange(
            breakglass_justification=args.breakglass
        )
    )
  return changes
def _GetInstanceSplitChanges(args):
  """Returns the instance split changes for the given args."""
  # --to-latest and --to-revisions are mutually exclusive.
  if args.to_latest:
    return config_changes.InstanceSplitChange(to_latest=True)
  if args.to_revisions:
    return config_changes.InstanceSplitChange(to_revisions=args.to_revisions)
  return None
def _ValidateAndMaybeGenerateVolumeNames(args, release_track):
  """Validates use of the volumes shortcut and generates volume names when needed.

  Specifically, it checks that the 'mount-path' parameter is not being used
  with the --containers flag and that the volume type is an allowed type. If
  validation succeeds and the volume also needs a name, one is generated.

  Args:
    args: The argparse namespace containing the parsed command line arguments.
    release_track: The current release track (e.g., base.ReleaseTrack.ALPHA).
  """
  uses_containers_flag = flags.FlagIsExplicitlySet(args, 'containers')
  # The shortcut is only available outside GA.
  if release_track != base.ReleaseTrack.GA:
    for volume in args.add_volume:
      # If mount-path is specified, the user is attempting to use the volumes
      # shortcut.
      if 'mount-path' in volume:
        # The volumes shortcut is not compatible with the --containers flag.
        if uses_containers_flag:
          raise exceptions.ConfigurationError(
              'When using the --containers flag, "mount-path" cannot be'
              ' specified under the --add-volume flag. Instead, specify'
              ' "mount-path" using the --add-volume-mount flag after the'
              ' --container flag of the container the volume should be'
              ' mounted to.'
          )
        # Generate a name if the user has not specified one.
        if 'name' not in volume:
          volume['name'] = GenerateVolumeName(volume['type'])
def _MaybeAddVolumeMountChange(args, changes, release_track):
  """Adds a VolumeMountChange to the list of changes if applicable.

  This function checks if new volume mounts should be added based on the
  `--add-volume` flag in ALPHA release track. If a volume in `args.add_volume`
  has a 'mount-path', a corresponding AddVolumeMountChange
  is appended to the `changes` list.

  Args:
    args: The argparse namespace containing the parsed command line arguments.
    changes: A list of configuration changes to append to.
    release_track: The current release track (e.g., base.ReleaseTrack.ALPHA).
  """
  if release_track == base.ReleaseTrack.GA:
    return
  new_volume_mounts = [
      {'volume': volume['name'], 'mount-path': volume['mount-path']}
      for volume in args.add_volume
      if 'mount-path' in volume and 'name' in volume
  ]
  if new_volume_mounts:
    changes.append(
        config_changes.AddVolumeMountChange(
            new_mounts=new_volume_mounts,
        )
    )
def GetWorkerPoolConfigurationChanges(args, release_track):
  """Returns a list of changes to the worker pool config, based on the flags set.

  Args:
    args: Parsed command-line namespace.
    release_track: The current release track (e.g., base.ReleaseTrack.ALPHA).

  Returns:
    A list of config change objects, with the client name/version change
    prepended.
  """
  changes = []
  # Description
  if flags.FlagIsExplicitlySet(args, 'description'):
    changes.append(config_changes.DescriptionChange(args.description))
  # Labels
  if flags.HasLabelChanges(args):
    changes.append(_GetLabelChanges(args))
  # Binary authorization
  if _HasBinaryAuthorizationChanges(args):
    changes.extend(_GetBinaryAuthorizationChanges(args))
  # Template changes
  changes.extend(
      _GetTemplateConfigurationChanges(
          args, release_track, non_ingress_type=True
      )
  )
  # Worker pool scaling
  if _HasWorkerPoolScalingChanges(args):
    changes.append(_GetWorkerPoolScalingChange(args, release_track))
  if flags.HasInstanceSplitChanges(args):
    changes.append(_GetInstanceSplitChanges(args))
  if 'no_promote' in args and args.no_promote:
    changes.append(config_changes.NoPromoteChange())
  # Always record which client produced the change, even without flags.
  _PrependClientNameAndVersionChange(args, changes)
  return changes

View File

@@ -0,0 +1,483 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operations on WorkerPool V2 API instance splits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
from typing import Dict, List, Union
from googlecloudsdk.command_lib.run import resource_name_conversion
from googlecloudsdk.core import exceptions
from googlecloudsdk.generated_clients.gapic_clients.run_v2.types import instance_split
from googlecloudsdk.generated_clients.gapic_clients.run_v2.types import worker_pool as worker_pool_objects
import six
# Human readable indicator for a missing split percentage.
_MISSING_PERCENT = '-'

# Designated key value for latest.
# Revisions' names may not be uppercase, so this is distinct.
LATEST_REVISION_KEY = 'LATEST'
class InvalidInstanceSplitSpecificationError(exceptions.Error):
  """Error to indicate an invalid instance split specification."""
  # The docstring already makes the class body non-empty; a `pass` here
  # would be redundant.
def _GetCurrentSplitsMap(
    splits: List[instance_split.InstanceSplit],
) -> Dict[str, int]:
  """Returns the current instance split percentages into a map."""
  latest_type = (
      instance_split.InstanceSplitAllocationType.INSTANCE_SPLIT_ALLOCATION_TYPE_LATEST
  )
  revision_type = (
      instance_split.InstanceSplitAllocationType.INSTANCE_SPLIT_ALLOCATION_TYPE_REVISION
  )
  current_splits = {}
  for split in splits:
    # LATEST allocations are keyed by the sentinel; revision allocations by
    # their revision name.
    if split.type_ == latest_type:
      current_splits[LATEST_REVISION_KEY] = split.percent
    elif split.type_ == revision_type:
      current_splits[split.revision] = split.percent
  return current_splits
def _GetUnspecifiedSplits(
new_percentages: Dict[str, int],
current_splits: Dict[str, int],
) -> Dict[str, int]:
"""Returns the instance splits that are in the current splits but not specified in new_percentages."""
result = {}
for target, percent in current_splits.items():
if target not in new_percentages:
result[target] = percent
return result
def _ValidateNewSplits(
new_splits: Dict[str, int], unspecified_targets: Dict[str, int]
):
"""Validates the new instance split percentages."""
if sum(new_splits.values()) > 100:
raise InvalidInstanceSplitSpecificationError(
'The sum of instance split specifications exceeds 100.'
)
for target, percent in new_splits.items():
if percent < 0 or percent > 100:
raise InvalidInstanceSplitSpecificationError(
'Instance split specification for {} is {}%, not between 0 and 100'
.format(target, percent)
)
if not unspecified_targets and sum(new_splits.values()) < 100:
raise InvalidInstanceSplitSpecificationError(
'Every target with instance split is updated but 100% of total split'
' has not been specified.'
)
def _ValidateCurrentSplits(current_splits: Dict[str, int]):
"""Validates the current instance split percentages."""
total_percent = 0
for target, percent in current_splits.items():
if percent < 0 or percent > 100:
raise ValueError(
'Current instance split allocation for {} is {}%, not between 0 and'
' 100'.format(target, percent)
)
total_percent += percent
if total_percent != 100:
raise ValueError(
'Current instance split allocation of {} is not 100 percent'.format(
total_percent
)
)
def _ModifyUnspecifiedSplits(
new_splits: Dict[str, int], unspecified_splits: Dict[str, int]
):
"""Modifies the unspecified splits by assigning the remaining split percent proportionally to the original splits."""
percent_to_assign = 100 - sum(new_splits.values())
if percent_to_assign == 0:
return {}
original_splits_percent = sum(unspecified_splits.values())
reduction_ratio = float(percent_to_assign) / original_splits_percent
#
# We assign instance split to unassigned targets (were seving and
# have not explicit new percentage assignment). The assignment
# is proportional to the original split for the each target.
#
# reduction_ratio = percent_to_assign / original_splits_percent
#
# percent_to_assign
# == percent_to_assign_from * reduction_ratio
# == sum(unspecified_splits[k] * reduction_ratio)
# == sum(unspecified_splits[k] * reduction_ratio)
modified_splits = {}
for target, percent in unspecified_splits.items():
modified_splits[target] = percent * reduction_ratio
return modified_splits
def _SortKeyFromInstanceSplit(split: instance_split.InstanceSplit):
  """Sorted key function to order InstanceSplit objects by key.

  Args:
    split: A InstanceSplit.

  Returns:
    A value that sorts by revisionName with LATEST_REVISION_KEY
    last.
  """
  if (
      # Use the proto-plus attribute `type_` (as elsewhere in this module,
      # e.g. _GetCurrentSplitsMap); the field is named `type`, which
      # collides with the Python builtin, so proto-plus exposes it with a
      # trailing underscore and `split.type` would raise AttributeError.
      split.type_
      == instance_split.InstanceSplitAllocationType.INSTANCE_SPLIT_ALLOCATION_TYPE_LATEST
  ):
    key = LATEST_REVISION_KEY
  else:
    key = split.revision
  return _SortKeyFromKey(key)
def _SortKeyFromKey(key):
  """Sorted key function to order InstanceSplit keys.

  InstanceSplits keys are one of:
  o revisionName
  o LATEST_REVISION_KEY

  LATEST_REVISION_KEY is an uppercase sentinel ('LATEST') while revision
  names may not contain uppercase letters, so plain lexicographic ordering
  of the raw keys would not reliably place LATEST last; the leading rank in
  the returned tuple forces it after every revision name.

  Args:
    key: Key for a InstanceSplits dictionary.

  Returns:
    A value that sorts by revisionName with LATEST_REVISION_KEY
    last.
  """
  if key == LATEST_REVISION_KEY:
    # Rank 2 sorts after every (1, revisionName) tuple.
    result = (2, key)
  else:
    result = (1, key)
  return result
def _NewRoundingCorrectionPrecedence(key_and_percent):
  """Returns object that sorts in the order we correct split rounding errors.

  The caller specifies explicit split percentages for some revisions and
  this module scales instance split for remaining revisions that are already
  serving instance split up or down to assure that 100% of instance split is
  assigned.
  This scaling can result in non integer percentages that Cloud Run
  does not support. We correct by:
    - Trimming the decimal part of float_percent, int(float_percent)
    - Adding an extra 1 percent instance split to enough revisions that have
      had their instance split reduced to get us to 100%

  The returned value sorts in the order we correct revisions:
    1) Revisions with a bigger loss due to truncation are corrected before
       revisions with a smaller loss. Since 0 <= loss < 1 we sort by the
       value: 1 - loss.
    2) In the case of ties revisions with less instance split are corrected
       before revisions with more instance split.
    3) In case of a tie revisions with a smaller key are corrected before
       revisions with a larger key.

  Args:
    key_and_percent: tuple with (key, float_percent)

  Returns:
    A value that sorts with respect to values returned for
    other revisions in the order we correct for rounding
    errors.
  """
  key, float_percent = key_and_percent
  return [
      # Larger fractional loss sorts first (smaller 1 - loss).
      1 - (float_percent - int(float_percent)),
      float_percent,
      _SortKeyFromKey(key),
  ]
def _IntPercentages(float_percentages: Dict[str, float]) -> Dict[str, int]:
  """Returns rounded integer percentages.

  Truncates each float percentage, then hands the percent lost to truncation
  back, 1% at a time, in _NewRoundingCorrectionPrecedence order, so the
  integer values still sum to the (rounded) original total.

  Args:
    float_percentages: Map of split key to a possibly fractional percent.

  Returns:
    Map of split key to an integer percent.
  """
  rounded_percentages = {
      k: int(float_percentages[k]) for k in float_percentages
  }
  # Total percent lost by truncating every value.
  loss = int(round(sum(float_percentages.values()))) - sum(
      rounded_percentages.values()
  )
  correction_precedence = sorted(
      float_percentages.items(), key=_NewRoundingCorrectionPrecedence
  )
  # Give 1% back to the `loss` highest-precedence keys.
  for key, _ in correction_precedence[:loss]:
    rounded_percentages[key] += 1
  return rounded_percentages
def GetUpdatedSplits(
    current_splits: List[instance_split.InstanceSplit],
    new_splits: Dict[str, Union[int, float]],
) -> List[instance_split.InstanceSplit]:
  """Returns the updated instance splits.

  Args:
    current_splits: The worker pool's current list of InstanceSplit objects.
    new_splits: Map from revision key (revision name or LATEST_REVISION_KEY)
      to the explicitly requested split percent. Not mutated.

  Returns:
    A sorted list of InstanceSplit objects covering 100% of the split, with
    zero-percent entries dropped.
  """
  # Current split status.
  current_splits_map = _GetCurrentSplitsMap(current_splits)
  _ValidateCurrentSplits(current_splits_map)
  # Current split that is not specified in new splits.
  unspecified_splits = _GetUnspecifiedSplits(new_splits, current_splits_map)
  _ValidateNewSplits(new_splits, unspecified_splits)
  # Modify the unspecified splits by proportionally assigning the remaining
  # split percent to the original splits.
  unspecified_splits_modified = _ModifyUnspecifiedSplits(
      new_splits, unspecified_splits
  )
  # Merge into a copy so the caller's `new_splits` dict is not mutated.
  combined_splits = dict(new_splits)
  combined_splits.update(unspecified_splits_modified)
  # Do the detailed correction of rounding up/down the float percentages.
  int_percent_splits = _IntPercentages(combined_splits)
  return sorted(
      [
          instance_split.InstanceSplit(
              type_=instance_split.InstanceSplitAllocationType.INSTANCE_SPLIT_ALLOCATION_TYPE_LATEST
              if key == LATEST_REVISION_KEY
              else instance_split.InstanceSplitAllocationType.INSTANCE_SPLIT_ALLOCATION_TYPE_REVISION,
              revision=key if key != LATEST_REVISION_KEY else None,
              percent=percent,
          )
          for key, percent in int_percent_splits.items()
          if percent > 0
      ],
      key=_SortKeyFromInstanceSplit,
  )
def ZeroLatestAssignment(
    current_splits: List[instance_split.InstanceSplit],
    latest_ready_revision_name: str,
) -> List[instance_split.InstanceSplit]:
  """Returns the instance splits with LATEST assignment moved to the latest ready revision."""
  splits_by_key = _GetCurrentSplitsMap(current_splits)
  # Fold any LATEST assignment into the named latest-ready revision.
  latest_percent = splits_by_key.pop(LATEST_REVISION_KEY, None)
  if latest_percent is not None:
    previous = splits_by_key.get(latest_ready_revision_name, 0)
    splits_by_key[latest_ready_revision_name] = previous + latest_percent
  rebuilt = []
  for key, percent in splits_by_key.items():
    if percent <= 0:
      continue
    if key == LATEST_REVISION_KEY:
      allocation_type = (
          instance_split.InstanceSplitAllocationType.INSTANCE_SPLIT_ALLOCATION_TYPE_LATEST
      )
      revision = None
    else:
      allocation_type = (
          instance_split.InstanceSplitAllocationType.INSTANCE_SPLIT_ALLOCATION_TYPE_REVISION
      )
      revision = key
    rebuilt.append(
        instance_split.InstanceSplit(
            type_=allocation_type,
            revision=revision,
            percent=percent,
        )
    )
  return sorted(rebuilt, key=_SortKeyFromInstanceSplit)
def _FormatPercentage(percent):
  """Renders a percent value as 'N%', passing the missing marker through."""
  return _MISSING_PERCENT if percent == _MISSING_PERCENT else f'{percent}%'
def _SumPercent(splits: List[instance_split.InstanceSplit]) -> int:
"""Returns the sum of the instance split percentages."""
return sum([split.percent for split in splits])
class InstanceSplitPair(object):
  """Holder for InstanceSplit status information.

  The representation of the status of instance split for a worker pool
  includes:
    o User requested assignments (instance_splits)
    o Actual assignments (instance_split_statuses)
  """

  def __init__(
      self,
      target_splits: List[instance_split.InstanceSplit],
      current_splits: List[instance_split.InstanceSplitStatus],
      revision_name: str,
      latest: bool,
  ):
    """Creates a new InstanceSplitPair.

    Args:
      target_splits: Target instance splits that all reference the same
        revision, either by name or the latest ready.
      current_splits: Current instance splits that all reference the same
        revision, either by name or the latest ready.
      revision_name: Name of the revision referenced by the instance splits.
      latest: Whether these instance splits reference the latest ready
        revision.

    Returns:
      A new InstanceSplitPair instance.
    """
    self._target_splits = target_splits
    self._current_splits = current_splits
    self._revision_name = revision_name
    self._latest = latest

  @property
  def key(self):
    """The key for the instance split."""
    if self.latest_revision:
      return LATEST_REVISION_KEY
    return self.revision_name

  @property
  def latest_revision(self):
    """True if the instance split references the latest revision."""
    return self._latest

  @property
  def revision_name(self):
    """Name of the revision referenced by the instance split."""
    return self._revision_name

  @property
  def target_percent(self):
    """Target percent of instance split allocated to the revision."""
    if not self._target_splits:
      return _MISSING_PERCENT
    return six.text_type(_SumPercent(self._target_splits))

  @property
  def status_percent(self):
    """Current percent of instance split allocated to the revision."""
    if not self._current_splits:
      return _MISSING_PERCENT
    return six.text_type(_SumPercent(self._current_splits))

  @property
  def display_percent(self):
    """Human readable revision percent."""
    target = self.target_percent
    status = self.status_percent
    if status == target:
      return _FormatPercentage(status)
    return (
        f'{_FormatPercentage(target):4} (currently'
        f' {_FormatPercentage(status)})'
    )

  @property
  def display_revision_id(self):
    """Human readable revision identifier."""
    if not self.latest_revision:
      return self.revision_name
    return f'{LATEST_REVISION_KEY} (currently {self.revision_name})'
def _SortKeyFromInstanceSplitPair(pair: InstanceSplitPair):
  """Sorted key function to order InstanceSplitPair objects by key.

  Args:
    pair: A InstanceSplitPair.

  Returns:
    A value that sorts by revisionName with LATEST_REVISION_KEY last.
  """
  return _SortKeyFromKey(
      LATEST_REVISION_KEY if pair.latest_revision else pair.revision_name
  )
def _GetSplitsMap(
    splits: List[
        Union[instance_split.InstanceSplit, instance_split.InstanceSplitStatus]
    ],
    latest_ready_revision_name: str,
) -> Dict[
    str, Union[instance_split.InstanceSplit, instance_split.InstanceSplitStatus]
]:
  """Groups the instance split list into a map keyed by revision.

  The map uses LATEST_REVISION_KEY as the key for the latest ready revision.

  Args:
    splits: A list of InstanceSplit or InstanceSplitStatus objects.
    latest_ready_revision_name: The name of the latest ready revision.

  Returns:
    A map of revision names to InstanceSplit or InstanceSplitStatus objects.
  """
  splits_map = collections.defaultdict(list)
  latest_type = (
      instance_split.InstanceSplitAllocationType.INSTANCE_SPLIT_ALLOCATION_TYPE_LATEST
  )
  for split in splits:
    # Both explicit LATEST assignments and assignments naming the latest
    # ready revision are grouped under LATEST_REVISION_KEY.
    targets_latest = (
        split.type_ == latest_type
        or split.revision == latest_ready_revision_name
    )
    bucket = LATEST_REVISION_KEY if targets_latest else split.revision
    splits_map[bucket].append(split)
  return splits_map
def GetInstanceSplitPairs(
    worker_pool: worker_pool_objects.WorkerPool,
) -> List[InstanceSplitPair]:
  """Returns the instance split pairs for the worker pool."""
  try:
    latest_ready_revision_name = (
        resource_name_conversion.GetNameFromFullChildName(
            worker_pool.latest_ready_revision
        )
    )
  except AttributeError:
    # No latest ready revision available on this worker pool.
    latest_ready_revision_name = ''
  target_splits = _GetSplitsMap(
      worker_pool.instance_splits, latest_ready_revision_name
  )
  current_splits = _GetSplitsMap(
      worker_pool.instance_split_statuses, latest_ready_revision_name
  )
  # Pair up target and current splits for every revision seen in either map.
  pairs = [
      InstanceSplitPair(
          target_splits.get(key),
          current_splits.get(key),
          latest_ready_revision_name if key == LATEST_REVISION_KEY else key,
          key == LATEST_REVISION_KEY,
      )
      for key in set(target_splits).union(current_splits)
  ]
  return sorted(pairs, key=_SortKeyFromInstanceSplitPair)

View File

@@ -0,0 +1,278 @@
# -*- coding: utf-8 -*- #
# Copyright 2024 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows you to write surfaces in terms of logical Cloud Run V2 WorkerPools API operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from google.api_core import exceptions
from googlecloudsdk.api_lib.run import metric_names
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.run import stages
from googlecloudsdk.command_lib.run.sourcedeploys import deployer
from googlecloudsdk.command_lib.run.v2 import config_changes as config_changes_mod
from googlecloudsdk.core import metrics
from googlecloudsdk.core.console import progress_tracker
from googlecloudsdk.generated_clients.gapic_clients.run_v2.types import worker_pool as worker_pool_objects
class WorkerPoolsOperations(object):
"""Client used to communicate with the actual Cloud Run V2 WorkerPools API."""
def __init__(self, client):
  """Wraps the generated Cloud Run v2 GAPIC client.

  Args:
    client: The generated Cloud Run v2 API client that all operations are
      delegated to.
  """
  self._client = client
def GetWorkerPool(self, worker_pool_ref):
  """Get the WorkerPool.

  Args:
    worker_pool_ref: Resource, WorkerPool to get.

  Returns:
    A WorkerPool object, or None if the worker pool does not exist.
  """
  request = self._client.types.GetWorkerPoolRequest(
      name=worker_pool_ref.RelativeName()
  )
  try:
    with metrics.RecordDuration(metric_names.GET_WORKER_POOL):
      return self._client.worker.get_worker_pool(request)
  except exceptions.NotFound:
    return None
def DeleteWorkerPool(self, worker_pool_ref):
  """Delete the WorkerPool.

  Args:
    worker_pool_ref: Resource, WorkerPool to delete.

  Returns:
    A LRO for the delete operation, or None if the worker pool does not
    exist.
  """
  request = self._client.types.DeleteWorkerPoolRequest(
      name=worker_pool_ref.RelativeName()
  )
  try:
    with metrics.RecordDuration(metric_names.DELETE_WORKER_POOL):
      return self._client.worker.delete_worker_pool(request)
  except exceptions.NotFound:
    return None
def ListWorkerPools(self, region_ref):
  """List the WorkerPools in a region.

  Args:
    region_ref: Resource, Region to get the list of WorkerPools from.

  Returns:
    A list of WorkerPool objects.
  """
  request = self._client.types.ListWorkerPoolsRequest(
      parent=region_ref.RelativeName()
  )
  # TODO(b/366501494): Support `next_page_token`
  with metrics.RecordDuration(metric_names.LIST_WORKER_POOLS):
    return self._client.worker.list_worker_pools(request)
def ReleaseWorkerPool(
    self,
    worker_pool_ref,
    config_changes,
    release_track=base.ReleaseTrack.ALPHA,
    tracker=None,
    prefetch=False,
    build_image=None,
    build_pack=None,
    build_source=None,
    build_from_source_container_name=None,
    repo_to_create=None,
    already_activated_services=False,
    force_new_revision=False,
):
  """Stubbed method for worker pool deploy surface.

  Update the WorkerPool if it exists, otherwise create it (Upsert).

  Args:
    worker_pool_ref: WorkerPool reference containing project, location,
      workerpool IDs.
    config_changes: list, objects that implement Adjust().
    release_track: ReleaseTrack, the release track of a command calling this.
    tracker: StagedProgressTracker, used to track progress.
    prefetch: the worker pool, pre-fetched for ReleaseWorkerPool. `False`
      indicates the caller did not perform a prefetch; `None` indicates a
      nonexistent worker pool.
    build_image: The build image reference to the build.
    build_pack: The build pack reference to the build.
    build_source: The build source reference to the build.
    build_from_source_container_name: The name of the container to be deployed
      from source.
    repo_to_create: Optional
      googlecloudsdk.command_lib.artifacts.docker_util.DockerRepo defining a
      repository to be created.
    already_activated_services: bool. If true, skip activation prompts for
      services
    force_new_revision: bool to force a new revision to be created.

  Returns:
    A WorkerPool object.
  """
  if tracker is None:
    # No tracker provided: use a no-op tracker so progress calls below are
    # always safe to make.
    tracker = progress_tracker.NoOpStagedProgressTracker(
        stages.WorkerPoolStages(
            include_build=build_source is not None,
            include_create_repo=repo_to_create is not None,
        ),
        interruptable=True,
        aborted_message='aborted',
    )
  # Deploying from a source.
  if build_source is not None:
    (
        image_digest,
        _,  # build_base_image
        _,  # build_id
        _,  # uploaded_source
        _,  # build_name
    ) = deployer.CreateImage(
        tracker,
        build_image,
        build_source,
        build_pack,
        repo_to_create,
        release_track,
        already_activated_services,
        worker_pool_ref.locationsId,  # region
        worker_pool_ref,
    )
    if image_digest is None:
      # The build produced no image digest; abort the release.
      return
    # Pin the deployed container to the exact image digest from the build.
    config_changes.append(
        config_changes_mod.AddDigestToImageChange(
            container_name=build_from_source_container_name,
            non_ingress_type=True,
            image_digest=image_digest,
        )
    )
  if prefetch is None:
    worker_pool = None
  elif build_source:
    # if we're building from source, we want to force a new fetch
    # because building takes a while which leaves a long time for
    # potential write conflicts.
    worker_pool = self.GetWorkerPool(worker_pool_ref)
  else:
    worker_pool = prefetch or self.GetWorkerPool(worker_pool_ref)
  metric_name = metric_names.UPDATE_WORKER_POOL
  if worker_pool is None:
    # WorkerPool does not exist, create it.
    worker_pool = worker_pool_objects.WorkerPool(
        name=worker_pool_ref.RelativeName(),
    )
    metric_name = metric_names.CREATE_WORKER_POOL
  # Apply config changes to the WorkerPool.
  worker_pool = config_changes_mod.WithChanges(worker_pool, config_changes)
  worker_pools = self._client.worker
  # allow_missing=True makes the update request an upsert.
  upsert_request = self._client.types.UpdateWorkerPoolRequest(
      worker_pool=worker_pool,
      allow_missing=True,
      force_new_revision=force_new_revision,
  )
  with metrics.RecordDuration(metric_name):
    return worker_pools.update_worker_pool(upsert_request)
def UpdateInstanceSplit(
    self,
    worker_pool_ref,
    config_changes,
):
  """Update the instance split of a WorkerPool.

  Args:
    worker_pool_ref: Resource, WorkerPool whose instance split to update.
    config_changes: list, objects that implement Adjust().

  Returns:
    The result of the update_worker_pool call.

  Raises:
    exceptions.NotFound: If the worker pool does not exist.
  """
  worker_pool = self.GetWorkerPool(worker_pool_ref)
  if worker_pool is None:
    raise exceptions.NotFound(
        'WorkerPool [{}] could not be found.'.format(
            worker_pool_ref.workerPoolsId
        )
    )
  updated_pool = config_changes_mod.WithChanges(worker_pool, config_changes)
  request = self._client.types.UpdateWorkerPoolRequest(
      worker_pool=updated_pool,
  )
  with metrics.RecordDuration(metric_names.UPDATE_WORKER_POOL):
    return self._client.worker.update_worker_pool(request)
def GetRevision(self, worker_pool_revision_ref):
  """Get the Revision.

  Args:
    worker_pool_revision_ref: Resource, Revision to get.

  Returns:
    A Revision object, or None if the revision does not exist.
  """
  request = self._client.types.GetRevisionRequest(
      name=worker_pool_revision_ref.RelativeName()
  )
  try:
    with metrics.RecordDuration(metric_names.GET_WORKER_POOL_REVISION):
      return self._client.revisions.get_revision(request)
  except exceptions.NotFound:
    return None
def DeleteRevision(self, worker_pool_revision_ref):
  """Delete the Revision.

  Args:
    worker_pool_revision_ref: Resource, Revision to delete.

  Returns:
    A LRO for the delete operation, or None if the revision does not exist.
  """
  request = self._client.types.DeleteRevisionRequest(
      name=worker_pool_revision_ref.RelativeName()
  )
  try:
    with metrics.RecordDuration(metric_names.DELETE_WORKER_POOL_REVISION):
      return self._client.revisions.delete_revision(request)
  except exceptions.NotFound:
    return None
def ListRevisions(self, worker_pool_ref):
  """List the Revisions in a region under the given WorkerPool.

  Args:
    worker_pool_ref: Resource, WorkerPool to get the list of Revisions from.

  Returns:
    A list of Revision objects.
  """
  request = self._client.types.ListRevisionsRequest(
      parent=worker_pool_ref.RelativeName()
  )
  # TODO(b/366501494): Support `next_page_token`
  with metrics.RecordDuration(metric_names.LIST_WORKER_POOL_REVISIONS):
    return self._client.revisions.list_revisions(request)