feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Local gcloud cache for Cloud Workflows."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import resources
from googlecloudsdk.core.util import files
# Resource collection used to parse cached execution names into references.
EXECUTION_COLLECTION = (
    'workflowexecutions.projects.locations.workflows.executions')
# Name of the cache file, stored in the gcloud global config directory.
WORKFLOW_CACHE_FILE = '.workflows-cached-execution-id.json'
def get_cached_execution_id():
  """Reads and parses the cached execution resource reference.

  Returns:
    execution: the execution resource name

  Raises:
    exceptions.Error: if no cached execution exists or it cannot be parsed.
  """
  cache_path = _get_cache_path()
  if not os.path.isfile(cache_path):
    raise exceptions.Error(_NO_CACHE_MESSAGE)
  try:
    raw_name = files.ReadFileContents(cache_path)
    execution_ref = resources.REGISTRY.Parse(
        raw_name, collection=EXECUTION_COLLECTION)
    log.status.Print('Using cached execution name: {}'.format(
        execution_ref.RelativeName()))
    return execution_ref
  except Exception:
    # Any unreadable or unparsable cache content is treated the same as a
    # missing cache entry.
    raise exceptions.Error(_NO_CACHE_MESSAGE)
def cache_execution_id(execution_name):
  """Writes the execution resource name to the local cache file.

  Args:
    execution_name: the execution resource name
  """
  try:
    files.WriteFileContents(_get_cache_path(), execution_name)
  except files.Error:
    # Caching is best-effort: a failed write is deliberately silent, since
    # surfacing it would only confuse users who never asked for a cache.
    pass
def delete_execution_cache():
  """Removes the execution cache file if present.

  Returns:
    bool: True if the file was found and deleted, false otherwise.
  """
  try:
    os.remove(_get_cache_path())
  except OSError:
    # Covers both a missing file and a file we cannot delete.
    return False
  else:
    return True
def _get_cache_path():
  """Returns the cache file path inside the gcloud global config directory."""
  return os.path.join(config.Paths().global_config_dir, WORKFLOW_CACHE_FILE)
# Shared error text for every "no usable cached execution" failure path
# (missing cache file as well as unreadable/unparsable cache content).
_NO_CACHE_MESSAGE = (
    '[NOT FOUND] There are no cached executions available. '
    'Use gcloud list and describe commands or '
    'https://console.developers.google.com/ to check resource state.')

View File

@@ -0,0 +1,145 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The canonical error codes for Google APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import enum
class Code(enum.Enum):
  """Canonical error codes used in Google API responses.

  Member order and numeric values follow the canonical google.rpc.Code
  definition; note that UNAUTHENTICATED (16) is intentionally declared
  between PERMISSION_DENIED (7) and RESOURCE_EXHAUSTED (8).
  """

  # Success; not an error.
  OK = 0
  # The operation was cancelled, typically by the caller.
  CANCELLED = 1
  # Unknown error, e.g. a Status value from another address space whose
  # error-space is not known here, or an API returning too little error
  # information to be classified.
  UNKNOWN = 2
  # The client supplied an argument that is invalid regardless of system
  # state (unlike FAILED_PRECONDITION), e.g. a malformed file name.
  INVALID_ARGUMENT = 3
  # The deadline expired before the operation completed. For state-changing
  # operations the work may nonetheless have succeeded — e.g. a successful
  # server response delayed past the deadline.
  DEADLINE_EXCEEDED = 4
  # A requested entity (e.g. file or directory) was not found.
  # Server note: also used when a request is denied for an entire class of
  # users (gradual rollout, undocumented allowlist); denials for some users
  # within a class must use PERMISSION_DENIED instead.
  NOT_FOUND = 5
  # The entity the client tried to create (e.g. file or directory) already
  # exists.
  ALREADY_EXISTS = 6
  # The caller lacks permission for the operation. Not for resource
  # exhaustion (use RESOURCE_EXHAUSTED) nor unidentified callers (use
  # UNAUTHENTICATED); does not imply the request is otherwise valid or that
  # the entity exists.
  PERMISSION_DENIED = 7
  # The request lacks valid authentication credentials for the operation.
  UNAUTHENTICATED = 16
  # Some resource is exhausted — perhaps a per-user quota, or the entire
  # file system is out of space.
  RESOURCE_EXHAUSTED = 8
  # The system is not in a state required for the operation (e.g. rmdir on
  # a non-empty directory, rmdir on a non-directory).
  # Litmus test for implementers:
  # (a) UNAVAILABLE — the client can retry just the failing call;
  # (b) ABORTED — the client should retry at a higher level (e.g. restart
  #     a read-modify-write sequence after a test-and-set failure);
  # (c) FAILED_PRECONDITION — retrying is pointless until the system state
  #     is explicitly fixed (e.g. the directory is emptied first).
  FAILED_PRECONDITION = 9
  # Aborted, typically due to a concurrency issue such as a sequencer check
  # failure or transaction abort. See the litmus test above.
  ABORTED = 10
  # Operation attempted past the valid range, e.g. seeking or reading past
  # end-of-file. Unlike INVALID_ARGUMENT this may be fixed by a change in
  # system state (a 32-bit FS gives INVALID_ARGUMENT for an offset outside
  # [0,2^32-1], but OUT_OF_RANGE for one past the current file size).
  # Prefer OUT_OF_RANGE over FAILED_PRECONDITION where both apply, so
  # callers iterating through a space can detect when they are done.
  OUT_OF_RANGE = 11
  # The operation is not implemented or not supported/enabled in this
  # service.
  UNIMPLEMENTED = 12
  # Internal error: invariants expected by the underlying system were
  # broken. Reserved for serious errors.
  INTERNAL = 13
  # The service is currently unavailable — most likely transient, so retry
  # with a backoff; retrying non-idempotent operations is not always safe.
  # See the litmus test above.
  UNAVAILABLE = 14
  # Unrecoverable data loss or corruption.
  DATA_LOSS = 15

View File

@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Cloud Workflows poller."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.api_lib.workflows import codes
class OperationsClient(object):
  """Client for the Operations service in the Cloud Workflows API."""

  def __init__(self, client, messages):
    self.client = client
    self.messages = messages
    self._service = self.client.projects_locations_operations

  def Get(self, operation_ref):
    """Retrieves an Operation.

    Args:
      operation_ref: Resource reference to the Operation to get.

    Returns:
      Operation: The operation if it exists, None otherwise.
    """
    request = self.messages.WorkflowsProjectsLocationsOperationsGetRequest(
        name=operation_ref.RelativeName())
    try:
      response = self._service.Get(request)
    except exceptions.HttpNotFoundError:
      # A missing operation is reported as None rather than an error.
      return None
    return response
class WorkflowsOperationPoller(waiter.OperationPoller):
  """Poller that tracks Workflows long-running Operations."""

  def __init__(self, workflows, operations, workflow_ref):
    """Creates the poller.

    Args:
      workflows: the Workflows API client used to get the resource after
        operation is complete.
      operations: the Operations API client used to poll for the operation.
      workflow_ref: a reference to a workflow that is the subject of this
        operation.
    """
    self.workflows = workflows
    self.operations = operations
    self.workflow_ref = workflow_ref

  def IsDone(self, operation):
    """Overrides."""
    if not operation.done:
      return False
    # A finished operation that carries an error aborts the wait loop.
    if operation.error:
      raise waiter.OperationError(_ExtractErrorMessage(operation.error))
    return True

  def Poll(self, operation_ref):
    """Overrides."""
    return self.operations.Get(operation_ref)

  def GetResult(self, operation):
    """Overrides."""
    # The final result is the workflow itself, not the operation payload.
    return self.workflows.Get(self.workflow_ref)
class ExecutionsPoller(waiter.OperationPoller):
  """Poller that tracks Workflows Executions until they leave a running state."""

  def __init__(self, workflow_execution):
    """Creates the execution poller.

    Args:
      workflow_execution: the Workflows Executions API client used to get the
        execution resource.
    """
    self.workflow_execution = workflow_execution

  def IsDone(self, execution):
    """Overrides."""
    # Anything other than ACTIVE/QUEUED (e.g. SUCCEEDED, FAILED, CANCELLED)
    # counts as finished.
    return execution.state.name not in ('ACTIVE', 'QUEUED')

  def Poll(self, execution_ref):
    """Overrides."""
    return self.workflow_execution.Get(execution_ref)

  def GetResult(self, execution):
    """Overrides."""
    return execution
def _ExtractErrorMessage(error):
"""Extracts the error message for better format."""
if hasattr(error, 'code'):
code_name = codes.Code(error.code).name
else:
code_name = 'UNKNOWN'
if hasattr(error, 'message'):
error_message = error.message
else:
# Returns the entire error object if no message field is available.
error_message = error
return '[{code}] {message}'.format(code=code_name, message=error_message)

View File

@@ -0,0 +1,48 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation for Cloud Workflows API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.calliope import exceptions
def WorkflowNameConforms(name):
  """Confirm workflow name is of acceptable length and uses valid characters."""
  if len(name) < 1 or len(name) > 64:
    raise exceptions.InvalidArgumentException(
        'workflow', 'ID must be between 1-64 characters long')
  # Several small checks instead of one combined regular expression so each
  # failure yields a meaningful, specific error message.
  if re.search('^[a-zA-Z].*', name) is None:
    raise exceptions.InvalidArgumentException(
        'workflow', 'ID must start with a letter')
  if re.search('.*[a-zA-Z0-9]$', name) is None:
    raise exceptions.InvalidArgumentException(
        'workflow', 'ID must end with a letter or number')
  if re.search('^[-_a-zA-Z0-9]*$', name) is None:
    raise exceptions.InvalidArgumentException(
        'workflow',
        'ID must only contain letters, numbers, underscores and hyphens')
def ValidateWorkflow(workflow, first_deployment=False):
  """Checks that a workflow being deployed for the first time has source."""
  if not first_deployment:
    return
  if not workflow.sourceContents:
    raise exceptions.RequiredArgumentException('--source',
                                               'required on first deployment')

View File

@@ -0,0 +1,368 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Cloud Workflows API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as api_exceptions
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import exceptions
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.api_lib.workflows import cache
from googlecloudsdk.api_lib.workflows import poller_utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.util.apis import arg_utils
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.command_lib.workflows import flags
from googlecloudsdk.core import resources
class UnsupportedReleaseTrackError(Exception):
  """Indicates that no Workflows API version is mapped to a release track."""
def ReleaseTrackToApiVersion(release_track):
  """Maps a gcloud release track to the Workflows API version it should use.

  Args:
    release_track: a base.ReleaseTrack value (ALPHA, BETA or GA).

  Returns:
    The API version string for the track.

  Raises:
    UnsupportedReleaseTrackError: if the track has no mapped API version.
  """
  versions = {
      base.ReleaseTrack.ALPHA: 'v1alpha1',
      base.ReleaseTrack.BETA: 'v1beta',
      base.ReleaseTrack.GA: 'v1',
  }
  if release_track not in versions:
    raise UnsupportedReleaseTrackError(release_track)
  return versions[release_track]
class WorkflowsClient(object):
  """Client for Workflows service in the Cloud Workflows API."""

  def __init__(self, api_version):
    self.client = apis.GetClientInstance('workflows', api_version)
    self.messages = self.client.MESSAGES_MODULE
    self._service = self.client.projects_locations_workflows

  def Get(self, workflow_ref):
    """Gets a Workflow.

    Args:
      workflow_ref: Resource reference to the Workflow to get.

    Returns:
      Workflow: The workflow if it exists, None otherwise.
    """
    get_req = self.messages.WorkflowsProjectsLocationsWorkflowsGetRequest(
        name=workflow_ref.RelativeName(),
    )
    try:
      return self._service.Get(get_req)
    except api_exceptions.HttpNotFoundError:
      return None

  def Create(self, workflow_ref, workflow):
    """Creates a Workflow.

    Args:
      workflow_ref: Resource reference to the Workflow to create.
      workflow: Workflow resource message to create.

    Returns:
      Long-running operation for create.
    """
    create_req = self.messages.WorkflowsProjectsLocationsWorkflowsCreateRequest(
        parent=workflow_ref.Parent().RelativeName(),
        workflow=workflow,
        workflowId=workflow_ref.Name(),
    )
    return self._service.Create(create_req)

  def Patch(self, workflow_ref, workflow, updated_fields):
    """Updates a Workflow.

    If updated fields are specified it uses patch semantics.

    Args:
      workflow_ref: Resource reference to the Workflow to update.
      workflow: Workflow resource message to update.
      updated_fields: List of the updated fields used in a patch request.

    Returns:
      Long-running operation for update.
    """
    # Sorted so the update mask is deterministic regardless of flag order.
    update_mask = ','.join(sorted(updated_fields))
    patch_req = self.messages.WorkflowsProjectsLocationsWorkflowsPatchRequest(
        name=workflow_ref.RelativeName(),
        updateMask=update_mask,
        workflow=workflow,
    )
    return self._service.Patch(patch_req)

  def BuildWorkflowFromArgs(self, args, old_workflow, release_track):
    """Creates a workflow from command-line arguments.

    Args:
      args: The arguments of the gcloud command.
      old_workflow: The workflow from previous revision, or None on the first
        deployment.
      release_track: The gcloud release track used in the command.

    Returns:
      workflow: The constructed Workflow message from the passed in arguments.
      updated_fields: The workflow fields that are updated.

    Raises:
      arg_parsers.ArgumentTypeError: If the env vars file exceeds the limit.
      arg_parsers.argparse.ArgumentError: If a removed env var key does not
        exist, or tags are specified for an existing workflow.
    """
    workflow = self.messages.Workflow()
    updated_fields = []
    flags.SetSource(args, workflow, updated_fields)
    flags.SetDescription(args, workflow, updated_fields)
    flags.SetServiceAccount(args, workflow, updated_fields)
    labels = labels_util.ParseCreateArgs(
        args, self.messages.Workflow.LabelsValue,
    )
    flags.SetLabels(labels, workflow, updated_fields)
    if release_track == base.ReleaseTrack.GA:
      flags.SetKmsKey(args, workflow, updated_fields)
    env_vars = None
    if args.IsSpecified('set_env_vars'):
      env_vars = labels_util.ParseCreateArgs(
          args, self.messages.Workflow.UserEnvVarsValue, 'set_env_vars',
      )
    if args.IsSpecified('env_vars_file'):
      if len(args.env_vars_file) > flags.USER_ENV_VARS_LIMIT:
        # Fix: the message used to hard-code a stale limit and call
        # str.format without a placeholder, silently dropping the keyword;
        # interpolate the actual configured limit instead.
        raise arg_parsers.ArgumentTypeError(
            'too many environment variables, limit is {max_len}.'.format(
                max_len=flags.USER_ENV_VARS_LIMIT,
            )
        )
      env_vars = labels_util.ParseCreateArgs(
          args, self.messages.Workflow.UserEnvVarsValue, 'env_vars_file',
      )
    # clear_env_vars clears all previously set env vars.
    if args.IsSpecified('clear_env_vars'):
      env_vars = flags.CLEAR_ENVIRONMENT
    # SetUserEnvVars acts as a no-op when it takes env_vars=None.
    flags.SetUserEnvVars(env_vars, workflow, updated_fields)
    env_vars = None
    if args.IsSpecified('update_env_vars'):
      env_vars = {
          p.key: p.value
          for p in old_workflow.userEnvVars.additionalProperties
      }
      env_vars.update(args.update_env_vars)
    # remove_env_vars removes user selected env vars from previous revision.
    if args.IsSpecified('remove_env_vars'):
      env_vars = {
          p.key: p.value
          for p in old_workflow.userEnvVars.additionalProperties
      }
      for v in args.remove_env_vars:
        if v in env_vars:
          del env_vars[v]
        else:
          raise arg_parsers.argparse.ArgumentError(
              argument=None,
              message='key {k} is not found.'.format(k=v),
          )
    # UpdateUserEnvVars acts as a no-op when it takes env_vars=None.
    flags.UpdateUserEnvVars(env_vars, workflow, updated_fields)
    if args.IsSpecified('call_log_level'):
      call_log_level_enum = self.messages.Workflow.CallLogLevelValueValuesEnum
      log_level = arg_utils.ChoiceToEnum(
          args.call_log_level,
          call_log_level_enum,
          valid_choices=[
              'none',
              'log-all-calls',
              'log-errors-only',
              'log-none',
          ],
      )
      flags.SetWorkflowLoggingArg(log_level, workflow, updated_fields)
    if args.IsSpecified('execution_history_level'):
      execution_history_level_enum = (
          self.messages.Workflow.ExecutionHistoryLevelValueValuesEnum
      )
      history_level = arg_utils.ChoiceToEnum(
          args.execution_history_level,
          execution_history_level_enum,
          valid_choices=[
              'none',
              'execution-history-basic',
              'execution-history-detailed',
          ],
      )
      flags.SetWorkflowExecutionHistoryLevelArg(
          history_level, workflow, updated_fields)
    # Tags may only be set when the workflow is first created.
    if args.IsSpecified('tags') and old_workflow is not None:
      raise arg_parsers.argparse.ArgumentError(
          argument=None,
          message='tags cannot be updated for an existing workflow',
      )
    flags.SetWorkflowsTagsArg(
        args, workflow, self.messages.Workflow.TagsValue
    )
    return workflow, updated_fields

  def WaitForOperation(self, operation, workflow_ref):
    """Waits until the given long-running operation is complete.

    Args:
      operation: The operation message returned by a mutating call.
      workflow_ref: Resource reference to the workflow the operation acts on.

    Returns:
      The resulting Workflow resource once the operation completes.
    """
    operation_ref = resources.REGISTRY.Parse(
        operation.name,
        collection='workflows.projects.locations.operations',
    )
    operations = poller_utils.OperationsClient(self.client, self.messages)
    poller = poller_utils.WorkflowsOperationPoller(
        workflows=self,
        operations=operations,
        workflow_ref=workflow_ref,
    )
    progress_string = 'Waiting for operation [{}] to complete'.format(
        operation_ref.Name(),
    )
    return waiter.WaitFor(poller, operation_ref, progress_string)
class WorkflowExecutionClient(object):
  """Client for Workflows Execution service in the Cloud Workflows Execution API."""

  def __init__(self, api_version):
    # Client and message module for the requested API version (e.g. 'v1').
    self.client = apis.GetClientInstance('workflowexecutions', api_version)
    self.messages = self.client.MESSAGES_MODULE
    self._service = self.client.projects_locations_workflows_executions

  def Create(
      self,
      workflow_ref,
      data,
      call_log_level=None,
      execution_history_level=None,
      labels=None,
      overflow_buffering_disabled=False,
  ):
    """Creates a Workflow execution.

    Args:
      workflow_ref: Resource reference to the Workflow to execute.
      data: Arguments to use for executing the workflow.
      call_log_level: Level of call logging to apply during execution.
      execution_history_level: Level of execution history to apply for the
        execution.
      labels: Labels associated to the execution.
      overflow_buffering_disabled: If set to true, the execution will not be
        backlogged when the concurrency quota is exhausted. Backlogged
        executions start when the concurrency quota becomes available.

    Returns:
      Execution: The workflow execution.

    Raises:
      exceptions.HttpException: If the create call fails.
    """
    execution = self.messages.Execution()
    execution.argument = data
    if overflow_buffering_disabled:
      execution.disableConcurrencyQuotaOverflowBuffering = True
    if labels is not None:
      execution.labels = labels
    # 'none' (or an unset value) leaves the field unset so the server-side
    # default applies.
    if call_log_level is not None and call_log_level != 'none':
      call_log_level_enum = self.messages.Execution.CallLogLevelValueValuesEnum
      execution.callLogLevel = arg_utils.ChoiceToEnum(
          call_log_level,
          call_log_level_enum,
          valid_choices=[
              'none',
              'log-all-calls',
              'log-errors-only',
              'log-none',
          ],
      )
    if (
        execution_history_level is not None
        and execution_history_level != 'none'
    ):
      execution_history_level_enum = (
          self.messages.Execution.ExecutionHistoryLevelValueValuesEnum
      )
      execution.executionHistoryLevel = arg_utils.ChoiceToEnum(
          execution_history_level,
          execution_history_level_enum,
          valid_choices=[
              'none',
              'execution-history-basic',
              'execution-history-detailed',
          ],
      )
    create_req = self.messages.WorkflowexecutionsProjectsLocationsWorkflowsExecutionsCreateRequest(
        parent=workflow_ref.RelativeName(), execution=execution,
    )
    try:
      return self._service.Create(create_req)
    except api_exceptions.HttpError as e:
      # Re-raise with only the server-provided message visible to the user.
      raise exceptions.HttpException(e, error_format='{message}')

  def Get(self, execution_ref):
    """Gets a workflow execution.

    Args:
      execution_ref: Resource reference to the Workflow execution to get; if
        None, the locally cached execution reference is used instead.

    Returns:
      Workflow: The workflow execution if it exists, an error exception
        otherwise.

    Raises:
      exceptions.HttpException: If the get call fails.
    """
    if execution_ref is None:
      execution_ref = cache.get_cached_execution_id()
    get_req = self.messages.WorkflowexecutionsProjectsLocationsWorkflowsExecutionsGetRequest(
        name=execution_ref.RelativeName(),
    )
    try:
      return self._service.Get(get_req)
    except api_exceptions.HttpError as e:
      raise exceptions.HttpException(e, error_format='{message}')

  def WaitForExecution(self, execution_ref):
    """Waits until the given execution is complete or the maximum wait time is reached.

    Args:
      execution_ref: Resource reference to the execution to wait on; if None,
        the locally cached execution reference is used instead.

    Returns:
      The finished execution resource.

    Raises:
      waiter.TimeoutError: If the execution has not finished after 24 hours.
      waiter.AbortWaitError: If the wait is aborted.
    """
    if execution_ref is None:
      execution_ref = cache.get_cached_execution_id()
    poller = poller_utils.ExecutionsPoller(workflow_execution=self)
    progress_string = 'Waiting for execution [{}] to complete'.format(
        execution_ref.Name(),
    )
    try:
      return waiter.WaitFor(
          poller,
          execution_ref,
          progress_string,
          pre_start_sleep_ms=100,
          max_wait_ms=86400000,  # max wait time is 24 hours.
          exponential_sleep_multiplier=1.25,
          wait_ceiling_ms=60000)  # truncate sleep exponential at 1 minute.
    except waiter.TimeoutError:
      raise waiter.TimeoutError(
          'Execution {0} has not finished in 24 hours. {1}'.format(
              execution_ref, _TIMEOUT_MESSAGE,
          )
      )
    except waiter.AbortWaitError:
      raise waiter.AbortWaitError(
          'Aborting wait for execution {0}.'.format(execution_ref),
      )
# Same message as the LRO time out error, modified with the word execution.
# Appended to the TimeoutError raised by WaitForExecution above.
_TIMEOUT_MESSAGE = (
    'The execution may still be underway remotely and may still succeed; '
    'use gcloud list and describe commands or '
    'https://console.developers.google.com/ to check resource state.'
)