feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Library for gcloud cloudtasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.tasks import cmek_config
from googlecloudsdk.api_lib.tasks import locations
from googlecloudsdk.api_lib.tasks import queues
from googlecloudsdk.api_lib.tasks import tasks
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
# The public name of the Cloud Tasks API surface.
API_NAME = 'cloudtasks'
# API versions wired to each gcloud release track; mapping is done by
# ApiVersionFromReleaseTrack in this module.
ALPHA_API_VERSION = 'v2beta2'
BETA_API_VERSION = 'v2beta3'
GA_API_VERSION = 'v2'
class UnsupportedReleaseTrackError(Exception):
  """Raised when requesting an API version or adapter for an unsupported release track."""
def ApiVersionFromReleaseTrack(release_track):
  """Maps a calliope release track to the Cloud Tasks API version string.

  Args:
    release_track: base.ReleaseTrack, the release track of the command.

  Returns:
    The API version string ('v2beta2', 'v2beta3' or 'v2').

  Raises:
    UnsupportedReleaseTrackError: If the release track has no mapped version.
  """
  version_by_track = {
      base.ReleaseTrack.ALPHA: ALPHA_API_VERSION,
      base.ReleaseTrack.BETA: BETA_API_VERSION,
      base.ReleaseTrack.GA: GA_API_VERSION,
  }
  version = version_by_track.get(release_track)
  if version is None:
    raise UnsupportedReleaseTrackError(release_track)
  return version
def GetApiAdapter(release_track):
  """Returns the Cloud Tasks API adapter matching the given release track.

  Args:
    release_track: base.ReleaseTrack, the release track of the command.

  Returns:
    An instance of AlphaApiAdapter, BetaApiAdapter or GaApiAdapter.

  Raises:
    UnsupportedReleaseTrackError: If the release track has no mapped adapter.
  """
  adapter_factories = {
      base.ReleaseTrack.ALPHA: AlphaApiAdapter,
      base.ReleaseTrack.BETA: BetaApiAdapter,
      base.ReleaseTrack.GA: GaApiAdapter,
  }
  factory = adapter_factories.get(release_track)
  if factory is None:
    raise UnsupportedReleaseTrackError(release_track)
  # Instantiate lazily so only the requested adapter builds an API client.
  return factory()
class BaseApiAdapter(object):
  """Shared client and service wiring common to all cloudtasks adapters."""

  def __init__(self, api_version):
    """Builds the API client and the per-service helper objects.

    Args:
      api_version: The cloudtasks API version string to build a client for.
    """
    self.client = apis.GetClientInstance(API_NAME, api_version)
    messages = self.client.MESSAGES_MODULE
    self.messages = messages
    self.locations = locations.Locations(
        messages, self.client.projects_locations)
    self.cmek_config = cmek_config.CmekConfig(
        messages, self.client.projects_locations)
class AlphaApiAdapter(BaseApiAdapter):
  """Adapter exposing the v2beta2 (alpha) queue and task services."""

  def __init__(self):
    super(AlphaApiAdapter, self).__init__(ALPHA_API_VERSION)
    messages = self.client.MESSAGES_MODULE
    self.queues = queues.AlphaQueues(
        messages, self.client.projects_locations_queues)
    self.tasks = tasks.AlphaTasks(
        messages, self.client.projects_locations_queues_tasks)
class BetaApiAdapter(BaseApiAdapter):
  """Adapter exposing the v2beta3 (beta) queue and task services."""

  def __init__(self):
    super(BetaApiAdapter, self).__init__(BETA_API_VERSION)
    messages = self.client.MESSAGES_MODULE
    self.queues = queues.BetaQueues(
        messages, self.client.projects_locations_queues)
    self.tasks = tasks.Tasks(
        messages, self.client.projects_locations_queues_tasks)
class GaApiAdapter(BaseApiAdapter):
  """Adapter exposing the v2 (GA) queue and task services."""

  def __init__(self):
    super(GaApiAdapter, self).__init__(GA_API_VERSION)
    messages = self.client.MESSAGES_MODULE
    self.queues = queues.Queues(
        messages, self.client.projects_locations_queues)
    self.tasks = tasks.Tasks(
        messages, self.client.projects_locations_queues_tasks)

View File

@@ -0,0 +1,998 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for `gcloud app deploy <queue|cron>.yaml` deployments.
Functions defined here are used to migrate away from soon to be deprecated
admin-console-hr superapp. Instead we will be using Cloud Tasks APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.api_lib.tasks import task_queues_convertors as convertors
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import parser_extensions
from googlecloudsdk.command_lib.tasks import app
from googlecloudsdk.command_lib.tasks import constants
from googlecloudsdk.command_lib.tasks import flags
from googlecloudsdk.command_lib.tasks import parsers
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import properties
import six
from six.moves import urllib
# Some values still need to be further processed and can not be used as is for
# CT APIs. One example is 'task retry limit' where the value stored in the
# backend is always x + 1 where x is the value in the YAML file.
# Keys are dotted YAML attribute paths; values are callables applied to the
# raw YAML value before it is forwarded to the Cloud Tasks API.
CONVERSION_FUNCTIONS = {
    # Clamp to the API-accepted range [1, 5000].
    'max_concurrent_requests': lambda x: min(5000, max(1, int(x))),
    'rate': convertors.ConvertRate,
    'retry_parameters.min_backoff_seconds': convertors.ConvertBackoffSeconds,
    'retry_parameters.max_backoff_seconds': convertors.ConvertBackoffSeconds,
    'retry_parameters.task_age_limit': convertors.ConvertTaskAgeLimit,
    # Backend stores YAML value + 1 (see note above).
    'retry_parameters.task_retry_limit': lambda x: int(x) + 1,
    'target': convertors.ConvertTarget
}
def IsClose(a, b, rel_tol=1e-09, abs_tol=0.0):
  """Checks if two numerical values are the same or almost the same.

  This function only exists to provide backwards compatibility for Python 2,
  which does not support the 'math.isclose(...)' function. The output of this
  function mimics exactly the behavior of math.isclose.

  Args:
    a: One of the values to be tested for relative closeness.
    b: One of the values to be tested for relative closeness.
    rel_tol: Relative tolerance allowed. Default value is set so that the two
      values must be equivalent to 9 decimal digits.
    abs_tol: The minimum absolute tolerance difference. Useful for
      comparisons near zero.

  Returns:
    True if `a` and `b` are equal or within the given relative/absolute
    tolerances of each other, False otherwise.
  """
  # Same formula as math.isclose: the tolerance is relative to the larger
  # magnitude operand, with abs_tol as the floor for near-zero comparisons.
  return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def _DoesAttributeNeedToBeUpdated(cur_queue_state, attribute, new_value):
  """Decides whether the given attribute must be patched to the new value.

  Note: We only look the attribute up in `queue.rateLimits` and
  `queue.retryConfig` since those are the only attributes verified here. The
  one attribute not verified here is the app-engine routing override, which is
  handled separately.

  Args:
    cur_queue_state: apis.cloudtasks.<ver>.cloudtasks_<ver>_messages.Queue,
      The Queue instance fetched from the backend.
    attribute: Snake case representation of the CT API attribute name. One
      example is 'max_burst_size'.
    new_value: The value we are trying to set this attribute to.

  Returns:
    True if the attribute needs to be updated to the new value, False otherwise.
  """
  proto_name = convertors.ConvertStringToCamelCase(attribute)
  # Look for the stored value first under rateLimits, then under retryConfig.
  rate_limits = getattr(cur_queue_state, 'rateLimits', None)
  if hasattr(rate_limits, proto_name):
    old_value = getattr(rate_limits, proto_name)
  elif hasattr(cur_queue_state.retryConfig, proto_name):
    old_value = getattr(cur_queue_state.retryConfig, proto_name)
  else:
    # No stored value to compare against; force an update.
    return True
  if old_value == new_value:
    return False
  if old_value is None:
    # An unset backend value matching the app-deploy default needs no update
    # (max_concurrent_dispatches is excluded from this shortcut).
    push_defaults = constants.PUSH_QUEUES_APP_DEPLOY_DEFAULT_VALUES
    if (
        attribute != 'max_concurrent_dispatches' and
        attribute in push_defaults and
        new_value == push_defaults[attribute]
    ):
      return False
  if attribute == 'max_dispatches_per_second' and not new_value:
    # No need to set rate if rate specified is 0. Instead, we will pause the
    # queue if it is not already paused or blocked.
    return False
  if old_value is None or new_value is None:
    return True
  old_comparable = convertors.CheckAndConvertStringToFloatIfApplicable(
      old_value)
  new_comparable = convertors.CheckAndConvertStringToFloatIfApplicable(
      new_value)
  # Floats are compared with tolerance to avoid spurious updates.
  if isinstance(old_comparable, float) and isinstance(new_comparable, float):
    return not IsClose(old_comparable, new_comparable)
  return old_comparable != new_comparable
def _SetSpecifiedArg(cloud_task_args, key, value):
"""Sets the specified key, value pair in the namespace provided.
The main reason to have this function is to centralize all the protected
access to _specified_args
Args:
cloud_task_args: argparse.Namespace, A placeholder args namespace built to
pass on forwards to Cloud Tasks API.
key: The attribute key we are trying to set.
value: The attribute value we are trying to set.
"""
# pylint: disable=protected-access
cloud_task_args._specified_args[key] = value
def _DeleteSpecifiedArg(cloud_task_args, key):
"""Deletes the specified key in the namespace provided.
Args:
cloud_task_args: argparse.Namespace, A placeholder args namespace built to
pass on forwards to Cloud Tasks API.
key: The attribute key we are trying to set.
"""
# pylint: disable=protected-access
del cloud_task_args._specified_args[key]
def _PostProcessMinMaxBackoff(
    cloud_task_args, used_default_value_for_min_backoff, cur_queue_state):
  """Checks min and max backoff values and updates the other value if needed.

  When uploading via queue.yaml files, if only one of the backoff values is
  specified, the other value will automatically be updated to the default
  value. If the default value does not satisfy the condition
  min_backoff <= max_backoff, then it is set equal to the other backoff value.

  Args:
    cloud_task_args: argparse.Namespace, A placeholder args namespace built to
      pass on forwards to Cloud Tasks API.
    used_default_value_for_min_backoff: A boolean value telling us if we used
      a default value for min_backoff or if it was specified explicitly in the
      YAML file.
    cur_queue_state: apis.cloudtasks.<ver>.cloudtasks_<ver>_messages.Queue,
      The Queue instance fetched from the backend if it exists, None otherwise.
  """
  # Pull queues have no backoff settings to reconcile.
  if cloud_task_args.type == 'pull':
    return
  min_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
      cloud_task_args.min_backoff)
  max_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
      cloud_task_args.max_backoff)
  if min_backoff > max_backoff:
    if used_default_value_for_min_backoff:
      # min_backoff was only a default; lower it to match the explicit
      # max_backoff so that min_backoff <= max_backoff holds.
      min_backoff = max_backoff
      cloud_task_args.min_backoff = cloud_task_args.max_backoff
      _SetSpecifiedArg(
          cloud_task_args, 'min_backoff', cloud_task_args.max_backoff)
    else:
      # min_backoff was explicit; raise max_backoff to restore the invariant.
      max_backoff = min_backoff
      cloud_task_args.max_backoff = cloud_task_args.min_backoff
      _SetSpecifiedArg(
          cloud_task_args, 'max_backoff', cloud_task_args.min_backoff)
  # Check if the backend values match with what we are trying to set; if they
  # do, drop both args entirely so no spurious update is sent for them.
  if cur_queue_state and cur_queue_state.retryConfig:
    old_min_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
        cur_queue_state.retryConfig.minBackoff)
    old_max_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
        cur_queue_state.retryConfig.maxBackoff)
    if max_backoff == old_max_backoff and min_backoff == old_min_backoff:
      _DeleteSpecifiedArg(cloud_task_args, 'min_backoff')
      cloud_task_args.min_backoff = None
      _DeleteSpecifiedArg(cloud_task_args, 'max_backoff')
      cloud_task_args.max_backoff = None
def _PostProcessRoutingOverride(cloud_task_args, cur_queue_state):
  """Checks if service and target values need to be updated for host URL.

  An app engine host URL may optionally have version_dot_service appended to
  the URL if specified via 'routing_override'. Here we inspect the existing
  URL and drop the override arg when the backend host already encodes the
  requested service/version, so no redundant update is sent.

  Args:
    cloud_task_args: argparse.Namespace, A placeholder args namespace built to
      pass on forwards to Cloud Tasks API.
    cur_queue_state: apis.cloudtasks.<ver>.cloudtasks_<ver>_messages.Queue,
      The Queue instance fetched from the backend if it exists, None otherwise.
  """
  try:
    existing_host = (
        cur_queue_state.appEngineHttpQueue.appEngineRoutingOverride.host)
  except AttributeError:
    # The queue does not exist or had no override set before.
    return
  if not cloud_task_args.IsSpecified('routing_override'):
    return
  override = cloud_task_args.routing_override
  # Build the '<version>.<service>.' prefix (either part may be absent).
  target_parts = [
      override[part] for part in ('version', 'service') if part in override
  ]
  expected_prefix = '{}.{}.'.format(
      '.'.join(target_parts), properties.VALUES.core.project.Get())
  if existing_host.startswith(expected_prefix):
    # Backend host already matches; drop the override from the args.
    # pylint: disable=protected-access
    del cloud_task_args._specified_args['routing_override']
    cloud_task_args.routing_override = None
def _PopulateCloudTasksArgs(queue, cur_queue_state, ct_expected_args):
  """Builds placeholder command line args to pass on to Cloud Tasks API.

  Most of Cloud Tasks functions use args passed in during CLI invocation. To
  reuse those functions without extensive rework on their implementation, we
  recreate the args in the format that those functions expect.

  Args:
    queue: third_party.appengine.api.queueinfo.QueueEntry, The QueueEntry
      instance generated from the parsed YAML file.
    cur_queue_state: apis.cloudtasks.<ver>.cloudtasks_<ver>_messages.Queue,
      The Queue instance fetched from the backend if it exists, None otherwise.
    ct_expected_args: A list of expected args that we need to initialize before
      forwarding to Cloud Tasks APIs.

  Returns:
    argparse.Namespace, A placeholder args namespace built to pass on forwards
    to Cloud Tasks API.
  """
  cloud_task_args = parser_extensions.Namespace()
  # Initialize every expected flag to None so later attribute reads are safe.
  for task_flag in ct_expected_args:
    setattr(cloud_task_args, task_flag, None)
  used_default_value_for_min_backoff = False
  for old_arg, new_arg in constants.APP_TO_TASKS_ATTRIBUTES_MAPPING.items():
    # e.g. old_arg, new_arg = 'retry_parameters.max_doublings', 'max_doublings'
    # Walk the dotted YAML attribute path; value becomes None if any segment
    # is missing from the parsed queue entry.
    old_arg_list = old_arg.split('.')
    value = queue
    for old_arg_sub in old_arg_list:
      if not hasattr(value, old_arg_sub):
        value = None
        break
      value = getattr(value, old_arg_sub)
    # Max attempts is a special case because 0 is actually stored as 1.
    if value or (value is not None and new_arg in ('max_attempts',)):
      # Some values need to be converted to a format that CT APIs accept
      if old_arg in CONVERSION_FUNCTIONS:
        value = CONVERSION_FUNCTIONS[old_arg](value)
      if (
          not cur_queue_state or
          new_arg in ('name', 'type', 'min_backoff', 'max_backoff') or
          _DoesAttributeNeedToBeUpdated(cur_queue_state, new_arg, value)
      ):
        # Attributes specified here are forwarded to CT APIs. We always forward
        # 'name' and 'type' attributes and we forward any other attributes if
        # they have changed from before or if this is a brand new queue.
        _SetSpecifiedArg(cloud_task_args, new_arg, value)
    else:
      # Set default values for some of the attributes if no value is present
      if queue.mode == constants.PULL_QUEUE:
        default_values = constants.PULL_QUEUES_APP_DEPLOY_DEFAULT_VALUES
      else:
        default_values = constants.PUSH_QUEUES_APP_DEPLOY_DEFAULT_VALUES
      if new_arg in default_values:
        if new_arg == 'min_backoff':
          # Remember this so _PostProcessMinMaxBackoff knows the min_backoff
          # value was defaulted, not user-specified.
          used_default_value_for_min_backoff = True
        value = default_values[new_arg]
        if (
            not cur_queue_state or
            new_arg in ('min_backoff', 'max_backoff') or
            _DoesAttributeNeedToBeUpdated(cur_queue_state, new_arg, value)
        ):
          _SetSpecifiedArg(cloud_task_args, new_arg, value)
    # Always store the (possibly converted/defaulted) value on the namespace.
    setattr(cloud_task_args, new_arg, value)
  _PostProcessMinMaxBackoff(
      cloud_task_args, used_default_value_for_min_backoff, cur_queue_state)
  _PostProcessRoutingOverride(cloud_task_args, cur_queue_state)
  return cloud_task_args
def _AnyUpdatableFields(args):
"""Check whether the queue has any changed attributes based on args provided.
Args:
args: argparse.Namespace, A placeholder args namespace built to pass on
forwards to Cloud Tasks API.
Returns:
True if any of the queue attributes have changed from the attributes stored
in the backend, False otherwise.
"""
# pylint: disable=protected-access
modifiable_args = [
x for x in args._specified_args if x not in ('name', 'type')]
return True if modifiable_args else False
def _RaiseHTTPException(msg_body):
  """Raises an HTTP exception with status code 400.

  This function reproduces the exceptions generated by the older
  implementation of `gcloud app deploy queue.yaml` when it communicated with
  the Zeus backend over HTTP.

  Args:
    msg_body: A string providing more information about the error being raised.

  Raises:
    HTTPError: Based on the inputs provided.
  """
  http_error = urllib.error.HTTPError(
      None, six.moves.http_client.BAD_REQUEST,
      'Bad Request Unexpected HTTP status 400', None, None)
  exceptions.reraise(
      util.RPCError(http_error, body=six.ensure_binary(msg_body)))
def _ValidateTaskRetryLimit(queue):
"""Validates task retry limit input values for both queues in the YAML file.
Args:
queue: third_party.appengine.api.queueinfo.QueueEntry, The QueueEntry
instance generated from the parsed YAML file.
Raises:
HTTPError: Based on the inputs provided if value specified is negative.
"""
if (
queue.retry_parameters.task_retry_limit and
queue.retry_parameters.task_retry_limit < 0
):
_RaiseHTTPException(
'Invalid queue configuration. Task retry limit must not be less '
'than zero.')
def ValidateCronYamlFileConfig(config):
  """Validates jobs configuration parameters in the cron YAML file.

  The purpose of this function is to mimic the behaviour of the old
  implementation of `gcloud app deploy cron.yaml` before migrating away
  from console-admin-hr. The errors generated are the same as the ones
  previously seen when gcloud sent the batch-request for updating jobs to the
  Zeus backend.

  Args:
    config: A yaml_parsing.ConfigYamlInfo object for the parsed YAML file we
      are going to process.

  Raises:
    HTTPError: Various different scenarios defined in the function can cause
      this exception to be raised.
  """
  cron_yaml = config.parsed
  for job in (cron_yaml.cron or []):
    retry = job.retry_parameters
    if not retry:
      continue
    # Job Retry Limit: at most 5 attempts.
    retry_limit = retry.job_retry_limit
    if retry_limit and retry_limit > 5:
      _RaiseHTTPException(
          'Invalid Cron retry parameters: Cannot set retry limit to more '
          'than 5 (currently set to {}).'.format(retry_limit))
    # Job Age Limit: must convert to a strictly positive number of seconds.
    age_limit = retry.job_age_limit
    if age_limit:
      age_seconds = int(
          convertors.CheckAndConvertStringToFloatIfApplicable(age_limit))
      if age_seconds <= 0:
        _RaiseHTTPException(
            'Invalid Cron retry parameters: Job age limit must be greater '
            'than zero seconds.')
    # Min & Max backoff comparison (only when both are present).
    min_backoff = retry.min_backoff_seconds
    max_backoff = retry.max_backoff_seconds
    if min_backoff is not None and max_backoff is not None:
      if max_backoff < min_backoff:
        _RaiseHTTPException(
            'Invalid Cron retry parameters: Min backoff sec must not be '
            'greater than than max backoff sec.')
def ValidateQueueYamlFileConfig(config):
  """Validates queue configuration parameters in the queue YAML file.

  The purpose of this function is to mimic the behaviour of the old
  implementation of `gcloud app deploy queue.yaml` before migrating away
  from console-admin-hr. The errors generated are the same as the ones
  previously seen when gcloud sent the batch-request for updating queues to the
  Zeus backend.

  Args:
    config: A yaml_parsing.ConfigYamlInfo object for the parsed YAML file we
      are going to process.

  Raises:
    HTTPError: Various different scenarios defined in the function can cause
      this exception to be raised.
  """
  queue_yaml = config.parsed
  if not queue_yaml.queue:
    return
  for queue in queue_yaml.queue:
    # Push queues (the default when no mode is specified)
    if not queue.mode or queue.mode == constants.PUSH_QUEUE:
      # Rate: mandatory for push queues and capped at MAX_RATE per second.
      if not queue.rate:
        _RaiseHTTPException(
            'Invalid queue configuration. Refill rate must be specified for '
            'push-based queue.')
      else:
        rate_in_seconds = convertors.ConvertRate(queue.rate)
        if rate_in_seconds > constants.MAX_RATE:
          _RaiseHTTPException(
              'Invalid queue configuration. Refill rate must not exceed '
              '{} per second (is {:.1f}).'.format(
                  constants.MAX_RATE, rate_in_seconds))
      # Retry Parameters
      if queue.retry_parameters:
        # Task Retry Limit
        _ValidateTaskRetryLimit(queue)
        # Task Age Limit: must convert to a strictly positive seconds value.
        if (
            queue.retry_parameters.task_age_limit and
            int(convertors.CheckAndConvertStringToFloatIfApplicable(
                queue.retry_parameters.task_age_limit)) <= 0
        ):
          _RaiseHTTPException(
              'Invalid queue configuration. Task age limit must be greater '
              'than zero.')
        # Min backoff: non-negative when specified.
        if (
            queue.retry_parameters.min_backoff_seconds and
            queue.retry_parameters.min_backoff_seconds < 0
        ):
          _RaiseHTTPException(
              'Invalid queue configuration. Min backoff seconds must not be '
              'less than zero.')
        # Max backoff: non-negative when specified.
        if (
            queue.retry_parameters.max_backoff_seconds and
            queue.retry_parameters.max_backoff_seconds < 0
        ):
          _RaiseHTTPException(
              'Invalid queue configuration. Max backoff seconds must not be '
              'less than zero.')
        # Max Doublings: non-negative when specified.
        if (
            queue.retry_parameters.max_doublings and
            queue.retry_parameters.max_doublings < 0
        ):
          _RaiseHTTPException(
              'Invalid queue configuration. Max doublings must not be less '
              'than zero.')
        # Min & Max backoff comparison (only when both are present).
        if (
            queue.retry_parameters.min_backoff_seconds is not None and
            queue.retry_parameters.max_backoff_seconds is not None
        ):
          min_backoff = queue.retry_parameters.min_backoff_seconds
          max_backoff = queue.retry_parameters.max_backoff_seconds
          if max_backoff < min_backoff:
            _RaiseHTTPException(
                'Invalid queue configuration. Min backoff sec must not be '
                'greater than than max backoff sec.')
      # Bucket size: must be within [0, MAX_BUCKET_SIZE].
      if queue.bucket_size:
        if queue.bucket_size < 0:
          _RaiseHTTPException(
              'Error updating queue "{}": The queue rate is invalid.'.format(
                  queue.name))
        elif queue.bucket_size > constants.MAX_BUCKET_SIZE:
          _RaiseHTTPException(
              'Error updating queue "{}": Maximum bucket size is {}.'.format(
                  queue.name, constants.MAX_BUCKET_SIZE))
    # Pull Queues: most push-only attributes are forbidden here.
    else:
      # Rate
      if queue.rate:
        _RaiseHTTPException(
            'Invalid queue configuration. Refill rate must not be specified '
            'for pull-based queue.')
      # Retry Parameters
      if queue.retry_parameters:
        # Task Retry Limit
        _ValidateTaskRetryLimit(queue)
        # Task Age Limit
        if queue.retry_parameters.task_age_limit is not None:
          _RaiseHTTPException(
              "Invalid queue configuration. Can't specify task_age_limit "
              "for a pull queue.")
        # Min backoff
        if queue.retry_parameters.min_backoff_seconds is not None:
          _RaiseHTTPException(
              "Invalid queue configuration. Can't specify min_backoff_seconds "
              "for a pull queue.")
        # Max backoff
        if queue.retry_parameters.max_backoff_seconds is not None:
          _RaiseHTTPException(
              "Invalid queue configuration. Can't specify max_backoff_seconds "
              "for a pull queue.")
        # Max doublings
        if queue.retry_parameters.max_doublings is not None:
          _RaiseHTTPException(
              "Invalid queue configuration. Can't specify max_doublings "
              "for a pull queue.")
      # Max concurrent requests
      if queue.max_concurrent_requests is not None:
        _RaiseHTTPException(
            'Invalid queue configuration. Max concurrent requests must not '
            'be specified for pull-based queue.')
      # Bucket size
      if queue.bucket_size is not None:
        _RaiseHTTPException(
            'Invalid queue configuration. Bucket size must not be specified '
            'for pull-based queue.')
      # Target
      if queue.target:
        _RaiseHTTPException(
            'Invalid queue configuration. Target must not be specified for '
            'pull-based queue.')
def FetchCurrentQueuesData(tasks_api):
  """Fetches the current queues data stored in the database.

  Args:
    tasks_api: api_lib.tasks.<Alpha|Beta|GA>ApiAdapter, Cloud Tasks API needed
      for doing queue based operations.

  Returns:
    A dictionary with queue names as keys and corresponding protobuf Queue
    objects as values apis.cloudtasks.<ver>.cloudtasks_<ver>_messages.Queue
  """
  app_location = app.ResolveAppLocation(parsers.ParseProject())
  region_ref = parsers.ParseLocation(app_location)
  queues_by_name = {}
  for backend_queue in tasks_api.queues.List(region_ref):
    # Key by the short queue name (last path segment of the resource name).
    queues_by_name[os.path.basename(backend_queue.name)] = backend_queue
  return queues_by_name
def FetchCurrentJobsData(scheduler_api):
  """Fetches the current jobs data stored in the database.

  Args:
    scheduler_api: api_lib.scheduler.<Alpha|Beta|GA>ApiAdapter, Cloud Scheduler
      API needed for doing jobs based operations.

  Returns:
    A list of currently existing jobs in the backend.
  """
  jobs_client = scheduler_api.jobs
  app_location = app.ResolveAppLocation(
      parsers.ParseProject(), locations_client=scheduler_api.locations)
  region_ref = parsers.ParseLocation(app_location).RelativeName()
  # list(...) materializes the iterable directly; the previous
  # `list(x for x in ...)` generator wrapper was redundant.
  return list(jobs_client.List(region_ref))
def DeployQueuesYamlFile(
    tasks_api,
    config,
    all_queues_in_db_dict,
    ct_api_version=base.ReleaseTrack.BETA
):
  """Perform a deployment based on the parsed 'queue.yaml' file.

  Args:
    tasks_api: api_lib.tasks.<Alpha|Beta|GA>ApiAdapter, Cloud Tasks API needed
      for doing queue based operations.
    config: A yaml_parsing.ConfigYamlInfo object for the parsed YAML file we
      are going to process.
    all_queues_in_db_dict: A dictionary with queue names as keys and
      corresponding apis.cloudtasks.<ver>.cloudtasks_<ver>_messages.Queue
      objects as values
    ct_api_version: The Cloud Tasks API version we want to use.

  Returns:
    A list of responses received from the Cloud Tasks APIs representing queue
    states for every call made to modify the attributes of a queue.
  """
  class _PlaceholderQueueRef:
    """A placeholder class to simulate queue_ref resource objects used in CT APIs.

    This class simulates the behaviour of the resource object returned by
    tasks.parsers.ParseQueue(...) function. We use this placeholder class
    instead of creating an actual resource instance because otherwise it takes
    roughly 2 minutes to create resource instances for a 1000 queues.

    Attributes:
      _relative_path: A string representing the full path for a queue in the
        format: 'projects/<project>/locations/<location>/queues/<queue>'
    """

    def __init__(self, relative_path):
      """Initializes the instance and sets the relative path."""
      self._relative_path = relative_path

    def RelativeName(self):
      """Gets the string representing the full path for a queue.

      This is the only function we are currently using in CT APIs for the
      queue_ref resource object.

      Returns:
        A string representing the full path for a queue in the following
        format: 'projects/<project>/locations/<location>/queues/<queue>'
      """
      return self._relative_path

  queue_yaml = config.parsed
  # Any value other than the literal string 'False' means "resume".
  resume_paused_queues = queue_yaml.resume_paused_queues != 'False'
  queues_client = tasks_api.queues
  # Queues present in the backend but absent from the YAML get paused at the
  # end; start from the full backend set and remove as YAML entries match.
  queues_not_present_in_yaml = set(all_queues_in_db_dict.keys())
  # Just need to create one real instance of queue_ref. After that we can
  # create placeholder queue_ref objects based on this instance.
  queue_ref = parsers.ParseQueue('a')
  queue_ref_stub = queue_ref.RelativeName()[:-1]
  # Get the arg values that we need to fill up for each queue using CT APIs
  # pylint: disable=protected-access
  task_args = flags._PushQueueFlags(release_track=ct_api_version)
  # TODO(b/169069379) Remove max_burst_size when/if API is exposed via `gcloud
  # tasks queues` CLI invocation.
  task_args.append(base.Argument('--max_burst_size', type=int, help=''))
  expected_args = []
  for task_flag in task_args:
    # Strip the leading '--' and convert dashes to underscores.
    new_arg = task_flag.args[0][2:].replace('-', '_')
    expected_args.extend((new_arg, 'clear_{}'.format(new_arg)))
  responses = []
  if queue_yaml.queue is None:
    queue_yaml.queue = []
  for queue in queue_yaml.queue:
    if queue.name in queues_not_present_in_yaml:
      queues_not_present_in_yaml.remove(queue.name)
    queue_ref = _PlaceholderQueueRef('{}{}'.format(queue_ref_stub, queue.name))
    cur_queue_object = all_queues_in_db_dict.get(queue.name, None)
    cloud_task_args = _PopulateCloudTasksArgs(queue, cur_queue_object,
                                              expected_args)
    rate_to_set = cloud_task_args.GetValue('max_dispatches_per_second')
    if (
        resume_paused_queues and
        cur_queue_object and
        (rate_to_set or queue.mode == constants.PULL_QUEUE) and
        cur_queue_object.state in (cur_queue_object.state.DISABLED,
                                   cur_queue_object.state.PAUSED)
    ):
      # Resume queue if it exists, was previously disabled/paused, the new
      # rate > 0 and if there is no global flag to skip resuming paused queues.
      queues_client.Resume(queue_ref)
    elif (
        cur_queue_object and
        not rate_to_set and
        cur_queue_object.state == cur_queue_object.state.RUNNING and
        queue.mode in (None, constants.PUSH_QUEUE)
    ):
      # A running push queue whose new rate is zero gets paused instead of
      # having its rate patched to zero.
      queues_client.Pause(queue_ref)
    if not _AnyUpdatableFields(cloud_task_args):
      # Queue attributes in DB == Queue attributes in YAML
      continue
    queue_config = parsers.ParseCreateOrUpdateQueueArgs(
        cloud_task_args,
        # Deliberately hardcoding push queues because we want to be able to
        # modify all attributes even for pull queues.
        constants.PUSH_QUEUE,
        tasks_api.messages,
        release_track=ct_api_version,
        http_queue=False,
    )
    updated_fields = parsers.GetSpecifiedFieldsMask(
        cloud_task_args, constants.PUSH_QUEUE, release_track=ct_api_version)
    # TaskTTL and TombstoneTTL are both immutable so we only set them upon
    # queue creation. The values set here are as close as possible to the
    # default values used with legacy app deploy which used superapps.
    if not cur_queue_object:
      updated_fields.extend(['taskTtl', 'tombstoneTtl'])
    app_engine_routing_override = (
        queue_config.appEngineHttpQueue.appEngineRoutingOverride
        if queue_config.appEngineHttpQueue is not None else None)
    response = queues_client.Patch(
        queue_ref,
        updated_fields,
        retry_config=queue_config.retryConfig,
        rate_limits=queue_config.rateLimits,
        app_engine_routing_override=app_engine_routing_override,
        task_ttl=constants.MAX_TASK_TTL if not cur_queue_object else None,
        task_tombstone_ttl=(
            constants.MAX_TASK_TOMBSTONE_TTL if not cur_queue_object else None),
        queue_type=queue_config.type
    )
    responses.append(response)
    if (
        not cur_queue_object and
        not rate_to_set and
        queue.mode == constants.PUSH_QUEUE
    ):
      # Pause queue if its a new push-queue and rate is zero.
      queues_client.Pause(queue_ref)
  for queue_name in queues_not_present_in_yaml:
    # Skipping 'default' queue to retain backwards compatability with legacy
    # behaviour where admin-console-hr would not DISABLE queues named 'default'.
    if queue_name == 'default':
      continue
    queue = all_queues_in_db_dict[queue_name]
    if queue.state in (queue.state.PAUSED, queue.state.DISABLED):
      continue
    queue_ref = _PlaceholderQueueRef('{}{}'.format(queue_ref_stub, queue_name))
    queues_client.Pause(queue_ref)
  return responses
def _CreateUniqueJobKeyForExistingJob(job, project):
  """Creates a key from the proto job instance's attributes passed as input.

  Args:
    job: An instance of job fetched from the backend.
    project: The base name of the project.

  Returns:
    A tuple of attributes used as a key to identify this job.
  """
  to_float = convertors.CheckAndConvertStringToFloatIfApplicable
  retry = job.retryConfig
  if retry:
    # Backoff/durations are normalized to floats so YAML-derived keys match.
    retry_fields = (
        to_float(retry.minBackoffDuration),
        to_float(retry.maxBackoffDuration),
        retry.maxDoublings,
        to_float(retry.maxRetryDuration),
        retry.retryCount,
    )
  else:
    retry_fields = (None, None, None, None, None)
  return (
      job.schedule,
      job.timeZone,
      job.appEngineHttpTarget.relativeUri,
      job.description,
  ) + retry_fields + (
      parsers.ExtractTargetFromAppEngineHostUrl(job, project),
  )
def _ReplaceDefaultRetryParamsForYamlJob(job):
  """Replaces default values for retry parameters.

  Retry parameters are set to their default values if not already user defined.
  These values are only set if the user has defined at least one retry
  parameter. Also we are limiting min_backoff to a minimum value of 5.0s since
  the new scheduler API does not support setting a lower value than this.

  Modifies input `job` argument directly.

  Args:
    job: An instance of a parsed YAML job object.
  """
  defaults = constants.CRON_JOB_LEGACY_DEFAULT_VALUES
  retry_data = job.retry_parameters
  if retry_data:
    # Min max backoff is a special case. If only one is specified, the other
    # value is set to its default value as long as this condition is satisfied:
    # 'min_backoff <= max_backoff'. Otherwise, the unspecified value is set
    # equal to the specified value.
    if (
        retry_data.min_backoff_seconds is None and
        retry_data.max_backoff_seconds is None
    ):
      # Both values are None so we should set them to defaults.
      retry_data.min_backoff_seconds = defaults['min_backoff']
      retry_data.max_backoff_seconds = defaults['max_backoff']
    elif (
        retry_data.min_backoff_seconds is None or
        retry_data.max_backoff_seconds is None
    ):
      # Only one of the backoff values is None. We need to ensure that
      # min_backoff <= max_backoff.
      if not retry_data.min_backoff_seconds:
        # NOTE(review): the truthiness test also treats an explicit 0 as
        # "unset" — presumably 0 is not a legal min_backoff; confirm.
        retry_data.min_backoff_seconds = defaults['min_backoff']
      if retry_data.max_backoff_seconds:
        # max was user-set: clamp min down so min_backoff <= max_backoff.
        retry_data.min_backoff_seconds = min(retry_data.min_backoff_seconds,
                                             retry_data.max_backoff_seconds)
      if retry_data.max_backoff_seconds is None:
        # min was user-set: default max, then raise it up to at least min.
        retry_data.max_backoff_seconds = defaults['max_backoff']
        retry_data.max_backoff_seconds = max(retry_data.min_backoff_seconds,
                                             retry_data.max_backoff_seconds)
    # Max Doublings
    if retry_data.max_doublings is None:
      retry_data.max_doublings = defaults['max_doublings']
    # Job Age Limit
    if retry_data.job_age_limit is None:
      retry_data.job_age_limit = defaults['max_retry_duration']
def _CreateUniqueJobKeyForYamlJob(job):
"""Creates a key from the YAML job instance's attributes passed as input.
Args:
job: An instance of a parsed YAML job object.
Returns:
A tuple of attributes used as a key to identify this job.
"""
retry_params = job.retry_parameters
return (
job.schedule,
job.timezone if job.timezone else 'UTC',
job.url,
job.description,
retry_params.min_backoff_seconds if retry_params else None,
retry_params.max_backoff_seconds if retry_params else None,
retry_params.max_doublings if retry_params else None,
convertors.CheckAndConvertStringToFloatIfApplicable(
retry_params.job_age_limit) if retry_params else None,
retry_params.job_retry_limit if retry_params else None,
job.target,
)
def _BuildJobsMappingDict(existing_jobs, project):
"""Builds a dictionary of unique jobs by attributes.
Each key is in this dictionary is based on all the existing attributes of a
job. Multiple jobs can map to the same key if all their attributes (schedule,
url, timezone, description, etc.) match.
Args:
existing_jobs: A list of jobs that already exist in the backend. Each job
maps to an apis.cloudscheduler.<ver>.cloudscheduler<ver>_messages.Job
instance.
project: The base name of the project.
Returns:
A dictionary where a key is built based on a all the job attributes and the
value is an apis.cloudscheduler.<ver>.cloudscheduler<ver>_messages.Job
instance.
"""
jobs_indexed_dict = {}
for job in existing_jobs:
key = _CreateUniqueJobKeyForExistingJob(job, project)
if key not in jobs_indexed_dict:
jobs_indexed_dict[key] = []
jobs_indexed_dict[key].append(job)
return jobs_indexed_dict
def CreateJobInstance(scheduler_api, yaml_job):
  """Build a proto format job instance matching the input YAML based job.

  Args:
    scheduler_api: api_lib.scheduler.<Alpha|Beta|GA>ApiAdapter, Cloud Scheduler
      API needed for doing jobs based operations.
    yaml_job: A parsed yaml_job entry read from the 'cron.yaml' file.

  Returns:
    An cloudscheduler.<ver>.cloudscheduler_<ver>_messages.Job instance.
  """
  messages = scheduler_api.messages
  retry_params = yaml_job.retry_parameters
  retry_config = None
  if retry_params:
    retry_config = messages.RetryConfig(
        maxBackoffDuration=convertors.ConvertBackoffSeconds(
            retry_params.max_backoff_seconds),
        maxDoublings=retry_params.max_doublings,
        maxRetryDuration=convertors.ConvertTaskAgeLimit(
            retry_params.job_age_limit),
        minBackoffDuration=convertors.ConvertBackoffSeconds(
            retry_params.min_backoff_seconds),
        retryCount=retry_params.job_retry_limit,
    )
  # Cron jobs always target App Engine over GET at the configured service.
  routing = messages.AppEngineRouting(service=yaml_job.target)
  app_engine_target = messages.AppEngineHttpTarget(
      httpMethod=messages.AppEngineHttpTarget.HttpMethodValueValuesEnum.GET,
      relativeUri=yaml_job.url,
      appEngineRouting=routing,
  )
  return messages.Job(
      appEngineHttpTarget=app_engine_target,
      retryConfig=retry_config,
      description=yaml_job.description,
      legacyAppEngineCron=scheduler_api.jobs.legacy_cron,
      schedule=yaml_job.schedule,
      timeZone=yaml_job.timezone or 'UTC',
  )
def DeployCronYamlFile(scheduler_api, config, existing_jobs):
  """Perform a deployment based on the parsed 'cron.yaml' file.

  For every job defined in the cron.yaml file, we will create a new cron job
  for any job that did not already exist in our backend. We will also delete
  those jobs which are not present in the YAML file but exist in our backend.
  Note: We do not update any jobs. The only operations are Create and Delete.
  So if we modify any attribute of an existing job in the YAML file, the old
  job gets deleted and a new job is created based on the new attributes.

  Args:
    scheduler_api: api_lib.scheduler.<Alpha|Beta|GA>ApiAdapter, Cloud Scheduler
      API needed for doing jobs based operations.
    config: A yaml_parsing.ConfigYamlInfo object for the parsed YAML file we
      are going to process.
    existing_jobs: A list of jobs that already exist in the backend. Each job
      maps to an apis.cloudscheduler.<ver>.cloudscheduler<ver>_messages.Job
      instance.

  Returns:
    A list of responses received from the Cloud Scheduler APIs representing job
    states for every call made to create a job.
  """
  cron_yaml = config.parsed
  jobs_client = scheduler_api.jobs
  # Cron jobs live in the app's region; resolve it once up front.
  app_location = app.ResolveAppLocation(
      parsers.ParseProject(), locations_client=scheduler_api.locations)
  region_ref = parsers.ParseLocation(app_location).RelativeName()
  project = os.path.basename(str(parsers.ParseProject()))
  # Group backend jobs by their full attribute key so YAML jobs can be
  # matched against them exactly.
  existing_jobs_dict = _BuildJobsMappingDict(existing_jobs, project)
  # Create a new job for any job that does not map exactly to jobs that already
  # exist in the backend.
  responses = []
  if cron_yaml.cron:
    for yaml_job in cron_yaml.cron:
      _ReplaceDefaultRetryParamsForYamlJob(yaml_job)
      job_key = _CreateUniqueJobKeyForYamlJob(yaml_job)
      if job_key in existing_jobs_dict and existing_jobs_dict[job_key]:
        # If the job already exists then we do not need to do anything.
        # Popping marks one backend copy as matched so it survives the
        # deletion pass below.
        # TODO(b/169069379): Enhance to pop based on oldest/newest
        existing_jobs_dict[job_key].pop()
        continue
      job = CreateJobInstance(scheduler_api, yaml_job)
      responses.append(jobs_client.Create(region_ref, job))
  # TODO(b/169069379): Preserve next job execution for jobs whose only change
  # is description
  # Delete the jobs which are no longer in the YAML file
  for jobs_list in existing_jobs_dict.values():
    for yaml_job in jobs_list:
      jobs_client.Delete(yaml_job.name)
  return responses

View File

@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Library for `gcloud tasks cmek-config`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import exceptions
class RequiredFieldsMissingError(exceptions.Error):
  """Raised when a CMEK config call is missing a required field (e.g. project, location, or KMS key)."""
class CmekConfig(object):
  """Client for the cmekConfig resource in the Cloud Tasks API."""

  def __init__(self, messages, cmek_config_service):
    self.messages = messages
    self.cmek_config_service = cmek_config_service

  def UpdateCmekConfig(
      self, project_id, location_id, full_kms_key_name, clear=False
  ):
    """Prepares and sends a UpdateCmekConfig request for the given CmekConfig."""
    scope_missing = location_id is None or project_id is None
    if clear:
      # Clearing only needs the resource path, but both parts must be present.
      if scope_missing:
        raise RequiredFieldsMissingError(
            'The location or project are undefined.'
            ' Please set these flags properly.'
        )
    elif scope_missing or full_kms_key_name is None:
      # Updating requires the full KMS key name plus project and location.
      raise RequiredFieldsMissingError(
          'One or more of the --kms-key-name, --kms-keyring, --location, or'
          ' --project are invalid. Please set these flags properly or make sure'
          ' the full KMS key name is valid. (args: kms_key={}, location={},'
          ' project={})'.format(full_kms_key_name, location_id, project_id)
      )
    cmek_config_name = (
        'projects/{project_id}/locations/{location_id}/cmekConfig'.format(
            project_id=project_id, location_id=location_id
        )
    )
    request = self.messages.CloudtasksProjectsLocationsUpdateCmekConfigRequest(
        cmekConfig=self.messages.CmekConfig(
            name=cmek_config_name,
            kmsKey=full_kms_key_name,
        ),
        name=cmek_config_name
    )
    return self.cmek_config_service.UpdateCmekConfig(request)

  def GetCmekConfig(self, project_id, location_id):
    """Prepares and sends a GetCmekConfig request for the given CmekConfig."""
    if project_id is None:
      raise RequiredFieldsMissingError(
          'Project ({project_id}) is invalid. Must specify --project'
          ' properly.'.format(project_id=project_id)
      )
    if location_id is None:
      raise RequiredFieldsMissingError(
          'Location path ({location_id}) is invalid. Must specify --location'
          ' properly.'.format(location_id=location_id)
      )
    cmek_config_name = (
        'projects/{project_id}/locations/{location_id}/cmekConfig'.format(
            project_id=project_id, location_id=location_id
        )
    )
    return self.cmek_config_service.GetCmekConfig(
        self.messages.CloudtasksProjectsLocationsGetCmekConfigRequest(
            name=cmek_config_name,
        )
    )

View File

@@ -0,0 +1,41 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Library for `gcloud tasks locations`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
class Locations(object):
  """Client for locations service in the Cloud Tasks API."""

  def __init__(self, messages, locations_service):
    self.messages = messages
    self.locations_service = locations_service

  def Get(self, location_ref):
    """Fetches a single location by its resource reference."""
    return self.locations_service.Get(
        self.messages.CloudtasksProjectsLocationsGetRequest(
            name=location_ref.RelativeName()))

  def List(self, project_ref, limit=None, page_size=100):
    """Yields the locations under `project_ref`, paging transparently."""
    request = self.messages.CloudtasksProjectsLocationsListRequest(
        name=project_ref.RelativeName())
    return list_pager.YieldFromList(
        self.locations_service, request,
        field='locations',
        limit=limit,
        batch_size=page_size,
        batch_size_attribute='pageSize')

View File

@@ -0,0 +1,625 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Library for gcloud tasks queues."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from apitools.base.py import list_pager
from googlecloudsdk.core import exceptions
import six
# Update-mask entries for every individually patchable field of a queue's
# HTTP target. Patch methods consult this list to decide whether a request
# touches the HTTP target at all (see _HttpTargetNeedsUpdate).
http_target_update_masks_list = [
    'httpTarget.headerOverrides',
    'httpTarget.httpMethod',
    'httpTarget.oauthToken.serviceAccountEmail',
    'httpTarget.oauthToken.scope',
    'httpTarget.oidcToken.serviceAccountEmail',
    'httpTarget.oidcToken.audience',
    'httpTarget.uriOverride',
]
class CreatingPullAndAppEngineQueueError(exceptions.InternalError):
  """Raised when a queue create request specifies both pull and App Engine targets."""
class CreatingHttpAndAppEngineQueueError(exceptions.InternalError):
  """Raised when a queue create request specifies both HTTP and App Engine targets."""
class NoFieldsSpecifiedError(exceptions.Error):
  """Raised when a patch method is called with no fields to update or clear."""
class RequiredFieldsMissingError(exceptions.Error):
  """Raised when a patch method is called while a required field is unspecified."""
class BaseQueues(object):
  """Client for queues service in the Cloud Tasks API.

  Holds the verb implementations shared by every API version; Create/Patch
  are version-specific and live on the subclasses.
  """

  def __init__(self, messages, queues_service):
    self.messages = messages
    self.queues_service = queues_service

  def Get(self, queue_ref):
    """Fetches a single queue."""
    return self.queues_service.Get(
        self.messages.CloudtasksProjectsLocationsQueuesGetRequest(
            name=queue_ref.RelativeName()))

  def List(self, parent_ref, limit=None, page_size=100):
    """Yields the queues under `parent_ref`, paging transparently."""
    request = self.messages.CloudtasksProjectsLocationsQueuesListRequest(
        parent=parent_ref.RelativeName())
    return list_pager.YieldFromList(
        self.queues_service, request,
        field='queues',
        limit=limit,
        batch_size=page_size,
        batch_size_attribute='pageSize')

  def Delete(self, queue_ref):
    """Deletes a queue."""
    return self.queues_service.Delete(
        self.messages.CloudtasksProjectsLocationsQueuesDeleteRequest(
            name=queue_ref.RelativeName()))

  def Purge(self, queue_ref):
    """Purges all tasks from a queue."""
    return self.queues_service.Purge(
        self.messages.CloudtasksProjectsLocationsQueuesPurgeRequest(
            name=queue_ref.RelativeName()))

  def Pause(self, queue_ref):
    """Pauses task dispatch on a queue."""
    return self.queues_service.Pause(
        self.messages.CloudtasksProjectsLocationsQueuesPauseRequest(
            name=queue_ref.RelativeName()))

  def Resume(self, queue_ref):
    """Resumes task dispatch on a paused queue."""
    return self.queues_service.Resume(
        self.messages.CloudtasksProjectsLocationsQueuesResumeRequest(
            name=queue_ref.RelativeName()))

  def GetIamPolicy(self, queue_ref):
    """Fetches the IAM policy attached to a queue."""
    return self.queues_service.GetIamPolicy(
        self.messages.CloudtasksProjectsLocationsQueuesGetIamPolicyRequest(
            resource=queue_ref.RelativeName()))

  def SetIamPolicy(self, queue_ref, policy):
    """Replaces the IAM policy attached to a queue."""
    set_policy_request = self.messages.SetIamPolicyRequest(policy=policy)
    request = (
        self.messages.CloudtasksProjectsLocationsQueuesSetIamPolicyRequest(
            resource=queue_ref.RelativeName(),
            setIamPolicyRequest=set_policy_request,
        )
    )
    return self.queues_service.SetIamPolicy(request)
class Queues(BaseQueues):
  """Client for queues service in the Cloud Tasks API (GA surface)."""

  def Create(
      self,
      parent_ref,
      queue_ref,
      retry_config=None,
      rate_limits=None,
      app_engine_routing_override=None,
      stackdriver_logging_config=None,
      http_target=None,
  ):
    """Prepares and sends a Create request for creating a queue.

    Args:
      parent_ref: Resource reference for the parent location.
      queue_ref: Resource reference for the queue to create.
      retry_config: Optional RetryConfig message.
      rate_limits: Optional RateLimits message.
      app_engine_routing_override: Optional App Engine routing override.
      stackdriver_logging_config: Optional StackdriverLoggingConfig message.
      http_target: Optional HttpTarget message; mutually exclusive with
        app_engine_routing_override.

    Returns:
      The created Queue message.

    Raises:
      CreatingHttpAndAppEngineQueueError: If both target types are given.
    """
    # A queue routes to App Engine OR to an arbitrary HTTP target, never both.
    targets = (app_engine_routing_override, http_target)
    if sum([1 if x is not None else 0 for x in targets]) > 1:
      raise CreatingHttpAndAppEngineQueueError(
          'Attempting to send multiple queue target types simultaneously: {}'
          ' , {}'.format(
              six.text_type(app_engine_routing_override),
              six.text_type(http_target),
          )
      )
    queue = self.messages.Queue(
        name=queue_ref.RelativeName(),
        retryConfig=retry_config,
        rateLimits=rate_limits,
        appEngineRoutingOverride=app_engine_routing_override,
        stackdriverLoggingConfig=stackdriver_logging_config,
        httpTarget=http_target
    )
    request = self.messages.CloudtasksProjectsLocationsQueuesCreateRequest(
        parent=parent_ref.RelativeName(), queue=queue
    )
    return self.queues_service.Create(request)

  def Patch(
      self,
      queue_ref,
      updated_fields,
      retry_config=None,
      rate_limits=None,
      app_engine_routing_override=None,
      stackdriver_logging_config=None,
      http_uri_override=None,
      http_method_override=None,
      http_header_override=None,
      http_oauth_email_override=None,
      http_oauth_scope_override=None,
      http_oidc_email_override=None,
      http_oidc_audience_override=None,
  ):
    """Prepares and sends a Patch request for modifying a queue.

    `updated_fields` is the list of camelCase field masks sent as the
    request's updateMask; a mask present without a corresponding value
    clears that field on the queue.
    """
    if not any([retry_config, rate_limits, stackdriver_logging_config]):
      # If appEngineRoutingOverride is in updated_fields then an empty
      # app_engine_routing_override will remove the routing override field.
      if (
          not app_engine_routing_override
          and 'appEngineRoutingOverride' not in updated_fields
      ) and _NeitherUpdateNorClear(
          [
              http_uri_override,
              http_method_override,
              http_header_override,
              http_oauth_email_override,
              http_oauth_scope_override,
              http_oidc_email_override,
              http_oidc_audience_override,
          ],
          http_target_update_masks_list,
          updated_fields,
      ):
        # Nothing to update and nothing to clear: reject the request.
        raise NoFieldsSpecifiedError(
            'Must specify at least one field to update.'
        )
    queue = self.messages.Queue(name=queue_ref.RelativeName())
    if retry_config is not None:
      queue.retryConfig = retry_config
    if rate_limits is not None:
      queue.rateLimits = rate_limits
    if app_engine_routing_override is not None:
      # An empty routing config clears the override on the backend.
      if _IsEmptyConfig(app_engine_routing_override):
        queue.appEngineRoutingOverride = self.messages.AppEngineRouting()
      else:
        queue.appEngineRoutingOverride = app_engine_routing_override
    if stackdriver_logging_config is not None:
      queue.stackdriverLoggingConfig = stackdriver_logging_config
    # modifies the queue
    _GenerateHttpTargetUpdateMask(
        self.messages,
        queue,
        updated_fields,
        http_uri_override,
        http_method_override,
        http_header_override,
        http_oauth_email_override,
        http_oauth_scope_override,
        http_oidc_email_override,
        http_oidc_audience_override,
    )
    update_mask = ','.join(updated_fields)
    request = self.messages.CloudtasksProjectsLocationsQueuesPatchRequest(
        name=queue_ref.RelativeName(), queue=queue, updateMask=update_mask
    )
    return self.queues_service.Patch(request)
class BetaQueues(BaseQueues):
  """Client for queues service in the Cloud Tasks API (v2beta3 surface)."""

  def Create(
      self,
      parent_ref,
      queue_ref,
      retry_config=None,
      rate_limits=None,
      app_engine_http_queue=None,
      stackdriver_logging_config=None,
      queue_type=None,
      http_target=None,
  ):
    """Prepares and sends a Create request for creating a queue.

    Raises:
      CreatingHttpAndAppEngineQueueError: If both an App Engine routing
        override and an HTTP target are supplied.
    """
    # There are different cases: if both app_engine and HTTP targets are
    # provided, then throw an error. If HTTP target is provided, then use it,
    # otherwise use app_engine by default.
    is_app_engine_target_set = (
        app_engine_http_queue is not None
        and app_engine_http_queue.appEngineRoutingOverride is not None
    )
    is_http_target_set = http_target is not None
    if is_app_engine_target_set and is_http_target_set:
      raise CreatingHttpAndAppEngineQueueError(
          'Attempting to send multiple queue target types simultaneously: {}'
          ' , {}'.format(
              six.text_type(app_engine_http_queue), six.text_type(http_target)
          )
      )
    if is_http_target_set:
      queue = self.messages.Queue(
          name=queue_ref.RelativeName(),
          retryConfig=retry_config,
          rateLimits=rate_limits,
          stackdriverLoggingConfig=stackdriver_logging_config,
          type=queue_type,
          httpTarget=http_target,
      )
    else:
      queue = self.messages.Queue(
          name=queue_ref.RelativeName(),
          retryConfig=retry_config,
          rateLimits=rate_limits,
          appEngineHttpQueue=app_engine_http_queue,
          stackdriverLoggingConfig=stackdriver_logging_config,
          type=queue_type,
      )
    request = self.messages.CloudtasksProjectsLocationsQueuesCreateRequest(
        parent=parent_ref.RelativeName(), queue=queue
    )
    return self.queues_service.Create(request)

  def Patch(
      self,
      queue_ref,
      updated_fields,
      retry_config=None,
      rate_limits=None,
      app_engine_routing_override=None,
      task_ttl=None,
      task_tombstone_ttl=None,
      stackdriver_logging_config=None,
      queue_type=None,
      http_uri_override=None,
      http_method_override=None,
      http_header_override=None,
      http_oauth_email_override=None,
      http_oauth_scope_override=None,
      http_oidc_email_override=None,
      http_oidc_audience_override=None,
  ):
    """Prepares and sends a Patch request for modifying a queue.

    `updated_fields` is the list of camelCase field masks sent as the
    request's updateMask; a mask present without a corresponding value
    clears that field on the queue.
    """
    # The following block is necessary to modify pull queue attributes without
    # explicitly setting type to 'pull' during CLI invocation.
    if queue_type and queue_type != queue_type.PULL:
      queue_type = None
    if not any([
        retry_config,
        rate_limits,  # No effect here as it is not user-configurable
        task_ttl,  # No effect here as it is not user-configurable
        task_tombstone_ttl,
        stackdriver_logging_config,
    ]):
      # IF no app_engine_routing_override (for updating the value) AND
      # IF no appEngineRoutingOverride in the update fields (to clear the value)
      # AND IF none of the http target override parts are given (to update their
      # values) AND IF none of the http target override update masks are in the
      # update fields (to clear their values) THEN throw error.
      if _NeitherUpdateNorClear(
          [app_engine_routing_override],
          ['appEngineRoutingOverride'],
          updated_fields,
      ) and _NeitherUpdateNorClear(
          [
              http_uri_override,
              http_method_override,
              http_header_override,
              http_oauth_email_override,
              http_oauth_scope_override,
              http_oidc_email_override,
              http_oidc_audience_override,
          ],
          http_target_update_masks_list,
          updated_fields,
      ):
        raise NoFieldsSpecifiedError(
            'Must specify at least one field to update.'
        )
    queue = self.messages.Queue(name=queue_ref.RelativeName(), type=queue_type)
    if retry_config is not None:
      queue.retryConfig = retry_config
    if rate_limits is not None:
      queue.rateLimits = rate_limits
    if task_ttl is not None:
      queue.taskTtl = task_ttl
    if task_tombstone_ttl is not None:
      queue.tombstoneTtl = task_tombstone_ttl
    if stackdriver_logging_config is not None:
      queue.stackdriverLoggingConfig = stackdriver_logging_config
    if app_engine_routing_override is not None:
      # An empty routing config clears the override on the backend.
      if _IsEmptyConfig(app_engine_routing_override):
        queue.appEngineHttpQueue = self.messages.AppEngineHttpQueue()
      else:
        queue.appEngineHttpQueue = self.messages.AppEngineHttpQueue(
            appEngineRoutingOverride=app_engine_routing_override
        )
    # modifies the queue
    _GenerateHttpTargetUpdateMask(
        self.messages,
        queue,
        updated_fields,
        http_uri_override,
        http_method_override,
        http_header_override,
        http_oauth_email_override,
        http_oauth_scope_override,
        http_oidc_email_override,
        http_oidc_audience_override,
    )
    update_mask = ','.join(updated_fields)
    request = self.messages.CloudtasksProjectsLocationsQueuesPatchRequest(
        name=queue_ref.RelativeName(), queue=queue, updateMask=update_mask
    )
    return self.queues_service.Patch(request)
class AlphaQueues(BaseQueues):
  """Client for queues service in the Cloud Tasks API (v2beta2 surface)."""

  def Create(
      self,
      parent_ref,
      queue_ref,
      retry_config=None,
      rate_limits=None,
      pull_target=None,
      app_engine_http_target=None,
      http_target=None,
  ):
    """Prepares and sends a Create request for creating a queue.

    Raises:
      CreatingHttpAndAppEngineQueueError: If both App Engine and HTTP targets
        are supplied.
      CreatingPullAndAppEngineQueueError: If more than one of pull, App Engine,
        and HTTP targets are supplied.
    """
    # At most one target type may be set; check the HTTP/App Engine pair first
    # for the more specific error message.
    targets = (app_engine_http_target, http_target)
    if sum([1 if x is not None else 0 for x in targets]) > 1:
      raise CreatingHttpAndAppEngineQueueError(
          'Attempting to send multiple queue target types simultaneously: {}'
          ' , {}'.format(
              six.text_type(app_engine_http_target), six.text_type(http_target)
          )
      )
    targets = (pull_target, app_engine_http_target, http_target)
    if sum([1 if x is not None else 0 for x in targets]) > 1:
      raise CreatingPullAndAppEngineQueueError(
          'Attempting to send multiple queue target types simultaneously'
      )
    queue = self.messages.Queue(
        name=queue_ref.RelativeName(),
        retryConfig=retry_config,
        rateLimits=rate_limits,
        pullTarget=pull_target,
        appEngineHttpTarget=app_engine_http_target,
        httpTarget=http_target,
    )
    request = self.messages.CloudtasksProjectsLocationsQueuesCreateRequest(
        parent=parent_ref.RelativeName(), queue=queue
    )
    return self.queues_service.Create(request)

  def Patch(
      self,
      queue_ref,
      updated_fields,
      retry_config=None,
      rate_limits=None,
      app_engine_routing_override=None,
      http_uri_override=None,
      http_method_override=None,
      http_header_override=None,
      http_oauth_email_override=None,
      http_oauth_scope_override=None,
      http_oidc_email_override=None,
      http_oidc_audience_override=None,
  ):
    """Prepares and sends a Patch request for modifying a queue.

    `updated_fields` is the list of camelCase field masks sent as the
    request's updateMask; a mask present without a corresponding value
    clears that field on the queue.
    """
    if not any([retry_config, rate_limits]):
      # IF no app_engine_routing_override (for updating the value) AND
      # IF no appEngineRoutingOverride in the update fields (to clear the value)
      # AND IF none of the http target override parts are given (to update their
      # values) AND IF none of the http target override update masks are in the
      # update fields (to clear their values) THEN throw error.
      if _NeitherUpdateNorClear(
          [app_engine_routing_override],
          ['appEngineRoutingOverride'],
          updated_fields,
      ) and _NeitherUpdateNorClear(
          [
              http_uri_override,
              http_method_override,
              http_header_override,
              http_oauth_email_override,
              http_oauth_scope_override,
              http_oidc_email_override,
              http_oidc_audience_override,
          ],
          http_target_update_masks_list,
          updated_fields,
      ):
        raise NoFieldsSpecifiedError(
            'Must specify at least one field to update.'
        )
    queue = self.messages.Queue(name=queue_ref.RelativeName())
    if retry_config is not None:
      queue.retryConfig = retry_config
    if rate_limits is not None:
      queue.rateLimits = rate_limits
    if app_engine_routing_override is not None:
      # An empty routing config clears the override on the backend.
      if _IsEmptyConfig(app_engine_routing_override):
        queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget()
      else:
        queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget(
            appEngineRoutingOverride=app_engine_routing_override
        )
    # modifies the queue
    _GenerateHttpTargetUpdateMask(
        self.messages,
        queue,
        updated_fields,
        http_uri_override,
        http_method_override,
        http_header_override,
        http_oauth_email_override,
        http_oauth_scope_override,
        http_oidc_email_override,
        http_oidc_audience_override,
    )
    update_mask = ','.join(updated_fields)
    request = self.messages.CloudtasksProjectsLocationsQueuesPatchRequest(
        name=queue_ref.RelativeName(), queue=queue, updateMask=update_mask
    )
    return self.queues_service.Patch(request)
def _GenerateHttpTargetUpdateMask(
    messages,
    queue,
    updated_fields,
    http_uri_override=None,
    http_method_override=None,
    http_header_override=None,
    http_oauth_email_override=None,
    http_oauth_scope_override=None,
    http_oidc_email_override=None,
    http_oidc_audience_override=None,
):
  """A helper function to generate update mask given the override config.

  Builds an HttpTarget message from the individual override values and
  attaches it to `queue.httpTarget` in place (or None when every override
  ends up empty). Does nothing unless at least one `httpTarget.*` mask is
  present in `updated_fields`.

  Args:
    messages: The Cloud Tasks messages module for the active API version.
    queue: The Queue message being patched; mutated in place.
    updated_fields: List of camelCase field masks selected for update.
    http_uri_override: Optional URI override value.
    http_method_override: Optional HTTP method override value.
    http_header_override: Optional iterable of header overrides.
    http_oauth_email_override: Optional OAuth service account email.
    http_oauth_scope_override: Optional OAuth scope.
    http_oidc_email_override: Optional OIDC service account email.
    http_oidc_audience_override: Optional OIDC audience.

  Raises:
    RequiredFieldsMissingError: If an OAuth scope or OIDC audience is being
      set without its required service account email.
  """
  if _HttpTargetNeedsUpdate(updated_fields):
    http_target = messages.HttpTarget()
    if 'httpTarget.uriOverride' in updated_fields:
      http_target.uriOverride = http_uri_override
    if 'httpTarget.httpMethod' in updated_fields:
      http_target.httpMethod = http_method_override
    if 'httpTarget.headerOverrides' in updated_fields:
      if http_header_override is None:
        # Clearing: an empty list removes all header overrides.
        http_target.headerOverrides = []
      else:
        headers_list = []
        for ho in http_header_override:
          header_override = messages.HeaderOverride(
              header=messages.Header(key=ho.header.key, value=ho.header.value)
          )
          headers_list.append(header_override)
        http_target.headerOverrides = headers_list
    if (
        'httpTarget.oauthToken.serviceAccountEmail' in updated_fields
        or 'httpTarget.oauthToken.scope' in updated_fields
    ):
      # service account email is required
      if 'httpTarget.oauthToken.serviceAccountEmail' not in updated_fields or (
          http_oauth_email_override is None
          and http_oauth_scope_override is not None
      ):
        # We raise exception here because CT backend generates an error:
        # generic::invalid_argument:
        # service_account_email must be set. [google.rpc.error_details_ext]
        # { message: \"service_account_email must be set.\" }
        raise RequiredFieldsMissingError(
            'Oauth service account email'
            ' (http-oauth-service-account-email-override) must be set.'
        )
      elif (
          http_oauth_email_override is None
          and http_oauth_scope_override is None
      ):
        # Both parts cleared: remove the OAuth token entirely.
        http_target.oauthToken = None
      else:
        http_target.oauthToken = messages.OAuthToken(
            serviceAccountEmail=http_oauth_email_override,
            scope=http_oauth_scope_override,
        )
    if (
        'httpTarget.oidcToken.serviceAccountEmail' in updated_fields
        or 'httpTarget.oidcToken.audience' in updated_fields
    ):
      # service account email is required
      if 'httpTarget.oidcToken.serviceAccountEmail' not in updated_fields or (
          http_oidc_email_override is None
          and http_oidc_audience_override is not None
      ):
        raise RequiredFieldsMissingError(
            'Oidc service account email'
            ' (http-oidc-service-account-email-override) must be set.'
        )
      # Consistency fix: `elif` mirrors the OAuth branch above. Behavior is
      # unchanged (the preceding `raise` exits either way); the two token
      # branches now read identically.
      elif (
          http_oidc_email_override is None
          and http_oidc_audience_override is None
      ):
        # Both parts cleared: remove the OIDC token entirely.
        http_target.oidcToken = None
      else:
        http_target.oidcToken = messages.OidcToken(
            serviceAccountEmail=http_oidc_email_override,
            audience=http_oidc_audience_override,
        )
    queue.httpTarget = None if _IsEmptyConfig(http_target) else http_target
def _HttpTargetNeedsUpdate(updated_fields):
  """Returns True if any HTTP-target field mask appears in `updated_fields`.

  Args:
    updated_fields: List of camelCase field masks selected for update.

  Returns:
    True when at least one entry of `http_target_update_masks_list` is
    present in `updated_fields`; False otherwise.
  """
  # any() replaces the manual loop-and-return-True idiom.
  return any(mask in updated_fields for mask in http_target_update_masks_list)
def _NeitherUpdateNorClear(update_values, available_masks, update_fields):
return all(item is None for item in update_values) and not any(
item in available_masks for item in update_fields
)
def _IsEmptyConfig(config):
if config is None:
return True
config_dict = encoding.MessageToDict(config)
return not any(config_dict.values())

View File

@@ -0,0 +1,152 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to convert attribute formats between Task Queue and Cloud Tasks.
Functions defined here are used to migrate away from soon to be deprecated
admin-console-hr superapp. Instead we will be using Cloud Tasks APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import re
from googlecloudsdk.command_lib.tasks import constants
import six
def ConvertStringToCamelCase(string):
  """Takes a 'snake_case' string and converts it to 'camelCase'.

  Results are memoized on the function object so repeated conversions of the
  same string are served from a cache.

  Args:
    string: The string we want to convert.

  Returns:
    The converted string. Some examples are below:
      min_backoff => minBackoff
      max_retry_duration => maxRetryDuration
  """
  cache = getattr(ConvertStringToCamelCase, 'processed_strings', None)
  if cache is None:
    cache = {}
    ConvertStringToCamelCase.processed_strings = cache
  if string not in cache:
    parts = string.split('_')
    cache[string] = parts[0] + ''.join(
        part.capitalize() for part in parts[1:])
  return cache[string]
def ConvertRate(value):
  """Converts the time based rate into its integer value in seconds.

  This function converts the input float values into its seconds equivalent.
  For example,
    '100/s' => 100.0
    '60/m'  => 1.0

  Args:
    value: The string value we want to convert, e.g. '100/s'.

  Returns:
    A float value representing the rate on a per second basis.
  """
  # The value looks like '<float>/<unit>': dropping the two-character
  # '/<unit>' suffix leaves the magnitude; the last character is the unit.
  magnitude = float(value[:-2])
  unit = value[-1]
  return round(magnitude / constants.TIME_IN_SECONDS[unit], 9)
def CheckAndConvertStringToFloatIfApplicable(string):
  """Converts the input into a float (in seconds) if possible.

  Values carrying a recognized time unit are converted to their equivalent
  number of seconds; plain numeric strings become floats; anything else is
  returned unchanged. For example,
    '2m' => 120.0
    '1h' => 3600.0
    '8s' => 8.0
    'apples' => 'apples'

  Args:
    string: The string we want to convert.

  Returns:
    The converted float value when conversion is possible, otherwise the
    input itself.
  """
  if not isinstance(string, six.string_types):
    return string
  # A number (int or decimal) immediately followed by a unit suffix.
  if re.match(r'^(\d+(\.\d+)?|\.\d+)[smhd]$', string):
    return float(string[:-1]) * constants.TIME_IN_SECONDS[string[-1]]
  try:
    return float(string)
  except ValueError:
    # Not numeric at all; hand the original value back.
    return string
def ConvertBackoffSeconds(value):
  """Converts min/max backoff values to the format CT expects.

  Args:
    value: A float value representing time in seconds, or None.

  Returns:
    None when the input is None; otherwise the time rounded to eight decimal
    places, rendered as a string with 's' appended at the end.
  """
  if value is None:
    return None
  return '%ss' % round(value, 8)
def ConvertTarget(value):
  """Converts target to the format that Cloud Tasks APIs expect.

  Args:
    value: A string representing the service or version_dot_service.

  Returns:
    An ordered dict with parsed values for service, plus version when the
    input carried one.

  Raises:
    ValueError: If the input provided for target is not in the format
      expected (more than one dot).
  """
  parts = value.split('.')
  if len(parts) == 1:
    return collections.OrderedDict((('service', parts[0]),))
  if len(parts) == 2:
    version, service = parts
    return collections.OrderedDict(
        (('service', service), ('version', version)))
  raise ValueError('Unsupported value received for target {}'.format(value))
def ConvertTaskAgeLimit(value):
  """Converts task age limit values to the format CT expects.

  Args:
    value: A string value representing the task age limit. For example,
      '2.5m', '1h', '8s', etc.

  Returns:
    The string representing the time truncated to a whole number of seconds,
    with 's' appended at the end.
  """
  unit = value[-1]
  magnitude = float(value[:-1])
  seconds = magnitude * constants.TIME_IN_SECONDS[unit]
  return '{}s'.format(int(seconds))

View File

@@ -0,0 +1,192 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Library for gcloud tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.core import exceptions
class ModifyingPullAndAppEngineTaskError(exceptions.InternalError):
  """Raised when a task is given both pull and App Engine payloads.

  A task may carry either a PullMessage or an AppEngineHttpRequest, never
  both; supplying both to AlphaTasks.Create is an internal error.
  """
class BaseTasks(object):
  """Base API client for Cloud Tasks tasks.

  Holds the generated message module and the tasks service client, and
  implements the operations shared by every release track.
  """

  def __init__(self, messages, tasks_service):
    self.messages = messages
    self.tasks_service = tasks_service

  def List(self, parent_ref, limit=None, page_size=100):
    """Yields tasks in the queue `parent_ref`, fetched page by page."""
    list_request = (
        self.messages.CloudtasksProjectsLocationsQueuesTasksListRequest(
            parent=parent_ref.RelativeName()))
    return list_pager.YieldFromList(
        self.tasks_service,
        list_request,
        batch_size=page_size,
        limit=limit,
        field='tasks',
        batch_size_attribute='pageSize')

  def Get(self, task_ref, response_view=None):
    """Sends a Get request for `task_ref` at the given response view."""
    get_request = (
        self.messages.CloudtasksProjectsLocationsQueuesTasksGetRequest(
            name=task_ref.RelativeName(), responseView=response_view))
    return self.tasks_service.Get(get_request)

  def Delete(self, task_ref):
    """Sends a Delete request for the task `task_ref`."""
    delete_request = (
        self.messages.CloudtasksProjectsLocationsQueuesTasksDeleteRequest(
            name=task_ref.RelativeName()))
    return self.tasks_service.Delete(delete_request)

  def Run(self, task_ref):
    """Sends a Run request for the task `task_ref`."""
    run_request = (
        self.messages.CloudtasksProjectsLocationsQueuesTasksRunRequest(
            name=task_ref.RelativeName()))
    return self.tasks_service.Run(run_request)
class Tasks(BaseTasks):
  """API client for Cloud Tasks tasks."""

  def Create(self, parent_ref, task_ref=None, schedule_time=None,
             app_engine_http_request=None, http_request=None):
    """Prepares and sends a Create request for creating a task.

    Args:
      parent_ref: A queue resource reference to create the task in.
      task_ref: Optional task resource reference; when omitted, no explicit
        task name is set.
      schedule_time: Optional schedule time for the task.
      app_engine_http_request: Optional AppEngineHttpRequest message.
      http_request: Optional HttpRequest message.

    Returns:
      The response of the Create request.
    """
    new_task = self.messages.Task(
        name=task_ref.RelativeName() if task_ref else None,
        scheduleTime=schedule_time,
        appEngineHttpRequest=app_engine_http_request)
    if http_request:
      new_task.httpRequest = http_request
    create_request = (
        self.messages.CloudtasksProjectsLocationsQueuesTasksCreateRequest(
            createTaskRequest=self.messages.CreateTaskRequest(task=new_task),
            parent=parent_ref.RelativeName()))
    return self.tasks_service.Create(create_request)

  def Buffer(self, parent_ref, task_id=''):
    """Prepares and sends a Buffer request for buffering a task."""
    buffer_request = (
        self.messages.CloudtasksProjectsLocationsQueuesTasksBufferRequest(
            queue=parent_ref.RelativeName(), taskId=task_id))
    return self.tasks_service.Buffer(buffer_request)
class AlphaTasks(BaseTasks):
"""API client for Cloud Tasks tasks."""
def Buffer(self, parent_ref, task_id=''):
"""Prepares and sends a Create request for buffering a task."""
request = self.messages.CloudtasksProjectsLocationsQueuesTasksBufferRequest(
queue=parent_ref.RelativeName(), taskId=task_id)
return self.tasks_service.Buffer(request)
def Create(self, parent_ref, task_ref=None, schedule_time=None,
pull_message=None, app_engine_http_request=None):
"""Prepares and sends a Create request for creating a task."""
if pull_message and app_engine_http_request:
raise ModifyingPullAndAppEngineTaskError(
'Attempting to send PullMessage and AppEngineHttpRequest '
'simultaneously')
name = task_ref.RelativeName() if task_ref else None
task = self.messages.Task(
name=name, scheduleTime=schedule_time, pullMessage=pull_message,
appEngineHttpRequest=app_engine_http_request)
request = (
self.messages.CloudtasksProjectsLocationsQueuesTasksCreateRequest(
createTaskRequest=self.messages.CreateTaskRequest(task=task),
parent=parent_ref.RelativeName()))
return self.tasks_service.Create(request)
def RenewLease(self, task_ref, schedule_time, lease_duration):
"""Constructs and sends a tasks RenewLease request to the Cloud Tasks API.
Args:
task_ref: A cloudtasks.projects.locations.queues.tasks resource reference
schedule_time: string formatted as an ISO 8601 datetime with timezone
lease_duration: string of an integer followed by 's', (e.g. '10s') for
the number of seconds for the new lease
Returns:
The response of the tasks RenewLease request
"""
renew_lease_request = self.messages.RenewLeaseRequest(
scheduleTime=schedule_time, leaseDuration=lease_duration)
request_cls = (self.messages.
CloudtasksProjectsLocationsQueuesTasksRenewLeaseRequest)
request = request_cls(renewLeaseRequest=renew_lease_request,
name=task_ref.RelativeName())
return self.tasks_service.RenewLease(request)
def CancelLease(self, task_ref, schedule_time):
"""Constructs and sends a tasks CancelLease request to the Cloud Tasks API.
Args:
task_ref: A cloudtasks.projects.locations.queues.tasks resource reference
schedule_time: string formatted as an ISO 8601 datetime with timezone
Returns:
The response of the tasks CancelLease request
"""
cancel_lease_request = self.messages.CancelLeaseRequest(
scheduleTime=schedule_time)
request_cls = (self.messages.
CloudtasksProjectsLocationsQueuesTasksCancelLeaseRequest)
request = request_cls(cancelLeaseRequest=cancel_lease_request,
name=task_ref.RelativeName())
return self.tasks_service.CancelLease(request)
def Lease(self, queue_ref, lease_duration, filter_string=None,
max_tasks=None):
"""Constructs and sends a LeaseTasks request to the Cloud Tasks API.
Args:
queue_ref: A cloudtasks.projects.locations.queues resource reference
lease_duration: string of an integer followed by 's', (e.g. '10s') for the
number of seconds for the new leases
filter_string: string with an expression to filter which tasks are leased
max_tasks: the maximum number of tasks to lease
Returns:
The response of the LeaseTasks request
"""
lease_tasks_request = self.messages.LeaseTasksRequest(
filter=filter_string, leaseDuration=lease_duration, maxTasks=max_tasks)
request = (
self.messages.CloudtasksProjectsLocationsQueuesTasksLeaseRequest(
leaseTasksRequest=lease_tasks_request,
parent=queue_ref.RelativeName()))
return self.tasks_service.Lease(request)
def Acknowledge(self, task_ref, schedule_time):
"""Constructs and sends a tasks Acknowledge request to the Cloud Tasks API.
Args:
task_ref: A cloudtasks.projects.locations.queues.tasks resource reference
schedule_time: string formatted as an ISO 8601 datetime with timezone
Returns:
The response of the tasks Acknowledge request
"""
acknowledge_task_request = self.messages.AcknowledgeTaskRequest(
scheduleTime=schedule_time)
request_cls = (self.messages.
CloudtasksProjectsLocationsQueuesTasksAcknowledgeRequest)
request = request_cls(acknowledgeTaskRequest=acknowledge_task_request,
name=task_ref.RelativeName())
return self.tasks_service.Acknowledge(request)