feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package marker file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

View File

@@ -0,0 +1,72 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for deleting resources."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.composer import operations_util as operations_api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import util as command_util
from googlecloudsdk.core import log
import six
class EnvironmentDeletionWaiter(object):
  """Tracks and synchronously waits on a batch of Environment deletions."""

  def __init__(self, release_track=base.ReleaseTrack.GA):
    # Each entry is a _PendingEnvironmentDelete awaiting completion.
    self.pending_deletes = []
    self.release_track = release_track

  def AddPendingDelete(self, environment_name, operation):
    """Registers an environment deletion to be awaited later.

    Args:
      environment_name: str, the relative resource name of the environment
        being deleted
      operation: Operation, the longrunning operation object returned by the
        API when the deletion was initiated
    """
    pending = _PendingEnvironmentDelete(environment_name, operation)
    self.pending_deletes.append(pending)

  def Wait(self):
    """Polls pending deletions and returns when they are complete.

    Returns:
      True if any deletion failed with an OperationError, otherwise False.
    """
    encountered_errors = False
    for pending in self.pending_deletes:
      message = 'Waiting for [{}] to be deleted'.format(
          pending.environment_name)
      try:
        operations_api_util.WaitForOperation(
            pending.operation, message, release_track=self.release_track)
      except command_util.OperationError as e:
        # Record the failure but keep waiting on the remaining deletions.
        encountered_errors = True
        log.DeletedResource(
            pending.environment_name,
            kind='environment',
            is_async=False,
            failed=six.text_type(e))
    return encountered_errors
class _PendingEnvironmentDelete(object):
"""Data class holding information about a pending environment deletion."""
def __init__(self, environment_name, operation):
self.environment_name = environment_name
self.operation = operation

View File

@@ -0,0 +1,482 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for Image Version validation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import re
from googlecloudsdk.api_lib.composer import environments_util as environments_api_util
from googlecloudsdk.api_lib.composer import image_versions_util as image_version_api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import flags
from googlecloudsdk.command_lib.composer import util as command_util
from googlecloudsdk.core.util import semver
# Envs must be running at least this version of Composer to be upgradeable.
MIN_UPGRADEABLE_COMPOSER_VER = '1.0.0'
# Required for version comparisons
COMPOSER_LATEST_VERSION_PLACEHOLDER = '2.1.12'
UpgradeValidator = collections.namedtuple('UpgradeValidator',
['upgrade_valid', 'error'])
# Major version that is used as a replacement for the 'latest' alias.
COMPOSER_LATEST_MAJOR_VERSION = 2
class InvalidImageVersionError(command_util.Error):
  """Raised when an image version string fails validation or upgrade checks."""
class _ImageVersionItem(object):
"""Class used to dissect and analyze image version components and strings."""
def __init__(self, image_ver=None, composer_ver=None, airflow_ver=None):
image_version_regex = r'^composer-(\d+(?:(?:\.\d+\.\d+(?:-[a-z]+\.\d+)?)?)?|latest)-airflow-(\d+(?:\.\d+(?:\.\d+)?)?)'
composer_version_alias_regex = r'^(\d+|latest)$'
airflow_version_alias_regex = r'^(\d+|\d+\.\d+)$'
if image_ver is not None:
iv_parts = re.findall(image_version_regex, image_ver)[0]
self.composer_ver = iv_parts[0]
self.airflow_ver = iv_parts[1]
if composer_ver is not None:
self.composer_ver = composer_ver
if airflow_ver is not None:
self.airflow_ver = airflow_ver
# Determines the state of aliases
self.composer_contains_alias = re.match(composer_version_alias_regex,
self.composer_ver)
self.airflow_contains_alias = re.match(airflow_version_alias_regex,
self.airflow_ver)
def GetImageVersionString(self):
return 'composer-{}-airflow-{}'.format(self.composer_ver, self.airflow_ver)
def ListImageVersionUpgrades(env_ref, release_track=base.ReleaseTrack.GA):
  """List of available image version upgrades for provided env_ref."""
  env = environments_api_util.Get(env_ref, release_track)
  software_config = env.config.softwareConfig
  return _BuildUpgradeCandidateList(
      env_ref.Parent(),
      software_config.imageVersion,
      software_config.pythonVersion,
      release_track)
def IsValidImageVersionUpgrade(cur_image_version_str,
                               image_version_id):
  """Checks if image version candidate is a valid upgrade for environment.

  Args:
    cur_image_version_str: str, the environment's current image version.
    image_version_id: str, the image version requested for upgrade.

  Returns:
    UpgradeValidator namedtuple describing whether the upgrade is valid.

  Raises:
    InvalidImageVersionError: if the environment is too old to be upgraded.
  """
  current = _ImageVersionItem(image_ver=cur_image_version_str)
  # Composer 3 environments are always recent enough to upgrade; anything
  # older must meet the minimum upgradeable Composer version.
  if not IsVersionComposer3Compatible(cur_image_version_str):
    if CompareVersions(MIN_UPGRADEABLE_COMPOSER_VER, current.composer_ver) > 0:
      raise InvalidImageVersionError(
          'This environment does not support upgrades.')
  return _ValidateCandidateImageVersionId(cur_image_version_str,
                                          image_version_id)
def ImageVersionFromAirflowVersion(new_airflow_version, cur_image_version=None):
  """Converts airflow-version string into a image-version string.

  Args:
    new_airflow_version: str, requested Airflow version (e.g. '2.3.4').
    cur_image_version: str or None, the environment's current image version.

  Returns:
    str, a 'composer-X-airflow-Y' image version string.
  """
  # Composer 3 environments keep their current Composer component; all other
  # cases fall back to the 'latest' alias.
  if cur_image_version and IsVersionComposer3Compatible(cur_image_version):
    composer_ver = _ImageVersionItem(cur_image_version).composer_ver
  else:
    composer_ver = 'latest'
  item = _ImageVersionItem(
      composer_ver=composer_ver, airflow_ver=new_airflow_version)
  return item.GetImageVersionString()
def IsImageVersionStringComposerV1(image_version):
  """Checks if string composer-X.Y.Z-airflow-A.B.C is Composer v1 version.

  Args:
    image_version: str or None, image version string to inspect.

  Returns:
    True if the string denotes a Composer 1 image version, otherwise False.
  """
  # startswith accepts a tuple of prefixes; covers both the full
  # 'composer-1.x.y-...' form and the major-only 'composer-1-...' alias.
  return image_version is not None and image_version.startswith(
      ('composer-1.', 'composer-1-'))
def IsImageVersionStringComposerV2(image_version):
  """Checks if string composer-X.Y.Z-airflow-A.B.C is Composer v2 version.

  Args:
    image_version: str or None, image version string to inspect.

  Returns:
    True if the string denotes a Composer 2 image version, otherwise False.
  """
  # startswith accepts a tuple of prefixes; covers both the full
  # 'composer-2.x.y-...' form and the major-only 'composer-2-...' alias.
  return image_version is not None and image_version.startswith(
      ('composer-2.', 'composer-2-'))
def IsImageVersionStringComposerV3(image_version):
  """Checks if string composer-X.Y.Z-airflow-A.B.C is Composer v3 version.

  Args:
    image_version: str or None, image version string to inspect.

  Returns:
    True if the string denotes a Composer 3 image version, otherwise False.
  """
  # startswith accepts a tuple of prefixes; covers both the full
  # 'composer-3.x.y-...' form and the major-only 'composer-3-...' alias.
  return image_version is not None and image_version.startswith(
      ('composer-3.', 'composer-3-'))
def IsDefaultImageVersion(image_version):
  """Returns True if `image_version` is unset or uses the 'latest' alias."""
  if image_version is None:
    return True
  return image_version.startswith('composer-latest')
def BuildDefaultComposerVersionWarning(image_version, airflow_version):
  """Builds warning message about using default Composer version.

  Args:
    image_version: str or None, the --image-version value, if provided.
    airflow_version: str or None, the --airflow-version value, if provided.

  Returns:
    str, warning text tailored to which flag (if any) was provided.
  """
  message = (
      '{} resolves to Cloud Composer current default version, which is'
      ' presently Composer 2 and is subject to'
      ' further changes in the future. Consider using'
      ' --image-version=composer-A-airflow-X[.Y[.Z]]. More info at'
      ' https://cloud.google.com/composer/docs/concepts/versioning/composer-versioning-overview#version-aliases'
  )
  # --airflow-version takes precedence in the wording, then --image-version.
  if airflow_version:
    subject = 'Using --airflow-version=X[.Y[.Z]]'
  elif image_version:
    subject = 'Using --image-version=composer-latest-airflow-X[.Y[.Z]]'
  else:
    subject = 'Not defining --image-version'
  return message.format(subject)
def _CompareVersions(v1, v2):
"""Compares versions."""
if v1 == v2:
return 0
elif v1 > v2:
return 1
else:
return -1
def CompareLooseVersions(v1, v2):
  """Compares loose version strings.

  Args:
    v1: first loose version string
    v2: second loose version string

  Returns:
    Value == 1 when v1 is greater; Value == -1 when v2 is greater; otherwise 0.
  """
  return _CompareVersions(
      _VersionStrToLooseVersion(v1), _VersionStrToLooseVersion(v2))
def CompareVersions(v1, v2):
  """Compares semantic version strings.

  Args:
    v1: first semantic version string
    v2: second semantic version string

  Returns:
    Value == 1 when v1 is greater; Value == -1 when v2 is greater; otherwise 0.
  """
  return _CompareVersions(
      _VersionStrToSemanticVersion(v1), _VersionStrToSemanticVersion(v2))
def IsVersionComposer3Compatible(image_version):
  """Checks if given `image_version` is compatible with Composer 3.

  Args:
    image_version: image version str that includes Composer version.

  Returns:
    True if Composer version is greater than or equal to 3.0.0 or its
    prerelease variant, otherwise False.
  """
  if not image_version:
    return False
  item = _ImageVersionItem(image_version)
  if not item or not item.composer_ver:
    return False
  composer_version = item.composer_ver
  # Major-only '3' alias is trivially Composer 3.
  if composer_version == '3':
    return True
  if composer_version == 'latest':
    composer_version = COMPOSER_LATEST_VERSION_PLACEHOLDER
  return IsVersionInRange(
      composer_version, flags.MIN_COMPOSER3_VERSION, None, True)
def IsVersionAirflowCommandsApiCompatible(image_version):
  """Checks if given `version` is compatible with Composer Airflow Commands API.

  Args:
    image_version: image version str that includes Composer version.

  Returns:
    True if Composer version is compatible with Aiflow Commands API,
    otherwise False.
  """
  if not image_version:
    return False
  item = _ImageVersionItem(image_version)
  if not (item and item.composer_ver):
    return False
  return IsVersionInRange(
      item.composer_ver,
      flags.MIN_COMPOSER_RUN_AIRFLOW_CLI_VERSION,
      None,
      True)
def IsVersionTriggererCompatible(image_version):
  """Checks if given `version` is compatible with triggerer .

  Args:
    image_version: image version str that includes airflow version.

  Returns:
    True if given airflow version is compatible with Triggerer(>=2.2.x)
    and Composer version is >=2.0.31 otherwise False
  """
  if not image_version:
    return False
  # Parse before the Composer 3 check to preserve error behavior on
  # malformed version strings.
  item = _ImageVersionItem(image_version)
  # Triggerer is supported in Composer 3.
  if IsVersionComposer3Compatible(image_version):
    return True
  if not (item and item.airflow_ver and item.composer_ver):
    return False
  composer_version = item.composer_ver
  if composer_version == 'latest':
    composer_version = COMPOSER_LATEST_VERSION_PLACEHOLDER
  return IsVersionInRange(
      composer_version, flags.MIN_TRIGGERER_COMPOSER_VERSION, None, True
  ) and IsVersionInRange(
      item.airflow_ver, flags.MIN_TRIGGERER_AIRFLOW_VERSION, None, True)
def IsVersionInRange(version, range_from, range_to, loose=False):
  """Checks if given `version` is in range of (`range_from`, `range_to`).

  Args:
    version: version to check
    range_from: left boundary of range (inclusive), if None - no boundary
    range_to: right boundary of range (exclusive), if None - no boundary
    loose: if true use LooseVersion to compare, use SemVer otherwise

  Returns:
    True if given version is in range, otherwise False.
  """
  compare = CompareLooseVersions if loose else CompareVersions
  # Short-circuit so the upper-bound comparison runs only when the lower
  # bound is satisfied, mirroring `and` evaluation order.
  if range_from is not None and compare(range_from, version) > 0:
    return False
  return range_to is None or compare(version, range_to) < 0
def _BuildUpgradeCandidateList(location_ref,
                               image_version_id,
                               python_version,
                               release_track=base.ReleaseTrack.GA):
  """Builds a list of eligible image version upgrades.

  Args:
    location_ref: Resource, the project/location to list image versions in.
    image_version_id: str, the environment's current image version.
    python_version: str or None, required Python version for candidates.
    release_track: base.ReleaseTrack, dictates the client library used.

  Returns:
    list of image version messages that are valid upgrade targets.

  Raises:
    InvalidImageVersionError: if the current version is too old to upgrade.
  """
  service = image_version_api_util.ImageVersionService(release_track)
  current = _ImageVersionItem(image_version_id)
  # The environment must be Composer 3 or meet the minimum upgradeable
  # Composer version threshold.
  meets_threshold = (
      IsVersionComposer3Compatible(image_version_id)
      or CompareVersions(
          MIN_UPGRADEABLE_COMPOSER_VER, current.composer_ver) <= 0)
  if not meets_threshold:
    raise InvalidImageVersionError(
        'This environment does not support upgrades.')
  upgrades = []
  for candidate in service.List(location_ref):
    if not _ValidateCandidateImageVersionId(
        image_version_id, candidate.imageVersionId).upgrade_valid:
      continue
    if python_version and python_version not in candidate.supportedPythonVersions:
      continue
    upgrades.append(candidate)
  return upgrades
def _GetComposerMajorVersion(composer_ver_alias):
if composer_ver_alias == 'latest':
return COMPOSER_LATEST_MAJOR_VERSION
return int(composer_ver_alias)
def _IsComposerMajorOnlyVersionUpgradeCompatible(parsed_curr, parsed_cand):
  """Validates whether Composer major only version upgrade is compatible.

  Args:
    parsed_curr: _ImageVersionItem, the environment's current version.
    parsed_cand: _ImageVersionItem, the candidate upgrade version.

  Returns:
    UpgradeValidator with the compatibility verdict and no error message.
  """

  def _MajorOf(parsed):
    # Alias forms ('2', 'latest') were captured by the alias regex match;
    # full versions are parsed with SemVer.
    if parsed.composer_contains_alias:
      return _GetComposerMajorVersion(parsed.composer_contains_alias[0])
    return semver.SemVer(parsed.composer_ver).major

  curr_major = _MajorOf(parsed_curr)
  cand_major = _MajorOf(parsed_cand)
  # Same-major is always allowed; across majors only Composer 2 -> 3.
  allowed = curr_major == cand_major or (curr_major == 2 and cand_major == 3)
  return UpgradeValidator(allowed, None)
def _ValidateCandidateImageVersionId(
    current_image_version_id, candidate_image_version_id
):
  """Determines if candidate version is a valid upgrade from current version.

  Args:
    current_image_version_id: current image version
    candidate_image_version_id: image version requested for upgrade

  Returns:
    UpgradeValidator namedtuple containing True and None error message if
    given version upgrade between given versions is valid, otherwise False and
    error message with problems description.
  """
  upgrade_validator = UpgradeValidator(True, None)
  if current_image_version_id == candidate_image_version_id:
    error_message = ('Existing and requested image versions are equal ({}). '
                     'Select image version newer than current to perform '
                     'upgrade.').format(current_image_version_id)
    upgrade_validator = UpgradeValidator(False, error_message)
  parsed_curr = _ImageVersionItem(image_ver=current_image_version_id)
  parsed_cand = _ImageVersionItem(image_ver=candidate_image_version_id)
  # True when either side uses a major-only or 'latest' Composer alias, in
  # which case only major-version compatibility can be checked.
  has_alias_or_major_only_composer_ver = (
      parsed_cand.composer_contains_alias
      or parsed_curr.composer_contains_alias
  )
  # Checks Composer versions.
  # NOTE(review): when an alias is present, the result below replaces any
  # equal-versions error recorded above — confirm this overwrite is intended.
  if has_alias_or_major_only_composer_ver:
    upgrade_validator = _IsComposerMajorOnlyVersionUpgradeCompatible(
        parsed_curr, parsed_cand
    )
  elif (
      upgrade_validator.upgrade_valid
  ):
    upgrade_validator = _IsVersionUpgradeCompatible(
        parsed_curr.composer_ver, parsed_cand.composer_ver, 'Composer'
    )
  # Checks Airflow versions, but only when the Composer check passed and the
  # candidate Airflow component is a full version (not an alias).
  if upgrade_validator.upgrade_valid and not parsed_cand.airflow_contains_alias:
    upgrade_validator = _IsVersionUpgradeCompatible(parsed_curr.airflow_ver,
                                                    parsed_cand.airflow_ver,
                                                    'Airflow')
  # Leaves the validity check to the Composer backend request validation.
  return upgrade_validator
def _VersionStrToSemanticVersion(version_str):
  """Parses `version_str` (e.g. '1.2.3') into a `semver.SemVer` object."""
  return semver.SemVer(version_str)
def _VersionStrToLooseVersion(version_str):
  """Parses `version_str` into a `semver.LooseVersion` (lenient) object."""
  return semver.LooseVersion(version_str)
def _IsVersionUpgradeCompatible(cur_version, candidate_version,
                                image_version_part):
  """Validates whether version candidate is greater than or equal to current.

  Applicable both for Airflow and Composer version upgrades. Composer supports
  both Airflow and self MINOR and PATCH-level upgrades.

  Args:
    cur_version: current 'a.b.c' version
    candidate_version: candidate 'x.y.z' version
    image_version_part: part of image to be validated. Must be either 'Airflow'
      or 'Composer'

  Returns:
    UpgradeValidator namedtuple containing boolean value whether selected image
    version component is valid for upgrade and eventual error message if it is
    not.
  """
  assert image_version_part in ('Airflow', 'Composer')
  current = _VersionStrToSemanticVersion(cur_version)
  candidate = _VersionStrToSemanticVersion(candidate_version)
  # Downgrades are never allowed.
  if current > candidate:
    error_message = ('Upgrade cannot decrease {composer_or_airflow1}\'s '
                     'version. Current {composer_or_airflow2} version: '
                     '{cur_version}, requested {composer_or_airflow3} version: '
                     '{req_version}.').format(
                         composer_or_airflow1=image_version_part,
                         composer_or_airflow2=image_version_part,
                         cur_version=cur_version,
                         composer_or_airflow3=image_version_part,
                         req_version=candidate_version)
    return UpgradeValidator(False, error_message)
  # Major version changes are allowed only when upgrading from Composer 2
  # to Composer 3.
  majors_differ = current.major != candidate.major
  allowed_major_jump = current.major == 2 and candidate.major == 3
  if majors_differ and not allowed_major_jump:
    error_message = (
        "Upgrades between different {}'s major versions are not"
        ' supported. Current major version {}, requested major '
        'version {}.'
    ).format(
        image_version_part,
        current.major,
        candidate.major,
    )
    return UpgradeValidator(False, error_message)
  return UpgradeValidator(True, None)

View File

@@ -0,0 +1,192 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource parsing helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
def GetLocation(required=True):
  """Returns the value of the composer/location config property.

  Config properties can be overridden with command line flags. If the --location
  flag was provided, this will return the value provided with the flag.

  Args:
    required: boolean, if True, the absence of the [composer/location] property
      will result in an exception being raised
  """
  location_property = properties.VALUES.composer.location
  return location_property.Get(required=required)
def GetProject():
  """Returns the value of the core/project config property.

  Config properties can be overridden with command line flags. If the --project
  flag was provided, this will return the value provided with the flag.
  """
  project_property = properties.VALUES.core.project
  return project_property.Get(required=True)
def ParseEnvironment(environment_name):
  """Parse out an environment resource using configuration properties.

  Args:
    environment_name: str, the environment's ID, absolute name, or relative name

  Returns:
    Environment: the parsed environment resource
  """
  # Project and location fall back to config properties via these callables.
  fallback_params = {'projectsId': GetProject, 'locationsId': GetLocation}
  return resources.REGISTRY.Parse(
      environment_name,
      params=fallback_params,
      collection='composer.projects.locations.environments')
def ParseLocation(location_name):
  """Parse out a location resource using configuration properties.

  Args:
    location_name: str, the location's base name, absolute name, or
      relative name

  Returns:
    Location: the parsed Location resource
  """
  fallback_params = {'projectsId': GetProject}
  return resources.REGISTRY.Parse(
      location_name,
      params=fallback_params,
      collection='composer.projects.locations')
def ParseOperation(operation_name):
  """Parse out an operation resource using configuration properties.

  Args:
    operation_name: str, the operation's UUID, absolute name, or relative name

  Returns:
    Operation: the parsed Operation resource
  """
  fallback_params = {'projectsId': GetProject, 'locationsId': GetLocation}
  return resources.REGISTRY.Parse(
      operation_name,
      params=fallback_params,
      collection='composer.projects.locations.operations')
def ParseZone(zone):
  """Parses a zone name using configuration properties for fallback.

  Args:
    zone: str, the zone's ID, fully-qualified URL, or relative name

  Returns:
    googlecloudsdk.core.resources.Resource: a resource reference for the zone
  """
  fallback_params = {'project': GetProject}
  return resources.REGISTRY.Parse(
      zone, params=fallback_params, collection='compute.zones')
def ParseMachineType(machine_type, fallback_zone=None):
  """Parses a machine type name using configuration properties for fallback.

  Args:
    machine_type: str, the machine type's ID, fully-qualified URL, or relative
      name
    fallback_zone: str, the zone to use if `machine_type` does not contain zone
      information. If None, and `machine_type` does not contain zone
      information, parsing will fail.

  Returns:
    googlecloudsdk.core.resources.Resource: a resource reference for the
    machine type
  """
  params = {'project': GetProject}
  if fallback_zone:
    # Default-argument lambda captures the fallback zone by value.
    params['zone'] = lambda zone=fallback_zone: zone
  return resources.REGISTRY.Parse(
      machine_type, params=params, collection='compute.machineTypes')
def ParseNetworkAttachment(network_attachment, fallback_region=None):
  """Parses a network attachment name using configuration properties for fallback.

  Args:
    network_attachment: str, the network attachment's ID, fully-qualified URL,
      or relative name
    fallback_region: str, the region to use if `networkAttachment` does not
      contain region information. If None, and `networkAttachment` does not
      contain region information, parsing will fail.

  Returns:
    googlecloudsdk.core.resources.Resource: a resource reference for the
    networkAttachment
  """
  params = {'project': GetProject}
  if fallback_region:
    # Default-argument lambda captures the fallback region by value.
    params['region'] = lambda region=fallback_region: region
  return resources.REGISTRY.Parse(
      network_attachment,
      params=params,
      collection='compute.networkAttachments')
def ParseNetwork(network):
  """Parses a network name using configuration properties for fallback.

  Args:
    network: str, the network's ID, fully-qualified URL, or relative name

  Returns:
    googlecloudsdk.core.resources.Resource: a resource reference for the network
  """
  fallback_params = {'project': GetProject}
  return resources.REGISTRY.Parse(
      network, params=fallback_params, collection='compute.networks')
def ParseSubnetwork(subnetwork, fallback_region=None):
  """Parses a subnetwork name using configuration properties for fallback.

  Args:
    subnetwork: str, the subnetwork's ID, fully-qualified URL, or relative name
    fallback_region: str, the region to use if `subnetwork` does not contain
      region information. If None, and `subnetwork` does not contain region
      information, parsing will fail.

  Returns:
    googlecloudsdk.core.resources.Resource: a resource reference for the
    subnetwork
  """
  params = {'project': GetProject}
  if fallback_region:
    # Default-argument lambda captures the fallback region by value.
    params['region'] = lambda region=fallback_region: region
  return resources.REGISTRY.Parse(
      subnetwork, params=params, collection='compute.subnetworks')

View File

@@ -0,0 +1,186 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared resource flags for Cloud Composer commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.calliope.concepts import deps
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import properties
def LocationAttributeConfig(fallthroughs_enabled=True):
  """Builds the location attribute config, optionally with property fallthrough."""
  fallthroughs = []
  if fallthroughs_enabled:
    fallthroughs.append(
        deps.PropertyFallthrough(properties.VALUES.composer.location))
  return concepts.ResourceParameterAttributeConfig(
      name='location',
      help_text='Compute Engine region in which to create the {resource}.',
      fallthroughs=fallthroughs)
def EnvironmentLocationAttributeConfig(fallthroughs_enabled=True):
  """Builds the environment-location attribute config with optional fallthrough."""
  fallthroughs = []
  if fallthroughs_enabled:
    fallthroughs.append(
        deps.PropertyFallthrough(properties.VALUES.composer.location))
  return concepts.ResourceParameterAttributeConfig(
      name='location',
      help_text='Region where Composer environment runs or in which to create the environment.',
      fallthroughs=fallthroughs)
def OperationLocationAttributeConfig(fallthroughs_enabled=True):
  """Builds the operation-location attribute config with optional fallthrough."""
  fallthroughs = []
  if fallthroughs_enabled:
    fallthroughs.append(
        deps.PropertyFallthrough(properties.VALUES.composer.location))
  return concepts.ResourceParameterAttributeConfig(
      name='location',
      help_text='Compute Engine region in which to create the {resource}.',
      fallthroughs=fallthroughs)
def EnvironmentAttributeConfig():
  """Builds the attribute config for the environment resource parameter."""
  return concepts.ResourceParameterAttributeConfig(
      name='environment',
      help_text='Cloud Composer environment for the {resource}.')
def OperationAttributeConfig():
  """Builds the attribute config for the operation resource parameter."""
  return concepts.ResourceParameterAttributeConfig(
      name='operation',
      help_text='Cloud Composer operation for the {resource}.')
def GetLocationResourceSpec(fallthroughs_enabled=True):
  """Builds the resource spec for a Composer location."""
  location_config = LocationAttributeConfig(
      fallthroughs_enabled=fallthroughs_enabled)
  return concepts.ResourceSpec(
      'composer.projects.locations',
      resource_name='location',
      projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
      locationsId=location_config)
def GetEnvironmentResourceSpec():
  """Builds the resource spec for a Composer environment."""
  location_config = EnvironmentLocationAttributeConfig()
  environment_config = EnvironmentAttributeConfig()
  return concepts.ResourceSpec(
      'composer.projects.locations.environments',
      resource_name='environment',
      projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
      locationsId=location_config,
      environmentsId=environment_config)
def GetOperationResourceSpec():
  """Builds the resource spec for a Composer long-running operation."""
  location_config = OperationLocationAttributeConfig()
  operation_config = OperationAttributeConfig()
  return concepts.ResourceSpec(
      'composer.projects.locations.operations',
      resource_name='operation',
      projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
      locationsId=location_config,
      operationsId=operation_config)
def AddLocationResourceArg(parser,
                           verb,
                           positional=True,
                           required=True,
                           plural=False,
                           help_supplement=None):
  """Add a resource argument for a Cloud Composer location.

  Fallthroughs are disabled if the argument is plural, as this would cause
  the fallthrough processor to iterate over each character in the fallthrough
  value and parse it as a location ID.

  NOTE: Must be used only if it's the only resource arg in the command.

  Args:
    parser: the parser for the command
    verb: str, the verb to describe the resource, for example, 'to update'.
    positional: boolean, if True, means that the resource is a positional rather
      than a flag.
    required: boolean, if True, the arg is required
    plural: boolean, if True, expects a list of resources
    help_supplement: str, Supplementary help text specific to the command
      in which the resource arg is being used..
  """
  supplement = help_supplement or ''
  noun = 'locations' if plural else 'location'
  spec = GetLocationResourceSpec(fallthroughs_enabled=not plural)
  group_help = 'The {} {}. {}'.format(noun, verb, supplement)
  concept_parsers.ConceptParser.ForResource(
      _BuildArgName(noun, positional),
      spec,
      group_help,
      required=required,
      plural=plural).AddToParser(parser)
def AddEnvironmentResourceArg(parser,
                              verb,
                              positional=True,
                              required=True,
                              plural=False):
  """Add a resource argument for a Cloud Composer Environment.

  NOTE: Must be used only if it's the only resource arg in the command.

  Args:
    parser: the parser for the command
    verb: str, the verb to describe the resource, for example, 'to update'.
    positional: boolean, if True, means that the resource is a positional rather
      than a flag.
    required: boolean, if True, the arg is required
    plural: boolean, if True, expects a list of resources
  """
  noun = 'environments' if plural else 'environment'
  group_help = 'The {} {}.'.format(noun, verb)
  concept_parsers.ConceptParser.ForResource(
      _BuildArgName(noun, positional),
      GetEnvironmentResourceSpec(),
      group_help,
      required=required,
      plural=plural).AddToParser(parser)
def AddOperationResourceArg(parser,
                            verb,
                            positional=True,
                            required=True,
                            plural=False):
  """Add a resource argument for a Cloud Composer long-running operation.

  NOTE: Must be used only if it's the only resource arg in the command.

  Args:
    parser: the parser for the command
    verb: str, the verb to describe the resource, for example, 'to update'.
    positional: boolean, if True, means that the resource is a positional rather
      than a flag.
    required: boolean, if True, the arg is required
    plural: boolean, if True, expects a list of resources
  """
  noun = 'operations' if plural else 'operation'
  group_help = 'The {} {}.'.format(noun, verb)
  concept_parsers.ConceptParser.ForResource(
      _BuildArgName(noun, positional),
      GetOperationResourceSpec(),
      group_help,
      required=required,
      plural=plural).AddToParser(parser)
def _BuildArgName(name, positional):
return '{}{}'.format('' if positional else '--', name)

View File

@@ -0,0 +1,439 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for Composer environment storage commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os.path
import posixpath
import re
from apitools.base.py import exceptions as apitools_exceptions
from apitools.base.py import transfer
from googlecloudsdk.api_lib.composer import environments_util as environments_api_util
from googlecloudsdk.api_lib.storage import storage_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import util as command_util
from googlecloudsdk.command_lib.util import gcloudignore
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import files
import six
BUCKET_MISSING_MSG = 'Could not retrieve Cloud Storage bucket for environment.'
def WarnIfWildcardIsPresent(path, flag_name):
  """Logs a deprecation warning when gsutil-style wildcards appear in path."""
  has_wildcard = bool(path) and (
      '*' in path or '?' in path or re.search(r'\[.*\]', path) is not None)
  if has_wildcard:
    log.warning('Use of gsutil wildcards is no longer supported in {0}. '
                'Set the storage/use_gsutil property to get the old behavior '
                'back temporarily. However, this property will eventually be '
                'removed.'.format(flag_name))
# NOTE: Only support 2 paths instead of *args so the gsutil_path doesn't get
# hidden in **kwargs.
def _JoinPaths(path1, path2, gsutil_path=False):
"""Joins paths using the appropriate separator for local or gsutil."""
if gsutil_path:
return posixpath.join(path1, path2)
else:
return os.path.join(path1, path2)
def List(env_ref, gcs_subdir, release_track=base.ReleaseTrack.GA):
  """Lists all resources in one folder of an environment's bucket.

  Args:
    env_ref: googlecloudsdk.core.resources.Resource, Resource representing
      the Environment whose corresponding bucket to list.
    gcs_subdir: str, subdir of the Cloud Storage bucket which to list
    release_track: base.ReleaseTrack, the release track of command. Will
      dictate which Composer client library will be used.

  Returns:
    list of Objects inside subdirectory of Cloud Storage bucket for environment

  Raises:
    command_util.Error: if the storage bucket could not be retrieved
  """
  bucket_ref = _GetStorageBucket(env_ref, release_track=release_track)
  # Listing uses prefix matching; the trailing slash scopes it to the subdir.
  return storage_api.StorageClient().ListBucket(
      bucket_ref, prefix=gcs_subdir + '/')
def Import(env_ref, source, destination, release_track=base.ReleaseTrack.GA):
  """Imports files and directories into an environment's bucket.

  Args:
    env_ref: googlecloudsdk.core.resources.Resource, Resource representing
      the Environment whose bucket into which to import.
    source: str, a path from which to import files into the
      environment's bucket. Directory sources are imported recursively; the
      directory itself will be present in the destination bucket.
      Must contain at least one non-empty value.
    destination: str, subdir of the Cloud Storage bucket into which to import
      `sources`. Must have a single trailing slash but no leading slash. For
      example, 'data/foo/bar/'.
    release_track: base.ReleaseTrack, the release track of command. Will
      dictate which Composer client library will be used.

  Raises:
    command_util.Error: if the storage bucket could not be retrieved
    command_util.GsutilError: the gsutil command failed
  """
  bucket_ref = _GetStorageBucket(env_ref, release_track=release_track)
  # The storage/use_gsutil property preserves the legacy shell-out behavior.
  if properties.VALUES.storage.use_gsutil.GetBool():
    _ImportGsutil(bucket_ref, source, destination)
  else:
    _ImportStorageApi(bucket_ref, source, destination)
def _ImportStorageApi(gcs_bucket, source, destination):
  """Imports files and directories into a bucket using the storage API.

  Args:
    gcs_bucket: storage_util.BucketReference, the bucket into which to import.
    source: str, a local path or gs:// URL to import. A trailing '*' marks the
      source as a directory ("subdir") rather than a single object.
    destination: str, bucket-relative destination path with a trailing slash.

  Raises:
    command_util.Error: if a local source path does not exist.
  """
  client = storage_api.StorageClient()
  old_source = source
  source = source.rstrip('*')
  # Source ends with an asterisk. This means the user indicates that the source
  # is a directory so we shouldn't bother trying to see if source is an object.
  # This is important because we always have certain subdirs created as objects
  # (e.g. dags/), so if we don't do this check, import/export will just try
  # and copy this empty object.
  object_is_subdir = old_source != source
  if not object_is_subdir:
    # If source is not indicated to be a subdir, then strip the ending slash
    # so the specified directory is present in the destination.
    source = source.rstrip(posixpath.sep)
  source_is_local = not source.startswith('gs://')
  if source_is_local and not os.path.exists(source):
    raise command_util.Error('Source for import does not exist.')
  # Don't include the specified directory as we want that present in the
  # destination bucket.
  source_dirname = _JoinPaths(
      os.path.dirname(source), '', gsutil_path=not source_is_local)
  if source_is_local:
    if os.path.isdir(source):
      # Honor .gcloudignore rules when uploading a whole directory.
      file_chooser = gcloudignore.GetFileChooserForDir(source)
      for rel_path in file_chooser.GetIncludedFiles(source):
        file_path = _JoinPaths(source, rel_path)
        if os.path.isdir(file_path):
          continue
        dest_path = _GetDestPath(source_dirname, file_path, destination, False)
        obj_ref = storage_util.ObjectReference.FromBucketRef(gcs_bucket,
                                                             dest_path)
        client.CopyFileToGCS(file_path, obj_ref)
    else:  # Just upload the file.
      dest_path = _GetDestPath(source_dirname, source, destination, False)
      obj_ref = storage_util.ObjectReference.FromBucketRef(gcs_bucket,
                                                           dest_path)
      client.CopyFileToGCS(source, obj_ref)
  else:
    # GCS-to-GCS copy: resolve the source to one or more concrete objects.
    source_ref = storage_util.ObjectReference.FromUrl(source)
    to_import = _GetObjectOrSubdirObjects(
        source_ref, object_is_subdir=object_is_subdir, client=client)
    for obj in to_import:
      dest_object = storage_util.ObjectReference.FromBucketRef(
          gcs_bucket,
          # Use obj.ToUrl() to ensure that the dirname is properly stripped.
          _GetDestPath(source_dirname, obj.ToUrl(), destination, False))
      client.Copy(obj, dest_object)
def _ImportGsutil(gcs_bucket, source, destination):
  """Imports source recursively into the bucket by shelling out to gsutil."""
  destination_ref = storage_util.ObjectReference.FromBucketRef(
      gcs_bucket, destination)
  try:
    exit_code = storage_util.RunGsutilCommand(
        'cp',
        command_args=['-r', source, destination_ref.ToUrl()],
        run_concurrent=True,
        out_func=log.out.write,
        err_func=log.err.write)
  except (execution_utils.PermissionError,
          execution_utils.InvalidCommandError) as e:
    raise command_util.GsutilError(six.text_type(e))
  if exit_code:
    raise command_util.GsutilError('gsutil returned non-zero status code.')
def Export(env_ref, source, destination, release_track=base.ReleaseTrack.GA):
  """Exports files and directories from an environment's Cloud Storage bucket.

  Args:
    env_ref: googlecloudsdk.core.resources.Resource, Resource representing
      the Environment whose bucket from which to export.
    source: str, a bucket-relative path from which to export files.
      Directory sources are exported recursively; the directory itself will
      be present in the destination. Can also include wildcards.
    destination: str, existing local directory or path to a Cloud Storage
      bucket or directory object to which to export.
      Must have a single trailing slash but no leading slash. For
      example, 'dir/foo/bar/'.
    release_track: base.ReleaseTrack, the release track of command. Will
      dictate which Composer client library will be used.

  Raises:
    command_util.Error: if the storage bucket could not be retrieved or a
      non-Cloud Storage destination that is not a local directory was provided.
    command_util.GsutilError: the gsutil command failed
  """
  bucket_ref = _GetStorageBucket(env_ref, release_track=release_track)
  # The storage/use_gsutil property preserves the legacy shell-out behavior.
  if properties.VALUES.storage.use_gsutil.GetBool():
    _ExportGsutil(bucket_ref, source, destination)
  else:
    _ExportStorageApi(bucket_ref, source, destination)
def _ExportStorageApi(gcs_bucket, source, destination):
  """Exports files and directories from an environment's GCS bucket.

  Args:
    gcs_bucket: storage_util.BucketReference, the bucket from which to export.
    source: str, bucket-relative path from which to export. A trailing '*'
      marks the source as a directory ("subdir") rather than a single object.
    destination: str, existing local directory or gs:// URL to export to.

  Raises:
    command_util.Error: if a non-Cloud Storage destination is not an existing
      local directory.
  """
  old_source = source
  source = source.rstrip('*')
  # Source ends with an asterisk. This means the user indicates that the source
  # is a directory so we shouldn't bother trying to see if source is an object.
  # This is important because we always have certain subdirs created as objects
  # (e.g. dags/), so if we don't do this check, import/export will just try
  # and copy this empty object.
  object_is_subdir = old_source != source
  client = storage_api.StorageClient()
  source_ref = storage_util.ObjectReference.FromBucketRef(gcs_bucket, source)
  dest_is_local = True
  if destination.startswith('gs://'):
    # Normalize GCS destinations to have exactly one trailing slash.
    destination = _JoinPaths(
        destination.strip(posixpath.sep), '', gsutil_path=True)
    dest_is_local = False
  elif not os.path.isdir(destination):
    raise command_util.Error('Destination for export must be a directory.')
  source_dirname = _JoinPaths(os.path.dirname(source), '', gsutil_path=True)
  to_export = _GetObjectOrSubdirObjects(
      source_ref, object_is_subdir=object_is_subdir, client=client)
  if dest_is_local:
    for obj in to_export:
      dest_path = _GetDestPath(source_dirname, obj.name, destination, True)
      files.MakeDir(os.path.dirname(dest_path))
      # Command description for export commands says overwriting is default
      # behavior.
      client.CopyFileFromGCS(obj, dest_path, overwrite=True)
  else:
    for obj in to_export:
      dest_object = storage_util.ObjectReference.FromUrl(
          _GetDestPath(source_dirname, obj.name, destination, False))
      client.Copy(obj, dest_object)
def _ExportGsutil(gcs_bucket, source, destination):
  """Exports from an environment's bucket by shelling out to gsutil."""
  source_ref = storage_util.ObjectReference.FromBucketRef(gcs_bucket, source)
  if destination.startswith('gs://'):
    # Normalize GCS destinations to have exactly one trailing slash.
    destination = _JoinPaths(
        destination.strip(posixpath.sep), '', gsutil_path=True)
  elif not os.path.isdir(destination):
    raise command_util.Error('Destination for export must be a directory.')
  try:
    exit_code = storage_util.RunGsutilCommand(
        'cp',
        command_args=['-r', source_ref.ToUrl(), destination],
        run_concurrent=True,
        out_func=log.out.write,
        err_func=log.err.write)
  except (execution_utils.PermissionError,
          execution_utils.InvalidCommandError) as e:
    raise command_util.GsutilError(six.text_type(e))
  if exit_code:
    raise command_util.GsutilError('gsutil returned non-zero status code.')
def _GetDestPath(source_dirname, source_path, destination, dest_is_local):
"""Get dest path without the dirname of the source dir if present."""
dest_path_suffix = source_path
if source_path.startswith(source_dirname):
dest_path_suffix = source_path[len(source_dirname):]
# For Windows, replace path separators with the posix path separators.
if not dest_is_local:
dest_path_suffix = dest_path_suffix.replace(os.path.sep, posixpath.sep)
return _JoinPaths(
destination, dest_path_suffix, gsutil_path=not dest_is_local)
def Delete(env_ref, target, gcs_subdir, release_track=base.ReleaseTrack.GA):
  """Deletes objects in a folder of an environment's bucket.

  gsutil deletes directory marker objects even when told to delete just the
  directory's contents, so we need to check that it exists and create it if it
  doesn't.

  A better alternative will be to use the storage API to list
  objects by prefix and implement deletion ourselves

  Args:
    env_ref: googlecloudsdk.core.resources.Resource, Resource representing
      the Environment in whose corresponding bucket to delete objects.
    target: str, the path within the gcs_subdir directory in the bucket
      to delete. If this is equal to '*', then delete everything in
      gcs_subdir.
    gcs_subdir: str, subdir of the Cloud Storage bucket in which to delete.
      Should not contain slashes, for example "dags".
    release_track: base.ReleaseTrack, the release track of command. Will
      dictate which Composer client library will be used.
  """
  bucket_ref = _GetStorageBucket(env_ref, release_track=release_track)
  if properties.VALUES.storage.use_gsutil.GetBool():
    _DeleteGsutil(bucket_ref, target, gcs_subdir)
  else:
    _DeleteStorageApi(bucket_ref, target, gcs_subdir)
  log.status.Print('Deletion operation succeeded.')
  # Recreate the directory marker object in case the deletion removed it.
  _EnsureSubdirExists(bucket_ref, gcs_subdir)
def _DeleteStorageApi(gcs_bucket, target, gcs_subdir):
  """Deletes objects in a folder of an environment's bucket with storage API."""
  client = storage_api.StorageClient()
  # Explicitly only support target = '*' and no other globbing notation.
  # This is because the flag help text explicitly says to give a subdir.
  # Star also has a special meaning and tells the delete function to not try
  # and get the object. This is necessary because subdirs in the GCS buckets
  # are created as objects to ensure they exist.
  delete_all = target == '*'
  # Listing in a bucket uses a prefix match and doesn't support * notation.
  prefix = '' if delete_all else target
  target_ref = storage_util.ObjectReference.FromBucketRef(
      gcs_bucket, posixpath.join(gcs_subdir, prefix))
  for obj_ref in _GetObjectOrSubdirObjects(
      target_ref, object_is_subdir=delete_all, client=client):
    client.DeleteObject(obj_ref)
def _DeleteGsutil(gcs_bucket, target, gcs_subdir):
  """Deletes objects in a folder of an environment's bucket with gsutil."""
  target_ref = storage_util.ObjectReference.FromBucketRef(
      gcs_bucket, _JoinPaths(gcs_subdir, target, gsutil_path=True))
  try:
    exit_code = storage_util.RunGsutilCommand(
        'rm',
        command_args=['-r', target_ref.ToUrl()],
        run_concurrent=True,
        out_func=log.out.write,
        err_func=log.err.write)
  except (execution_utils.PermissionError,
          execution_utils.InvalidCommandError) as e:
    raise command_util.GsutilError(six.text_type(e))
  if exit_code:
    raise command_util.GsutilError('gsutil returned non-zero status code.')
def _GetObjectOrSubdirObjects(object_ref, object_is_subdir=False, client=None):
  """Gets object_ref or the objects under object_ref if it's a subdir.

  Args:
    object_ref: storage_util.ObjectReference, the object or subdirectory to
      resolve.
    object_is_subdir: bool, True to treat object_ref as a subdirectory without
      first checking whether it is an actual object.
    client: storage_api.StorageClient or None, a client to reuse; a new one is
      created when None.

  Returns:
    list of storage_util.ObjectReference, the resolved objects.
  """
  client = client or storage_api.StorageClient()
  objects = []
  # Check if object_ref refers to an actual object. If it does not exist, we
  # assume the user is specifying a subdirectory.
  target_is_subdir = False
  if not object_is_subdir:
    try:
      client.GetObject(object_ref)
      objects.append(object_ref)
    except apitools_exceptions.HttpNotFoundError:
      target_is_subdir = True
  if target_is_subdir or object_is_subdir:
    # Append a trailing slash so listing matches only the subdir's contents.
    target_path = posixpath.join(object_ref.name, '')
    subdir_objects = client.ListBucket(object_ref.bucket_ref, target_path)
    for obj in subdir_objects:
      if object_is_subdir and obj.name == object_ref.name:
        # In this case, object_ref is to be treated as a subdir, so if
        # object_ref happens to also be an object, ignore it.
        continue
      objects.append(
          storage_util.ObjectReference.FromName(object_ref.bucket, obj.name))
  return objects
def _EnsureSubdirExists(bucket_ref, subdir):
  """Checks that a directory marker object exists in the bucket or creates one.

  The directory marker object is needed for subdir listing to not crash
  if the directory is empty.

  Args:
    bucket_ref: googlecloudsdk.api_lib.storage.storage_util.BucketReference,
      a reference to the environment's bucket
    subdir: str, the subdirectory to check or recreate. Should not contain
      slashes.

  Raises:
    command_util.Error: if the marker object can neither be read nor created.
  """
  subdir_name = '{}/'.format(subdir)
  subdir_ref = storage_util.ObjectReference.FromBucketRef(bucket_ref,
                                                          subdir_name)
  storage_client = storage_api.StorageClient()
  try:
    storage_client.GetObject(subdir_ref)
  except apitools_exceptions.HttpNotFoundError:
    # Insert an empty object into the bucket named subdir_name, which will
    # serve as an empty directory marker.
    insert_req = storage_client.messages.StorageObjectsInsertRequest(
        bucket=bucket_ref.bucket,
        name=subdir_name)
    upload = transfer.Upload.FromStream(
        io.BytesIO(), 'application/octet-stream')
    try:
      storage_client.client.objects.Insert(insert_req, upload=upload)
    except apitools_exceptions.HttpError:
      raise command_util.Error(
          'Error re-creating empty {}/ directory most likely due to lack of '
          'permissions.'.format(subdir))
  except apitools_exceptions.HttpForbiddenError:
    raise command_util.Error(
        'Error checking directory {}/ marker object exists due to lack of '
        'permissions.'.format(subdir))
def _GetStorageBucket(env_ref, release_track=base.ReleaseTrack.GA):
  """Returns the bucket reference backing the environment's DAG GCS prefix.

  Args:
    env_ref: googlecloudsdk.core.resources.Resource, Resource representing
      the Environment whose bucket to look up.
    release_track: base.ReleaseTrack, the release track of command. Will
      dictate which Composer client library will be used.

  Returns:
    storage_util.BucketReference, the environment's bucket.

  Raises:
    command_util.Error: if the storage bucket could not be retrieved.
  """
  env = environments_api_util.Get(env_ref, release_track=release_track)
  dag_prefix = env.config.dagGcsPrefix
  if not dag_prefix:
    raise command_util.Error(BUCKET_MISSING_MSG)
  try:
    return storage_util.ObjectReference.FromUrl(dag_prefix).bucket_ref
  except (storage_util.InvalidObjectNameError, ValueError):
    raise command_util.Error(BUCKET_MISSING_MSG)

View File

@@ -0,0 +1,739 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for Composer commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import contextlib
import io
import ipaddress
import os
import re
from googlecloudsdk.api_lib.composer import util as api_util
from googlecloudsdk.api_lib.container import api_adapter as gke_api_adapter
from googlecloudsdk.api_lib.container import util as gke_util
from googlecloudsdk.api_lib.storage import storage_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import parsers
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
from googlecloudsdk.core.util import encoding
from googlecloudsdk.core.util import files
import six
GKE_API_VERSION = 'v1'
KUBECONFIG_ENV_VAR_NAME = 'KUBECONFIG'
ENVIRONMENT_NAME_PATTERN = re.compile('^[a-z](?:[-0-9a-z]{0,62}[0-9a-z])?$')
_GCLOUD_COMPONENT_NAME = 'gcloud'
_KUBECTL_COMPONENT_NAME = 'kubectl'
MISSING_GCLOUD_MSG = """\
Unable to find [gcloud] on path!
"""
MISSING_KUBECTL_MSG = """\
Accessing a Cloud Composer environment requires the kubernetes commandline
client [kubectl]. To install, run
$ gcloud components install kubectl
"""
# Subcommand list can still be executed, but will be marked/handled as
# deprecated until removed.
SUBCOMMAND_DEPRECATION = {
# No subcommands currently in a deprecated state.
}
# {key: value}, key - name of subcommand, value - range of Airflow versions that
# support. {'cmd': ('1.10.14', '2.0.0')} - command `cmd` is supported for
# Airflow versions greater or equal to 1.10.14, prior to 2.0.0 (exclusive).
# None - means no boundary.
# LINT.IfChange
class SupportedAirflowVersion(collections.namedtuple(
    'SupportedAirflowVersion',
    'from_version to_version allowed_nested_subcommands')):
  """Range of Airflow versions for which a CLI subcommand is supported.

  Attributes:
    from_version: str or None, the inclusive lower bound of supported Airflow
      versions; None means no lower bound.
    to_version: str, the exclusive upper bound of supported Airflow versions.
    allowed_nested_subcommands: dict or None, maps nested subcommand names to
      their own SupportedAirflowVersion ranges.
  """
  __slots__ = ()

  def __new__(cls, from_version=None, to_version='3.0.0',
              allowed_nested_subcommands=None):
    return super(SupportedAirflowVersion, cls).__new__(
        cls, from_version, to_version, allowed_nested_subcommands)
SUBCOMMAND_ALLOWLIST = {
'backfill':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'clear':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'connections':
SupportedAirflowVersion(from_version=None, to_version='3.2.0'),
'db':
SupportedAirflowVersion(
from_version='2.3.0',
to_version='3.0.0',
allowed_nested_subcommands={
'check':
SupportedAirflowVersion(
from_version='2.3.0', to_version='3.0.0'),
'trim':
SupportedAirflowVersion(
from_version='2.6.3', to_version='3.0.0'),
}),
'dag_state':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'dags':
SupportedAirflowVersion(from_version='1.10.14', to_version='3.2.0'),
'delete_dag':
SupportedAirflowVersion(from_version='1.10.1', to_version='2.0.0'),
'kerberos':
SupportedAirflowVersion(from_version=None, to_version='3.2.0'),
'kubernetes':
SupportedAirflowVersion(from_version='2.1.4', to_version='3.2.0'),
'list_dag_runs':
SupportedAirflowVersion(from_version='1.10.2', to_version='2.0.0'),
'list_dags':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'list-import-errors':
SupportedAirflowVersion(from_version=None, to_version='3.0.0'),
'list_tasks':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'next_execution':
SupportedAirflowVersion(from_version='1.10.2', to_version='2.0.0'),
'pause':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'pool':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'pools':
SupportedAirflowVersion(from_version='1.10.14', to_version='3.2.0'),
'render':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'roles':
SupportedAirflowVersion(from_version='2.0.0', to_version='3.2.0'),
'run':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'sync-perm':
SupportedAirflowVersion(from_version='1.10.14', to_version='3.2.0'),
'sync_perm':
SupportedAirflowVersion(from_version='1.10.2', to_version='2.0.0'),
'task_failed_deps':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'task_state':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'tasks':
SupportedAirflowVersion(from_version='1.10.14', to_version='3.2.0'),
'test':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'trigger_dag':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'unpause':
SupportedAirflowVersion(from_version=None, to_version='2.0.0'),
'upgrade_check':
SupportedAirflowVersion(from_version='1.10.15', to_version='2.0.0'),
'users':
SupportedAirflowVersion(from_version='1.10.14', to_version='3.2.0'),
'variables':
SupportedAirflowVersion(from_version=None, to_version='3.2.0'),
'version':
SupportedAirflowVersion(from_version=None, to_version='3.2.0'),
}
# Code paths are prohibited from being included in this file.
# LINT.ThenChange()
SUBCOMMAND_ALLOWLIST.update(SUBCOMMAND_DEPRECATION)
DEFAULT_NAMESPACE = 'default'
NAMESPACE_ARG_NAME = '--namespace'
NAMESPACE_ARG_ALIAS = '-n'
NAMESPACE_STATUS_ACTIVE = 'active'
GkePodStatus = collections.namedtuple('GkePodStatus', 'name phase isReady')
# Root of the exception hierarchy for this module; more specific error types
# below all derive from it so callers can catch Error to handle any of them.
class Error(core_exceptions.Error):
  """Class for errors raised by Composer commands."""


class KubectlError(Error):
  """Class for errors raised when shelling out to kubectl."""


class GsutilError(Error):
  """Class for errors raised when shelling out to gsutil."""


class OperationError(Error):
  """Class for errors raised when a polled operation completes with an error."""

  def __init__(self, operation_name, description):
    super(OperationError, self).__init__('Operation [{}] failed: {}'.format(
        operation_name, description))


class EnvironmentCreateError(Error):
  """Class for errors raised when creating an environment."""


class EnvironmentDeleteError(Error):
  """Class for errors raised when deleting an environment."""


class InvalidUserInputError(Error):
  """Class for errors raised when a user input fails client-side validation."""
def ConstructList(title, items):
  """Renders a titled text listing of the given items.

  Args:
    title: string, the listing title
    items: iterable, the iterable whose elements to list

  Returns:
    string, text representing list title and elements.
  """
  output = io.StringIO()
  list_format = 'list[title="{0}"]'.format(title)
  resource_printer.Print(items, list_format, out=output)
  return output.getvalue()
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def TemporaryKubeconfig(location_id, cluster_id, kubecontext_override=None):
  """Context manager that manages a temporary kubeconfig file for a GKE cluster.

  The kubeconfig file will be automatically created and destroyed and will
  contain only the credentials for the specified GKE cluster. The 'KUBECONFIG'
  value in `os.environ` will be temporarily updated with the temporary
  kubeconfig's path. Consequently, subprocesses started with
  googlecloudsdk.core.execution_utils.Exec while in this context manager will
  see the temporary KUBECONFIG environment variable.

  Args:
    location_id: string, the id of the location to which the cluster belongs
    cluster_id: string, the id of the cluster
    kubecontext_override: string, the kubecontext override

  Raises:
    Error: If unable to get credentials for kubernetes cluster.

  Returns:
    the path to the temporary kubeconfig file

  Yields:
    Due to b/73533917, linter crashes without yields.
  """
  gke_util.CheckKubectlInstalled()
  with files.TemporaryDirectory() as tempdir:
    kubeconfig = os.path.join(tempdir, 'kubeconfig')
    # Remember the caller's KUBECONFIG so it can be restored on exit.
    old_kubeconfig = encoding.GetEncodedValue(os.environ,
                                              KUBECONFIG_ENV_VAR_NAME)
    try:
      encoding.SetEncodedValue(os.environ, KUBECONFIG_ENV_VAR_NAME, kubeconfig)
      gke_api = gke_api_adapter.NewAPIAdapter(GKE_API_VERSION)
      cluster_ref = gke_api.ParseCluster(cluster_id, location_id)
      cluster = gke_api.GetCluster(cluster_ref)
      auth = cluster.masterAuth
      # Client-certificate credentials are required unless the GCP auth
      # provider is in use.
      missing_creds = not (auth and auth.clientCertificate and auth.clientKey)
      if missing_creds and not gke_util.ClusterConfig.UseGCPAuthProvider():
        raise Error('Unable to get cluster credentials. User must have edit '
                    'permission on {}'.format(cluster_ref.projectId))
      # Writes the cluster credentials into the temporary kubeconfig.
      gke_util.ClusterConfig.Persist(
          cluster,
          cluster_ref.projectId,
          kubecontext_override=kubecontext_override,
      )
      yield kubeconfig
    finally:
      # Always restore the original KUBECONFIG value, even on error.
      encoding.SetEncodedValue(os.environ, KUBECONFIG_ENV_VAR_NAME,
                               old_kubeconfig)
def ExtractGkeClusterLocationId(env_object):
  """Finds the location ID of the GKE cluster running the provided environment.

  Args:
    env_object: Environment, the environment, likely returned by an API call,
      whose cluster location to extract

  Raises:
    Error: if Kubernetes cluster is not found.

  Returns:
    str, the location ID (a short name like us-central1-b) of the GKE cluster
    running the environment
  """
  # Prefer the location recorded on the environment's node config; it is a
  # resource path, so keep only the final path segment.
  if env_object.config.nodeConfig.location:
    return env_object.config.nodeConfig.location[env_object.config.nodeConfig
                                                 .location.rfind('/') + 1:]
  # Otherwise, look the cluster up by name via the GKE API.
  gke_cluster = env_object.config.gkeCluster[env_object.config.gkeCluster
                                             .rfind('/') + 1:]
  gke_api = gke_api_adapter.NewAPIAdapter(GKE_API_VERSION)
  # GKE is in the middle of deprecating zones in favor of locations, so we
  # read from whichever one has a value.
  cluster_zones = [
      c.location[c.location.rfind('/') + 1:] or c.zone
      for c in gke_api.ListClusters(parsers.GetProject()).clusters
      if c.name == gke_cluster
  ]
  if not cluster_zones:
    # This should never happen unless the user has deleted their cluster out of
    # band.
    raise Error('Kubernetes Engine cluster not found.')
  elif len(cluster_zones) == 1:
    return cluster_zones[0]
  # Multiple matches: ask the user which location they meant.
  return cluster_zones[console_io.PromptChoice(
      ['[{}]'.format(z) for z in cluster_zones],
      default=0,
      message='Cluster found in more than one location. Please select the desired '
      'location:')]
def GetGkePod(pod_substr=None, kubectl_namespace=None):
  """Returns the name of a running pod in a GKE cluster.

  Queries pods in the cluster targeted by the current kubeconfig context; call
  this within a TemporaryKubeconfig context manager to target a specific
  cluster.

  Pods with 'Ready: true' condition state are preferred. If there are no such
  pods, any running pod will be returned. When pod_substr is not None, the
  returned pod's name must contain pod_substr; otherwise an arbitrary running
  pod is returned.

  Args:
    pod_substr: string, a filter to apply to pods. The returned pod name must
      contain pod_substr (if it is not None).
    kubectl_namespace: string or None, namespace to query for gke pods

  Returns:
    string, the name of a running (preferably ready) pod.

  Raises:
    Error: if GKE pods cannot be retrieved or desired pod is not found.
  """
  stdout = io.StringIO()
  kubectl_args = [
      'get', 'pods', '--output',
      (r'jsonpath={range .items[*]}{.metadata.name}{"\t"}{.status.phase}'
       r'{"\t"}{.status.conditions[?(.type=="Ready")].status}{"\n"}')
  ]
  try:
    RunKubectlCommand(
        kubectl_args,
        out_func=stdout.write,
        err_func=log.err.write,
        namespace=kubectl_namespace)
  except KubectlError as e:
    raise Error('Error retrieving GKE pods: %s' % e)
  statuses = [
      GkePodStatus(*line.split('\t'))
      for line in stdout.getvalue().split('\n')
      if line
  ]
  # Stable sort putting ready pods (isReady == 'true') first: False orders
  # before True when sorting bools ascending.
  statuses.sort(key=lambda status: status.isReady.lower() != 'true')
  running = [
      status.name for status in statuses if status.phase.lower() == 'running'
  ]
  if not running:
    raise Error('No running GKE pods found. If the environment '
                'was recently started, please wait and retry.')
  if pod_substr is None:
    return running[0]
  for pod_name in running:
    if pod_substr in pod_name:
      return pod_name
  raise Error('Desired GKE pod not found. If the environment '
              'was recently started, please wait and retry.')
def IsValidEnvironmentName(name):
  """Returns True if the provided name is a valid environment name."""
  return bool(ENVIRONMENT_NAME_PATTERN.match(name))
def RunKubectlCommand(args, out_func=None, err_func=None, namespace=None):
  """Shells out a command to kubectl.

  This command should be called within the context of a TemporaryKubeconfig
  context manager in order for kubectl to be configured to access the correct
  cluster.

  Args:
    args: list of strings, command line arguments to pass to the kubectl
      command. Should omit the kubectl command itself. For example, to execute
      'kubectl get pods', provide ['get', 'pods'].
    out_func: str->None, a function to call with the stdout of the kubectl
      command
    err_func: str->None, a function to call with the stderr of the kubectl
      command
    namespace: str or None, the kubectl namespace to apply to the command

  Raises:
    Error: if kubectl could not be called
    KubectlError: if the invocation of kubectl was unsuccessful
  """
  # Check for 'kubectl' along Cloud SDK path. This will fail if component
  # manager is disabled. In this case, check entire path.
  kubectl_path = files.FindExecutableOnPath(_KUBECTL_COMPONENT_NAME,
                                            config.Paths().sdk_bin_path)
  if kubectl_path is None:
    kubectl_path = files.FindExecutableOnPath(_KUBECTL_COMPONENT_NAME)
  if kubectl_path is None:
    raise Error(MISSING_KUBECTL_MSG)
  exec_args = AddKubectlNamespace(
      namespace, execution_utils.ArgsForExecutableTool(kubectl_path, *args))
  try:
    # All kubectl requests will execute within the scope of the 'default'
    # namespace, unless the namespace scope has been explicitly set within the
    # args.
    retval = execution_utils.Exec(
        exec_args,
        no_exit=True,
        out_func=out_func,
        # Wrap stderr handling to surface a connectivity hint when relevant.
        err_func=lambda err: HandleKubectlErrorStream(err_func, err),
        universal_newlines=True)
  except (execution_utils.PermissionError,
          execution_utils.InvalidCommandError) as e:
    raise KubectlError(six.text_type(e))
  if retval:
    raise KubectlError('kubectl returned non-zero status code.')
def HandleKubectlErrorStream(err_func, err):
  """Forwards kubectl stderr, appending a connectivity hint when relevant."""
  emit = err_func if err_func else log.status.Print
  emit(err)
  if 'Unable to connect to the server' in err:
    emit('\nPlease, check if you have connectivity to GKE control plane.\n')
def ConvertImageVersionToNamespacePrefix(image_version):
  """Maps an image version string to its kubernetes namespace prefix."""
  return '-'.join(image_version.split('.'))
def FetchKubectlNamespace(env_image_version):
  """Checks environment for valid namespace options.

  First checks for the existence of a kubectl namespace based on the env image
  version. If namespace does not exist, then return the 'default' namespace.

  Args:
    env_image_version: str, the environment image version string.

  Returns:
    The namespace string to apply to any `environments run` commands.
  """
  version_prefix = ConvertImageVersionToNamespacePrefix(env_image_version)
  kubectl_args = [
      'get', 'namespace', '--all-namespaces',
      '--sort-by=.metadata.creationTimestamp', '--output',
      r'jsonpath={range .items[*]}{.metadata.name}{"\t"}{.status.phase}{"\n"}',
      '--ignore-not-found=true'
  ]
  out_buf = io.StringIO()
  RunKubectlCommand(kubectl_args, out_buf.write, log.err.write)
  # kubectl only sorts in ascending creation order; iterate newest-first.
  for entry in reversed(out_buf.getvalue().split('\n')):
    if not entry.strip():
      continue
    fields = entry.split('\t')
    # Accept only 'Active' namespaces that match the image version prefix.
    if (fields[1].lower() == NAMESPACE_STATUS_ACTIVE and
        fields[0].startswith(version_prefix)):
      return fields[0]
  return DEFAULT_NAMESPACE
def AddKubectlNamespace(namespace, kubectl_args):
  """Adds namespace arguments to the provided list of kubectl args.

  If a namespace arg is not already present, insert `--namespace <namespace>`
  after the `kubectl` command and before all other arg elements. Resulting in
  this general format:

    ['kubectl', '--namespace', 'namespace_foo', ... <remaining args> ... ]

  Args:
    namespace: name of the namespace scope
    kubectl_args: list of kubectl command arguments. Expects that the first
      element will be the `kubectl` command, followed by all additional
      arguments.

  Returns:
    list of kubectl args with the additional namespace args (if necessary).
  """
  if namespace is None:
    return kubectl_args
  already_scoped = (NAMESPACE_ARG_NAME in kubectl_args or
                    NAMESPACE_ARG_ALIAS in kubectl_args)
  if not already_scoped:
    # Place the namespace args right after the `kubectl` command when it is
    # the leading element; otherwise prepend them.
    insert_at = 1 if (kubectl_args and
                      _KUBECTL_COMPONENT_NAME in kubectl_args[0]) else 0
    kubectl_args[insert_at:insert_at] = [NAMESPACE_ARG_NAME, namespace]
  return kubectl_args
def ParseRequirementsFile(requirements_file_path):
  """Parses the given requirements file into a requirements dictionary.

  If the file path is GCS file path, use GCS file parser to parse requirements
  file. Otherwise, use local file parser.

  Args:
    requirements_file_path: Filepath to the requirements file.

  Returns:
    {string: string}, dict mapping from PyPI package name to extras and version
    specifier, if provided.

  Raises:
    Error: if requirements file cannot be read.
  """
  try:
    # `gs://` paths are read through the Cloud Storage API; anything else is
    # treated as a local file.
    if requirements_file_path.startswith('gs://'):
      object_ref = storage_util.ObjectReference.FromUrl(requirements_file_path)
      requirements_source = storage_api.StorageClient().ReadObject(object_ref)
    else:
      requirements_source = files.FileReader(requirements_file_path)
    parsed_requirements = {}
    with requirements_source as requirements_file:
      for line in requirements_file:
        line = line.strip()
        # Skip blank lines and comment lines.
        if not line or line.startswith('#'):
          continue
        package, version = SplitRequirementSpecifier(line)
        # Each package may appear at most once.
        if package in parsed_requirements:
          raise Error(
              'Duplicate package in requirements file: {0}'.format(package))
        parsed_requirements[package] = version
    return parsed_requirements
  except (files.Error, storage_api.Error, storage_util.Error):
    # Raise error when it fails to read requirements file.
    core_exceptions.reraise(
        Error('Unable to read requirements file {0}'.format(
            requirements_file_path)))
def SplitRequirementSpecifier(requirement_specifier):
  """Splits the package name from the other components of a requirement spec.

  Only supports PEP 508 `name_req` requirement specifiers. Does not support
  requirement specifiers containing environment markers.

  Args:
    requirement_specifier: str, a PEP 508 requirement specifier that does not
      contain an environment marker.

  Returns:
    (string, string), a 2-tuple of the extracted package name and the tail of
    the requirement specifier which could contain extras and/or a version
    specifier.

  Raises:
    Error: No package name was found in the requirement spec.
  """
  package = requirement_specifier.strip()
  # The tail starts at the first extras bracket, parenthesis, or version
  # comparison operator.
  tail_start_regex = r'(\[|\(|==|>=|!=|<=|<|>|~=|===)'
  tail_match = re.search(tail_start_regex, requirement_specifier)
  tail = ''
  if tail_match:
    package = requirement_specifier[:tail_match.start()].strip()
    tail = requirement_specifier[tail_match.start():].strip()
  if not package:
    # Bug fix: the message was previously a raw string, so the escaped quotes
    # rendered as literal backslashes in the user-facing error text.
    raise Error('Missing package name in requirement specifier: \'{}\''.format(
        requirement_specifier))
  return package, tail
def BuildPartialUpdate(clear, remove_keys, set_entries, field_mask_prefix,
                       entry_cls, env_builder):
  """Builds the field mask and patch environment for an environment update.

  Follows the environments update semantic which applies operations
  in an effective order of clear -> remove -> set.

  Leading and trailing whitespace is stripped from elements in remove_keys
  and the keys of set_entries.

  Args:
    clear: bool, If true, the patch removes existing keys.
    remove_keys: iterable(string), Iterable of keys to remove.
    set_entries: {string: string}, Dict containing entries to set.
    field_mask_prefix: string, The prefix defining the path to the base of the
      proto map to be patched.
    entry_cls: AdditionalProperty, The AdditionalProperty class for the type of
      entry being updated.
    env_builder: [AdditionalProperty] -> Environment, A function which produces
      a patch Environment with the given list of entry_cls properties.

  Returns:
    (string, Environment), a 2-tuple of the field mask defined by the arguments
    and a patch environment produced by env_builder.
  """
  keys_to_remove = set(k.strip() for k in remove_keys or [])
  # Keep set entries in key-sorted order so tests can predict the patch
  # object that gets built.
  keys_to_set = collections.OrderedDict(
      (k.strip(), v) for k, v in sorted((set_entries or {}).items()))
  patch_entries = [
      entry_cls(key=k, value=v) for k, v in keys_to_set.items()
  ]
  if clear:
    # Clearing masks the whole map, so the prefix alone suffices.
    return field_mask_prefix, env_builder(patch_entries)
  # Every removed or set key contributes exactly one field mask path; the
  # union avoids duplicates for keys that appear in both. Sorting makes the
  # mask deterministic since dict iteration order is otherwise unspecified.
  masked_keys = keys_to_remove | set(keys_to_set)
  field_mask = ','.join(
      sorted('{}.{}'.format(field_mask_prefix, key) for key in masked_keys))
  return field_mask, env_builder(patch_entries)
def BuildFullMapUpdate(clear, remove_keys, set_entries, initial_entries,
                       entry_cls, env_builder):
  """Builds the patch environment for an environment update.

  To be used when BuildPartialUpdate cannot be used due to lack of support for
  field masks containing map keys.

  Follows the environments update semantic which applies operations
  in an effective order of clear -> remove -> set.

  Leading and trailing whitespace is stripped from elements in remove_keys
  and the keys of set_entries.

  Args:
    clear: bool, If true, the patch removes existing keys.
    remove_keys: iterable(string), Iterable of keys to remove.
    set_entries: {string: string}, Dict containing entries to set.
    initial_entries: [AdditionalProperty], list of AdditionalProperty class with
      key and value fields, representing starting dict to update from.
    entry_cls: AdditionalProperty, The AdditionalProperty class for the type of
      entry being updated.
    env_builder: [AdditionalProperty] -> Environment, A function which produces
      a patch Environment with the given list of entry_cls properties.

  Returns:
    Environment, a patch environment produced by env_builder.
  """
  # Work on an ordered key -> value mapping built from the starting entries.
  merged = collections.OrderedDict(
      (entry.key, entry.value) for entry in initial_entries)
  # clear -> remove -> set, in that order.
  if clear:
    merged = collections.OrderedDict()
  for key in set(k.strip() for k in remove_keys or []):
    merged.pop(key, None)
  # Set entries are applied in key-sorted order so tests can predict the
  # resulting patch object.
  for key, value in sorted((set_entries or {}).items()):
    merged[key.strip()] = value
  # Convert the mapping back into a list of entry_cls properties.
  return env_builder(
      [entry_cls(key=k, value=v) for k, v in merged.items()])
def IsInRunningState(environment, release_track=base.ReleaseTrack.GA):
  """Returns whether an environment currently is in the RUNNING state.

  Args:
    environment: Environment, an object returned by an API call representing
      the environment to check.
    release_track: base.ReleaseTrack, the release track of command. Will
      dictate which Composer client library will be used.

  Returns:
    bool, True if the environment state equals the RUNNING enum value.
  """
  messages = api_util.GetMessagesModule(release_track=release_track)
  running = messages.Environment.StateValueValuesEnum.RUNNING
  return environment.state == running
def ValidateMasterAuthorizedNetworks(networks):
  """Validates given master authorized networks.

  Args:
    networks: Iterable(string) or None. List of networks in CIDR notation.

  Raises:
    InvalidUserInputError: if any entry is not a valid IPv4 network in CIDR
      notation (including CIDRs with host bits set, which IPv4Network
      rejects in its default strict mode).
  """
  if networks is None:
    return
  for network in networks:
    try:
      ipaddress.IPv4Network(network)
    except (TypeError, ValueError) as e:
      # ipaddress raises ValueError subclasses (AddressValueError,
      # NetmaskValueError) for malformed input; catching those (plus
      # TypeError for non-string entries) instead of a bare Exception avoids
      # masking unrelated programming errors.
      raise InvalidUserInputError(
          'Invalid master authorized network: {}'.format(e))