feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper library for this command group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API client library for Cloud DNS active peering zones."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
class Client(object):
  """API client wrapping the Cloud DNS activePeeringZones service."""

  _API_NAME = 'dns'

  def __init__(self, version, client, messages=None):
    """Instantiates the client.

    Args:
      version: str, the Cloud DNS API version to target.
      client: the generated API client to delegate to.
      messages: optional message module; defaults to the client's own module.
    """
    self.version = version
    self.client = client
    self._service = client.activePeeringZones
    self.messages = messages if messages else client.MESSAGES_MODULE

  @classmethod
  def FromApiVersion(cls, version):
    """Builds a Client for the given API version string."""
    api_client = apis.GetClientInstance(cls._API_NAME, version)
    return cls(version, api_client)

View File

@@ -0,0 +1,52 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API client library for Cloud DNS managed zones."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.dns import util
class Client(object):
  """API client for Cloud DNS DNS keys.

  Wraps the ``dnsKeys`` service of the Cloud DNS API. (The previous docstring
  said "managed zones", a copy-paste error — every request here is a
  DnsDnsKeys* request against ``client.dnsKeys``.)
  """

  _API_NAME = 'dns'

  def __init__(self, version, client, messages=None):
    """Instantiates the client.

    Args:
      version: str, the Cloud DNS API version to use.
      client: the generated API client to delegate to.
      messages: optional message module; defaults to the client's own module.
    """
    self.version = version
    self.client = client
    self._service = self.client.dnsKeys
    # Consistent with the sibling clients: fall back to the bundled module.
    self.messages = messages or self.client.MESSAGES_MODULE

  @classmethod
  def FromApiVersion(cls, version):
    """Returns a Client built from the given API version string."""
    return cls(version, util.GetApiClient(version))

  def Get(self, key_ref):
    """Fetches a single DnsKey.

    Args:
      key_ref: resource reference carrying project, managed zone and key id.

    Returns:
      The service's Get response for the referenced key.
    """
    return self._service.Get(
        self.messages.DnsDnsKeysGetRequest(
            dnsKeyId=key_ref.Name(),
            managedZone=key_ref.managedZone,
            project=key_ref.project))

  def List(self, zone_ref):
    """Yields all DnsKeys of the given managed zone, depaginated.

    Args:
      zone_ref: resource reference to the managed zone.

    Returns:
      A generator of DnsKey messages (via list_pager.YieldFromList).
    """
    request = self.messages.DnsDnsKeysListRequest(
        project=zone_ref.project,
        managedZone=zone_ref.Name())
    return list_pager.YieldFromList(self._service, request, field='dnsKeys')

View File

@@ -0,0 +1,73 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for exporting record-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from dns import name
from dns import rdata
from dns import rdataclass
from dns import rdatatype
from dns import zone
from googlecloudsdk.api_lib.dns import svcb_stub
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.resource import resource_printer
# Enable support for exporting SVCB and HTTPS records.
svcb_stub.register()
class Error(exceptions.Error):
  """Base exception for all export errors in this module."""


class UnableToExportRecordsToFile(Error):
  """Unable to export records to the specified file."""
def WriteToZoneFile(zone_file, record_sets, domain):
  """Writes the given record-sets to zone_file in BIND zone-file format.

  Args:
    zone_file: file, Destination file for the zone-file output.
    record_sets: list, ResourceRecordSets to serialize.
    domain: str, The origin domain for the zone file.
  """
  origin_name = name.from_text(domain)
  zone_obj = zone.Zone(origin_name)
  for rrset in record_sets:
    # The record type string is constant per record-set; parse it once.
    rr_type = rdatatype.from_text(rrset.type)
    dataset = zone_obj.get_rdataset(rrset.name, rrset.type, create=True)
    for datum in rrset.rrdatas:
      parsed = rdata.from_text(
          rdataclass.IN, rr_type, str(datum), origin=zone_obj.origin)
      dataset.add(parsed, ttl=rrset.ttl)
  # Absolute names (relativize=False) keep the output origin-independent.
  zone_obj.to_file(zone_file, relativize=False)
def WriteToYamlFile(yaml_file, record_sets):
  """Writes the given record-sets in yaml format to the given file.

  Delegates entirely to the core resource printer's yaml formatter.

  Args:
    yaml_file: file, File into which the records should be written.
    record_sets: list, ResourceRecordSets to be written out.
  """
  resource_printer.Print(record_sets, print_format='yaml', out=yaml_file)

View File

@@ -0,0 +1,464 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for importing record-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from apitools.base.py import encoding as api_encoding
from dns import rdatatype
from dns import zone
from googlecloudsdk.api_lib.dns import record_types
from googlecloudsdk.api_lib.dns import svcb_stub
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import yaml
from googlecloudsdk.core.util import encoding
import six
# Enable support for importing SVCB and HTTPS records.
svcb_stub.register()
class Error(exceptions.Error):
  """Base exception for all import errors in this module."""


class RecordsFileNotFound(Error):
  """The specified records file was not found."""


class RecordsFileIsADirectory(Error):
  """The specified records file is a directory, not a file."""


class UnableToReadRecordsFile(Error):
  """Unable to read record sets from the specified records file."""


class ConflictingRecordsFound(Error):
  """Conflicts found between records being imported and current records."""
def _SOATranslation(rdata, origin):
  """Returns the translation of the given SOA rdata.

  Args:
    rdata: Rdata, The data to be translated.
    origin: Name, The origin domain name.

  Returns:
    str, The translation of the given SOA rdata which includes all the required
    SOA fields. The primary NS name is emitted as the '{0}' placeholder so it
    can be substituted later, because it is always provided by Cloud DNS.
  """
  fields = [
      '{0}',
      rdata.rname.derelativize(origin),
      rdata.serial,
      rdata.refresh,
      rdata.retry,
      rdata.expire,
      rdata.minimum,
  ]
  return ' '.join(six.text_type(field) for field in fields)
def QuotedText(text):
  """Returns the given text within quotes.

  Args:
    text: str, The text to be escaped.

  Returns:
    str, The given text within quotes. For further details on why this is
    necessary, please look at the TXT section at:
    https://cloud.google.com/dns/what-is-cloud-dns#supported_record_types.
  """
  decoded = encoding.Decode(text)
  already_quoted = decoded.startswith('"') and decoded.endswith('"')
  if already_quoted:
    # Nothing to do if already escaped.
    return decoded
  return '"{0}"'.format(decoded)
def _NullTranslation(rdata, origin=None):
  """Returns the rdata rendered by its own to_text() presentation form.

  Args:
    rdata: Rdata, The data to be translated.
    origin: Name, The origin domain name.

  Returns:
    str, The textual presentation form of the given rdata, unrelativized.
  """
  return rdata.to_text(relativize=False, origin=origin)
def GetRdataTranslation(rr_type):
  """Returns the rdata translation function for a record type.

  Args:
    rr_type: The record type.

  Returns:
    The record type's translation function: SOA records get the special
    placeholder-producing translator, everything else the generic one.
  """
  return _SOATranslation if rr_type == rdatatype.SOA else _NullTranslation
def _FilterOutRecord(name, rdtype, origin, replace_origin_ns=False):
  """Returns whether the given record should be filtered out.

  Args:
    name: string, The name of the record set we are considering.
    rdtype: RDataType or string, type of record we are considering approving.
    origin: Name, The origin domain of the zone we are considering.
    replace_origin_ns: Bool, Whether origin NS records should be imported.

  Returns:
    True if the given record should be filtered out, False otherwise.
  """
  if replace_origin_ns:
    return False
  # Only the zone-apex NS record is ever filtered out.
  return name == origin and rdtype == rdatatype.NS
def _RecordSetFromZoneRecord(name, rdset, origin, api_version='v1'):
  """Returns the Cloud DNS ResourceRecordSet for the given zone file record.

  Args:
    name: Name, Domain name of the zone record.
    rdset: Rdataset, The zone record object.
    origin: Name, The origin domain of the zone file.
    api_version: [str], the api version to use for creating the records.

  Returns:
    The ResourceRecordSet equivalent for the given zone record, or None for
    unsupported record types.
  """
  if rdset.rdtype not in record_types.SUPPORTED_TYPES:
    return None
  messages = core_apis.GetMessagesModule('dns', api_version)
  record_set = messages.ResourceRecordSet()
  # Need to assign kind to default value for useful equals comparisons.
  record_set.kind = record_set.kind
  record_set.name = name.derelativize(origin).to_text()
  record_set.ttl = rdset.ttl
  record_set.type = rdatatype.to_text(rdset.rdtype)
  # The translation function is per-type; look it up once for the whole set.
  translate = GetRdataTranslation(rdset.rdtype)
  record_set.rrdatas = [translate(rdata, origin) for rdata in rdset]
  return record_set
def RecordSetsFromZoneFile(zone_file, domain, api_version='v1'):
  """Returns record-sets for the given domain imported from the given zone file.

  Args:
    zone_file: file, The zone file with records for the given domain.
    domain: str, The domain for which record-sets should be obtained.
    api_version: [str], the api version to use for creating the records.

  Returns:
    A (name, type) keyed dict of ResourceRecordSets that were obtained from the
    zone file. Note that only records of supported types are retrieved. Also,
    the primary NS field for SOA records is discarded since that is
    provided by Cloud DNS.
  """
  zone_contents = zone.from_file(zone_file, domain, check_origin=False)
  record_sets = {}
  for record_name, rdset in zone_contents.iterate_rdatasets():
    converted = _RecordSetFromZoneRecord(
        record_name, rdset, zone_contents.origin, api_version=api_version)
    # Unsupported types come back as None and are silently dropped.
    if converted:
      record_sets[(converted.name, converted.type)] = converted
  return record_sets
def _ToStandardEnumTypeSafe(string_type):
  """Converts string_type to an RdataType enum value if it is a standard type.

  Only standard record types can be converted to an RdataType; extended
  (Cloud-DNS-internal) types would raise, so they are mapped to None instead.

  Args:
    string_type: [str] The record type as a string.

  Returns:
    The record type as an RdataType enum, or None if the type is not a
    standard DNS type.
  """
  if string_type not in record_types.CLOUD_DNS_EXTENDED_TYPES:
    return rdatatype.from_text(string_type)
  return None
def RecordSetsFromYamlFile(yaml_file,
                           include_extended_records=False,
                           api_version='v1'):
  """Returns record-sets read from the given yaml file.

  Args:
    yaml_file: file, A yaml file with records.
    include_extended_records: [bool], If extended records should be included
      (otherwise they are silently skipped).
    api_version: [str], the api version to use for creating the records.

  Returns:
    A (name, type) keyed dict of ResourceRecordSets that were obtained from the
    yaml file. Note that only records of supported types are retrieved. Also,
    the primary NS field for SOA records is discarded since that is
    provided by Cloud DNS.
  """
  record_sets = {}
  messages = core_apis.GetMessagesModule('dns', api_version)
  yaml_record_sets = yaml.load_all(yaml_file)
  for yaml_record_set in yaml_record_sets:
    # Extended types map to None here; standard types to an RdataType enum.
    rdata_type = _ToStandardEnumTypeSafe(yaml_record_set['type'])
    # Skip unless the type is standard-and-supported, or it is an extended
    # type and the caller explicitly asked for extended records.
    if rdata_type not in record_types.SUPPORTED_TYPES and (
        not include_extended_records or
        yaml_record_set['type'] not in record_types.CLOUD_DNS_EXTENDED_TYPES):
      continue
    record_set = messages.ResourceRecordSet()
    # Need to assign kind to default value for useful equals comparisons.
    record_set.kind = record_set.kind
    record_set.name = yaml_record_set['name']
    record_set.ttl = yaml_record_set['ttl']
    record_set.type = yaml_record_set['type']
    if 'rrdatas' in yaml_record_set:
      record_set.rrdatas = yaml_record_set['rrdatas']
    elif 'routingPolicy' in yaml_record_set:
      record_set.routingPolicy = api_encoding.PyValueToMessage(
          messages.RRSetRoutingPolicy,
          yaml_record_set['routingPolicy'],
      )
    if rdata_type is rdatatype.SOA:
      # Make primary NS name substitutable.
      # NOTE(review): assumes an SOA record always carries 'rrdatas' (an SOA
      # with only a routingPolicy would raise IndexError here) -- confirm
      # whether upstream validation guarantees this.
      record_set.rrdatas[0] = re.sub(r'\S+', '{0}', record_set.rrdatas[0],
                                     count=1)
    record_sets[(record_set.name, record_set.type)] = record_set
  return record_sets
def _RecordSetCopy(record_set, api_version='v1'):
  """Returns a field-by-field copy of the given record-set.

  Args:
    record_set: ResourceRecordSet, Record-set to be copied.
    api_version: [str], the api version to use for creating the records.

  Returns:
    A new ResourceRecordSet with kind, name, type, ttl and a fresh rrdatas
    list copied from the original.
  """
  messages = core_apis.GetMessagesModule('dns', api_version)
  duplicate = messages.ResourceRecordSet()
  duplicate.kind = record_set.kind
  duplicate.name = record_set.name
  duplicate.type = record_set.type
  duplicate.ttl = record_set.ttl
  # Copy the list so mutating the duplicate never touches the original.
  duplicate.rrdatas = list(record_set.rrdatas)
  return duplicate
def _SOAReplacement(current_record, record_to_be_imported, api_version='v1'):
  """Returns the replacement SOA record with restored primary NS name.

  Args:
    current_record: ResourceRecordSet, Current record-set.
    record_to_be_imported: ResourceRecordSet, Record-set to be imported.
    api_version: [str], the api version to use for creating the records.

  Returns:
    ResourceRecordSet, the replacement SOA record with restored primary NS
    name.
  """
  replacement = _RecordSetCopy(record_to_be_imported, api_version=api_version)
  # The imported rrdata carries a '{0}' placeholder; fill in the primary NS
  # name from the record currently served by Cloud DNS.
  primary_ns = current_record.rrdatas[0].split()[0]
  replacement.rrdatas[0] = replacement.rrdatas[0].format(primary_ns)
  if replacement != current_record:
    return replacement
  # There should always be a different 'next' SOA record.
  return NextSOARecordSet(replacement, api_version)
def _RDataReplacement(current_record, record_to_be_imported, api_version='v1'):
  """Returns a record-set containing rrdata to be imported.

  Args:
    current_record: ResourceRecordSet, Current record-set.
    record_to_be_imported: ResourceRecordSet, Record-set to be imported.
    api_version: [str], the api version to use for creating the records.

  Returns:
    ResourceRecordSet, a record-set containing rrdata to be imported.
    None, if rrdata to be imported is identical to current rrdata.
  """
  replacement = _RecordSetCopy(record_to_be_imported, api_version=api_version)
  return None if replacement == current_record else replacement
def _GetRDataReplacement(rdtype):
  """Gets the RData replacement function for this type.

  Args:
    rdtype: RDataType, the type for which to fetch a replacement function.

  Returns:
    A function for replacing rdata of a record-set with rdata from another
    record-set with the same name and type. SOA records use the specialized
    replacement that restores the primary NS name.
  """
  return _SOAReplacement if rdtype == rdatatype.SOA else _RDataReplacement
def NextSOARecordSet(soa_record_set, api_version='v1'):
  """Returns a new SOA record set with an incremented serial number.

  Args:
    soa_record_set: ResourceRecordSet, Current SOA record-set.
    api_version: [str], the api version to use for creating the records.

  Returns:
    A new SOA record-set with an incremented serial number.
  """
  next_record = _RecordSetCopy(soa_record_set, api_version=api_version)
  parts = soa_record_set.rrdatas[0].split()
  # The serial is the third SOA field; bump it with 32-bit wraparound.
  serial = (int(parts[2]) + 1) % (1 << 32)
  parts[2] = str(serial)
  next_record.rrdatas[0] = ' '.join(parts)
  return next_record
def IsOnlySOAIncrement(change, api_version='v1'):
  """Returns True if the change only contains an SOA increment.

  Args:
    change: Change, the change to be checked.
    api_version: [str], the api version to use for creating the records.

  Returns:
    True if the change only contains an SOA increment, False otherwise.
  """
  if len(change.additions) != 1 or len(change.deletions) != 1:
    return False
  deletion = change.deletions[0]
  if _ToStandardEnumTypeSafe(deletion.type) is not rdatatype.SOA:
    return False
  return NextSOARecordSet(deletion, api_version) == change.additions[0]
def _NameAndType(record):
  """Returns a 'name type' display/sort key string for a record-set."""
  return '%s %s' % (record.name, record.type)
def ComputeChange(current,
                  to_be_imported,
                  replace_all=False,
                  origin=None,
                  replace_origin_ns=False,
                  api_version='v1'):
  """Returns a change for importing the given record-sets.

  Args:
    current: dict, (name, type) keyed dict of current record-sets.
    to_be_imported: dict, (name, type) keyed dict of record-sets to be imported.
    replace_all: bool, Whether the record-sets to be imported should replace the
      current record-sets.
    origin: string, the name of the apex zone ex. "foo.com"
    replace_origin_ns: bool, Whether origin NS records should be imported.
    api_version: [str], the api version to use for creating the records.

  Raises:
    ConflictingRecordsFound: If conflicting records are found.

  Returns:
    A Change that describes the actions required to import the given
    record-sets, or None if the net change would only be an SOA serial
    increment (i.e. a no-op import).
  """
  messages = core_apis.GetMessagesModule('dns', api_version)
  change = messages.Change()
  change.additions = []
  change.deletions = []
  current_keys = set(current.keys())
  keys_to_be_imported = set(to_be_imported.keys())
  intersecting_keys = current_keys.intersection(keys_to_be_imported)
  # Without replace_all, any overlap between existing and imported records is
  # an error rather than an implicit overwrite.
  if not replace_all and intersecting_keys:
    raise ConflictingRecordsFound(
        'The following records (name type) already exist: {0}'.format(
            [_NameAndType(current[key]) for key in sorted(intersecting_keys)]))
  # Records present on both sides: replace the rdata (SOA gets special
  # handling that restores the primary NS name), unless the record is an
  # origin NS record that should be preserved.
  for key in intersecting_keys:
    current_record = current[key]
    record_to_be_imported = to_be_imported[key]
    rdtype = _ToStandardEnumTypeSafe(key[1])
    if not _FilterOutRecord(current_record.name,
                            rdtype,
                            origin,
                            replace_origin_ns):
      replacement = _GetRDataReplacement(rdtype)(
          current_record, record_to_be_imported, api_version=api_version)
      # replacement is None when the imported rdata is identical to current.
      if replacement:
        change.deletions.append(current_record)
        change.additions.append(replacement)
  # Records only present in the import set are added as-is.
  for key in keys_to_be_imported.difference(current_keys):
    change.additions.append(to_be_imported[key])
  # Records only present in the current set: the SOA serial is always bumped;
  # other records are deleted only under replace_all (and never when the
  # origin NS filter applies).
  for key in current_keys.difference(keys_to_be_imported):
    current_record = current[key]
    rdtype = _ToStandardEnumTypeSafe(key[1])
    if rdtype is rdatatype.SOA:
      change.deletions.append(current_record)
      change.additions.append(NextSOARecordSet(current_record, api_version))
    elif replace_all and not _FilterOutRecord(current_record.name,
                                              rdtype,
                                              origin,
                                              replace_origin_ns):
      change.deletions.append(current_record)
  # If the only change is an SOA increment, there is nothing to be done.
  if IsOnlySOAIncrement(change, api_version):
    return None
  # Deterministic ordering for stable output and comparisons.
  change.additions.sort(key=_NameAndType)
  change.deletions.sort(key=_NameAndType)
  return change

View File

@@ -0,0 +1,162 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API client library for Cloud DNS managed zones."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.dns import operations
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.core import log
class Client(object):
  """API client for Cloud DNS managed zones."""

  def __init__(self, version, client, messages=None, location=None):
    """Instantiates the managed-zones client.

    Args:
      version: str, the Cloud DNS API version to use.
      client: the generated API client to delegate to.
      messages: optional message module; defaults to the client's own module.
      location: str, optional location attached to requests when set.
    """
    self.version = version
    self.client = client
    self._service = self.client.managedZones
    self.messages = messages or self.client.MESSAGES_MODULE
    self.location = location

  @classmethod
  def FromApiVersion(cls, version, location=None):
    """Builds a Client from an API version string (and optional location)."""
    return cls(version, util.GetApiClient(version), location=location)

  def Get(self, zone_ref):
    """Fetches a managed zone, including the location when one is configured."""
    if self.location:
      return self._service.Get(
          self.messages.DnsManagedZonesGetRequest(
              project=zone_ref.project,
              managedZone=zone_ref.managedZone,
              location=self.location))
    return self._service.Get(
        self.messages.DnsManagedZonesGetRequest(
            project=zone_ref.project,
            managedZone=zone_ref.managedZone))

  def Patch(self,
            zone_ref,
            is_async,
            dnssec_config=None,
            description=None,
            labels=None,
            private_visibility_config=None,
            forwarding_config=None,
            peering_config=None,
            service_directory_config=None,
            cloud_logging_config=None,
            cleared_fields=None):
    """Managed Zones Update Request.

    Args:
      zone_ref: the managed zones being patched.
      is_async: if the PATCH operation is asynchronous.
      dnssec_config: zone DNSSEC config.
      description: zone description.
      labels: zone labels.
      private_visibility_config: zone visibility config.
      forwarding_config: zone forwarding config.
      peering_config: zone peering config.
      service_directory_config: zone service directory config.
      cloud_logging_config: Stackdriver logging config.
      cleared_fields: the fields that should be included in the request JSON as
        their default value (fields that are their default value will be omitted
        otherwise).

    Returns:
      The PATCH response, if operation is not asynchronous. None when is_async
      is set (the operation id is logged instead).
    """
    # Only the explicitly-set optional configs are attached to the zone, so
    # unset ones are omitted from the PATCH payload entirely.
    zone = self.messages.ManagedZone(
        name=zone_ref.Name(),
        dnssecConfig=dnssec_config,
        description=description,
        labels=labels)
    if private_visibility_config:
      zone.privateVisibilityConfig = private_visibility_config
    if forwarding_config:
      zone.forwardingConfig = forwarding_config
    if peering_config:
      zone.peeringConfig = peering_config
    if service_directory_config:
      zone.serviceDirectoryConfig = service_directory_config
    if cloud_logging_config:
      zone.cloudLoggingConfig = cloud_logging_config
    request = self.messages.DnsManagedZonesPatchRequest(
        managedZoneResource=zone,
        project=zone_ref.project,
        managedZone=zone_ref.Name())
    if self.location:
      request.location = self.location
    # Tell the client that the cleared fields should be included in the JSON as
    # their default value, otherwise they will be omitted.
    with self.client.IncludeFields(cleared_fields):
      operation = self.client.managedZones.Patch(request)
    operation_param = {
        'project': zone_ref.project,
        'managedZone': zone_ref.Name(),
    }
    if self.location:
      operation_param['location'] = self.location
    operation_ref = util.GetRegistry(self.version).Parse(
        operation.id,
        params=operation_param,
        collection='dns.managedZoneOperations')
    if is_async:
      # Asynchronous mode: report the operation and return immediately.
      log.status.write(
          'Updating [{0}] with operation [{1}].'.format(
              zone_ref.Name(), operation_ref.Name()))
      return
    return operations.WaitFor(
        self.version,
        operation_ref,
        'Updating managed zone [{}]'.format(zone_ref.Name()),
        self.location
    )

  def UpdateLabels(self, zone_ref, labels):
    """Update labels using Managed Zones Update request."""
    # Read-modify-write: fetch the current zone and replace only its labels.
    zone = self.Get(zone_ref)
    zone.labels = labels
    operation = self._service.Update(
        self.messages.DnsManagedZonesUpdateRequest(
            managedZoneResource=zone,
            project=zone_ref.project,
            managedZone=zone_ref.Name()))
    operation_param = {
        'project': zone_ref.project,
        'managedZone': zone_ref.Name(),
    }
    if self.location:
      operation_param['location'] = self.location
    operation_ref = util.GetRegistry(self.version).Parse(
        operation.id,
        params=operation_param,
        collection='dns.managedZoneOperations')
    # NOTE(review): unlike Patch, this WaitFor call does not forward
    # self.location -- confirm whether location-scoped zones need it here too.
    return operations.WaitFor(
        self.version, operation_ref,
        'Updating managed zone [{}]'.format(zone_ref.Name()))

View File

@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API client library for Cloud DNS operatoins."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.api_lib.util import waiter
class Poller(waiter.OperationPoller):
  """Polls long-running Cloud DNS operations.

  See https://cloud.google.com/speech/reference/rpc/google.longrunning
  """

  def __init__(self, operations_client, api_version='v1'):
    """Sets up poller for dns operations.

    Args:
      operations_client: Client, client for retrieving information about
          ongoing operation.
      api_version: Cloud DNS api version this poller should use.
    """
    self.operations_client = operations_client
    self.api_version = api_version

  def IsDone(self, operation):
    """Overrides."""
    status_enum = self.operations_client.messages.Operation.StatusValueValuesEnum
    # The v2/dev surfaces expose the terminal value as DONE; v1 uses 'done'.
    if self.api_version in ('v2', 'dev'):
      done_value = status_enum.DONE
    else:
      done_value = status_enum.done
    return operation.status == done_value

  def Poll(self, operation_ref):
    """Overrides.

    Args:
      operation_ref: googlecloudsdk.core.resources.Resource.

    Returns:
      fetched operation message.
    """
    return self.operations_client.Get(operation_ref)

  def GetResult(self, operation):
    """Overrides.

    Args:
      operation: api_name_messages.Operation.

    Returns:
      The new zone value recorded on the finished operation.
    """
    return operation.zoneContext.newValue
def WaitFor(version, operation_ref, message, location=None):
  """Waits for the referenced DNS operation to finish and returns its result.

  Args:
    version: str, Cloud DNS api version the poller should use.
    operation_ref: resource reference of the operation to poll.
    message: str, progress message shown while waiting.
    location: str, optional location forwarded to the operations client.

  Returns:
    The operation result as extracted by Poller.GetResult.
  """
  operation_poller = Poller(Client.FromApiVersion(version, location), version)
  return waiter.WaitFor(operation_poller, operation_ref, message)
class Client(object):
  """API client wrapping the Cloud DNS managedZoneOperations service."""

  _API_NAME = 'dns'

  def __init__(self, version, client, messages=None, location=None):
    """Instantiates the operations client.

    Args:
      version: str, the Cloud DNS API version to use.
      client: the generated API client to delegate to.
      messages: optional message module; defaults to the client's own module.
      location: str, optional location attached to requests when set.
    """
    self.version = version
    self.client = client
    self._service = client.managedZoneOperations
    self.messages = messages or client.MESSAGES_MODULE
    self.location = location

  @classmethod
  def FromApiVersion(cls, version, location=None):
    """Builds a Client from an API version string (and optional location)."""
    return cls(
        version, util.GetApiClient(version), messages=None, location=location)

  def Get(self, operation_ref):
    """Fetches a single managed-zone operation."""
    params = dict(
        operation=operation_ref.Name(),
        managedZone=operation_ref.managedZone,
        project=operation_ref.project)
    request = self.messages.DnsManagedZoneOperationsGetRequest(**params)
    if self.location:
      request.location = self.location
    return self._service.Get(request)

  def List(self, zone_ref, limit=None):
    """Yields the zone's operations, depaginated, up to limit."""
    request = self.messages.DnsManagedZoneOperationsListRequest(
        managedZone=zone_ref.Name(),
        project=zone_ref.project)
    if self.location:
      request.location = self.location
    return list_pager.YieldFromList(
        self._service, request, limit=limit, field='operations')

View File

@@ -0,0 +1,52 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions of shared DNS record types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from dns import rdatatype
from googlecloudsdk.api_lib.dns import svcb_stub
# Extended (i.e. Cloud DNS specific/internal) record types supported by Cloud
# DNS. Note that since these are internal to Cloud DNS, they do not have a wire
# value like the standard types below.
CLOUD_DNS_EXTENDED_TYPES = frozenset(['ALIAS'])

# Standard record types supported by Cloud DNS, as dnspython rdatatype values.
# See https://cloud.google.com/dns/docs/overview#supported_dns_record_types
SUPPORTED_TYPES = frozenset((
    rdatatype.A,
    rdatatype.AAAA,
    rdatatype.CAA,
    rdatatype.CNAME,
    rdatatype.DNSKEY,
    rdatatype.DS,
    svcb_stub.HTTPS,  # Replace after updating to dnspython 2.x.
    rdatatype.IPSECKEY,
    rdatatype.MX,
    rdatatype.NAPTR,
    rdatatype.NS,
    rdatatype.PTR,
    rdatatype.SOA,
    rdatatype.SPF,
    rdatatype.SRV,
    svcb_stub.SVCB,  # Replace after updating to dnspython 2.x.
    rdatatype.SSHFP,
    rdatatype.TLSA,
    rdatatype.TXT,
))

View File

@@ -0,0 +1,804 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for record-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import dataclasses
import re
from typing import Any, Collection, Mapping
from dns import rdatatype
from googlecloudsdk.api_lib.dns import import_util
from googlecloudsdk.api_lib.dns import record_types
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import resources
import ipaddr
class UnsupportedRecordType(exceptions.Error):
  """Unsupported record-set type."""


class ForwardingRuleWithoutHealthCheck(exceptions.Error):
  """Forwarding rules specified without enabling health check."""


class HealthCheckWithoutForwardingRule(exceptions.Error):
  """Health check enabled but no forwarding rules present."""


class ExternalEndpointsWithoutHealthCheck(exceptions.Error):
  """External endpoints specified without enabling health check."""


class HealthCheckWithoutExternalEndpoints(exceptions.Error):
  """Health check enabled but no external endpoints present."""


class ForwardingRuleNotFound(exceptions.Error):
  """Either the forwarding rule doesn't exist, or multiple forwarding rules present with the same name - across different regions."""


class UnsupportedLoadBalancingScheme(exceptions.Error):
  """Unsupported load balancing scheme."""


class EitherWeightOrLocationSpecified(exceptions.Error):
  """The Routing policy item should have either weight or location specified depending on the routing policy type."""


class HealthCheckOnlyWithRoutingPolicyItem(exceptions.Error):
  """The internet health check flag should be set only with routing policy item and not the routing policy data."""


class HealthCheckOnlyWithExternalEndpoints(exceptions.Error):
  """The internet health check flag should be set iff there are external endpoints."""


class HealthCheckOnlyForARecordType(exceptions.Error):
  """The health check flags should be set only for A/AAAA record type."""
@dataclasses.dataclass(frozen=True)
class RoutingPolicyItem:
  """A single parsed routing policy item."""
  # Policy-type-dependent key: the weight for WRR policies, the location for
  # GEO and FAILOVER policies.
  item_key: str
  # The rrdatas and health-checked targets attached to this item.
  routing_policy_data: 'RoutingPolicyData'
@dataclasses.dataclass(frozen=True)
class RoutingPolicyData:
  """A routing policy data.

  Includes the rrdata, health checked public ips, and health checked internal
  load balancers.
  """
  # Plain rrdata strings (IP addresses for A/AAAA, arbitrary text otherwise).
  rrdatas: Collection[str]
  # Public IPs to be health checked (requires --health-check).
  health_checked_ips: Collection[str]
  # Forwarding rule references (requires --enable-health-checking).
  internal_load_balancers: Collection[str]
def _TryParseRRTypeFromString(type_str):
  """Tries to parse the rrtype wire value from the given string.

  Args:
    type_str: The record type as a string (e.g. "A", "MX"...).

  Raises:
    UnsupportedRecordType: If given record-set type is not supported.

  Returns:
    The wire value rrtype as an int or rdatatype enum.
  """
  rd_type = rdatatype.from_text(type_str)
  # Only record types Cloud DNS can serve are accepted, even if dnspython
  # recognizes the mnemonic.
  if rd_type not in record_types.SUPPORTED_TYPES:
    raise UnsupportedRecordType('Unsupported record-set type [%s]' % type_str)
  return rd_type
def GetLoadBalancerTarget(forwarding_rule, api_version, project):
  """Creates and returns a LoadBalancerTarget for the given forwarding rule name.

  Args:
    forwarding_rule: The forwarding rule, given either as 'name@scope', as a
      full resource URL (self link), or as a bare name to be searched for in
      every region and globally.
    api_version: [str], the api version to use for creating the RecordSet.
    project: The GCP project where the forwarding_rule exists.

  Raises:
    ForwardingRuleNotFound: Either the forwarding rule doesn't exist, or
      multiple forwarding rules present with the same name - across different
      regions.
    UnsupportedLoadBalancingScheme: The requested load balancer uses a load
      balancing scheme that is not supported by Cloud DNS Policy Manager.

  Returns:
    LoadBalancerTarget, the load balancer target for the given forwarding rule.
  """
  compute_client = apis.GetClientInstance('compute', 'v1')
  compute_messages = apis.GetMessagesModule('compute', 'v1')
  dns_messages = apis.GetMessagesModule('dns', api_version)
  load_balancer_target = apis.GetMessagesModule(
      'dns', api_version).RRSetRoutingPolicyLoadBalancerTarget()
  load_balancer_target.project = project
  load_balancer_type = ''
  # Case 1: 'name@scope' - fetch the rule directly in the given scope.
  if len(forwarding_rule.split('@')) == 2:
    name, scope = forwarding_rule.split('@')
    if scope == 'global':
      config = compute_client.globalForwardingRules.Get(
          compute_messages.ComputeGlobalForwardingRulesGetRequest(
              project=project, forwardingRule=name
          )
      )
    else:
      load_balancer_target.region = scope
      config = compute_client.forwardingRules.Get(
          compute_messages.ComputeForwardingRulesGetRequest(
              project=project, forwardingRule=name, region=scope
          )
      )
    if config is None:
      raise ForwardingRuleNotFound(
          "Either the forwarding rule doesn't exist, or multiple forwarding "
          'rules are present with the same name - across different regions.'
      )
  else:
    try:
      # Case 2: assume a full self-link URL; the project/region are taken
      # from the returned config rather than from our arguments.
      config = GetLoadBalancerConfigFromUrl(
          compute_client, compute_messages, forwarding_rule
      )
      project_match = re.match(r'.*/projects/([^/]+)/.*', config.selfLink)
      load_balancer_target.project = project_match.group(1)
      if config.region:
        # region returned in the response is the url of the form:
        # https://www.googleapis.com/compute/v1/projects/project/regions/region
        region_match = re.match(r'.*/regions/(.*)$', config.region)
        load_balancer_target.region = region_match.group(1)
    except (
        resources.WrongResourceCollectionException,
        resources.RequiredFieldOmittedException,
    ):
      # This means the forwarding rule was specified as just a name.
      # Case 3: search every region plus global; the name must resolve to
      # exactly one rule or we cannot pick one.
      regions = [
          item.name for item in compute_client.regions.List(
              compute_messages.ComputeRegionsListRequest(project=project)).items
      ]
      configs = []
      for region in regions:
        configs.extend(
            compute_client.forwardingRules.List(
                compute_messages.ComputeForwardingRulesListRequest(
                    filter=('name = %s' % forwarding_rule),
                    project=project,
                    region=region)).items)
      configs.extend(
          compute_client.globalForwardingRules.List(
              compute_messages.ComputeGlobalForwardingRulesListRequest(
                  filter='name = %s' % forwarding_rule, project=project
              )
          ).items
      )
      if not configs:
        raise ForwardingRuleNotFound('The forwarding rule %s was not found.' %
                                     forwarding_rule)
      if len(configs) > 1:
        raise ForwardingRuleNotFound(
            'There are multiple forwarding rules present with the same name '
            'across different regions. Specify the intended region along with '
            'the rule in the format: forwardingrulename@region.'
        )
      config = configs[0]
      if config.region:
        # region returned in the response is the url of the form:
        # https://www.googleapis.com/compute/v1/projects/project/regions/region
        region_match = re.match(r'.*/regions/(.*)$', config.region)
        load_balancer_target.region = region_match.group(1)
  # L4 ILBs will have a backend service and load_balancing_scheme=INTERNAL.
  if (
      config.loadBalancingScheme
      == compute_messages.ForwardingRule.LoadBalancingSchemeValueValuesEnum(
          'INTERNAL'
      )
  ):
    if config.backendService:
      load_balancer_type = 'regionalL4ilb'
    else:
      raise UnsupportedLoadBalancingScheme(
          'Network Passthrough Internal Load Balancers must have a backend'
          ' service.'
      )
  # L7 ILBs will have a HTTPx proxy and load_balancing_scheme=INTERNAL_MANAGED.
  elif (
      config.loadBalancingScheme
      == compute_messages.ForwardingRule.LoadBalancingSchemeValueValuesEnum(
          'INTERNAL_MANAGED'
      )
      and (
          '/targetHttpProxies/' in config.target
          or '/targetHttpsProxies/' in config.target
      )
  ):
    if '/regions/' in config.target:
      load_balancer_type = 'regionalL7ilb'
    else:
      load_balancer_type = 'globalL7ilb'
  load_balancer_target.ipAddress = config.IPAddress
  compute_tcp_enum = compute_messages.ForwardingRule.IPProtocolValueValuesEnum(
      'TCP'
  )
  # DNS load balancer targets only distinguish TCP from UDP; every non-TCP
  # protocol is recorded as 'udp'.
  ip_protocol = 'tcp' if config.IPProtocol == compute_tcp_enum else 'udp'
  load_balancer_target.networkUrl = config.network
  if config.allPorts:
    load_balancer_target.port = '80'  # Any random port
  elif not config.ports:
    # NOTE(review): assumes portRange is set whenever neither allPorts nor
    # ports is - confirm against the forwarding rule API invariants.
    load_balancer_target.port = config.portRange.split('-')[0]
  else:
    load_balancer_target.port = config.ports[0]
  # The dev/v2 APIs expect upper snake_case enum values (CamelCaseToSnakeCase
  # also upper-cases); v1 keeps the camelCase strings built above.
  if api_version in ['dev', 'v2']:
    load_balancer_type = util.CamelCaseToSnakeCase(load_balancer_type)
    ip_protocol = util.CamelCaseToSnakeCase(ip_protocol)
  load_balancer_target.ipProtocol = dns_messages.RRSetRoutingPolicyLoadBalancerTarget.IpProtocolValueValuesEnum(
      ip_protocol
  )
  if load_balancer_type:
    load_balancer_target.loadBalancerType = dns_messages.RRSetRoutingPolicyLoadBalancerTarget.LoadBalancerTypeValueValuesEnum(
        load_balancer_type
    )
  return load_balancer_target
def GetLoadBalancerConfigFromUrl(
    compute_client, compute_messages, forwarding_rule
):
  """Attempts to fetch the configuration for the given forwarding rule.

  If forwarding_rule is not the self_link for a forwarding rule,
  one of resources.WrongResourceCollectionException or
  resources.RequiredFieldOmittedException will be thrown, which must be handled
  by the caller.

  Args:
    compute_client: The configured GCE client for this invocation
    compute_messages: The configured GCE API protobufs for this invocation
    forwarding_rule: The (presumed) selfLink for a GCE forwarding rule

  Returns:
    ForwardingRule, the forwarding rule configuration specified by
    forwarding_rule
  """
  try:
    # First try to interpret the URL as a regional forwarding rule.
    resource = resources.REGISTRY.Parse(
        forwarding_rule, collection='compute.forwardingRules'
    ).AsDict()
    return compute_client.forwardingRules.Get(
        compute_messages.ComputeForwardingRulesGetRequest(
            project=resource['project'],
            region=resource['region'],
            forwardingRule=resource['forwardingRule'],
        )
    )
  except (
      resources.WrongResourceCollectionException,
      resources.RequiredFieldOmittedException,
  ):
    # Not a regional URL; retry as a global forwarding rule. A second parse
    # failure deliberately propagates to the caller.
    resource = resources.REGISTRY.Parse(
        forwarding_rule, collection='compute.globalForwardingRules'
    ).AsDict()
    return compute_client.globalForwardingRules.Get(
        compute_messages.ComputeGlobalForwardingRulesGetRequest(
            project=resource['project'],
            forwardingRule=resource['forwardingRule'],
        )
    )
def GetHealthCheckSelfLink(health_check: str, project: str):
  """Resolves a health check name to its full self-link URL in project."""
  health_check_ref = resources.REGISTRY.Parse(
      health_check,
      collection='compute.healthChecks',
      params={'project': project},
  )
  return health_check_ref.SelfLink()
def IsIPv4(ip: str) -> bool:
  """Tells whether ip parses as an IPv4 address."""
  try:
    ipaddr.IPv4Address(ip)
  except ValueError:
    return False
  return True
def IsIPv6(ip: str) -> bool:
  """Tells whether ip parses as an IPv6 address."""
  try:
    ipaddr.IPv6Address(ip)
  except ValueError:
    return False
  return True
def SplitItemByDelimiter(
    item: Mapping[str, Any], key: str, delimiter: str
) -> Collection[str]:
  """Returns item[key] split on delimiter, or [] if the value is missing/empty."""
  value = item.get(key)
  if not value:
    return []
  return value.split(delimiter)
def ParseRoutingPolicy(
    args: arg_parsers.ArgDict,
    item: Mapping[str, Any],
    quoted_text: bool,
) -> RoutingPolicyItem:
  """Parses the routing policy from the given args.

  Args:
    args: The arguments to use to parse the routing policy.
    item: The routing policy item to parse.
    quoted_text: [bool], whether to quote the rrdatas (TXT/SPF records).

  Returns:
    RoutingPolicyItem, containing the parsed routing policy:
      item_key: The weight (WRR) or location (GEO/FAILOVER) of the item.
      routing_policy_data: the rrdatas, health checked ips, and internal
        load balancers of the item.

  Raises:
    EitherWeightOrLocationSpecified: The Routing policy item should have either
      weight or location specified depending on the routing policy type.
    ForwardingRuleWithoutHealthCheck: Forwarding rules specified without
      enabling health check.
    ExternalEndpointsWithoutHealthCheck: External endpoints specified without
      enabling health check.
    HealthCheckOnlyWithExternalEndpoints: The internet health check flag should
      be set iff there are external endpoints.
  """
  routing_policy_type = args.routing_policy_type
  key = ''
  is_routing_policy_item = False
  # Health checking only makes sense for address records; FAILOVER forces it
  # on below since failover policies are only valid for A/AAAA.
  rrtype_supports_health_checking = args.type == 'A' or args.type == 'AAAA'
  if routing_policy_type == 'WRR':
    key = 'weight'
    is_routing_policy_item = args.IsSpecified('routing_policy_item')
    # A WRR item must not carry the GEO-only 'location' field.
    if is_routing_policy_item and item.get('location') is not None:
      raise EitherWeightOrLocationSpecified(
          'Weighted round robin routing policies should only specify the item'
          ' weight.'
      )
  elif routing_policy_type == 'GEO':
    key = 'location'
    is_routing_policy_item = args.IsSpecified('routing_policy_item')
    # A GEO item must not carry the WRR-only 'weight' field.
    if is_routing_policy_item and item.get('weight') is not None:
      raise EitherWeightOrLocationSpecified(
          'Geolocation routing policies should only specify the item location.'
      )
  elif routing_policy_type == 'FAILOVER':
    is_routing_policy_item = args.IsSpecified('routing_policy_backup_item')
    key = 'location'
    # Failover is only valid for A/AAAA
    rrtype_supports_health_checking = True
  if is_routing_policy_item:
    # Structured --routing-policy-item / --routing-policy-backup-item form.
    item_key = item.get(key)
    routing_policy_data = ParseRoutingPolicyItem(
        item, rrtype_supports_health_checking
    )
  else:
    # Legacy "key=value1,value2" --routing-policy-data form.
    item_key = item['key']
    routing_policy_data = ParseRoutingPolicyData(
        item['values'], rrtype_supports_health_checking
    )
  rrdatas = routing_policy_data.rrdatas
  health_checked_ips = routing_policy_data.health_checked_ips
  internal_load_balancers = routing_policy_data.internal_load_balancers
  if quoted_text:
    # Quoting mutates the list held by routing_policy_data in place, so the
    # returned RoutingPolicyItem sees the quoted values.
    for i, datum in enumerate(rrdatas):
      rrdatas[i] = import_util.QuotedText(datum)
  # Validate the lists
  # Public Policy
  # NOTE(review): unlike the guarded check just below, this assumes args
  # always has a health_check attribute - confirm against calling surfaces.
  if health_checked_ips and not args.health_check:
    raise ExternalEndpointsWithoutHealthCheck(
        'Specifying external_endpoints enables health checking. '
        'If this is intended, set --health-check.'
    )
  if (
      hasattr(args, 'health_check')
      and args.health_check
      and internal_load_balancers
  ):
    raise HealthCheckOnlyWithExternalEndpoints(
        '--health-check cannot be specified alongside internal load balancers.'
    )
  # Private Policy
  if internal_load_balancers and not args.enable_health_checking:
    raise ForwardingRuleWithoutHealthCheck(
        'Specifying a forwarding rule enables health checking. '
        'If this is intended, set --enable-health-checking.'
    )
  if args.enable_health_checking and health_checked_ips:
    raise HealthCheckOnlyWithExternalEndpoints(
        'When --enable-health-checking is specified you cannot specify'
        ' health checked ips.'
    )
  return RoutingPolicyItem(
      item_key, routing_policy_data
  )
def ParseRoutingPolicyItem(
    item: Mapping[str, Any], rrtype_supports_health_checking: bool
) -> RoutingPolicyData:
  """Validates and splits a single structured routing policy item.

  Args:
    item: The routing policy item mapping to parse.
    rrtype_supports_health_checking: [bool], True when the record type is
      A/AAAA (or treated as such for failover policies).

  Returns:
    RoutingPolicyData holding the item's rrdatas, health checked external
    endpoints, and internal load balancer references.

  Raises:
    arg_parsers.ArgumentTypeError: if any entry is invalid for the record type.
  """
  endpoints = SplitItemByDelimiter(item, 'external_endpoints', ';')
  if any(not IsIpAddress(endpoint) for endpoint in endpoints):
    raise arg_parsers.ArgumentTypeError(
        'Each health checked IP should be an IP address.'
    )
  load_balancers = SplitItemByDelimiter(item, 'internal_load_balancers', ';')
  if any(not IsForwardingRule(rule) for rule in load_balancers):
    raise arg_parsers.ArgumentTypeError(
        'Each internal load balancer should be in the format of'
        ' forwarding rule name optionally followed by its scope.'
    )
  rrdatas = SplitItemByDelimiter(item, 'rrdatas', ';')
  if not rrtype_supports_health_checking:
    # Non-address record types cannot carry health-checked targets at all.
    if load_balancers or endpoints:
      raise arg_parsers.ArgumentTypeError(
          'Routing policy items for this record type can only specify rrdatas.'
      )
  elif any(not IsIpAddress(datum) for datum in rrdatas):
    # For A/AAAA, every plain rrdata must itself be an IP address.
    raise arg_parsers.ArgumentTypeError(
        'Each rrdata should be an IP address.'
    )
  return RoutingPolicyData(rrdatas, endpoints, load_balancers)
def ParseRoutingPolicyData(
    data: str, rrtype_supports_health_checking: bool
) -> RoutingPolicyData:
  """Splits comma-separated routing policy data into rrdatas and targets.

  Args:
    data: The comma-separated routing policy data string to parse.
    rrtype_supports_health_checking: [bool], True when the record type is
      A/AAAA (or treated as such for failover policies).

  Returns:
    RoutingPolicyData with the parsed rrdatas and internal load balancers;
    health checked ips are always empty for this input form.

  Raises:
    arg_parsers.ArgumentTypeError: if an A/AAAA entry is neither an IP address
      nor a forwarding rule reference.
  """
  rrdatas = []
  load_balancers = []
  for entry in data.split(','):
    if IsIpAddress(entry):
      rrdatas.append(entry)
      continue
    if IsForwardingRule(entry):
      load_balancers.append(entry)
      continue
    # For A/AAAA, we only support IP address or a forwarding rule name.
    if rrtype_supports_health_checking:
      raise arg_parsers.ArgumentTypeError(
          'Each policy rdata item should either be an IP address or a'
          ' forwarding rule name optionally followed by its scope.'
      )
    # Other record types accept arbitrary rrdata text; it is collected here
    # and merged back into rrdatas below.
    load_balancers.append(entry)
  if not rrtype_supports_health_checking:
    # Merge the rrdatas and internal load balancers: non-address types have
    # no real load balancer targets.
    rrdatas += load_balancers
    load_balancers = []
  # Health checked ips are never produced from routing policy data.
  return RoutingPolicyData(rrdatas, [], load_balancers)
def IsForwardingRule(forwarding_rule: str) -> bool:
  """Tells whether the string looks like a forwarding rule reference."""
  parts = forwarding_rule.split('@')
  if len(parts) == 2:
    # 'name@scope' form is always a forwarding rule.
    return True
  # A bare token counts as a rule name only when it is not an IP address.
  return len(parts) == 1 and not IsIpAddress(forwarding_rule)
def IsIpAddress(ip: str) -> bool:
  """Tells whether ip is a plain IPv4/IPv6 address (no '@' scope suffix)."""
  if len(ip.split('@')) != 1:
    return False
  return IsIPv4(ip) or IsIPv6(ip)
def CreateRecordSetFromArgs(
    args,
    project,
    api_version='v1',
):
  """Creates and returns a record-set from the given args.

  Args:
    args: The arguments to use to create the record-set.
    project: The GCP project where these resources are to be created.
    api_version: [str], the api version to use for creating the RecordSet.

  Raises:
    UnsupportedRecordType: If given record-set type is not supported
    ForwardingRuleWithoutHealthCheck: If forwarding rules are specified without
      enabling health check.
    ForwardingRuleNotFound: Either the forwarding rule doesn't exist, or
      multiple forwarding rules present with the same name - across different
      regions.
    HealthCheckWithoutForwardingRule: Health check enabled but no forwarding
      rules present.
    ExternalEndpointsWithoutHealthCheck: External endpoints specified without
      enabling health check.
    HealthCheckWithoutExternalEndpoints: Health check enabled but no external
      endpoints present.
    EitherWeightOrLocationSpecified: The Routing policy item should have either
      weight or location specified depending on the routing policy type.
    HealthCheckOnlyWithRoutingPolicyItem: The internet health check flag should
      be set only with routing policy item and not the routing policy data.
    HealthCheckOnlyWithExternalEndpoints: The internet health check flag should
      be set iff there are external endpoints.
    HealthCheckOnlyForARecordType: The health check flags should be set only for
      A/AAAA record type.

  Returns:
    ResourceRecordSet, the record-set created from the given args.
  """
  messages = apis.GetMessagesModule('dns', api_version)
  if args.type in record_types.CLOUD_DNS_EXTENDED_TYPES:
    # Extended records are internal to Cloud DNS, so don't have wire values.
    rd_type = rdatatype.NONE
  else:
    rd_type = _TryParseRRTypeFromString(args.type)
  record_set = messages.ResourceRecordSet()
  # Need to assign kind to default value for useful equals comparisons.
  record_set.kind = record_set.kind
  record_set.name = util.AppendTrailingDot(args.name)
  record_set.ttl = args.ttl
  record_set.type = args.type
  includes_forwarding_rules = False
  includes_external_endpoints = False
  # Health-check flags are only valid for address records.
  if args.type != 'A' and args.type != 'AAAA':
    if (hasattr(args, 'health_check') and args.health_check) or (
        hasattr(args, 'enable_health_checking') and args.enable_health_checking
    ):
      raise HealthCheckOnlyForARecordType(
          '--health-check or --enable-health-checking can only be set for'
          ' A/AAAA record type.'
      )
  if args.rrdatas:
    # Plain record-set without a routing policy.
    record_set.rrdatas = args.rrdatas
    if rd_type is rdatatype.TXT or rd_type is rdatatype.SPF:
      # TXT/SPF rrdatas are stored in quoted presentation form.
      record_set.rrdatas = [
          import_util.QuotedText(datum) for datum in args.rrdatas
      ]
  elif args.routing_policy_type == 'WRR' or args.routing_policy_type == 'GEO':
    if args.routing_policy_type == 'WRR':
      record_set.routingPolicy = messages.RRSetRoutingPolicy(
          wrr=messages.RRSetRoutingPolicyWrrPolicy(items=[])
      )
    else:
      record_set.routingPolicy = messages.RRSetRoutingPolicy(
          geo=messages.RRSetRoutingPolicyGeoPolicy(items=[])
      )
      if args.enable_geo_fencing:
        record_set.routingPolicy.geo.enableFencing = args.enable_geo_fencing
    # --health-check only combines with the structured item flag.
    if hasattr(args, 'health_check') and args.health_check:
      if args.IsSpecified('routing_policy_data'):
        raise HealthCheckOnlyWithRoutingPolicyItem(
            '--health-check can only be set alongside --routing-policy-item.'
        )
    items = (
        args.routing_policy_item
        if args.IsSpecified('routing_policy_item')
        else args.routing_policy_data
    )
    for item in items:
      parsed_routing_policy = ParseRoutingPolicy(
          args,
          item,
          rd_type is rdatatype.TXT or rd_type is rdatatype.SPF,
      )
      val = parsed_routing_policy.item_key
      rrdatas = parsed_routing_policy.routing_policy_data.rrdatas
      health_checked_ips = (
          parsed_routing_policy.routing_policy_data.health_checked_ips
      )
      internal_load_balancers = (
          parsed_routing_policy.routing_policy_data.internal_load_balancers
      )
      if internal_load_balancers:
        # At least one forwarding rule is specified
        includes_forwarding_rules = True
      if health_checked_ips:
        # At least one external endpoint is specified
        includes_external_endpoints = True
      # Resolve each forwarding rule reference into a LoadBalancerTarget.
      targets = [
          GetLoadBalancerTarget(config, api_version, project)
          for config in internal_load_balancers
      ]
      health_checked_targets = messages.RRSetRoutingPolicyHealthCheckTargets()
      if targets:
        health_checked_targets.internalLoadBalancers = targets
      if health_checked_ips:
        health_checked_targets.externalEndpoints = health_checked_ips
      if args.routing_policy_type == 'WRR':
        record_set.routingPolicy.wrr.items.append(
            messages.RRSetRoutingPolicyWrrPolicyWrrPolicyItem(
                weight=float(val),
                rrdatas=rrdatas,
                healthCheckedTargets=health_checked_targets,
            )
        )
      else:
        record_set.routingPolicy.geo.items.append(
            messages.RRSetRoutingPolicyGeoPolicyGeoPolicyItem(
                location=val,
                rrdatas=rrdatas,
                healthCheckedTargets=health_checked_targets,
            )
        )
  elif args.routing_policy_type == 'FAILOVER':
    # Failover policies always require one of the health-check flags.
    if not args.enable_health_checking and not args.health_check:
      raise ForwardingRuleWithoutHealthCheck(
          'Failover policy needs to have health checking enabled. '
          'Set --enable-health-checking or --health-check.'
      )
    record_set.routingPolicy = messages.RRSetRoutingPolicy(
        primaryBackup=messages.RRSetRoutingPolicyPrimaryBackupPolicy(
            primaryTargets=messages.RRSetRoutingPolicyHealthCheckTargets(),
            backupGeoTargets=messages.RRSetRoutingPolicyGeoPolicy(items=[]),
        )
    )
    if args.backup_data_trickle_ratio:
      record_set.routingPolicy.primaryBackup.trickleTraffic = (
          args.backup_data_trickle_ratio
      )
    if hasattr(args, 'health_check') and args.health_check:
      if args.IsSpecified('routing_policy_backup_data'):
        raise HealthCheckOnlyWithRoutingPolicyItem(
            '--health-check can only be set alongside'
            ' --routing-policy-backup-item.'
        )
      # With --health-check, primary data must be external IP addresses.
      for ip_address in args.routing_policy_primary_data:
        if IsIpAddress(ip_address):
          record_set.routingPolicy.primaryBackup.primaryTargets.externalEndpoints.append(
              ip_address
          )
        else:
          raise arg_parsers.ArgumentTypeError(
              'The primary data should be a list of IP addresses.'
          )
      includes_external_endpoints = True
    elif args.enable_health_checking:
      # With --enable-health-checking, primary data must be forwarding rules.
      for target in args.routing_policy_primary_data:
        if IsForwardingRule(target):
          record_set.routingPolicy.primaryBackup.primaryTargets.internalLoadBalancers.append(
              GetLoadBalancerTarget(target, api_version, project)
          )
        else:
          raise arg_parsers.ArgumentTypeError(
              'The primary data should be a list of forwarding rules.'
          )
      includes_forwarding_rules = True
    if args.routing_policy_backup_data_type == 'GEO':
      if args.enable_geo_fencing:
        record_set.routingPolicy.primaryBackup.backupGeoTargets.enableFencing = (
            args.enable_geo_fencing
        )
      items = (
          args.routing_policy_backup_item
          if args.IsSpecified('routing_policy_backup_item')
          else args.routing_policy_backup_data
      )
      for item in items:
        # Backup targets are never quoted (failover is A/AAAA only).
        parsed_routing_policy = ParseRoutingPolicy(
            args,
            item,
            False,
        )
        val = parsed_routing_policy.item_key
        rrdatas = parsed_routing_policy.routing_policy_data.rrdatas
        health_checked_ips = (
            parsed_routing_policy.routing_policy_data.health_checked_ips
        )
        internal_load_balancers = (
            parsed_routing_policy.routing_policy_data.internal_load_balancers
        )
        targets = [
            GetLoadBalancerTarget(config, api_version, project)
            for config in internal_load_balancers
        ]
        health_checked_targets = messages.RRSetRoutingPolicyHealthCheckTargets()
        if targets:
          health_checked_targets.internalLoadBalancers = targets
        if health_checked_ips:
          health_checked_targets.externalEndpoints = health_checked_ips
        record_set.routingPolicy.primaryBackup.backupGeoTargets.items.append(
            messages.RRSetRoutingPolicyGeoPolicyGeoPolicyItem(
                location=val,
                rrdatas=rrdatas,
                healthCheckedTargets=health_checked_targets,
            )
        )
  if hasattr(args, 'health_check') and args.health_check:
    record_set.routingPolicy.healthCheck = GetHealthCheckSelfLink(
        args.health_check, project
    )
  # Final cross-checks: each health-check flag must have produced at least one
  # matching target somewhere above.
  if (
      not includes_forwarding_rules
      and hasattr(args, 'enable_health_checking')
      and args.enable_health_checking
  ):
    raise HealthCheckWithoutForwardingRule(
        '--enable-health-check is set, but no forwarding rules are provided. '
        'Either remove the --enable-health-check flag, or provide the '
        'forwarding rule names instead of IP addresses for the rules to be '
        'health checked.'
    )
  if (
      not includes_external_endpoints
      and hasattr(args, 'health_check')
      and args.health_check
  ):
    raise HealthCheckWithoutExternalEndpoints(
        '--health-check is set, but no external endpoints are provided. '
        'Either remove the --health-check flag, or provide the '
        'external endpoints to be health checked.'
    )
  return record_set

View File

@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub implementation of SVCB and HTTPS records.
This module can be removed after updating to dnspython 2.x, which has built-in
support for these types. (dnspython 2.x only supports Python 3, but this
codebase requires support for Python 2, so it is still using dnspython 1.x.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from dns import rdata
from dns.name import Name
from dns.tokenizer import Tokenizer
class _StubSVCB(rdata.Rdata):
  """Stub implementation of SVCB RDATA.

  Wire format support is not needed here, so only trivial storage of the
  presentation format is implemented.
  """
  def __init__(self, rdclass, rdtype, priority, target, params):
    # type: (int, int, int, Name, list[str]) -> None
    super(_StubSVCB, self).__init__(rdclass, rdtype)
    self._priority = priority  # SvcPriority field.
    self._target = target  # TargetName, a dns.name.Name.
    self._params = params  # Remaining SvcParams, kept as raw text tokens.
  def to_text(self, origin=None, relativize=True, **kwargs):
    """Renders the record back into zone-file presentation format."""
    tokens = [
        '%d' % self._priority,
        self._target.choose_relativity(origin, relativize).to_text(),
    ] + self._params
    return ' '.join(tokens)
  @classmethod
  def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
    # type: (int, int, Tokenizer) -> _StubSVCB
    """Parses '<priority> <target> [params...]' from a zone-file tokenizer."""
    priority = tok.get_uint16()
    target = tok.get_name(origin).choose_relativity(origin, relativize)
    params = []  # type: list[str]
    # Collect the rest of the line verbatim; SvcParams are not interpreted
    # by this stub.
    while True:
      token = tok.get().unescape()
      if token.is_eol_or_eof():
        break
      params.append(token.value)
    return cls(rdclass, rdtype, priority, target, params)
class _FakeModule:
  """Fake module corresponding to dns.rdtypes.IN.SVCB.

  This is needed due to the calling convention of rdata.register_type().
  """
  # Both record types share the same stub implementation.
  SVCB = _StubSVCB
  HTTPS = _StubSVCB
# IANA resource record type codes for SVCB (64) and HTTPS (65); see RFC 9460.
SVCB = 64
HTTPS = 65
def register():
  """Registers the stub SVCB/HTTPS rdata types with dnspython, if absent."""
  try:
    for rdtype_code, mnemonic in ((SVCB, 'SVCB'), (HTTPS, 'HTTPS')):
      rdata.register_type(_FakeModule, rdtype_code, mnemonic)
  except rdata.RdatatypeExists:
    # Either this registration has already run, or we are using dnspython
    # 2.1+, which already implements these types.
    pass

View File

@@ -0,0 +1,155 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for record-set transactions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import yaml
from googlecloudsdk.core.resource import resource_printer
from googlecloudsdk.core.util import files
DEFAULT_PATH = 'transaction.yaml'
class Error(exceptions.Error):
  """Base exception for all transaction errors."""
class TransactionFileAlreadyExists(Error):
  """Transaction file already exists."""
class UnableToAccessTransactionFile(Error):
  """Unable to access transaction file."""
class TransactionFileNotFound(Error):
  """Transaction file not found."""
class CorruptedTransactionFileError(Error):
  """Transaction file cannot be parsed into a valid change."""
  def __init__(self):
    # The message is fixed: the only recovery from corruption is to abort
    # and start a fresh transaction.
    super(CorruptedTransactionFileError, self).__init__(
        'Corrupted transaction file.\n\n'
        'Please abort and start a new transaction.')
class RecordDoesNotExist(Error):
  """Specified record-set does not exist."""
def WriteToYamlFile(yaml_file, change):
  """Writes the given change in yaml format to the given file.

  Args:
    yaml_file: file, File into which the change should be written.
    change: Change, Change to be written out.
  """
  # The printer expects a list of resources, hence the single-element list.
  resource_printer.Print([change], print_format='yaml', out=yaml_file)
def _RecordSetsFromDictionaries(messages, record_set_dictionaries):
  """Converts list of record-set dictionaries into list of ResourceRecordSets.

  Args:
    messages: Messages object for the API with Record Sets to be created.
    record_set_dictionaries: [{str:str}], list of record-sets as dictionaries.

  Returns:
    list of ResourceRecordSets equivalent to given list of yaml record-sets
  """
  record_sets = []
  for record_set_dict in record_set_dictionaries:
    record_set = messages.ResourceRecordSet()
    # Need to assign kind to default value for useful equals comparisons.
    record_set.kind = record_set.kind
    # NOTE(review): assumes every entry carries name/ttl/type/rrdatas; a
    # missing key raises KeyError rather than a transaction error - confirm
    # that's acceptable for hand-edited transaction files.
    record_set.name = record_set_dict['name']
    record_set.ttl = record_set_dict['ttl']
    record_set.type = record_set_dict['type']
    record_set.rrdatas = record_set_dict['rrdatas']
    record_sets.append(record_set)
  return record_sets
def ChangeFromYamlFile(yaml_file, api_version='v1'):
  """Returns the change contained in the given yaml file.

  Args:
    yaml_file: file, A yaml file with change.
    api_version: [str], the api version to use for creating the change object.

  Returns:
    Change, the change contained in the given yaml file.

  Raises:
    CorruptedTransactionFileError: if the record_set_dictionaries are invalid
  """
  messages = apis.GetMessagesModule('dns', api_version)
  try:
    # An empty file yields None; treat it as an empty dict so the section
    # check below reports corruption instead of crashing.
    change_dict = yaml.load(yaml_file) or {}
  except yaml.YAMLParseError:
    raise CorruptedTransactionFileError()
  # Both sections must be present (possibly as empty lists) for the file to
  # be a valid transaction.
  if (change_dict.get('additions') is None or
      change_dict.get('deletions') is None):
    raise CorruptedTransactionFileError()
  change = messages.Change()
  change.additions = _RecordSetsFromDictionaries(
      messages, change_dict['additions'])
  change.deletions = _RecordSetsFromDictionaries(
      messages, change_dict['deletions'])
  return change
class TransactionFile(object):
  """Context for reading/writing from/to a transaction file."""
  def __init__(self, trans_file_path, mode='r'):
    """Opens the existing transaction file.

    Args:
      trans_file_path: str, path to the transaction file.
      mode: 'r' to read or 'w' to overwrite the file.

    Raises:
      TransactionFileNotFound: if the file does not already exist (required
        even in write mode - transactions are started elsewhere).
      UnableToAccessTransactionFile: if the file cannot be opened.
      ValueError: if mode is neither 'r' nor 'w'.
    """
    if not os.path.isfile(trans_file_path):
      raise TransactionFileNotFound(
          'Transaction not found at [{0}]'.format(trans_file_path))
    self.__trans_file_path = trans_file_path
    try:
      if mode == 'r':
        self.__trans_file = files.FileReader(trans_file_path)
      elif mode == 'w':
        self.__trans_file = files.FileWriter(trans_file_path)
      else:
        raise ValueError('Unrecognized mode [{}]'.format(mode))
    except IOError as exp:
      msg = 'Unable to open transaction [{0}] because [{1}]'
      msg = msg.format(trans_file_path, exp)
      raise UnableToAccessTransactionFile(msg)
  def __enter__(self):
    # The context value is the underlying file object, not this wrapper.
    return self.__trans_file
  def __exit__(self, typ, value, traceback):
    self.__trans_file.close()
    # Translate I/O and YAML failures into the command-friendly exception;
    # any other exception type propagates unchanged.
    # NOTE(review): `is` matches the exact class only, so IOError subclasses
    # raised in the body are not translated - confirm this is intended.
    if typ is IOError or typ is yaml.Error:
      msg = 'Unable to read/write transaction [{0}] because [{1}]'
      msg = msg.format(self.__trans_file_path, value)
      raise UnableToAccessTransactionFile(msg)

View File

@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for the dns tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
def AppendTrailingDot(name):
  """Returns name with a trailing dot; empty or already-dotted names unchanged."""
  if not name or name.endswith('.'):
    return name
  return name + '.'
# Camel case to snake case utils
# First pass: insert '_' before a capital that starts a lowercase/digit run.
_first_cap_re = re.compile('(.)([A-Z][a-z0-9]+)')
# Second pass: insert '_' between a lowercase/digit and a following capital.
_all_cap_re = re.compile('([a-z0-9])([A-Z])')
def CamelCaseToSnakeCase(name):
  """Converts a camelCase name into upper-case SNAKE_CASE."""
  with_breaks = _first_cap_re.sub(r'\1_\2', name)
  return _all_cap_re.sub(r'\1_\2', with_breaks).upper()
def GetRegistry(version):
  """Returns a resource registry clone with the given dns API version registered."""
  registry = resources.REGISTRY.Clone()
  registry.RegisterApiByName('dns', version)
  return registry
def GetApiFromTrack(track):
  """Returns the Cloud DNS API version for the given calliope release track."""
  if track == base.ReleaseTrack.GA:
    return 'v1'
  if track == base.ReleaseTrack.BETA:
    return 'v1beta2'
  if track == base.ReleaseTrack.ALPHA:
    return 'v1alpha2'
  # Any other track falls through to an implicit None.
def GetApiClient(version):
  """Returns the Cloud DNS client instance for the given API version."""
  return apis.GetClientInstance('dns', version)
# Prepare necessary parameters for registry to return the correct resource name.
def GetParamsForRegistry(version, args, parent=None):
  """Builds the resource-registry params dict for the given API version/args."""
  params = {'project': properties.VALUES.core.project.GetOrFail}
  # The v2 API is regionalized, so resource names also carry a location.
  if version == 'v2':
    params['location'] = args.location
  if parent == 'managedZones':
    params['managedZone'] = args.zone
  elif parent == 'responsePolicies':
    params['responsePolicy'] = args.response_policy
  return params
def GetApiFromTrackAndArgs(track, args):
  """Picks the API version from the parsed args, falling back to the track."""
  if not args.IsSpecified('location'):
    # No location specified: use the v1-family API chosen by the track.
    return GetApiFromTrack(track)
  # A location is only supported by the regionalized v2 API.
  return 'v2'