feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,48 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for Managed Kafka arguments."""
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.command_lib.util.apis import yaml_data
from googlecloudsdk.command_lib.util.concepts import concept_parsers
def AddSchemaRegistryArgToParser(parser):
  """Adds the positional schema registry resource argument to a parser.

  Args:
    parser: the argparse parser to register the resource argument on.
  """
  registry_yaml = yaml_data.ResourceYAMLData.FromPath(
      'managed_kafka.schema_registry'
  )
  registry_spec = concepts.ResourceSpec.FromYaml(
      registry_yaml.GetData(), is_positional=True
  )
  concept_parsers.ConceptParser.ForResource(
      'schema_registry',
      registry_spec,
      'The schema registry resource.',
      required=True,
  ).AddToParser(parser)
def AddSubjectArgToParser(parser):
  """Adds the positional subject resource argument to a parser.

  The schema registry attribute of the subject resource is exposed to users
  as the `--registry` flag.

  Args:
    parser: the argparse parser to register the resource argument on.
  """
  subject_yaml = yaml_data.ResourceYAMLData.FromPath('managed_kafka.subject')
  subject_spec = concepts.ResourceSpec.FromYaml(
      subject_yaml.GetData(), is_positional=True
  )
  concept_parsers.ConceptParser.ForResource(
      'subject',
      subject_spec,
      'The subject resource.',
      required=True,
      flag_name_overrides={'schema-registry': '--registry'},
  ).AddToParser(parser)

View File

@@ -0,0 +1,425 @@
cpu:
api_field: cluster.capacityConfig.vcpuCount
arg_name: cpu
processor: googlecloudsdk.command_lib.managed_kafka.util:ValidateCPU
help_text: |
The number of vCPUs to provision for the cluster. The minimum is 3.
memory:
api_field: cluster.capacityConfig.memoryBytes
arg_name: memory
type: googlecloudsdk.core.util.scaled_integer:ParseInteger
help_text: |
The memory to provision for the cluster in bytes. The value must be between 1 GiB and 8 GiB
per vCPU.
Ex. 1024Mi, 4Gi.
subnets:
arg_name: subnets
type: "googlecloudsdk.calliope.arg_parsers:ArgList:"
help_text: |
A comma-separated list of VPC subnets from which the cluster is accessible. Both broker and bootstrap server
IP addresses and DNS entries are automatically created in each subnet. Only one subnet per
network is allowed, and the subnet must be located in the same region as the cluster.
The project may differ. A minimum of 1 subnet is required. A maximum of 10 subnets can be
specified. Use commas to separate multiple subnets. The name of the subnet must be in the format
projects/``PROJECT_ID''/regions/``REGION''/subnetworks/``SUBNET''.
labels:
api_field: cluster.labels
arg_name: labels
metavar: KEY=VALUE
type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
help_text: |
List of label KEY=VALUE pairs to add.
Keys must start with a lowercase character and contain only hyphens (`-`),
underscores (```_```), lowercase characters, and numbers. Values must contain only
hyphens (`-`), underscores (```_```), lowercase characters, and numbers.
encryption-key:
api_field: cluster.gcpConfig.kmsKey
arg_name: encryption-key
help_text: |
The relative resource path of the Cloud KMS key to use for encryption in the form:
projects/``PROJECT_ID''/locations/``LOCATION''/keyRings/``KEY_RING''/cryptoKeys/``KEY''.
The key must be located in the same region as the cluster. The key cannot be changed once set.
auto-rebalance:
api_field: cluster.rebalanceConfig.mode
arg_name: auto-rebalance
action: store_true
help_text: |
Whether the automatic rebalancing is enabled. If automatic rebalancing
is enabled, topic partitions are rebalanced among brokers when the number of
CPUs in the cluster changes. Automatic rebalancing is enabled by default.
Use --no-auto-rebalance to disable this flag.
choices:
- arg_value: true
enum_value: AUTO_REBALANCE_ON_SCALE_UP
- arg_value: false
enum_value: NO_REBALANCE
mtls-ca-pools:
arg_name: mtls-ca-pools
type: "googlecloudsdk.calliope.arg_parsers:ArgList:"
help_text: |
A comma-separated list of CA pools from the Google Cloud Certificate Authority Service.
The root certificates of these CA pools will be installed in the truststore of each broker
in the cluster for use with mTLS. A maximum of 10 CA pools can be specified.
CA pools can be in a different project and region than the cluster.
This command overwrites the entire set of pools currently configured on the cluster.
If you want to add a new pool to an existing configuration, you must provide the full list of
both the old and new CA pools in the command.
Each CA pool must be in the format
projects/``PROJECT_ID''/locations/``LOCATION''/caPools/``CA_POOL''. Clear the CA pools
using the `--clear-mtls-ca-pools` flag.
clear-mtls-ca-pools:
arg_name: clear-mtls-ca-pools
action: store_true
help_text: |
Remove all the CA pools from the cluster. This will remove all root certificates
from the truststore of each broker in the cluster.
allow-broker-downscale-on-cluster-upscale:
api_field: cluster.updateOptions.allowBrokerDownscaleOnClusterUpscale
arg_name: allow-broker-downscale-on-cluster-upscale
action: store_true
default: null
help_text: |
If enabled, this setting allows an update operation that could significantly decrease the
per-broker vCPU and/or memory allocation, which can lead to reduced performance and
availability. By default, an update operation will fail if it results in a reduction of 10% or
more to the brokers' vCPU or memory allocation.
ssl-principal-mapping-rules:
api_field: cluster.tlsConfig.sslPrincipalMappingRules
arg_name: ssl-principal-mapping-rules
type: str
help_text: |
The rules for mapping mTLS certificate Distinguished Names (DNs) to shortened principal names
for Kafka ACLs. This flag corresponds exactly to the `ssl.principal.mapping.rules` broker config
and matches the format and syntax defined in the Apache Kafka documentation.
Setting or modifying this field will trigger a rolling restart of the Kafka brokers
to apply the change. An empty string means that the default Kafka behavior is used.
Example: "RULE:^CN=(.*?),OU=ServiceUsers.*$/$1@example.com/,DEFAULT"
broker-disk-size-gib:
api_field: cluster.brokerCapacityConfig.diskSizeGib
arg_name: broker-disk-size-gib
help_text: |
The amount of local disk to provision for each broker in Gibibytes. Minimum: 100 GiB.
# Trying to define this as a resource causes test failures in yaml_command_schema_test, so we
# define it as a flag instead.
kafka-cluster:
arg_name: kafka-cluster
help_text: |
The resource path of the Kafka cluster to connect to, or the name of the Kafka cluster to
connect to if the cluster is in the same project as the Connect cluster.
connect-cpu:
api_field: connectCluster.capacityConfig.vcpuCount
arg_name: cpu
processor: googlecloudsdk.command_lib.managed_kafka.util:ValidateCPU
help_text: |
The number of vCPUs to provision for the cluster. The minimum is 3.
connect-memory:
api_field: connectCluster.capacityConfig.memoryBytes
arg_name: memory
type: googlecloudsdk.core.util.scaled_integer:ParseInteger
help_text: |
The memory to provision for the cluster in bytes. The value must be between 1 GiB and 8 GiB
per vCPU.
Ex. 1024Mi, 4Gi.
connect-labels:
api_field: connectCluster.labels
arg_name: labels
metavar: KEY=VALUE
type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
help_text: |
List of label KEY=VALUE pairs to add.
Keys must start with a lowercase character and contain only hyphens (`-`),
underscores (```_```), lowercase characters, and numbers. Values must contain only
hyphens (`-`), underscores (```_```), lowercase characters, and numbers.
clear-connect-labels:
arg_name: clear-labels
action: store_true
help_text: |
Remove all the labels from the connect cluster.
connect-encryption-key:
api_field: connectCluster.gcpConfig.kmsKey
arg_name: encryption-key
help_text: |
The relative resource path of the Cloud KMS key to use for encryption in the form:
projects/``PROJECT_ID''/locations/``LOCATION''/keyRings/``KEY_RING''/cryptoKeys/``KEY''.
The key must be located in the same region as the cluster. The key cannot be changed once set.
dns-name:
arg_name: dns-name
api_field: connectCluster.gcpConfig.accessConfig.networkConfigs.dnsDomainNames
repeated: true
action: append
help_text: |
DNS domain name from the subnet's network to be made visible to the Connect Cluster.
secret:
arg_name: secret
api_field: connectCluster.gcpConfig.secretPaths
repeated: true
action: append
help_text: |
Secrets to load into workers. Exact SecretVersions from Secret Manager must
be provided -- aliases are not supported. Up to 32 secrets may be loaded
into one cluster.
Format: projects/<project-id>/secrets/<secret-name>/versions/<version-id>
connect-primary-subnet:
arg_name: primary-subnet
api_field: connectCluster.gcpConfig.accessConfig.networkConfigs.primarySubnet
help_text: |
VPC subnet to make available to the Kafka Connect cluster. Structured
like: projects/{project}/regions/{region}/subnetworks/{subnet_id}.
The primary subnet is used to create a Private Service Connect (PSC) interface for the Kafka Connect workers.
It must be located in the same region as the Connect cluster.
connect-additional-subnet:
arg_name: additional-subnet
repeated: true
help_text: |
Additional subnet to make available to the Kafka Connect cluster. Structured
like: projects/{project}/regions/{region}/subnetworks/{subnet_id}.
# Custom action since we cannot use both "action: append and action.deprecated"
action: googlecloudsdk.command_lib.managed_kafka.util:AdditionalSubnetDeprecationBeforeAppendAction
partitions:
api_field: topic.partitionCount
arg_name: partitions
help_text: |
The number of partitions in a topic. You can increase the partition count for a topic, but you
cannot decrease it. Increasing partitions for a topic that uses a key might change how messages
are distributed.
replication-factor:
api_field: topic.replicationFactor
arg_name: replication-factor
help_text: |
The number of replicas of each partition. A replication factor of 3 is
recommended for high availability.
configs:
api_field: topic.configs
arg_name: configs
metavar: KEY=VALUE
type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
help_text: |
Configuration for the topic that are overridden from the cluster defaults.
The key of the map is a Kafka topic property name, for example:
`cleanup.policy=compact`,`compression.type=producer`. If you provide a map with a key that
already exists, only that configuration is updated. If the map contains a key that does not
exist, the entry is appended to the topic configuration.
connectCluster-configs:
arg_name: configs
api_field: connectCluster.config
metavar: KEY=VALUE
type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
help_text: |
Configuration for the connect cluster that are overridden from the cluster defaults.
The key of the map is a Kafka topic property name, for example:
`cleanup.policy=compact`,`compression.type=producer`.
connectCluster-config-file:
arg_name: config-file
metavar: JSON|YAML|FILE
type: "googlecloudsdk.calliope.arg_parsers:ArgObject:"
help_text: |
The path to the JSON or YAML file containing the configuration that are overridden
from the cluster or connector defaults. This also supports inline JSON or YAML.
connector-configs:
arg_name: configs
api_field: connector.configs
metavar: KEY=VALUE
type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
help_text: |
Configuration for the connector that are overridden from the connector defaults.
The key of the map is a Kafka topic property name, for example:
`cleanup.policy=compact`,`compression.type=producer`.
connector-config-file:
arg_name: config-file
metavar: JSON|YAML|FILE
type: "googlecloudsdk.calliope.arg_parsers:ArgObject:"
help_text: |
The path to the JSON or YAML file containing the configuration that are overridden
from the connector defaults. This also supports inline JSON or YAML.
task-restart-min-backoff:
api_field: connector.taskRestartPolicy.minimumBackoff
arg_name: task-restart-min-backoff
type: googlecloudsdk.core.util.times:ParseDuration
processor: googlecloudsdk.core.util.times:FormatDuration
help_text: |
The minimum amount of time to wait before retrying a failed task in seconds.
This sets a lower bound for the backoff delay. The default value is 60s.
See $ gcloud topic datetimes for information on duration formats.
task-restart-max-backoff:
api_field: connector.taskRestartPolicy.maximumBackoff
arg_name: task-restart-max-backoff
type: googlecloudsdk.core.util.times:ParseDuration
processor: googlecloudsdk.core.util.times:FormatDuration
help_text: |
The maximum amount of time to wait before retrying a failed task in seconds.
This sets an upper bound for the backoff delay. The default value is 1800s (30 minutes).
See $ gcloud topic datetimes for information on duration formats.
task-retry-disabled:
api_field: connector.taskRestartPolicy.taskRetryDisabled
arg_name: task-retry-disabled
type: bool
help_text: |
Disable default task retry policy.
clear-secrets:
arg_name: clear-secrets
action: store_true
help_text: |
Remove all the secrets from the connect cluster.
clear-dns-names:
arg_name: clear-dns-names
action: store_true
help_text: |
Remove all the DNS domain names for the connect cluster.
clear-configs:
arg_name: clear-configs
action: store_true
help_text: |
Remove all the configurations for the topic.
# TODO(b/336117815): Provide hard examples and external docs on this flag.
topics-file:
arg_name: topics-file
type: "googlecloudsdk.calliope.arg_parsers:ArgObject:"
metavar: JSON|YAML|FILE
help_text: |
The path to the JSON or YAML file containing the configuration of the topics to be updated for
the consumer group. This also supports inline JSON or YAML.
acl-entry:
arg_name: acl-entry
api_field: acl.aclEntries
help_text: |
An acl entry that configures access for a principal, for a specific operation on the acl's
resource pattern. This flag can be repeated.
``PRINCIPAL'' is the principal. Specified as Google Cloud account, with the Kafka StandardAuthorizer prefix
"User:". For example: "User:admin@project.iam.gserviceaccount.com".
Can be the wildcard "User:```*```" to refer to all users.
``OPERATION'' is the operation type. Allowed values are: ALL, READ, WRITE,
CREATE, DELETE, ALTER, DESCRIBE, CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS,
IDEMPOTENT_WRITE.
``PERMISSION-TYPE'' is the permission type. Allowed values are: ALLOW, DENY.
``HOST'' is the host. Must be set to "```*```" for Managed Service for Apache Kafka.
Example acl-entry:
"principal=User:admin@project.iam.gserviceaccount.com,operation=ALL,permission-type=ALLOW,host=```*```"
type:
arg_dict:
flatten: false
spec:
- api_field: principal
arg_name: principal
type: str
required: true
- api_field: operation
arg_name: operation
type: str
required: true
- api_field: permissionType
arg_name: permission-type
type: str
required: true
- api_field: host
arg_name: host
type: str
required: true
acl-entries-from-file:
arg_name: acl-entries-from-file
api_field: acl
type: "googlecloudsdk.calliope.arg_parsers:FileContents:"
processor: googlecloudsdk.core.yaml:load
help_text: |
Path to a JSON or YAML file containing the acl entries to use in the acl.
etag:
arg_name: etag
api_field: acl.etag
type: str
required: true
help_text: |
etag returned in the response to a previous create or describe
command. The etag is used for concurrency control, to ensure that the
client and server agree on the current set of acl entries in the Kafka
cluster, before full replacement in the update command.
acl-entry-principal:
api_field: aclEntry.principal
arg_name: principal
type: str
required: true
help_text: |
The principal. Specified as Google Cloud account, with the Kafka StandardAuthorizer prefix
"User:". For example: "User:admin@project.iam.gserviceaccount.com".
Can be the wildcard "User:```*```" to refer to all users.
acl-entry-operation:
api_field: aclEntry.operation
arg_name: operation
type: str
required: true
help_text: |
The operation type. Allowed values are: ALL, READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE,
CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE.
See https://kafka.apache.org/documentation/#operations_resources_and_protocols
for the mapping of operations to Kafka protocols.
acl-entry-permission-type:
api_field: aclEntry.permissionType
arg_name: permission-type
type: str
default: "ALLOW"
help_text: |
The permission type. Allowed values are: ALLOW, DENY.
acl-entry-host:
api_field: aclEntry.host
arg_name: host
type: str
default: '*'
help_text: |
The host. Must be set to "```*```" for Managed Service for Apache Kafka.
full:
api_field: view
arg_name: full
action: store_true
default: null
help_text: |
Show detailed information about individual brokers, such as broker id and zone, as well as the
Kafka version running on the cluster.
choices:
- arg_value: true
enum_value: CLUSTER_VIEW_FULL
- arg_value: false
enum_value: CLUSTER_VIEW_BASIC

View File

@@ -0,0 +1,157 @@
project:
name: project
collection: managedkafka.projects
attributes:
- &project
parameter_name: projectsId
attribute_name: project
help: |
The project name.
property: core/project
location:
name: location
collection: managedkafka.projects.locations
attributes:
- *project
- &location
parameter_name: locationsId
attribute_name: location
help: |
ID of the location of the Managed Service for Apache Kafka resource. See
https://cloud.google.com/managed-service-for-apache-kafka/docs/locations for a list of supported
locations.
cluster:
name: cluster
collection: managedkafka.projects.locations.clusters
request_id_field: clusterId
attributes:
- *location
- &cluster
parameter_name: clustersId
attribute_name: cluster
help: |
The cluster name.
connect_cluster:
name: connect_cluster
collection: managedkafka.projects.locations.connectClusters
request_id_field: connectClusterId
attributes:
- *location
- &connect_cluster
parameter_name: connectClustersId
attribute_name: connect_cluster
help: |
The connect cluster name.
connector:
name: connector
collection: managedkafka.projects.locations.connectClusters.connectors
request_id_field: connectorId
attributes:
- *location
- *connect_cluster
- &connector
parameter_name: connectorsId
attribute_name: connector
help: |
The connector name.
topic:
name: topic
collection: managedkafka.projects.locations.clusters.topics
request_id_field: topicId
attributes:
- *location
- *cluster
- &topic
parameter_name: topicsId
attribute_name: topic
help: |
The topic name.
consumer_group:
name: consumer_group
collection: managedkafka.projects.locations.clusters.consumerGroups
request_id_field: consumerGroupId
attributes:
- *location
- *cluster
- &consumer_group
parameter_name: consumerGroupsId
attribute_name: consumer_group
help: |
The consumer group name.
acl:
name: acl
collection: managedkafka.projects.locations.clusters.acls
request_id_field: aclId
attributes:
- *location
- *cluster
- &acl
parameter_name: aclsId
attribute_name: acl
help: |
The acl name. Represents a single Kafka resource pattern for which the acl's entries apply in
the Kafka cluster, based on the structure of the acl ID. The acl ID must be structured like
one of the following:
For acls on the cluster:
cluster
For acls on a single resource within the cluster:
topic/{resource_name}
consumerGroup/{resource_name}
transactionalId/{resource_name}
For acls on all resources that match a prefix:
topicPrefixed/{resource_name}
consumerGroupPrefixed/{resource_name}
transactionalIdPrefixed/{resource_name}
For acls on all resources of a given type (i.e. the wildcard literal "*"):
allTopics (represents topic/*)
allConsumerGroups (represents consumerGroup/*)
allTransactionalIds (represents transactionalId/*)
operation:
name: operation
collection: managedkafka.projects.locations.operations
attributes:
- *location
- &operation
parameter_name: operationsId
attribute_name: operation
help: |
The operation name.
schema_registry:
name: schema_registry
collection: managedkafka.projects.locations.schemaRegistries
request_id_field: createSchemaRegistryRequest.schemaRegistryId
attributes:
- *project
- *location
- &schema_registry
parameter_name: schemaRegistriesId
attribute_name: schema_registry
help: |
The schema registry name.
subject:
name: subject
collection: managedkafka.projects.locations.schemaRegistries.subjects
request_id_field: createSubjectRequest.subjectId
attributes:
- *project
- *location
- *schema_registry
- &subject
parameter_name: subjectsId
attribute_name: subject
help: |
The subject name.

View File

@@ -0,0 +1,660 @@
# -*- coding: utf-8 -*- #
# Copyright 2024 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library used to support Managed Service for Apache Kafka commands."""
import re
from apitools.base.py import encoding
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk import core
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
# Retrieve all message type for conversions from gcloud primitives to
# apitool types.
_MESSAGE = apis.GetMessagesModule("managedkafka", "v1")
# Broker listener ports appended to the synthesized bootstrap address:
# SASL_PORT for the default listener, MTLS_PORT for the mTLS listener.
SASL_PORT = "9092"
MTLS_PORT = "9192"
# URL path segments used when building schema registry sub-resource paths.
CONTEXTS_RESOURCE_PATH = "/contexts/"
SUBJECTS_RESOURCE_PATH = "/subjects/"
SUBJECTS_MODE_RESOURCE_PATH = "/mode/"
SUBJECTS_CONFIG_RESOURCE_PATH = "/config/"
def ValidateCPU(cpu):
  """Validates that the requested vCPU count meets the service minimum.

  Args:
    cpu: int, the number of vCPUs requested for the cluster.

  Returns:
    The vCPU count, unchanged, when it is valid.

  Raises:
    exceptions.BadArgumentException: if fewer than 3 vCPUs were requested.
  """
  if cpu >= 3:
    return cpu
  raise exceptions.BadArgumentException("--cpu", "CPU must be at least 3")
def PrepareUpdateWithSubnets(_, args, request):
  """Prepare the update request with the information from the subnet flag.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The updated request with the subnet.
  """
  if not args.subnets:
    return request
  # Subnets are wired into the request via hooks rather than the declarative
  # field mappings used by other flags, so when only --subnets is given the
  # cluster message has not been created yet.
  if not request.cluster:
    request.cluster = {}
  request.updateMask = AppendUpdateMask(
      request.updateMask, "gcpConfig.accessConfig.networkConfigs"
  )
  return MapSubnetsToNetworkConfig(_, args, request)
def PrepareUpdateWithCaPools(_, args, request):
  """Prepare the update request with the information from the mTLS CA pool flag.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The updated request with the CA pool.
  """
  cas_configs_mask = "tlsConfig.trustConfig.casConfigs"
  # Clearing only needs the update mask. The clear flag sits in a mutex group
  # with --mtls-ca-pools, so the two can never conflict.
  if args.clear_mtls_ca_pools:
    request.updateMask = AppendUpdateMask(request.updateMask, cas_configs_mask)
    return request
  # If there are no CA pools to update, return the request as is.
  if not args.mtls_ca_pools:
    return request
  # CA pools are wired into the request via hooks rather than the declarative
  # field mappings used by other flags, so when only the CA pool flag is given
  # the cluster message has not been created yet.
  if not request.cluster:
    request.cluster = {}
  request.updateMask = AppendUpdateMask(request.updateMask, cas_configs_mask)
  return MapCaPoolsToCASConfig(_, args, request)
def AppendUpdateMask(update_mask, new_mask):
  """Handles appending a new mask to an existing mask.

  Args:
    update_mask: the existing update mask. May be empty or None, e.g. on a
      freshly constructed patch request whose mask has not been set yet.
    new_mask: the new mask to append.

  Returns:
    The fully appended update mask.
  """
  # Guard against an unset mask: the previous f-string concatenation turned a
  # None mask into the literal string "None,<new_mask>".
  if not update_mask:
    return new_mask
  return f"{update_mask},{new_mask}"
def MapSubnetsToNetworkConfig(_, args, request):
  """Maps the list of subnets from the flag to the API fields in the request.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The updated request with networkConfig in the JSON format.
  """
  # Reuse the existing GCP config message when one was already created.
  if not request.cluster.gcpConfig:
    request.cluster.gcpConfig = {}
  # Full-replacement semantics: the access config is always rebuilt from the
  # flag value.
  request.cluster.gcpConfig.accessConfig = {"networkConfigs": []}
  network_configs = request.cluster.gcpConfig.accessConfig.networkConfigs
  for subnet_path in args.subnets:
    network_configs.append(
        encoding.DictToMessage({"subnet": subnet_path}, _MESSAGE.NetworkConfig)
    )
  return request
def MapCaPoolsToCASConfig(_, args, request):
  """Maps the list of CA pools from the flag to the API fields in the request.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The updated request with CertificateAuthorityServiceConfig in the JSON
    format.
  """
  if not args.mtls_ca_pools:
    return request
  # Reuse the existing TLS config message when one was already created.
  if not request.cluster.tlsConfig:
    request.cluster.tlsConfig = {}
  # Full-replacement semantics: the trust config is always rebuilt from the
  # flag value.
  request.cluster.tlsConfig.trustConfig = {"casConfigs": []}
  cas_configs = request.cluster.tlsConfig.trustConfig.casConfigs
  for ca_pool_path in args.mtls_ca_pools:
    cas_configs.append(
        encoding.DictToMessage(
            {"caPool": ca_pool_path}, _MESSAGE.CertificateAuthorityServiceConfig
        )
    )
  return request
def ListWithBootstrapAddr(response, _):
  """Synthesizes the bootstrap address onto each cluster in a list response.

  Args:
    response: iterable of clusters to annotate.
    _: list of flags; required by the hook signature but unused.

  Returns:
    The list of clusters, each with its bootstrap address added.
  """
  annotated = []
  for cluster in response:
    annotated.append(SynthesizeBootstrapAddr(cluster, cluster.name))
  return annotated
def DescribeWithBootstrapAddr(response, _):
  """Synthesizes the bootstrap address onto a describe response.

  Args:
    response: the cluster payload to annotate.
    _: list of flags; required by the hook signature but unused.

  Returns:
    The updated cluster with the bootstrap address.
  """
  fully_qualified_name = response.name
  return SynthesizeBootstrapAddr(response, fully_qualified_name)
def SynthesizeBootstrapAddr(response, cluster):
  """Synthesizes the bootstrap address to the response.

  Args:
    response: the payload to update.
    cluster: the fully qualified name of the cluster.

  Returns:
    The updated cluster with the bootstrap address fields added.
  """
  # Parse the fully qualified name (projects/P/locations/L/clusters/C) rather
  # than the resource object so both `describe` and `list` responses work.
  path_parts = cluster.split("/")
  location = path_parts[3]
  cluster_id = path_parts[5]
  project = core.properties.VALUES.core.project.Get()
  # Domain-scoped projects ("example.com:proj") appear as "proj.example.com"
  # inside the DNS name.
  scoped = project.split(":")
  if len(scoped) == 2:
    project = f"{scoped[1]}.{scoped[0]}"
  bootstrap = (
      f"bootstrap.{cluster_id}.{location}.managedkafka.{project}.cloud.goog"
  )
  serializable = core.resource.resource_projector.MakeSerializable(response)
  serializable["bootstrapAddress"] = f"{bootstrap}:{SASL_PORT}"
  # Only clusters with a TLS config get the mTLS listener address.
  if hasattr(response, "tlsConfig") and response.tlsConfig:
    serializable["bootstrapAddressMTLS"] = f"{bootstrap}:{MTLS_PORT}"
  return serializable
def UpdateTopics(_, args, request):
  """Load the topics JSON from the argument to the request.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The updated request with topics.
  """
  parsed_topics = core.yaml.load(args.topics_file)
  topics_message = encoding.DictToMessage(
      parsed_topics, _MESSAGE.ConsumerGroup.TopicsValue
  )
  request.consumerGroup = {"topics": topics_message}
  request.updateMask = "topics"
  return request
def MapConnectParamsToNetworkConfig(_, args, request):
  """Maps subnets and DNS names to the network config API field.

  Used by both create and update requests; on update, the update mask is
  rewritten so the whole networkConfigs message is replaced.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The updated request with networkConfig in the JSON format.
  """
  # If no network config flags are provided (such as in the case of an update),
  # we don't need to create a network config.
  if not args.primary_subnet and not args.dns_name:
    return request
  # Reference the existing GCP config if already created for the request.
  if not request.connectCluster.gcpConfig:
    request.connectCluster.gcpConfig = {}
  # The access config is rebuilt from scratch from the flag values.
  request.connectCluster.gcpConfig.accessConfig = {"networkConfigs": []}
  # Subnets may not be provided during update
  if not args.primary_subnet:
    network_config = {"dns_domain_names": []}
  else:
    network_config = {
        "primary_subnet": args.primary_subnet,
        "additional_subnets": [],
        "dns_domain_names": [],
    }
    # Normalize an unset repeated flag to an empty list before copying.
    if not args.additional_subnet:
      args.additional_subnet = []
    network_config["additional_subnets"] = list(args.additional_subnet)
  if not args.dns_name:
    args.dns_name = []
  network_config["dns_domain_names"] = list(args.dns_name)
  request.connectCluster.gcpConfig.accessConfig.networkConfigs.append(
      encoding.DictToMessage(network_config, _MESSAGE.ConnectNetworkConfig)
  )
  if isinstance(
      # (if the request is an update)
      request,
      _MESSAGE.ManagedkafkaProjectsLocationsConnectClustersPatchRequest,
  ):
    # Collapse each leaf-field mask produced by the declarative flag mappings
    # into the parent networkConfigs mask, since the config is fully replaced.
    request.updateMask = re.sub(
        r"gcpConfig\.accessConfig\.networkConfigs\.dnsDomainNames",
        "gcpConfig.accessConfig.networkConfigs",
        request.updateMask,
    )
    request.updateMask = re.sub(
        r"gcpConfig\.accessConfig\.networkConfigs\.primarySubnet",
        "gcpConfig.accessConfig.networkConfigs",
        request.updateMask,
    )
    request.updateMask = re.sub(
        r"gcpConfig\.accessConfig\.networkConfigs\.additionalSubnets",
        "gcpConfig.accessConfig.networkConfigs",
        request.updateMask,
    )
  return request
def PrepareConnectClusterCreate(_, args, request):
  """Load the config JSON from the argument to the request and build the kafka cluster resource path.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The updated request with the connect cluster config and the fully
    qualified Kafka cluster resource path.
  """
  if args.config_file:
    config = core.yaml.load(args.config_file)
    request.connectCluster.config = encoding.DictToMessage(
        config, _MESSAGE.ConnectCluster.ConfigValue
    )
  project = args.project or core.properties.VALUES.core.project.Get()
  # If the user provides the full path, we don't need to build it.
  kafka_cluster_path = args.kafka_cluster
  if not re.match(r"projects/.+/locations/.+/clusters/.+", args.kafka_cluster):
    # Fall back to the Connect cluster's location (index 3 of its fully
    # qualified name) when --location was not given.
    location = args.location or args.connect_cluster.split("/")[3]
    kafka_cluster_path = (
        f"projects/{project}/locations/{location}/clusters/{args.kafka_cluster}"
    )
  request.connectCluster.kafkaCluster = kafka_cluster_path
  return request
def PrepareConnectClusterUpdate(_, args, request):
  """Map the update flags to the request and update mask.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The updated request with the config and update mask populated from the
    update flags.
  """
  if args.config_file:
    parsed_config = core.yaml.load(args.config_file)
    request.connectCluster.config = encoding.DictToMessage(
        parsed_config, _MESSAGE.ConnectCluster.ConfigValue
    )
    request.updateMask = AppendUpdateMask(request.updateMask, "config")
  # Each clear-* flag simply adds the field path it blanks out to the mask.
  clear_flag_masks = (
      (args.clear_configs, "config"),
      (
          args.clear_dns_names,
          "gcpConfig.accessConfig.networkConfigs.dnsDomainNames",
      ),
      (args.clear_secrets, "gcpConfig.secretPaths"),
      (args.clear_labels, "labels"),
  )
  for flag_value, mask in clear_flag_masks:
    if flag_value:
      request.updateMask = AppendUpdateMask(request.updateMask, mask)
  return request
def ConnectorCreateReadConfigAndTaskRestartPolicy(_, args, request):
  """Fills the connector create request from the config file and restart flags.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The modified create request.
  """
  if not request.connector:
    request.connector = {}
  if args.config_file:
    parsed_config = core.yaml.load(args.config_file)
    request.connector.configs = encoding.DictToMessage(
        parsed_config, _MESSAGE.Connector.ConfigsValue
    )
  # Protobuf Duration fields expect "<seconds>s" strings, and gcloud's own
  # duration formatting is not protobuf-compatible, so the conversion is done
  # by hand here.
  restart_policy = {}
  if args.task_restart_max_backoff:
    # NOTE(review): total_seconds is read as a property (no call) — assumes a
    # gcloud Duration value rather than datetime.timedelta; confirm.
    restart_policy["maximum_backoff"] = (
        str(args.task_restart_max_backoff.total_seconds) + "s"
    )
  if args.task_restart_min_backoff:
    restart_policy["minimum_backoff"] = (
        str(args.task_restart_min_backoff.total_seconds) + "s"
    )
  if args.task_retry_disabled:
    restart_policy["task_retry_disabled"] = str(args.task_retry_disabled)
  if restart_policy:
    request.connector.taskRestartPolicy = encoding.DictToMessage(
        restart_policy,
        _MESSAGE.TaskRetryPolicy,
    )
  return request
def ConnectorUpdateReadConfigAndTaskRestartPolicy(_, args, request):
  """Fills the connector update request from the config file and restart flags.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The modified update request.
  """
  if not request.connector:
    request.connector = {}
  if args.config_file:
    parsed_config = core.yaml.load(args.config_file)
    request.connector.configs = encoding.DictToMessage(
        parsed_config, _MESSAGE.Connector.ConfigsValue
    )
    request.updateMask = AppendUpdateMask(request.updateMask, "configs")
  # Protobuf Duration fields expect "<seconds>s" strings, and gcloud's own
  # duration formatting is not protobuf-compatible, so the conversion is done
  # by hand here.
  restart_policy = {}
  if args.task_restart_max_backoff:
    # NOTE(review): total_seconds is read as a property (no call) — assumes a
    # gcloud Duration value rather than datetime.timedelta; confirm.
    restart_policy["maximum_backoff"] = (
        str(args.task_restart_max_backoff.total_seconds) + "s"
    )
  if args.task_restart_min_backoff:
    restart_policy["minimum_backoff"] = (
        str(args.task_restart_min_backoff.total_seconds) + "s"
    )
  if args.task_retry_disabled:
    restart_policy["task_retry_disabled"] = str(args.task_retry_disabled)
  if restart_policy:
    # NOTE(review): taskRestartPolicy paths are not appended to the update
    # mask here; presumably the declarative framework adds them from the
    # flags — confirm.
    request.connector.taskRestartPolicy = encoding.DictToMessage(
        restart_policy,
        _MESSAGE.TaskRetryPolicy,
    )
  return request
def PatchConfigs(_, args, request):
  """Expands the flat "configs" entry of the update mask into per-key paths.

  The declarative framework adds a single "configs" entry to the update mask
  when --configs is used, but the API expects one `configs["key"]` entry per
  updated key instead.

  Args:
    _: resource parameter required but unused variable.
    args: list of flags.
    request: the payload to return.

  Returns:
    The request with the rewritten update mask.
  """
  if args.configs:
    update_mask = request.updateMask.split(",")
    # Guard the removal: without the aggregate "configs" entry (e.g. an empty
    # mask), list.remove would raise ValueError.
    if "configs" in update_mask:
      update_mask.remove("configs")
    per_key_entries = [f'configs["{key}"]' for key in args.configs]
    request.updateMask = AppendUpdateMask(
        ",".join(update_mask), ",".join(per_key_entries)
    )
  # --clear-configs is guarded with a mutex so it won't conflict with the
  # branch above.
  if args.clear_configs:
    request.updateMask = AppendUpdateMask(request.updateMask, "configs")
  return request
def ParseMode(mode) -> str:
  """Converts a schema registry or subject mode enum to its display string.

  Args:
    mode: The mode enum of the schema registry or subject.

  Returns:
    The mode string; "NONE" for any unrecognized value.
  """
  enum_values = _MESSAGE.SchemaMode.ModeValueValuesEnum
  names_by_value = {
      enum_values.READWRITE: "READWRITE",
      enum_values.READONLY: "READONLY",
      enum_values.IMPORT: "IMPORT",
  }
  return names_by_value.get(mode, "NONE")
def ParseCompatibility(compatibility) -> str:
  """Converts a schema compatibility enum to its display string.

  Args:
    compatibility: The compatibility enum of the schema registry or subject.

  Returns:
    The compatibility string; "NONE" for any unrecognized value.
  """
  enum_values = _MESSAGE.SchemaConfig.CompatibilityValueValuesEnum
  names_by_value = {
      enum_values.BACKWARD: "BACKWARD",
      enum_values.BACKWARD_TRANSITIVE: "BACKWARD_TRANSITIVE",
      enum_values.FORWARD: "FORWARD",
      enum_values.FORWARD_TRANSITIVE: "FORWARD_TRANSITIVE",
      enum_values.FULL: "FULL",
      enum_values.FULL_TRANSITIVE: "FULL_TRANSITIVE",
  }
  return names_by_value.get(compatibility, "NONE")
def ParseProject(project_id=None):
  """Returns the given project id, falling back to the core/project property.

  Args:
    project_id: Optional explicit project id.

  Returns:
    The project id to use; the property lookup raises if it is unset.
  """
  if project_id:
    return project_id
  return properties.VALUES.core.project.Get(required=True)
def DeleteSubjectMode(subject, subject_run_resource, context):
  """Deletes (unsets) the mode override of a subject.

  Called when the user runs
  gcloud managed-kafka schema-registries subject delete ...

  Args:
    subject: The subject of the attribute to delete.
    subject_run_resource: The subject resource path.
    context: The context of the schema registry if provided.

  Raises:
    exceptions.HttpException: If the subject was not found.
  """
  message = apis.GetMessagesModule("managedkafka", "v1")
  client = apis.GetClientInstance("managedkafka", "v1")
  name = f"{subject_run_resource}{SUBJECTS_MODE_RESOURCE_PATH}{subject}"
  log.status.Print("Deleting subject mode for [%s]." % subject)
  # The context-scoped and registry-scoped deletes differ only in the request
  # message type and the service that handles it; everything else is shared.
  if context:
    request = message.ManagedkafkaProjectsLocationsSchemaRegistriesContextsModeDeleteRequest(
        name=name
    )
    service = client.projects_locations_schemaRegistries_contexts_mode
  else:
    request = (
        message.ManagedkafkaProjectsLocationsSchemaRegistriesModeDeleteRequest(
            name=name
        )
    )
    service = client.projects_locations_schemaRegistries_mode
  try:
    service.Delete(request=request)
    log.UpdatedResource(subject, details="mode. It is now unset.")
  except apitools_exceptions.HttpNotFoundError as e:
    api_error = exceptions.HttpException(e, HTTP_ERROR_FORMAT)
    log.status.Print(api_error.message)
    # Surface a subject-specific message only for genuine not-found errors.
    if "Resource not found" in api_error.message:
      raise exceptions.HttpException(
          e, error_format="Subject {} not found.".format(subject)
      )
def DeleteSubjectConfig(subject, schema_registry_resource, context):
  """Deletes (unsets) the config override of a subject.

  Called when the user runs
  gcloud managed-kafka schema-registries subject delete ...

  Args:
    subject: The subject of the attribute to delete.
    schema_registry_resource: The schema registry resource path.
    context: The context of the schema registry if provided.

  Raises:
    exceptions.HttpException: If the subject was not found.
  """
  message = apis.GetMessagesModule("managedkafka", "v1")
  client = apis.GetClientInstance("managedkafka", "v1")
  name = f"{schema_registry_resource}{SUBJECTS_CONFIG_RESOURCE_PATH}{subject}"
  log.status.Print("Deleting subject config for [%s]." % subject)
  # The context-scoped and registry-scoped deletes differ only in the request
  # message type and the service that handles it; everything else is shared.
  if context:
    request = message.ManagedkafkaProjectsLocationsSchemaRegistriesContextsConfigDeleteRequest(
        name=name
    )
    service = client.projects_locations_schemaRegistries_contexts_config
  else:
    request = message.ManagedkafkaProjectsLocationsSchemaRegistriesConfigDeleteRequest(
        name=name
    )
    service = client.projects_locations_schemaRegistries_config
  try:
    service.Delete(request=request)
    log.UpdatedResource(subject, details="config. It is now unset.")
  except apitools_exceptions.HttpNotFoundError as e:
    api_error = exceptions.HttpException(e, HTTP_ERROR_FORMAT)
    log.status.Print(api_error.message)
    # Surface a subject-specific message only for genuine not-found errors.
    if "Resource not found" in api_error.message:
      raise exceptions.HttpException(
          e, error_format="Subject {} not found.".format(subject)
      )
# argparse action for --additional-subnet: warns that the flag is deprecated
# but still accepts and appends values (removed=False keeps it functional).
AdditionalSubnetDeprecationBeforeAppendAction = actions.DeprecationAction(
    flag_name="--additional-subnet",
    warn="The --additional-subnet flag is deprecated and will be removed in a "
    "future version. Managed Kafka Connect clusters can now reach any endpoint "
    "accessible from the primary subnet without the need to define additional "
    "subnets. Please see "
    "https://cloud.google.com/managed-service-for-apache-kafka/docs/connect-cluster/create-connect-cluster#worker-subnet "
    "for more information.",
    removed=False,
    action="append")
# Error format passed to exceptions.HttpException when rendering API errors
# (see DeleteSubjectMode / DeleteSubjectConfig above).
HTTP_ERROR_FORMAT = (
    "ResponseError: code={status_code}, message={status_message}"
)