feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,42 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
# Detailed help shared by the group across all release tracks.
_DETAILED_HELP = {
    'DESCRIPTION': """
Read and manipulate Compute Engine disks.
For more information about disks, see the
[disks documentation](https://cloud.google.com/compute/docs/disks/).
See also: [Disks API](https://cloud.google.com/compute/docs/reference/rest/v1/disks).
""",
}


@base.UniverseCompatible
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
                    base.ReleaseTrack.GA)
class Disks(base.Group):
  """Read and manipulate Compute Engine disks."""


Disks.category = base.DISKS_CATEGORY
Disks.detailed_help = _DETAILED_HELP

View File

@@ -0,0 +1,43 @@
# Declarative spec for `gcloud compute disks add-iam-policy-binding`.
# NOTE(review): leading indentation was lost in this copy, which makes the
# YAML unparseable; the standard declarative-command nesting is restored here.
release_tracks: [ALPHA, BETA, GA]

help_text:
  brief: Add IAM policy binding to a Compute Engine disk.
  description: |
    Add an IAM policy binding to the IAM policy of a Compute Engine disk. One binding consists of a member,
    a role, and an optional condition.
  examples: |
    To add an IAM policy binding for the role of 'roles/compute.securityAdmin' for the user 'test-user@gmail.com'
    with disk 'my-disk' and zone 'ZONE', run:

      $ {command} my-disk --zone=ZONE --member='user:test-user@gmail.com' --role='roles/compute.securityAdmin'

    To add an IAM policy binding which expires at the end of the year 2018 for the role of
    'roles/compute.securityAdmin' and the user 'test-user@gmail.com' with disk 'my-disk' and zone 'ZONE', run:

      $ {command} my-disk --zone=ZONE --member='user:test-user@gmail.com' --role='roles/compute.securityAdmin' --condition='expression=request.time < timestamp("2019-01-01T00:00:00Z"),title=expires_end_of_2018,description=Expires at midnight on 2018-12-31'

    See https://cloud.google.com/iam/docs/managing-policies for details of
    policy role and member types.

request:
  collection: compute.disks
  use_relative_name: false
  api_version: v1
  BETA:
    api_version: beta
  ALPHA:
    api_version: alpha

arguments:
  resource:
    help_text: The disk for which to add IAM policy binding to.
    spec: !REF googlecloudsdk.command_lib.compute.resources:disk

iam:
  set_iam_policy_request_path: zoneSetPolicyRequest
  message_type_overrides:
    policy: Policy
    set_iam_policy_request: ComputeDisksSetIamPolicyRequest
  enable_condition: true
  policy_version: 3
  get_iam_policy_version_path: optionsRequestedPolicyVersion

View File

@@ -0,0 +1,77 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for adding labels to disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import disks_util as api_util
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import labels_doc_helper
from googlecloudsdk.command_lib.compute import labels_flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.command_lib.util.args import labels_util
class AddLabels(base.UpdateCommand):
  """add-labels command for disks."""

  # Populated in Args(); resolves the positional disk argument in Run().
  DISK_ARG = None

  @classmethod
  def Args(cls, parser):
    """Registers the disk resource argument and the --labels flag."""
    cls.DISK_ARG = disks_flags.MakeDiskArg(plural=False)
    cls.DISK_ARG.AddArgument(parser)
    labels_flags.AddArgsForAddLabels(parser)

  def Run(self, args):
    """Adds the requested labels to the disk.

    Args:
      args: argparse.Namespace, the parsed command-line arguments.

    Returns:
      The disk resource unchanged when no label changes are needed,
      otherwise the result of waiting on the SetLabels operation.
    """
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    disk_ref = self.DISK_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=flags.GetDefaultScopeLister(holder.client))

    add_labels = labels_util.GetUpdateLabelsDictFromArgs(args)

    disk_info = api_util.GetDiskInfo(disk_ref, client, messages)
    disk = disk_info.GetDiskResource()

    set_label_req = disk_info.GetSetLabelsRequestMessage()
    labels_update = labels_util.Diff(additions=add_labels).Apply(
        set_label_req.LabelsValue, disk.labels)

    # Every requested label is already present with the requested value:
    # return early before building the SetLabels request, so no wasted
    # message construction and no API call happen on the no-op path.
    if not labels_update.needs_update:
      return disk

    request = disk_info.GetSetDiskLabelsRequestMessage(
        disk, labels_update.GetOrNone())

    service = disk_info.GetService()
    operation = service.SetLabels(request)
    operation_ref = holder.resources.Parse(
        operation.selfLink, collection=disk_info.GetOperationCollection())

    operation_poller = poller.Poller(service)
    return waiter.WaitFor(
        operation_poller, operation_ref,
        'Updating labels of disk [{0}]'.format(
            disk_ref.Name()))


AddLabels.detailed_help = (
    labels_doc_helper.GenerateDetailedHelpForAddLabels('disk'))

View File

@@ -0,0 +1,79 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for adding resource policies to disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import disks_util as api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.command_lib.compute.resource_policies import flags
from googlecloudsdk.command_lib.compute.resource_policies import util
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
                    base.ReleaseTrack.GA)
class DisksAddResourcePolicies(base.UpdateCommand):
  """Add resource policies to a Compute Engine disk."""

  @staticmethod
  def Args(parser):
    """Registers the disk positional and the --resource-policies flag."""
    disks_flags.MakeDiskArg(plural=False).AddArgument(
        parser, operation_type='add resource policies to')
    flags.AddResourcePoliciesArgs(parser, 'added to', 'disk', required=True)

  def Run(self, args):
    """Attaches each named resource policy to the resolved disk."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    disk_ref = disks_flags.MakeDiskArg(
        plural=False).ResolveAsResource(args, holder.resources)
    disk_info = api_util.GetDiskInfo(
        disk_ref, holder.client.apitools_client, holder.client.messages)
    disk_region = disk_info.GetDiskRegionName()

    # Resolve every policy name to its full self-link in the disk's region.
    policy_links = [
        util.ParseResourcePolicy(
            holder.resources,
            policy_name,
            project=disk_ref.project,
            region=disk_region).SelfLink()
        for policy_name in args.resource_policies
    ]
    return disk_info.MakeAddResourcePoliciesRequest(policy_links,
                                                    holder.client)


DisksAddResourcePolicies.detailed_help = {
    'DESCRIPTION':
        """\
Add resource policies to a Compute Engine disk.
*{command}* adds resource policies to a Compute Engine disk. These policies define a schedule for taking snapshots and a retention period for these snapshots.
For information on how to create resource policies, see:
$ gcloud beta compute resource-policies create --help
""",
    'EXAMPLES':
        """\
The following command adds two resource policies to a Compute Engine disk.
$ {command} my-disk --zone=ZONE --resource-policies=policy-1,policy-2
"""
}

View File

@@ -0,0 +1,52 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for manipulating a set of disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
# Detailed help shared by every release track of the `disks bulk` group.
_DESCRIPTION = """
Manipulate multiple Compute Engine disks with single command executions.
"""

DETAILED_HELP = {'DESCRIPTION': _DESCRIPTION}
# GA track of the `disks bulk` command group; subcommands live in sibling
# modules of this package.
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Bulk(base.Group):
  """Manipulate multiple Compute Engine disks with single command executions."""


# All tracks share the same module-level help text.
Bulk.detailed_help = DETAILED_HELP
# Beta track; inherits everything from the GA group and only differs in
# release-track registration.
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class BulkBeta(Bulk):
  """Manipulate multiple Compute Engine disks with single command executions."""


BulkBeta.detailed_help = DETAILED_HELP
# Alpha track; inherits everything from the GA group and only differs in
# release-track registration.
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class BulkAlpha(Bulk):
  """Manipulate multiple Compute Engine disks with single command executions."""


BulkAlpha.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,251 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import filter_rewrite
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
# Detailed help shared by the GA/beta/alpha bulk-create commands.
_BRIEF = """
Create multiple Compute Engine disks.
"""

_DESCRIPTION = """
*{command}* facilitates the creation of multiple Compute Engine
disks with a single command. This includes cloning a set of Async PD
secondary disks with the same consistency group policy.
"""

_EXAMPLES = """
To consistently clone secondary disks with the same consistency group
policy 'projects/example-project/regions/us-central1/resourcePolicies/example-group-policy' to target zone 'us-central1-a', run:
$ {command} --source-consistency-group-policy=projects/example-project/regions/us-central1/resourcePolicies/example-group-policy --zone=us-central1-a
"""

DETAILED_HELP = {
    'brief': _BRIEF,
    'DESCRIPTION': _DESCRIPTION,
    'EXAMPLES': _EXAMPLES,
}
def _AlphaArgs(parser):
  """Registers alpha-only flags: bulk-create args plus the source
  instant-snapshot-group and snapshot-group resource arguments."""
  disks_flags.AddBulkCreateArgsAlpha(parser)
  disks_flags.SOURCE_INSTANT_SNAPSHOT_GROUP_ARG.AddArgument(parser)
  disks_flags.SOURCE_SNAPSHOT_GROUP_ARG.AddArgument(parser)
def _GetOperations(compute_client,
                   project,
                   operation_group_id,
                   scope_name,
                   is_zonal):
  """Requests operations with group id matching the given one.

  Args:
    compute_client: The compute API client wrapper.
    project: str, project ID the operations belong to.
    operation_group_id: str, group id to filter operations by.
    scope_name: str, zone name when is_zonal else region name.
    is_zonal: bool, whether to list zonal or regional operations.

  Returns:
    Tuple of (list response, list of collected errors).
  """
  errors_to_collect = []
  _, operation_filter = filter_rewrite.Rewriter().Rewrite(
      expression='operationGroupId=' + operation_group_id)

  # Pick the right operations service and build its List request; the
  # MakeRequests call itself is identical for both scopes.
  if is_zonal:
    service = compute_client.apitools_client.zoneOperations
    list_request = service.GetRequestType('List')(
        filter=operation_filter, zone=scope_name, project=project)
  else:
    service = compute_client.apitools_client.regionOperations
    list_request = service.GetRequestType('List')(
        filter=operation_filter, region=scope_name, project=project)

  operations_response = compute_client.MakeRequests(
      [(service, 'List', list_request)],
      errors_to_collect=errors_to_collect,
      log_result=False,
      always_return_operation=True,
      no_followup=True)

  return operations_response, errors_to_collect
def _GetResult(compute_client, request, operation_group_id, parent_errors):
  """Requests operations with group id and parses them as an output.

  Counts the per-disk insert operations that finished successfully; the
  count stays 0 whenever the parent request or the listing produced errors.
  """
  # A zonal request carries a `zone` attribute; regional carries `region`.
  is_zonal = hasattr(request, 'zone')
  scope_name = request.zone if is_zonal else request.region
  operations_response, errors = _GetOperations(
      compute_client, request.project, operation_group_id, scope_name,
      is_zonal)

  result = {'operationGroupId': operation_group_id, 'createdDisksCount': 0}
  if parent_errors or errors:
    return result

  result['createdDisksCount'] = sum(
      1 for op in operations_response
      if op.operationType == 'insert'
      and str(op.status) == 'DONE'
      and op.error is None)
  return result
@base.DefaultUniverseOnly
@base.ReleaseTracks(base.ReleaseTrack.GA)
class BulkCreate(base.Command):
  """Create multiple Compute Engine disks."""

  @classmethod
  def Args(cls, parser):
    """Registers the GA bulk-create flags."""
    disks_flags.AddBulkCreateArgs(parser)

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    """Returns the compute API holder for this command's release track."""
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def Run(self, args):
    return self._Run(args)

  def _Run(self, args, support_multiple_source_restore=False):
    """Builds and sends the BulkInsert request, then summarizes the result.

    Args:
      args: argparse.Namespace of parsed flags.
      support_multiple_source_restore: bool, when True (alpha) also resolves
        instant-snapshot-group / snapshot-group sources into the request.

    Returns:
      Dict with 'operationGroupId', 'createdDisksCount' and optionally
      'statusMessage', or None when no response came back.
    """
    compute_holder = self._GetApiHolder()
    client = compute_holder.client

    policy_url = getattr(args, 'source_consistency_group_policy', None)
    project = properties.VALUES.core.project.GetOrFail()
    if not support_multiple_source_restore:
      # GA/beta path: only the consistency-group policy can seed the bulk
      # insert. Zonal and regional requests use different message/service
      # pairs; `request` ends up as a (service, verb, message) tuple.
      if args.IsSpecified('zone'):
        request = client.messages.ComputeDisksBulkInsertRequest(
            project=project,
            zone=args.zone,
            bulkInsertDiskResource=client.messages.BulkInsertDiskResource(
                sourceConsistencyGroupPolicy=policy_url))
        request = (client.apitools_client.disks, 'BulkInsert', request)
      else:
        request = client.messages.ComputeRegionDisksBulkInsertRequest(
            project=project,
            region=args.region,
            bulkInsertDiskResource=client.messages.BulkInsertDiskResource(
                sourceConsistencyGroupPolicy=policy_url))
        request = (client.apitools_client.regionDisks, 'BulkInsert', request)
    else:
      # Alpha path: additionally resolve optional instant-snapshot-group and
      # snapshot-group sources; each may be absent (params left as None).
      isg_ref = disks_flags.SOURCE_INSTANT_SNAPSHOT_GROUP_ARG.ResolveAsResource(
          args,
          compute_holder.resources,
          scope_lister=flags.GetDefaultScopeLister(client),
      )
      if isg_ref is not None:
        isg_params = client.messages.InstantSnapshotGroupParameters(
            sourceInstantSnapshotGroup=isg_ref.SelfLink(),
        )
      else:
        isg_params = None
      ssg_ref = disks_flags.SOURCE_SNAPSHOT_GROUP_ARG.ResolveAsResource(
          args,
          compute_holder.resources,
      )
      if ssg_ref is not None:
        ssg_params = client.messages.SnapshotGroupParameters(
            sourceSnapshotGroup=ssg_ref.SelfLink(),
        )
      else:
        ssg_params = None
      if args.IsSpecified('zone'):
        request = client.messages.ComputeDisksBulkInsertRequest(
            project=project,
            zone=args.zone,
            bulkInsertDiskResource=client.messages.BulkInsertDiskResource(
                sourceConsistencyGroupPolicy=policy_url,
                instantSnapshotGroupParameters=isg_params,
                snapshotGroupParameters=ssg_params))
        request = (client.apitools_client.disks, 'BulkInsert', request)
      else:
        request = client.messages.ComputeRegionDisksBulkInsertRequest(
            project=project,
            region=args.region,
            bulkInsertDiskResource=client.messages.BulkInsertDiskResource(
                sourceConsistencyGroupPolicy=policy_url,
                instantSnapshotGroupParameters=isg_params,
                snapshotGroupParameters=ssg_params))
        request = (client.apitools_client.regionDisks, 'BulkInsert', request)

    errors_to_collect = []
    response = client.MakeRequests([request],
                                   errors_to_collect=errors_to_collect,
                                   no_followup=True,
                                   always_return_operation=True)
    # filters error object so that only error message is persisted
    if errors_to_collect:
      # workaround to change errors_to_collect since tuples are immutable
      for i in range(len(errors_to_collect)):
        error_tuple = errors_to_collect[i]
        error_list = list(error_tuple)
        # When requests are accepted, but workflow server processed it
        # exceptionally, the error message is in message field. However, when
        # requests are rejected, message field doesn't exist, we don't need to
        # extract error message from message field.
        if hasattr(error_list[1], 'message'):
          error_list[1] = error_list[1].message
        errors_to_collect[i] = tuple(error_list)
    # Kept for Epilog() so the first error can be surfaced after output.
    self._errors = errors_to_collect
    if not response:
      return
    operation_group_id = response[0].operationGroupId
    # request[2] is the message part of the (service, verb, message) tuple.
    result = _GetResult(client, request[2], operation_group_id,
                        errors_to_collect)
    if response[0].statusMessage:
      result['statusMessage'] = response[0].statusMessage
    return result

  def Epilog(self, resources_were_displayed):
    """Logs the first collected error, if any, after results are printed."""
    del resources_were_displayed
    if self._errors:
      log.error(self._errors[0][1])


BulkCreate.detailed_help = DETAILED_HELP
# Beta track; same flags and behavior as the GA command.
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class BulkCreateBeta(BulkCreate):
  """Create multiple Compute Engine disks."""

  @classmethod
  def Args(cls, parser):
    disks_flags.AddBulkCreateArgs(parser)

  def Run(self, args):
    return self._Run(args)


BulkCreateBeta.detailed_help = DETAILED_HELP
# Alpha track; adds instant-snapshot-group / snapshot-group source flags and
# enables the multi-source restore path in _Run.
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class BulkCreateAlpha(BulkCreate):
  """Create multiple Compute Engine disks."""

  @classmethod
  def Args(cls, parser):
    _AlphaArgs(parser)

  def Run(self, args):
    return self._Run(args,
                     support_multiple_source_restore=True)


BulkCreateAlpha.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for managing Compute Engine disk configurations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
# Alpha-only command group; its subcommands (e.g. config export) are
# registered from sibling modules in this package.
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Config(base.Group):
  """Manage Compute Engine disk configurations."""

View File

@@ -0,0 +1,38 @@
# Declarative spec for `gcloud alpha compute disks config export`.
# NOTE(review): leading indentation was lost in this copy, which makes the
# YAML unparseable; the standard declarative-command nesting is restored here.
release_tracks: [ALPHA]

command_type: CONFIG_EXPORT

help_text:
  brief: Export the configuration for a Compute Engine disk.
  description: |
    *{command}* exports the configuration for a Compute Engine disk.

    Disk configurations can be exported in
    Kubernetes Resource Model (krm) or Terraform HCL formats. The
    default format is `krm`.

    Specifying `--all` allows you to export the configurations for all
    disks within the project.

    Specifying `--path` allows you to export the configuration(s) to
    a local directory.
  examples: |
    To export the configuration for a disk, run:

      $ {command} my-disk

    To export the configuration for a disk to a file, run:

      $ {command} my-disk --path=/path/to/dir/

    To export the configuration for a disk in Terraform
    HCL format, run:

      $ {command} my-disk --resource-format=terraform

    To export the configurations for all disks within a
    project, run:

      $ {command} --all

arguments:
  resource:
    help_text: Disk to export the configuration for.
    spec: !REF googlecloudsdk.command_lib.compute.resources:disk

View File

@@ -0,0 +1,439 @@
# -*- coding: utf-8 -*- #
# Copyright 2024 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for converting a disk to a different type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import enum
import textwrap
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import disks_util
from googlecloudsdk.api_lib.compute import kms_utils
from googlecloudsdk.api_lib.compute import name_generator
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.command_lib.kms import resource_args as kms_resource_args
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.console import progress_tracker
# Confirmation prompt shown before conversion starts; {0} is the disk name
# and {1} the target disk type.
CONTINUE_WITH_CONVERT_PROMPT = (
    'This command will permanently convert disk {0} to disk type: {1}.'
    ' Please detach the disk from all instances before continuing. Data'
    ' written to the original disk during conversion will not appear on the'
    ' converted disk. Please see'
    ' https://cloud.google.com/compute/docs/disks/automatically-convert-disks'
    ' for more details.'
)
class _ConvertState(enum.Enum):
SNAPSHOT_CREATED = 1
DISK_RESTORED = 2
ORIGINAL_DISK_DELETED = 3
ORIGINAL_DISK_RECREATED = 4
RESTORED_DISK_DELETED = 5
SNAPSHOT_DELETED = 6
@base.DefaultUniverseOnly
@base.ReleaseTracks(base.ReleaseTrack.ALPHA,
                    base.ReleaseTrack.BETA,
                    base.ReleaseTrack.GA)
class Convert(base.RestoreCommand):
  """Convert a Compute Engine Persistent Disk volume to a Hyperdisk volume."""

  # Resolves the positional DISK argument; shared by Args() and Run().
  _DISK_ARG = disks_flags.MakeDiskArg(plural=False)

  @staticmethod
  def Args(parser):
    """Registers the disk positional, --target-disk-type, and KMS key args."""
    Convert._DISK_ARG.AddArgument(parser)
    parser.add_argument(
        '--target-disk-type',
        completer=completers.DiskTypesCompleter,
        required=True,
        help="""Specifies the type of Hyperdisk to convert to, for example,
to convert a Hyperdisk Balanced volume, specify `hyperdisk-balanced`. To get a
list of available disk types, run `gcloud compute disk-types list`.
""",
    )
    kms_resource_args.AddKmsKeyResourceArg(
        parser, 'disk', region_fallthrough=True
    )

  def Run(self, args):
    """Validates input, confirms with the user, and runs the conversion.

    Raises:
      exceptions.InvalidArgumentException: for regional disks or an
        unsupported target type.
      exceptions.ToolException: if the disk is still attached to instances.

    Returns:
      The result of the last conversion operation.
    """
    self.holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    self.client = self.holder.client.apitools_client
    self.messages = self.holder.client.messages
    # Workflow bookkeeping used by _CleanUp() to undo partial progress.
    self.state = None
    self.created_resources = {}
    self.user_messages = ''
    self.disk_ref = self._DISK_ARG.ResolveAsResource(
        args,
        self.holder.resources,
        scope_lister=flags.GetDefaultScopeLister(self.holder.client),
    )
    if self.disk_ref.Collection() == 'compute.regionDisks':
      raise exceptions.InvalidArgumentException(
          '--region',
          'Regional disks are not supported for this command.'
      )
    if args.target_disk_type == 'hyperdisk-ml':
      raise exceptions.InvalidArgumentException(
          '--target-disk-type',
          'Hyperdisk ML is not supported for this command.',
      )
    self.target_disk_type = args.target_disk_type

    # make sure disk is not attached to any instances
    disk_info = disks_util.GetDiskInfo(
        self.disk_ref, self.client, self.messages)
    original_disk = disk_info.GetDiskResource()
    if original_disk.users:
      raise exceptions.ToolException(
          'Disk is attached to instances. Please detach the disk before'
          ' converting.'
      )

    console_io.PromptContinue(
        message=textwrap.dedent(
            CONTINUE_WITH_CONVERT_PROMPT.format(
                self.disk_ref.Name(), self.target_disk_type
            )
        ),
        cancel_on_no=True,
    )

    # Exceptions propagate unchanged; _CleanUp always runs and, based on
    # how far the workflow got, removes temporary resources and composes
    # self.user_messages. (A previous `except Exception as e: raise e`
    # clause here was a no-op and has been removed.)
    try:
      with self._CreateProgressTracker(self.disk_ref.Name()):
        result = self._ConvertDisk(
            self.disk_ref, self.target_disk_type, original_disk.sizeGb,
            disk_encryption_key=kms_utils.MaybeGetKmsKey(
                args, self.messages, None
            ),
        )
    finally:
      self._CleanUp()

    if self.user_messages:
      # States at or beyond ORIGINAL_DISK_RECREATED mean the conversion
      # itself succeeded and only temporary-resource cleanup failed.
      if self.state in [_ConvertState.ORIGINAL_DISK_RECREATED,
                        _ConvertState.RESTORED_DISK_DELETED,
                        _ConvertState.SNAPSHOT_DELETED]:
        log.warning(self.user_messages)
      else:
        log.error(self.user_messages)

    return result

  def _ConvertDisk(
      self, disk_ref, target_disk_type, size_gb, disk_encryption_key=None
  ):
    """Runs the snapshot -> restore -> replace -> cleanup sequence."""
    # create a snapshot of the disk
    self.snapshot_name = self._GenerateName(disk_ref)
    result = self._InsertSnapshot(disk_ref, self.snapshot_name)
    snapshot_ref = self.holder.resources.Parse(
        self.snapshot_name,
        params={'project': disk_ref.project},
        collection='compute.snapshots',
    )
    self._UpdateState(_ConvertState.SNAPSHOT_CREATED, snapshot_ref)

    # create a new disk from the snapshot with target disk type
    self.restored_disk_name = self._GenerateName(disk_ref)
    restored_disk_ref = self.holder.resources.Parse(
        self.restored_disk_name,
        params={'project': disk_ref.project, 'zone': disk_ref.zone},
        collection='compute.disks',
    )
    result = (
        self._RestoreDiskFromSnapshot(
            restored_disk_ref,
            snapshot_ref,
            target_disk_type,
            size_gb,
            disk_encryption_key=disk_encryption_key,
        )
        or result
    )
    self._UpdateState(_ConvertState.DISK_RESTORED, restored_disk_ref)

    # delete the original disk
    result = self._DeleteDisk(disk_ref) or result
    self._UpdateState(_ConvertState.ORIGINAL_DISK_DELETED)

    # recreate the original disk with the new disk as source
    result = (
        self._CloneDisk(
            disk_ref.Name(),
            restored_disk_ref,
            disk_encryption_key=disk_encryption_key,
        )
        or result
    )
    self._UpdateState(_ConvertState.ORIGINAL_DISK_RECREATED)

    # delete the restored disk because the original disk is recreated
    result = self._DeleteDisk(restored_disk_ref) or result
    self._UpdateState(_ConvertState.RESTORED_DISK_DELETED)

    # delete the snapshot because the disk is recreated
    result = self._DeleteSnapshot(snapshot_ref) or result
    self._UpdateState(_ConvertState.SNAPSHOT_DELETED)
    return result

  def _InsertSnapshot(self, disk_ref, snapshot_name):
    """Creates a STANDARD snapshot of the disk and waits for completion."""
    request = self.messages.ComputeSnapshotsInsertRequest(
        project=disk_ref.project,
        snapshot=self.messages.Snapshot(
            name=snapshot_name,
            sourceDisk=disk_ref.SelfLink(),
            snapshotType=self.messages.Snapshot.SnapshotTypeValueValuesEnum.STANDARD,
        ),
    )
    operation = self._MakeRequest(self.client.snapshots, 'Insert', request)
    operation_ref = self.holder.resources.Parse(
        operation.selfLink,
        collection='compute.globalOperations',
    )
    return waiter.WaitFor(
        poller.Poller(self.client.snapshots),
        operation_ref,
        custom_tracker=self._CreateNoOpProgressTracker(),
        max_wait_ms=None,
    )

  def _DeleteSnapshot(self, snapshot_ref):
    """Deletes the given snapshot and waits for completion."""
    request = self.messages.ComputeSnapshotsDeleteRequest(
        snapshot=snapshot_ref.Name(),
        project=snapshot_ref.project,
    )
    operation = self._MakeRequest(self.client.snapshots, 'Delete', request)
    operation_ref = self.holder.resources.Parse(
        operation.selfLink,
        collection='compute.globalOperations',
    )
    return waiter.WaitFor(
        poller.DeletePoller(self.client.snapshots),
        operation_ref,
        custom_tracker=self._CreateNoOpProgressTracker(),
        max_wait_ms=None,
    )

  def _RestoreDiskFromSnapshot(
      self,
      restored_disk_ref,
      snapshot_ref,
      disk_type,
      size_gb,
      disk_encryption_key=None,
  ):
    """Creates a disk of the target type from the snapshot and waits."""
    kwargs = {}
    if disk_encryption_key:
      kwargs['diskEncryptionKey'] = disk_encryption_key
    disk = self.messages.Disk(
        name=restored_disk_ref.Name(),
        type=disks_util.GetDiskTypeUri(
            disk_type, restored_disk_ref, self.holder
        ),
        sizeGb=size_gb,
        sourceSnapshot=snapshot_ref.SelfLink(),
        **kwargs,
    )
    request = self.messages.ComputeDisksInsertRequest(
        disk=disk,
        project=restored_disk_ref.project,
        zone=restored_disk_ref.zone,
    )
    operation = self._MakeRequest(self.client.disks, 'Insert', request)
    operation_ref = self.holder.resources.Parse(
        operation.selfLink,
        collection='compute.zoneOperations',
    )
    return waiter.WaitFor(
        poller.Poller(self.client.disks),
        operation_ref,
        custom_tracker=self._CreateNoOpProgressTracker(),
        max_wait_ms=None,
    )

  def _GenerateName(self, resource_ref):
    """Returns a random-prefixed name derived from the resource, <=64 chars."""
    return f'{name_generator.GenerateRandomName()}-{resource_ref.Name()}'[:64]

  def _DeleteDisk(self, disk_ref):
    """Deletes the given zonal disk and waits for completion."""
    request = self.messages.ComputeDisksDeleteRequest(
        disk=disk_ref.Name(),
        project=disk_ref.project,
        zone=disk_ref.zone,
    )
    operation = self._MakeRequest(self.client.disks, 'Delete', request)
    operation_ref = self.holder.resources.Parse(
        operation.selfLink,
        collection='compute.zoneOperations',
    )
    return waiter.WaitFor(
        poller.DeletePoller(self.client.disks),
        operation_ref,
        custom_tracker=self._CreateNoOpProgressTracker(),
        max_wait_ms=None,
    )

  def _CloneDisk(
      self, original_disk_name, restored_disk_ref, disk_encryption_key=None
  ):
    """Recreates the original disk using the restored disk as source."""
    kwargs = {}
    if disk_encryption_key:
      kwargs['diskEncryptionKey'] = disk_encryption_key
    disk = self.messages.Disk(
        name=original_disk_name,
        sourceDisk=restored_disk_ref.SelfLink(),
        **kwargs,
    )
    request = self.messages.ComputeDisksInsertRequest(
        disk=disk,
        project=restored_disk_ref.project,
        zone=restored_disk_ref.zone,
    )
    operation = self._MakeRequest(self.client.disks, 'Insert', request)
    operation_ref = self.holder.resources.Parse(
        operation.selfLink,
        collection='compute.zoneOperations',
    )
    operation_poller = poller.Poller(self.client.disks)
    return waiter.WaitFor(
        operation_poller,
        operation_ref,
        custom_tracker=self._CreateNoOpProgressTracker(),
        max_wait_ms=None,
    )

  def _MakeRequest(self, resource_client, method, request):
    """Issues a single async request; raises on any collected error."""
    errors_to_collect = []
    responses = self.holder.client.AsyncRequests(
        [(resource_client, method, request)], errors_to_collect
    )
    if errors_to_collect:
      raise core_exceptions.MultiError(errors_to_collect)
    if not responses:
      raise core_exceptions.InternalError('No response received')
    return responses[0]

  def _UpdateState(self, state, created_resource=None):
    """Records workflow progress and any resource created at this step."""
    self.state = state
    if created_resource:
      self.created_resources[state] = created_resource

  def _CleanUp(self):
    """Removes temporary resources and composes a user-facing message
    appropriate for the last state the workflow reached."""
    if not self.state:
      self.user_messages = (
          'Creating snapshot failed.' + self._BuildCleanupSnapshotMessage()
      )
      return
    if self.state == _ConvertState.SNAPSHOT_CREATED:
      # restore disk failed
      self.user_messages = (
          f'Creating disk from snapshot {self.snapshot_name} failed. '
          + self._BuildCleanupSnapshotMessage()
      )
      self._DeleteSnapshot(
          self.created_resources[_ConvertState.SNAPSHOT_CREATED]
      )
    elif self.state == _ConvertState.DISK_RESTORED:
      # delete original disk request failed
      self.user_messages = (
          f'Deleting original disk {self.disk_ref.Name()} failed. '
          + self._BuildCleanupDiskMessage()
          + self._BuildCleanupSnapshotMessage()
      )
      self._DeleteDisk(self.created_resources[_ConvertState.DISK_RESTORED])
      self._DeleteSnapshot(
          self.created_resources[_ConvertState.SNAPSHOT_CREATED]
      )
    elif self.state == _ConvertState.ORIGINAL_DISK_DELETED:
      # recreate original disk failed
      self.user_messages = (
          f'Recreating original disk {self.disk_ref.Name()} failed. Please run'
          ' `gcloud compute disks create'
          f' {self.disk_ref.Name()} --zone={self.disk_ref.zone} --type={self.target_disk_type} --source-disk={self.restored_disk_name}`'
          ' to recreate the original disk. Please run `gcloud compute'
          f' snapshots delete {self.snapshot_name}` to delete the temporary'
          ' snapshot. Please run `gcloud compute disks delete'
          f' {self.restored_disk_name} --zone={self.disk_ref.zone}` to delete'
          ' the temporary disk.'
      )
    elif self.state == _ConvertState.ORIGINAL_DISK_RECREATED:
      # delete restored disk failed
      self.user_messages = (
          'Conversion completed successfully, Deleting temporary disk'
          f' {self.restored_disk_name} failed.'
          + self._BuildCleanupDiskMessage()
          + self._BuildCleanupSnapshotMessage()
      )
    elif self.state == _ConvertState.RESTORED_DISK_DELETED:
      self.user_messages = (
          'Conversion completed successfully. Deleting temporary snapshot'
          f' {self.snapshot_name} failed.'
          + self._BuildCleanupSnapshotMessage()
      )

  def _CreateProgressTracker(self, disk_name):
    """Returns the tracker shown while the whole conversion runs."""
    return progress_tracker.ProgressTracker(
        message=f'Converting disk {disk_name}...',
        aborted_message='Conversion aborted.',
    )

  def _CreateNoOpProgressTracker(self):
    """Returns a silent tracker for the per-step waiter calls."""
    return progress_tracker.NoOpProgressTracker(
        interruptable=True, aborted_message=''
    )

  def _BuildCleanupSnapshotMessage(self):
    return (
        f' Please run `gcloud compute snapshots delete {self.snapshot_name}` to'
        ' delete the temporary snapshot if it still exists.'
    )

  def _BuildCleanupDiskMessage(self):
    return (
        ' Please run `gcloud compute disks delete'
        f' {self.restored_disk_name} --zone={self.disk_ref.zone}` to delete the'
        ' temporary disk if it still exists.'
    )


Convert.detailed_help = {
    'DESCRIPTION': """\
Convert Compute Engine Persistent Disk volumes to Hyperdisk volumes.
*{command}* converts a Compute Engine Persistent Disk volume to a Hyperdisk volume. For a comprehensive guide, refer to: https://cloud.google.com/sdk/gcloud/reference/compute/disks/convert.
""",
    'EXAMPLES': """\
The following command converts a Persistent Disk volume to a Hyperdisk Balanced volume:
$ {command} my-disk-1 --zone=ZONE --target-disk-type=hyperdisk-balanced
""",
}

View File

@@ -0,0 +1,931 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating disks."""
from __future__ import absolute_import
from __future__ import annotations
from __future__ import division
from __future__ import unicode_literals
import argparse
import re
import textwrap
from typing import Any, Optional
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute import csek_utils
from googlecloudsdk.api_lib.compute import disks_util
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import kms_utils
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute import zone_utils
from googlecloudsdk.api_lib.compute.regions import utils as region_utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.disks import create
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.command_lib.compute.resource_policies import flags as resource_flags
from googlecloudsdk.command_lib.compute.resource_policies import util as resource_util
from googlecloudsdk.command_lib.kms import resource_args as kms_resource_args
from googlecloudsdk.command_lib.util.apis import arg_utils
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
import six
DETAILED_HELP = {
'brief':
'Create Compute Engine persistent disks',
'DESCRIPTION':
"""\
*{command}* creates one or more Compute Engine
persistent disks. When creating virtual machine instances,
disks can be attached to the instances through the
`gcloud compute instances create` command. Disks can also be
attached to instances that are already running using
`gcloud compute instances attach-disk`.
Disks are zonal resources, so they reside in a particular zone
for their entire lifetime. The contents of a disk can be moved
to a different zone by snapshotting the disk (using
`gcloud compute disks snapshot`) and creating a new disk using
`--source-snapshot` in the desired zone. The contents of a
disk can also be moved across project or zone by creating an
image (using `gcloud compute images create`) and creating a
new disk using `--image` in the desired project and/or
zone.
For a comprehensive guide, including details on minimum and maximum
disk size, refer to:
https://cloud.google.com/compute/docs/disks
""",
'EXAMPLES':
"""\
When creating disks, be sure to include the `--zone` option. To create
disks 'my-disk-1' and 'my-disk-2' in zone us-east1-a:
$ {command} my-disk-1 my-disk-2 --zone=us-east1-a
""",
}
def _SourceArgs(
    parser,
    support_source_snapshot_region,
    source_instant_snapshot_enabled=False,
):
  """Add mutually exclusive source args.

  Args:
    parser: The argparse parser to register flags on.
    support_source_snapshot_region: bool, if True the snapshot source flag
      variant that supports a scope (regional snapshots) is registered.
    source_instant_snapshot_enabled: bool, if True instant snapshots may be
      used as a disk source.
  """
  # All disk-source flavors (image, image family, snapshot, instant
  # snapshot, source disk, async primary disk) are mutually exclusive.
  source_parent_group = parser.add_group()
  source_group = source_parent_group.add_mutually_exclusive_group()

  def AddImageHelp():
    """Returns detailed help for `--image` argument."""
    template = """\
    An image to apply to the disks being created. When using
    this option, the size of the disks must be at least as large as
    the image size. Use ``--size'' to adjust the size of the disks.

    This flag is mutually exclusive with ``--source-snapshot'' and
    ``--image-family''.
    """
    return template

  source_group.add_argument('--image', help=AddImageHelp)

  image_utils.AddImageProjectFlag(source_parent_group)

  source_group.add_argument(
      '--image-family',
      help="""\
      The image family for the operating system that the boot disk will be
      initialized with. Compute Engine offers multiple Linux
      distributions, some of which are available as both regular and
      Shielded VM images. When a family is specified instead of an image,
      the latest non-deprecated image associated with that family is
      used. It is best practice to use --image-family when the latest
      version of an image is needed.
      """)
  image_utils.AddImageFamilyScopeFlag(source_parent_group)

  # Alpha/beta register the snapshot flag variant that accepts a scope.
  if support_source_snapshot_region:
    disks_flags.SOURCE_SNAPSHOT_ARG_ALPHA.AddArgument(
        parser, mutex_group=source_group
    )
  else:
    disks_flags.SOURCE_SNAPSHOT_ARG.AddArgument(source_group)
  if source_instant_snapshot_enabled:
    disks_flags.AddSourceInstantSnapshotProject(parser)
    disks_flags.SOURCE_INSTANT_SNAPSHOT_ARG.AddArgument(source_group)
  disks_flags.SOURCE_DISK_ARG.AddArgument(parser, mutex_group=source_group)
  disks_flags.ASYNC_PRIMARY_DISK_ARG.AddArgument(
      parser, mutex_group=source_group
  )
  disks_flags.AddPrimaryDiskProject(parser)
  disks_flags.AddLocationHintArg(parser)
def _CommonArgs(
    messages,
    parser,
    include_physical_block_size_support=False,
    vss_erase_enabled=False,
    support_pd_interface=False,
    support_user_licenses=False,
    support_source_snapshot_region=False,
    support_gmi_restore=False,
    source_instant_snapshot_enabled=False,
):
  """Add arguments used for parsing in all command tracks.

  Args:
    messages: The compute API messages module, used to build enum flags.
    parser: The argparse parser to register flags on.
    include_physical_block_size_support: bool, register --physical-block-size.
    vss_erase_enabled: bool, register --erase-windows-vss-signature.
    support_pd_interface: bool, register --interface.
    support_user_licenses: bool, register --user-licenses.
    support_source_snapshot_region: bool, forwarded to _SourceArgs.
    support_gmi_restore: bool, register machine-image restore flags.
    source_instant_snapshot_enabled: bool, forwarded to _SourceArgs.
  """
  Create.disks_arg.AddArgument(parser, operation_type='create')
  parser.add_argument(
      '--description',
      help='An optional, textual description for the disks being created.')

  parser.add_argument(
      '--size',
      type=arg_parsers.BinarySize(
          lower_bound='1GB',
          suggested_binary_size_scales=['GB', 'GiB', 'TB', 'TiB', 'PiB', 'PB']),
      help="""\
      Size of the disks. The value must be a whole
      number followed by a size unit of ``GB'' for gigabyte, or ``TB''
      for terabyte. If no size unit is specified, GB is
      assumed. For example, ``10GB'' will produce 10 gigabyte
      disks. Disk size must be a multiple of 1 GB. If disk size is
      not specified, the default size of {}GB for pd-standard disks, {}GB for
      pd-balanced disks, {}GB for pd-ssd disks, and {}GB for pd-extreme will
      be used. For details about disk size limits, refer to:
      https://cloud.google.com/compute/docs/disks
      """.format(
          constants.DEFAULT_DISK_SIZE_GB_MAP[constants.DISK_TYPE_PD_STANDARD],
          constants.DEFAULT_DISK_SIZE_GB_MAP[constants.DISK_TYPE_PD_BALANCED],
          constants.DEFAULT_DISK_SIZE_GB_MAP[constants.DISK_TYPE_PD_SSD],
          constants.DEFAULT_DISK_SIZE_GB_MAP[constants.DISK_TYPE_PD_EXTREME]))

  parser.add_argument(
      '--type',
      completer=completers.DiskTypesCompleter,
      help="""\
      Specifies the type of disk to create. To get a
      list of available disk types, run `gcloud compute disk-types list`.
      The default disk type is pd-standard.
      """)

  # Alpha/beta-only: choice of attachment interface.
  if support_pd_interface:
    parser.add_argument(
        '--interface',
        help="""\
        Specifies the disk interface to use for attaching this disk. Valid values
        are `SCSI` and `NVME`. The default is `SCSI`.
        """)

  parser.display_info.AddFormat(
      'table(name, zone.basename(), sizeGb, type.basename(), status)')

  parser.add_argument(
      '--licenses',
      type=arg_parsers.ArgList(),
      metavar='LICENSE',
      help=(
          'A list of URIs to license resources. The provided licenses will '
          'be added onto the created disks to indicate the licensing and '
          'billing policies.'
      ),
  )

  _SourceArgs(
      parser, support_source_snapshot_region, source_instant_snapshot_enabled
  )

  disks_flags.AddProvisionedIopsFlag(parser, arg_parsers)
  disks_flags.AddArchitectureFlag(parser, messages)
  disks_flags.AddProvisionedThroughputFlag(parser, arg_parsers)
  disks_flags.STORAGE_POOL_ARG.AddArgument(parser)
  disks_flags.AddAccessModeFlag(parser, messages)

  # Alpha-only: restore a single disk from a machine image.
  if support_gmi_restore:
    disks_flags.AddSourceMachineImageNameArg(parser)
    disks_flags.AddSourceMachineImageDiskDeviceNameArg(parser)

  if support_user_licenses:
    parser.add_argument(
        '--user-licenses',
        type=arg_parsers.ArgList(),
        metavar='LICENSE',
        help=('List of URIs to license resources. User-provided licenses '
              'can be edited after disk is created.'))

  csek_utils.AddCsekKeyArgs(parser)
  labels_util.AddCreateLabelsFlags(parser)

  if include_physical_block_size_support:
    parser.add_argument(
        '--physical-block-size',
        choices=['4096', '16384'],
        default='4096',
        help="""\
        Physical block size of the persistent disk in bytes.
        Valid values are 4096(default) and 16384.
        """)
  if vss_erase_enabled:
    flags.AddEraseVssSignature(parser, resource='a source snapshot')

  resource_flags.AddResourcePoliciesArgs(parser, 'added to', 'disk')
def _AddReplicaZonesArg(parser):
  """Adds the --replica-zones flag, required when creating regional disks."""
  parser.add_argument(
      '--replica-zones',
      type=arg_parsers.ArgList(min_length=2, max_length=2),
      metavar='ZONE',
      help=('A comma-separated list of exactly 2 zones that a regional disk '
            'will be replicated to. Required when creating regional disk. '
            'The zones must be in the same region as specified in the '
            '`--region` flag. See available zones with '
            '`gcloud compute zones list`.'))
def _ParseGuestOsFeaturesToMessages(args, client_messages):
"""Parse GuestOS features."""
guest_os_feature_messages = []
if args.guest_os_features:
for feature in args.guest_os_features:
gf_type = client_messages.GuestOsFeature.TypeValueValuesEnum(feature)
guest_os_feature = client_messages.GuestOsFeature()
guest_os_feature.type = gf_type
guest_os_feature_messages.append(guest_os_feature)
return guest_os_feature_messages
def _GetSourceInstantSnapshotProjectFromPath(
source_instant_snapshot: Optional[str],
) -> Optional[str]:
"""Gets the source instant-snapshot project from the path."""
if not source_instant_snapshot:
return None
match = re.search(r'projects/([^/]+)', source_instant_snapshot)
return match.group(1) if match else None
def _GetInstantSnapshotReference(
    args: argparse.Namespace, compute_holder: Any, source_project: Optional[str]
) -> Optional[str]:
  """Resolves the instant snapshot reference to a URI."""
  ref = disks_flags.SOURCE_INSTANT_SNAPSHOT_ARG.ResolveAsResource(
      args,
      compute_holder.resources,
      source_project=source_project,
  )
  if not ref:
    return None
  return ref.SelfLink()
def _GetSourceInstantSnapshotUriWithSourceProjectSpecified(
    args: argparse.Namespace, compute_holder: Any
) -> Optional[str]:
  """Gets the URI when source_instant_snapshot_project is specified.

  Args:
    args: The argument namespace.
    compute_holder: base_classes.ComputeApiHolder instance.

  Returns:
    The resolved instant-snapshot URI, or None if it cannot be resolved.

  Raises:
    exceptions.BadArgumentException: If the project embedded in the
      --source-instant-snapshot URI contradicts
      --source-instant-snapshot-project.
  """
  actual_source_project = getattr(args, 'source_instant_snapshot_project', None)
  expected_source_project = _GetSourceInstantSnapshotProjectFromPath(
      args.source_instant_snapshot
  )
  # A project embedded in the URI must agree with the explicit flag.
  if expected_source_project and actual_source_project != expected_source_project:
    # Fixed flag spelling: report the dashed flag name, matching the actual
    # command-line flag (previously used underscores).
    raise exceptions.BadArgumentException(
        '--source-instant-snapshot-project',
        'The project specified in --source-instant-snapshot-project does'
        ' not match the project in the --source-instant-snapshot URI.'
        ' Please ensure these values are consistent.',
    )
  # Remaining cases (projects agree, or the URI has no project segment) all
  # resolve against the explicitly provided project.
  return _GetInstantSnapshotReference(
      args, compute_holder, actual_source_project
  )
def _GetSourceInstantSnapshotUri(
    args: argparse.Namespace, compute_holder: Any
) -> Optional[str]:
  """Determines the source instant snapshot URI."""
  if not args.source_instant_snapshot:
    return None
  # Without an explicit source project, resolve the reference directly.
  if not getattr(args, 'source_instant_snapshot_project', None):
    return _GetInstantSnapshotReference(args, compute_holder, None)
  return _GetSourceInstantSnapshotUriWithSourceProjectSpecified(
      args, compute_holder
  )
@base.DefaultUniverseOnly
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.Command):
  """Create Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    """Registers the GA-track flags for `disks create`."""
    messages = cls._GetApiHolder(no_http=True).client.messages
    Create.disks_arg = disks_flags.MakeDiskArg(plural=True)
    _CommonArgs(messages, parser)
    image_utils.AddGuestOsFeaturesArg(parser, messages)
    _AddReplicaZonesArg(parser)
    kms_resource_args.AddKmsKeyResourceArg(
        parser, 'disk', region_fallthrough=True)
    disks_flags.AddEnableConfidentialComputeFlag(parser)

  def ParseLicenses(self, args):
    """Parse license.

    Subclasses may override it to customize parsing.

    Args:
      args: The argument namespace

    Returns:
      List of licenses.
    """
    if args.licenses:
      return args.licenses
    return []

  def ValidateAndParseDiskRefs(self, args, compute_holder):
    """Validates flags and parses the disk references to be created."""
    return _ValidateAndParseDiskRefsRegionalReplica(args, compute_holder)

  def GetFromImage(self, args):
    """Returns the image or image family the disk is sourced from, if any."""
    return args.image or args.image_family

  def GetFromSourceInstantSnapshot(self, args):
    """Returns the instant snapshot the disk is sourced from, if any."""
    return args.source_instant_snapshot

  def GetFromSourceMachineImage(self, args, support_gmi_restore):
    """Returns the source machine image when GMI restore is supported."""
    if support_gmi_restore:
      return getattr(args, 'source_machine_image', None)
    return None

  def GetDiskSizeGb(self, args, from_image, support_gmi_restore):
    """Determines the disk size in GB, or None to let the backend choose.

    Args:
      args: The argument namespace.
      from_image: The source image/image-family, if any.
      support_gmi_restore: bool, whether machine-image restore is supported.

    Returns:
      int size in GB, or None when the size can be derived from a source or
      should be decided by the backend.

    Raises:
      exceptions.InvalidArgumentException: If a legacy disk type is given a
        size below 10 GB.
    """
    size_gb = utils.BytesToGb(args.size)
    if size_gb:
      # Legacy disk type cannot be smaller than 10 GB and it is enforced in
      # gcloud.
      if args.type in constants.LEGACY_DISK_TYPE_LIST and size_gb < 10:
        # Typo fix: "reveived" -> "received".
        raise exceptions.InvalidArgumentException(
            '--size',
            'Value must be greater than or equal to 10 GB; received {0} GB'
            .format(size_gb),
        )
      # if disk size is given, use it.
      pass
    elif (
        args.source_snapshot
        or from_image
        or args.source_disk
        or self.GetFromSourceInstantSnapshot(args)
        or self.GetFromSourceMachineImage(args, support_gmi_restore)
    ):
      # if source is a snapshot/image/disk/instant-snapshot, it is ok not to
      # set size_gb since disk size can be obtained from the source.
      pass
    elif args.type in constants.DEFAULT_DISK_SIZE_GB_MAP:
      # Get default disk size from disk_type.
      size_gb = constants.DEFAULT_DISK_SIZE_GB_MAP[args.type]
    elif args.type:
      # If disk type is specified, then leaves it to backend to decide the size.
      pass
    else:
      # If disk type is unspecified or unknown, we use the default size of
      # pd-standard.
      size_gb = constants.DEFAULT_DISK_SIZE_GB_MAP[
          constants.DISK_TYPE_PD_STANDARD]

    utils.WarnIfDiskSizeIsTooSmall(size_gb, args.type)
    return size_gb

  def GetProjectToSourceImageDict(self, args, disk_refs, compute_holder,
                                  from_image):
    """Maps each project to its resolved source image URI (or None)."""
    project_to_source_image = {}

    image_expander = image_utils.ImageExpander(compute_holder.client,
                                               compute_holder.resources)

    for disk_ref in disk_refs:
      if from_image:
        # Resolve the image once per project; disks in the same project
        # share the resolved URI.
        if disk_ref.project not in project_to_source_image:
          source_image_uri, _ = image_expander.ExpandImageFlag(
              user_project=disk_ref.project,
              image=args.image,
              image_family=args.image_family,
              image_project=args.image_project,
              return_image_resource=False,
              image_family_scope=args.image_family_scope,
              support_image_family_scope=True)
          project_to_source_image[disk_ref.project] = argparse.Namespace()
          project_to_source_image[disk_ref.project].uri = source_image_uri
      else:
        project_to_source_image[disk_ref.project] = argparse.Namespace()
        project_to_source_image[disk_ref.project].uri = None
    return project_to_source_image

  def WarnAboutScopeDeprecationsAndMaintenance(self, disk_refs, client):
    """Warns if target zones/regions are deprecated or have maintenance."""
    # Check if the zone is deprecated or has maintenance coming.
    zone_resource_fetcher = zone_utils.ZoneResourceFetcher(client)
    zone_resource_fetcher.WarnForZonalCreation(
        (ref for ref in disk_refs if ref.Collection() == 'compute.disks'))
    # Check if the region is deprecated or has maintenance coming.
    region_resource_fetcher = region_utils.RegionResourceFetcher(client)
    region_resource_fetcher.WarnForRegionalCreation(
        (ref for ref in disk_refs if ref.Collection() == 'compute.regionDisks'))

  def GetSnapshotUri(
      self, args, compute_holder, support_source_snapshot_region
  ):
    """Resolves --source-snapshot to a URI, or None when not given."""
    if not support_source_snapshot_region:
      snapshot_ref = disks_flags.SOURCE_SNAPSHOT_ARG.ResolveAsResource(
          args,
          compute_holder.resources,
      )
    else:
      snapshot_ref = disks_flags.SOURCE_SNAPSHOT_ARG_ALPHA.ResolveAsResource(
          args,
          compute_holder.resources,
          scope_lister=flags.GetDefaultScopeLister(compute_holder.client),
          default_scope=compute_scope.ScopeEnum.GLOBAL,
      )
    if snapshot_ref:
      return snapshot_ref.SelfLink()
    return None

  def GetSourceDiskUri(self, args, disk_ref, compute_holder):
    """Resolves --source-disk to a URI, matching the new disk's scope."""
    source_disk_ref = None
    if args.source_disk:
      if args.source_disk_zone:
        source_disk_ref = disks_flags.SOURCE_DISK_ARG.ResolveAsResource(
            args, compute_holder.resources)
      else:
        # Default the source-disk scope to the scope of the disk being
        # created (zonal vs regional).
        if disk_ref.Collection() == 'compute.disks':
          source_disk_ref = disks_flags.SOURCE_DISK_ARG.ResolveAsResource(
              args,
              compute_holder.resources,
              default_scope=compute_scope.ScopeEnum.ZONE)
        elif disk_ref.Collection() == 'compute.regionDisks':
          source_disk_ref = disks_flags.SOURCE_DISK_ARG.ResolveAsResource(
              args,
              compute_holder.resources,
              default_scope=compute_scope.ScopeEnum.REGION)
    if source_disk_ref:
      return source_disk_ref.SelfLink()
    return None

  def GetAsyncPrimaryDiskUri(self, args, compute_holder):
    """Resolves --primary-disk to a URI for async replication, or None."""
    primary_disk_ref = None
    if args.primary_disk:
      primary_disk_project = getattr(args, 'primary_disk_project', None)
      primary_disk_ref = disks_flags.ASYNC_PRIMARY_DISK_ARG.ResolveAsResource(
          args, compute_holder.resources, source_project=primary_disk_project
      )
    if primary_disk_ref:
      return primary_disk_ref.SelfLink()
    return None

  def GetStoragePoolUri(self, args, compute_holder):
    """Resolves --storage-pool to a URI, or None when not given."""
    if args.storage_pool:
      storage_pool_ref = disks_flags.STORAGE_POOL_ARG.ResolveAsResource(
          args,
          compute_holder.resources,
          default_scope=compute_scope.ScopeEnum.ZONE,
      )
      if storage_pool_ref:
        return storage_pool_ref.SelfLink()
    return None

  def GetLabels(self, args, client):
    """Builds the Disk.LabelsValue message from --labels, or None."""
    labels = None
    args_labels = getattr(args, 'labels', None)
    if args_labels:
      labels = client.messages.Disk.LabelsValue(additionalProperties=[
          client.messages.Disk.LabelsValue.AdditionalProperty(
              key=key, value=value)
          for key, value in sorted(six.iteritems(args.labels))
      ])
    return labels

  def GetReplicaZones(self, args, compute_holder, disk_ref):
    """Returns the self-links of the zones a regional disk replicates to."""
    result = []
    for zone in args.replica_zones:
      zone_ref = compute_holder.resources.Parse(
          zone,
          collection='compute.zones',
          params={'project': disk_ref.project})
      result.append(zone_ref.SelfLink())
    return result

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    """Returns the compute API holder for this command's release track."""
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def Run(self, args):
    return self._Run(args, supports_kms_keys=True)

  def _Run(
      self,
      args,
      supports_kms_keys=False,
      supports_physical_block=False,
      support_multiwriter_disk=False,
      support_vss_erase=False,
      support_pd_interface=False,
      support_user_licenses=False,
      support_enable_confidential_compute=True,
      support_source_snapshot_region=False,
      support_gmi_restore=False,
  ):
    """Builds and issues the insert requests for all requested disks.

    The support_* switches gate alpha/beta-only features so the GA class can
    share this implementation with its subclasses.
    """
    compute_holder = self._GetApiHolder()
    client = compute_holder.client

    # Used by Epilog to decide whether to print the formatting reminder;
    # disks created from a source already contain a filesystem.
    # (Attribute name keeps its historical spelling.)
    self.show_unformated_message = not (
        args.IsSpecified('image')
        or args.IsSpecified('image_family')
        or args.IsSpecified('source_snapshot')
        or args.IsSpecified('source_disk')
    )
    self.show_unformated_message = self.show_unformated_message and not (
        args.IsSpecified('source_instant_snapshot')
    )

    disk_refs = self.ValidateAndParseDiskRefs(args, compute_holder)
    from_image = self.GetFromImage(args)
    size_gb = self.GetDiskSizeGb(args, from_image, support_gmi_restore)
    self.WarnAboutScopeDeprecationsAndMaintenance(disk_refs, client)
    project_to_source_image = self.GetProjectToSourceImageDict(
        args, disk_refs, compute_holder, from_image
    )
    snapshot_uri = self.GetSnapshotUri(
        args, compute_holder, support_source_snapshot_region
    )

    labels = self.GetLabels(args, client)

    # Look up CSEK keys for each project's source image and the snapshot.
    csek_keys = csek_utils.CsekKeyStore.FromArgs(args, True)

    for project in project_to_source_image:
      source_image_uri = project_to_source_image[project].uri
      project_to_source_image[project].keys = (
          csek_utils.MaybeLookupKeyMessagesByUri(
              csek_keys, compute_holder.resources,
              [source_image_uri, snapshot_uri], client.apitools_client))

    guest_os_feature_messages = _ParseGuestOsFeaturesToMessages(
        args, client.messages)

    requests = []
    for disk_ref in disk_refs:
      type_uri = disks_util.GetDiskTypeUri(args.type, disk_ref, compute_holder)

      # Propagate customer-supplied encryption keys, if any.
      kwargs = {}
      if csek_keys:
        disk_key_or_none = csek_keys.LookupKey(disk_ref,
                                               args.require_csek_key_create)
        disk_key_message_or_none = csek_utils.MaybeToMessage(
            disk_key_or_none, client.apitools_client)
        kwargs['diskEncryptionKey'] = disk_key_message_or_none
        kwargs['sourceImageEncryptionKey'] = (
            project_to_source_image[disk_ref.project].keys[0])
        kwargs['sourceSnapshotEncryptionKey'] = (
            project_to_source_image[disk_ref.project].keys[1])
      if labels:
        kwargs['labels'] = labels

      if supports_kms_keys:
        kwargs['diskEncryptionKey'] = kms_utils.MaybeGetKmsKey(
            args, client.messages, kwargs.get('diskEncryptionKey', None))

      # Those features are only exposed in alpha/beta, it would be nice to have
      # code supporting them only in alpha and beta versions of the command.
      # TODO(b/65161039): Stop checking release path in the middle of GA code.
      if support_pd_interface and args.interface:
        kwargs['interface'] = arg_utils.ChoiceToEnum(
            args.interface, client.messages.Disk.InterfaceValueValuesEnum)
      # end of alpha/beta features.

      if args.primary_disk:
        primary_disk = client.messages.DiskAsyncReplication()
        primary_disk.disk = self.GetAsyncPrimaryDiskUri(args, compute_holder)
        kwargs['asyncPrimaryDisk'] = primary_disk

      if supports_physical_block and args.IsSpecified('physical_block_size'):
        physical_block_size_bytes = int(args.physical_block_size)
      else:
        physical_block_size_bytes = None

      # Resource policies are regional; derive the region from the disk's
      # own scope.
      resource_policies = getattr(args, 'resource_policies', None)
      if resource_policies:
        if disk_ref.Collection() == 'compute.regionDisks':
          disk_region = disk_ref.region
        else:
          disk_region = utils.ZoneNameToRegionName(disk_ref.zone)
        parsed_resource_policies = []
        for policy in resource_policies:
          resource_policy_ref = resource_util.ParseResourcePolicy(
              compute_holder.resources,
              policy,
              project=disk_ref.project,
              region=disk_region)
          parsed_resource_policies.append(resource_policy_ref.SelfLink())
        kwargs['resourcePolicies'] = parsed_resource_policies

      disk = client.messages.Disk(
          name=disk_ref.Name(),
          description=args.description,
          sizeGb=size_gb,
          sourceSnapshot=snapshot_uri,
          sourceImage=project_to_source_image[disk_ref.project].uri,
          type=type_uri,
          physicalBlockSizeBytes=physical_block_size_bytes,
          **kwargs)
      disk.sourceDisk = self.GetSourceDiskUri(args, disk_ref, compute_holder)
      disk.sourceInstantSnapshot = _GetSourceInstantSnapshotUri(
          args, compute_holder)

      if (support_multiwriter_disk and
          disk_ref.Collection() in ['compute.disks', 'compute.regionDisks'] and
          args.IsSpecified('multi_writer')):
        disk.multiWriter = args.multi_writer

      if support_enable_confidential_compute and args.IsSpecified(
          'confidential_compute'
      ):
        disk.enableConfidentialCompute = args.confidential_compute

      if guest_os_feature_messages:
        disk.guestOsFeatures = guest_os_feature_messages

      if support_vss_erase and args.IsSpecified('erase_windows_vss_signature'):
        disk.eraseWindowsVssSignature = args.erase_windows_vss_signature

      disk.licenses = self.ParseLicenses(args)

      # Provisioned IOPS/throughput are only valid for certain disk types;
      # validate before sending the request.
      if args.IsSpecified('provisioned_iops'):
        if type_uri and disks_util.IsProvisioningTypeIops(type_uri):
          disk.provisionedIops = args.provisioned_iops
        else:
          raise exceptions.InvalidArgumentException(
              '--provisioned-iops',
              '--provisioned-iops cannot be used with the given disk type.')

      if args.IsSpecified(
          'provisioned_throughput'):
        if type_uri and disks_util.IsProvisioningTypeThroughput(type_uri):
          disk.provisionedThroughput = args.provisioned_throughput
        else:
          raise exceptions.InvalidArgumentException(
              '--provisioned-throughput',
              '--provisioned-throughput cannot be used with the given disk '
              'type.')

      if args.IsSpecified('architecture'):
        disk.architecture = disk.ArchitectureValueValuesEnum(args.architecture)

      if args.IsSpecified('access_mode'):
        disk.accessMode = disk.AccessModeValueValuesEnum(args.access_mode)

      if support_user_licenses and args.IsSpecified('user_licenses'):
        disk.userLicenses = args.user_licenses

      if args.IsSpecified('location_hint'):
        disk.locationHint = args.location_hint

      if args.IsSpecified('storage_pool'):
        disk.storagePool = self.GetStoragePoolUri(args, compute_holder)

      if support_gmi_restore:
        _SetSourceMachineImageOptions(args, disk)

      if disk_ref.Collection() == 'compute.disks':
        request = client.messages.ComputeDisksInsertRequest(
            disk=disk, project=disk_ref.project, zone=disk_ref.zone)

        request = (client.apitools_client.disks, 'Insert', request)
      elif disk_ref.Collection() == 'compute.regionDisks':
        if args.IsSpecified('replica_zones'):
          disk.replicaZones = self.GetReplicaZones(
              args, compute_holder, disk_ref
          )
        request = client.messages.ComputeRegionDisksInsertRequest(
            disk=disk, project=disk_ref.project, region=disk_ref.region)

        request = (client.apitools_client.regionDisks, 'Insert', request)

      requests.append(request)
    return client.MakeRequests(requests)

  def Epilog(self, resources_were_displayed=True):
    """Prints a formatting reminder for disks created without a source."""
    message = """\

        New disks are unformatted. You must format and mount a disk before it
        can be used. You can find instructions on how to do this at:

        https://cloud.google.com/compute/docs/disks/add-persistent-disk#formatting
        """
    if self.show_unformated_message:
      log.status.Print(textwrap.dedent(message))
@base.DefaultUniverseOnly
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateBeta(Create):
  """Create Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    # Beta adds physical block size, VSS erase, PD interface, regional
    # source snapshots, and multi-writer on top of the GA surface.
    messages = cls._GetApiHolder(no_http=True).client.messages
    Create.disks_arg = disks_flags.MakeDiskArg(plural=True)
    _CommonArgs(
        messages,
        parser,
        include_physical_block_size_support=True,
        vss_erase_enabled=True,
        support_pd_interface=True,
        support_source_snapshot_region=True,
        source_instant_snapshot_enabled=False,
    )
    image_utils.AddGuestOsFeaturesArg(parser, messages)
    _AddReplicaZonesArg(parser)
    kms_resource_args.AddKmsKeyResourceArg(
        parser, 'disk', region_fallthrough=True)
    disks_flags.AddMultiWriterFlag(parser)
    disks_flags.AddEnableConfidentialComputeFlag(parser)

  def Run(self, args):
    # Enable the beta-only feature switches handled inside Create._Run.
    return self._Run(
        args,
        supports_kms_keys=True,
        supports_physical_block=True,
        support_vss_erase=True,
        support_multiwriter_disk=True,
        support_pd_interface=True,
        support_enable_confidential_compute=True,
        support_source_snapshot_region=True,
    )
@base.DefaultUniverseOnly
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateAlpha(CreateBeta):
  """Create Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    # Alpha additionally enables user licenses, machine-image restore, and
    # instant-snapshot sources on top of the beta surface.
    messages = cls._GetApiHolder(no_http=True).client.messages
    Create.disks_arg = disks_flags.MakeDiskArg(plural=True)
    _CommonArgs(
        messages,
        parser,
        include_physical_block_size_support=True,
        vss_erase_enabled=True,
        support_pd_interface=True,
        support_user_licenses=True,
        support_source_snapshot_region=True,
        support_gmi_restore=True,
        source_instant_snapshot_enabled=True
    )
    image_utils.AddGuestOsFeaturesArg(parser, messages)
    _AddReplicaZonesArg(parser)
    kms_resource_args.AddKmsKeyResourceArg(
        parser, 'disk', region_fallthrough=True)
    disks_flags.AddMultiWriterFlag(parser)
    disks_flags.AddEnableConfidentialComputeFlag(parser)

  def Run(self, args):
    # Enable the alpha-only feature switches handled inside Create._Run.
    return self._Run(
        args,
        supports_kms_keys=True,
        supports_physical_block=True,
        support_multiwriter_disk=True,
        support_vss_erase=True,
        support_pd_interface=True,
        support_user_licenses=True,
        support_enable_confidential_compute=True,
        support_source_snapshot_region=True,
        support_gmi_restore=True,
    )
def _ValidateAndParseDiskRefsRegionalReplica(
    args, compute_holder
):
  """Validate flags and parse disks references.

  Subclasses may override it to customize parsing.

  Args:
    args: The argument namespace
    compute_holder: base_classes.ComputeApiHolder instance

  Returns:
    List of compute.regionDisks resources.
  """
  replica_zones_given = args.IsSpecified('replica_zones')
  instant_snapshot_given = args.IsSpecified('source_instant_snapshot')

  if (
      args.IsSpecified('region')
      and not replica_zones_given
      and not instant_snapshot_given
  ):
    raise exceptions.RequiredArgumentException(
        '--replica-zones',
        '--replica-zones is required for regional disk creation')

  if args.replica_zones is not None:
    return create.ParseRegionDisksResources(compute_holder.resources,
                                            args.DISK_NAME, args.replica_zones,
                                            args.project, args.region)

  disk_refs = Create.disks_arg.ResolveAsResource(
      args,
      compute_holder.resources,
      scope_lister=flags.GetDefaultScopeLister(compute_holder.client))

  # --replica-zones is required for regional disks unless a source instant
  # snapshot is specified - also when region is selected in prompt.
  if not instant_snapshot_given:
    for disk_ref in disk_refs:
      if disk_ref.Collection() == 'compute.regionDisks':
        raise exceptions.RequiredArgumentException(
            '--replica-zones',
            '--replica-zones is required for regional disk creation [{}]'
            .format(disk_ref.SelfLink()))

  return disk_refs
def _SetSourceMachineImageOptions(args, disk):
"""Sets source machine image options on the disk.
Args:
args: The arguments namespace.
disk: The disk message.
Raises:
exceptions.RequiredArgumentException: If only one of the source machine
image arguments is specified.
"""
has_source_machine_image = args.IsSpecified('source_machine_image')
has_disk_device_name = args.IsSpecified(
'source_machine_image_disk_device_name'
)
if has_source_machine_image ^ has_disk_device_name:
missing_option = (
'--source-machine-image-disk-device-name'
if has_source_machine_image
else '--source-machine-image'
)
provided_option = (
'--source-machine-image'
if has_source_machine_image
else '--source-machine-image-disk-device-name'
)
raise exceptions.RequiredArgumentException(
missing_option,
f'{missing_option} must be specified when {provided_option} is'
' specified.',
)
elif has_source_machine_image and has_disk_device_name:
disk.sourceMachineImageDiskDeviceName = (
args.source_machine_image_disk_device_name
)
disk.sourceMachineImage = args.source_machine_image
# Attach the shared help text; calliope renders it for `--help` output.
Create.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,114 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
@base.ReleaseTracks(
    base.ReleaseTrack.GA, base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class Delete(base.DeleteCommand):
  """Delete Compute Engine persistent disks.

  *{command}* deletes one or more Compute Engine
  persistent disks. Disks can be deleted only if they are not
  being used by any virtual machine instances.
  """

  @staticmethod
  def Args(parser):
    # Accepts one or more zonal or regional disk names.
    Delete.disks_arg = disks_flags.MakeDiskArg(plural=True)
    Delete.disks_arg.AddArgument(parser, operation_type='delete')
    parser.display_info.AddCacheUpdater(completers.DisksCompleter)

  def _GetCommonScopeNameForRefs(self, refs):
    """Gets common scope for references."""
    # Only returns a scope name when every reference shares the same scope
    # kind; mixed zonal/regional deletions yield None (no scope in prompt).
    has_zone = any(hasattr(ref, 'zone') for ref in refs)
    has_region = any(hasattr(ref, 'region') for ref in refs)

    if has_zone and not has_region:
      return 'zone'
    elif has_region and not has_zone:
      return 'region'
    else:
      return None

  def _CreateDeleteRequests(self, client, disk_refs):
    """Returns a list of delete messages for disks."""

    messages = client.MESSAGES_MODULE
    requests = []
    for disk_ref in disk_refs:
      # Zonal and regional disks use different services/request messages.
      if disk_ref.Collection() == 'compute.disks':
        service = client.disks
        request = messages.ComputeDisksDeleteRequest(
            disk=disk_ref.Name(),
            project=disk_ref.project,
            zone=disk_ref.zone)
      elif disk_ref.Collection() == 'compute.regionDisks':
        service = client.regionDisks
        request = messages.ComputeRegionDisksDeleteRequest(
            disk=disk_ref.Name(),
            project=disk_ref.project,
            region=disk_ref.region)
      else:
        raise ValueError('Unknown reference type {0}'.
                         format(disk_ref.Collection()))

      requests.append((service, 'Delete', request))
    return requests

  def Run(self, args):
    # Resolve names, prompt for confirmation, then issue the deletions.
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    disk_refs = Delete.disks_arg.ResolveAsResource(
        args, holder.resources,
        default_scope=compute_scope.ScopeEnum.ZONE,
        scope_lister=flags.GetDefaultScopeLister(holder.client))

    scope_name = self._GetCommonScopeNameForRefs(disk_refs)

    utils.PromptForDeletion(
        disk_refs, scope_name=scope_name, prompt_title=None)

    requests = list(self._CreateDeleteRequests(
        holder.client.apitools_client, disk_refs))

    return holder.client.MakeRequests(requests)
Delete.detailed_help = {
'brief': 'Delete a Compute Engine disk',
'DESCRIPTION':
"""\
*{command}* deletes a Compute Engine disk. A disk can be
deleted only if it is not attached to any virtual machine instances.
""",
'EXAMPLES':
"""\
To delete the disk 'my-disk' in zone 'us-east1-a', run:
$ {command} my-disk --zone=us-east1-a
""",
}

View File

@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
def _CommonArgs(parser):
  """Registers the disk positional shared by all describe release tracks.

  NOTE: relies on the caller having assigned Describe.disk_arg first.
  """
  Describe.disk_arg.AddArgument(parser, operation_type='describe')
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Describe(base.DescribeCommand):
  """Describe a Compute Engine disk.

  Fetches and displays the full resource description of a zonal or
  regional Compute Engine disk.
  """

  @staticmethod
  def Args(parser):
    # Positional accepts a single zonal or regional disk.
    Describe.disk_arg = disks_flags.MakeDiskArg(plural=False)
    _CommonArgs(parser)

  def Collection(self):
    return 'compute.disks'

  def Run(self, args):
    """Issues the Get request for the resolved (zonal or regional) disk."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    disk_ref = Describe.disk_arg.ResolveAsResource(args, holder.resources)

    if disk_ref.Collection() == 'compute.disks':
      service = client.disks
      request_type = messages.ComputeDisksGetRequest
    elif disk_ref.Collection() == 'compute.regionDisks':
      service = client.regionDisks
      request_type = messages.ComputeRegionDisksGetRequest
    else:
      # Previously an unexpected collection fell through to a NameError on
      # `service`; raise an explicit error instead, mirroring the error
      # raised by the disks delete command for unknown reference types.
      raise ValueError(
          'Unknown reference type {0}'.format(disk_ref.Collection()))

    return service.Get(request_type(**disk_ref.AsDict()))
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class DescribeBeta(Describe):
  """Describe a Compute Engine disk."""

  @staticmethod
  def Args(parser):
    # NOTE: intentionally rebinds Describe.disk_arg (the attribute read by
    # _CommonArgs and Run), not a DescribeBeta-specific attribute.
    Describe.disk_arg = disks_flags.MakeDiskArg(plural=False)
    _CommonArgs(parser)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class DescribeAlpha(Describe):
  """Describe a Compute Engine disk."""

  @staticmethod
  def Args(parser):
    # NOTE: intentionally rebinds Describe.disk_arg (the attribute read by
    # _CommonArgs and Run), not a DescribeAlpha-specific attribute.
    Describe.disk_arg = disks_flags.MakeDiskArg(plural=False)
    _CommonArgs(parser)
Describe.detailed_help = {
'brief': 'Describe a Compute Engine disk',
'DESCRIPTION':
"""\
*{command}* displays all data associated with a Compute Engine
disk in a project.
""",
'EXAMPLES':
"""\
To describe the disk 'my-disk' in zone 'us-east1-a', run:
$ {command} my-disk --zone=us-east1-a
""",
}

View File

@@ -0,0 +1,112 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for retrieving the status of asynchronous replication for a disk-pair."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
DETAILED_HELP = {
'brief': (
'Retrieves the status of asynchronous replication for a Compute'
' Engine persistent disk-pair'
),
'DESCRIPTION': """\
*{command}* fetches the current status of async replication on a Compute
Engine persistent disk-pair. This command can be invoked on either the
primary disk or the secondary-disk but the scope respective to the disk
must be provided.
""",
'EXAMPLES': """\
Replication status can be fetched by using either the primary or the
secondary disk. To get the current replication status of the disk-pair
with the primary disk 'primary-disk-1' in zone 'us-east1-a', and project
'my-project1' and the secondary disk 'secondary-disk-1' in zone
'us-west1-a', and the project 'my-project2', the following commands can
be used:
$ {command} primary-disk-1 --zone=us-east1-a --project=my-project1
or
$ {command} secondary-disk-1 --zone=us-west1-a --project=my-project2
""",
}
def _CommonArgs(parser):
  """Add arguments used for parsing in all command tracks.

  NOTE: relies on GetAsyncReplicationStatus.disks_arg being assigned first.
  """
  GetAsyncReplicationStatus.disks_arg.AddArgument(parser)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
@base.DefaultUniverseOnly
class GetAsyncReplicationStatus(base.Command):
  """Get the asynchronous replication status for a Compute Engine persistent disk-pair."""

  @classmethod
  def Args(cls, parser):
    # Positional accepts a single zonal or regional disk (either side of
    # the replication pair).
    GetAsyncReplicationStatus.disks_arg = disks_flags.MakeDiskArg(plural=False)
    _CommonArgs(parser)

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    # Bundles the apitools client and resource registry for this track.
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def Run(self, args):
    return self._Run(args)

  def _Run(self, args):
    compute_holder = self._GetApiHolder()
    client = compute_holder.client

    disk_ref = GetAsyncReplicationStatus.disks_arg.ResolveAsResource(
        args,
        compute_holder.resources,
        scope_lister=flags.GetDefaultScopeLister(client),
    )

    # `request` becomes a (service, method, message) tuple; zonal and
    # regional disks use different services and request messages.
    request = None
    if disk_ref.Collection() == 'compute.disks':
      request = client.messages.ComputeDisksGetAsyncReplicationStatusRequest(
          disk=disk_ref.Name(),
          project=disk_ref.project,
          zone=disk_ref.zone,
      )
      request = (
          client.apitools_client.disks,
          'GetAsyncReplicationStatus',
          request,
      )
    elif disk_ref.Collection() == 'compute.regionDisks':
      request = (
          client.messages.ComputeRegionDisksGetAsyncReplicationStatusRequest(
              disk=disk_ref.Name(),
              project=disk_ref.project,
              region=disk_ref.region,
          )
      )
      request = (
          client.apitools_client.regionDisks,
          'GetAsyncReplicationStatus',
          request,
      )
    return client.MakeRequests([request])


GetAsyncReplicationStatus.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,31 @@
release_tracks: [ALPHA, BETA, GA]
help_text:
brief: Get the IAM policy for a Compute Engine disk.
description: |
*{command}* displays the IAM policy associated with a
Compute Engine disk in a project. If formatted as JSON,
the output can be edited and used as a policy file for
set-iam-policy. The output includes an "etag" field
identifying the version emitted and allowing detection of
concurrent policy updates; see
$ {parent} set-iam-policy for additional details.
examples: |
To print the IAM policy for a given disk, run:
$ {command} my-disk --zone=my-zone
request:
collection: compute.disks
use_relative_name: false
api_version: v1
modify_request_hooks:
- googlecloudsdk.command_lib.iam.hooks:UseMaxRequestedPolicyVersion:api_field=optionsRequestedPolicyVersion
BETA:
api_version: beta
ALPHA:
api_version: alpha
arguments:
resource:
help_text: The disk to display the IAM policy for.
spec: !REF googlecloudsdk.command_lib.compute.resources:disk

View File

@@ -0,0 +1,61 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing persistent disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute.disks import flags
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class List(base.ListCommand):
  """List Compute Engine persistent disks."""

  @staticmethod
  def Args(parser):
    # Default table format and URI output for multi-scope disk listings.
    parser.display_info.AddFormat(flags.MULTISCOPE_LIST_FORMAT)
    parser.display_info.AddUriFunc(utils.MakeGetUriFunc())
    # Disks may be listed per zone, per region, or aggregated across both.
    lister.AddMultiScopeListerFlags(parser, zonal=True, regional=True)
    parser.display_info.AddCacheUpdater(completers.DisksCompleter)

  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    compute_client = holder.client

    # Translate the scope flags into a concrete request specification,
    # then dispatch it through the generic multi-scope lister.
    scope_data = lister.ParseMultiScopeFlags(args, holder.resources)
    implementation = lister.MultiScopeLister(
        compute_client,
        zonal_service=compute_client.apitools_client.disks,
        regional_service=compute_client.apitools_client.regionDisks,
        aggregation_service=compute_client.apitools_client.disks)
    return lister.Invoke(scope_data, implementation)
# Generated brief/description/examples help covering both zonal and
# regional disk listings.
List.detailed_help = base_classes.GetMultiScopeListerHelp(
    'disks',
    scopes=[
        base_classes.ScopeType.zonal_scope,
        base_classes.ScopeType.regional_scope
    ])

View File

@@ -0,0 +1,123 @@
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for moving disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class Move(base.SilentCommand):
  """Move a disk between zones."""

  @staticmethod
  def Args(parser):
    # Only zonal disks can be moved; regional disks are not supported.
    Move.disk_arg = disks_flags.MakeDiskArgZonal(plural=False)
    Move.disk_arg.AddArgument(parser)
    parser.add_argument(
        '--destination-zone',
        help='The zone to move the disk to.',
        completer=completers.ZonesCompleter,
        required=True)
    base.ASYNC_FLAG.AddToParser(parser)

  def Run(self, args):
    """Returns a request for moving a disk."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    target_disk = Move.disk_arg.ResolveAsResource(
        args, holder.resources,
        scope_lister=flags.GetDefaultScopeLister(holder.client))
    destination_zone = holder.resources.Parse(
        args.destination_zone,
        params={
            'project': properties.VALUES.core.project.GetOrFail,
        },
        collection='compute.zones')

    client = holder.client.apitools_client
    messages = holder.client.messages

    # Disk moves are issued as a project-level operation, not a disk-level
    # one; the disk and zone are referenced by self-link.
    request = messages.ComputeProjectsMoveDiskRequest(
        diskMoveRequest=messages.DiskMoveRequest(
            destinationZone=destination_zone.SelfLink(),
            targetDisk=target_disk.SelfLink(),
        ),
        project=target_disk.project,
    )

    result = client.projects.MoveDisk(request)
    operation_ref = resources.REGISTRY.Parse(
        result.name,
        params={
            'project': properties.VALUES.core.project.GetOrFail,
        },
        collection='compute.globalOperations')

    if args.async_:
      # Fire-and-forget: report the operation and return without polling.
      log.UpdatedResource(
          operation_ref,
          kind='disk {0}'.format(target_disk.Name()),
          is_async=True,
          details='Run the [gcloud compute operations describe] command '
                  'to check the status of this operation.'
      )
      return result

    # Synchronous path: poll the global operation until the disk exists in
    # the destination zone.
    destination_disk_ref = holder.resources.Parse(
        target_disk.Name(),
        params={
            'project': destination_zone.project,
            'zone': destination_zone.Name()
        },
        collection='compute.disks')
    operation_poller = poller.Poller(client.disks, destination_disk_ref)
    return waiter.WaitFor(
        operation_poller, operation_ref,
        'Moving disk {0}'.format(target_disk.Name()))
Move.detailed_help = {
'brief': 'Move a disk between zones',
'DESCRIPTION': (
'*{command}* facilitates moving a Compute Engine disk volume '
'from one zone to another. You cannot move a disk if it is attached to '
'a running or stopped instance; use the gcloud compute instances move '
'command instead.\n\n'
'The `gcloud compute disks move` command does not support regional '
'persistent disks. See '
'https://cloud.google.com/compute/docs/disks/regional-persistent-disk '
'for more details.\n'),
'EXAMPLES': (
'To move the disk called example-disk-1 from us-central1-b to '
'us-central1-f, run:\n\n'
' $ {command} example-disk-1 --zone=us-central1-b '
'--destination-zone=us-central1-f\n')
}

View File

@@ -0,0 +1,44 @@
release_tracks: [ALPHA, BETA, GA]
help_text:
brief: Remove IAM policy binding from a Compute Engine disk.
description: |
Remove an IAM policy binding from the IAM policy of a Compute Engine disk. One binding consists of a member,
a role, and an optional condition.
examples: |
To remove an IAM policy binding for the role of 'roles/compute.securityAdmin' for the user 'test-user@gmail.com'
with disk 'my-disk' and zone 'ZONE', run:
$ {command} my-disk --zone=ZONE --member='user:test-user@gmail.com' --role='roles/compute.securityAdmin'
To remove an IAM policy binding which expires at the end of the year 2018 for the role of
'roles/compute.securityAdmin' and the user 'test-user@gmail.com' with disk 'my-disk' and zone 'ZONE', run:
$ {command} my-disk --zone=ZONE --member='user:test-user@gmail.com' --role='roles/compute.securityAdmin' --condition='expression=request.time < timestamp("2019-01-01T00:00:00Z"),title=expires_end_of_2018,description=Expires at midnight on 2018-12-31'
See https://cloud.google.com/iam/docs/managing-policies for details of
policy role and member types.
request:
collection: compute.disks
use_relative_name: false
api_version: v1
BETA:
api_version: beta
ALPHA:
api_version: alpha
arguments:
resource:
help_text: The disk from which to remove the IAM policy binding.
spec: !REF googlecloudsdk.command_lib.compute.resources:disk
iam:
set_iam_policy_request_path: zoneSetPolicyRequest
message_type_overrides:
policy: Policy
set_iam_policy_request: ComputeDisksSetIamPolicyRequest
enable_condition: true
policy_version: 3
get_iam_policy_version_path: optionsRequestedPolicyVersion

View File

@@ -0,0 +1,84 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for removing labels from disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import disks_util as api_util
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import labels_doc_helper
from googlecloudsdk.command_lib.compute import labels_flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.command_lib.util.args import labels_util
class RemoveLabels(base.UpdateCommand):
  """remove-labels command for disks."""

  # Populated by Args(); shared with Run() via the class attribute.
  DISK_ARG = None

  @classmethod
  def Args(cls, parser):
    cls.DISK_ARG = disks_flags.MakeDiskArg(plural=False)
    cls.DISK_ARG.AddArgument(parser)
    labels_flags.AddArgsForRemoveLabels(parser)

  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    disk_ref = self.DISK_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=flags.GetDefaultScopeLister(holder.client))

    remove_labels = labels_util.GetUpdateLabelsDictFromArgs(args)

    # disk_info abstracts over zonal vs regional disks (service, request
    # messages, and operation collection differ between the two).
    disk_info = api_util.GetDiskInfo(disk_ref, client, messages)
    disk = disk_info.GetDiskResource()

    if args.all:
      # removing all existing labels from the disk.
      remove_labels = {}
      if disk.labels:
        for label in disk.labels.additionalProperties:
          remove_labels[label.key] = label.value

    labels_diff = labels_util.Diff(subtractions=remove_labels)

    set_label_req = disk_info.GetSetLabelsRequestMessage()
    labels_update = labels_diff.Apply(set_label_req.LabelsValue, disk.labels)
    request = disk_info.GetSetDiskLabelsRequestMessage(
        disk, labels_update.GetOrNone())
    # No-op short-circuit: return the unchanged disk without calling the API.
    if not labels_update.needs_update:
      return disk

    service = disk_info.GetService()
    operation = service.SetLabels(request)
    operation_ref = holder.resources.Parse(
        operation.selfLink, collection=disk_info.GetOperationCollection())

    operation_poller = poller.Poller(service)
    return waiter.WaitFor(
        operation_poller, operation_ref,
        'Updating labels of disk [{0}]'.format(
            disk_ref.Name()))


RemoveLabels.detailed_help = (
    labels_doc_helper.GenerateDetailedHelpForRemoveLabels('disk'))

View File

@@ -0,0 +1,77 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for removing resource policies from a disk."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import disks_util as api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.command_lib.compute.resource_policies import flags
from googlecloudsdk.command_lib.compute.resource_policies import util
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
                    base.ReleaseTrack.GA)
class DisksRemoveResourcePolicies(base.UpdateCommand):
  """Remove resource policies from a Compute Engine disk."""

  @staticmethod
  def Args(parser):
    disks_flags.MakeDiskArg(plural=False).AddArgument(
        parser, operation_type='remove resource policies from')
    flags.AddResourcePoliciesArgs(
        parser, 'removed from', 'disk', required=True)

  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages
    disk_ref = disks_flags.MakeDiskArg(
        plural=False).ResolveAsResource(args, holder.resources)
    disk_info = api_util.GetDiskInfo(disk_ref, client, messages)
    # Resource policies are regional; for a zonal disk this derives the
    # region from the disk's zone.
    disk_region = disk_info.GetDiskRegionName()

    # Policies may be given as bare names or URLs; normalize each to its
    # self-link in the disk's project/region.
    resource_policies = []
    for policy in args.resource_policies:
      resource_policy_ref = util.ParseResourcePolicy(
          holder.resources,
          policy,
          project=disk_ref.project,
          region=disk_region)
      resource_policies.append(resource_policy_ref.SelfLink())

    return disk_info.MakeRemoveResourcePoliciesRequest(resource_policies,
                                                       holder.client)


DisksRemoveResourcePolicies.detailed_help = {
    'DESCRIPTION':
        """\
        Remove resource policies from a Compute Engine disk.

        *{command}* removes resource policies from a Compute Engine disk.
        """,
    'EXAMPLES':
        """\
        The following command removes one resource policy from a Compute Engine disk.

          $ {command} my-disk --zone=ZONE --resource-policies=POLICY
        """
}

View File

@@ -0,0 +1,135 @@
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for setting size of instance group manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.core.console import console_io
CONTINUE_WITH_RESIZE_PROMPT = textwrap.dedent("""
This command increases disk size. This change is not reversible.
For more information, see:
https://cloud.google.com/sdk/gcloud/reference/compute/disks/resize""")
def _CommonArgs(parser):
  """Adds the flags shared by all resize release tracks.

  NOTE: relies on Resize.DISKS_ARG being assigned by the track's Args first.
  """
  Resize.DISKS_ARG.AddArgument(parser)
  parser.add_argument(
      '--size',
      required=True,
      type=arg_parsers.BinarySize(lower_bound='1GB'),
      help="""\
      Indicates the new size of the disks. The value must be a whole
      number followed by a size unit of ``GB'' for gigabyte, or
      ``TB'' for terabyte. If no size unit is specified, GB is
      assumed. For example, ``10GB'' will produce 10 gigabyte disks.
      Disk size must be a multiple of 1 GB.
      """)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Resize(base.Command):
  """Set size of a persistent disk."""

  @classmethod
  def Args(cls, parser):
    # Positional accepts one or more zonal or regional disks.
    Resize.DISKS_ARG = disks_flags.MakeDiskArg(plural=True)
    _CommonArgs(parser)

  def Run(self, args):
    """Issues request for resizing a disk."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client

    # --size is parsed to bytes; the API expects whole gigabytes.
    size_gb = utils.BytesToGb(args.size)
    disk_refs = Resize.DISKS_ARG.ResolveAsResource(
        args, holder.resources)

    # Resizing is irreversible, so confirm before issuing any request.
    console_io.PromptContinue(
        message=CONTINUE_WITH_RESIZE_PROMPT,
        cancel_on_no=True)

    requests = []

    for disk_ref in disk_refs:
      if disk_ref.Collection() == 'compute.disks':
        request = client.messages.ComputeDisksResizeRequest(
            disk=disk_ref.Name(),
            project=disk_ref.project,
            zone=disk_ref.zone,
            disksResizeRequest=client.messages.DisksResizeRequest(
                sizeGb=size_gb))
        requests.append((client.apitools_client.disks, 'Resize', request))
      elif disk_ref.Collection() == 'compute.regionDisks':
        request = client.messages.ComputeRegionDisksResizeRequest(
            disk=disk_ref.Name(),
            project=disk_ref.project,
            region=disk_ref.region,
            regionDisksResizeRequest=client.messages.RegionDisksResizeRequest(
                sizeGb=size_gb))
        requests.append((client.apitools_client.regionDisks, 'Resize', request))

    return client.MakeRequests(requests)
Resize.detailed_help = {
'brief': 'Resize a disk or disks',
'DESCRIPTION': """\
*{command}* resizes a Compute Engine disk(s).
Only increasing disk size is supported. Disks can be resized
regardless of whether they are attached.
""",
'EXAMPLES': """\
To resize a disk called example-disk-1 to new size 6TB, run:
$ {command} example-disk-1 --size=6TB
To resize two disks called example-disk-2 and example-disk-3 to
new size 6TB, run:
$ {command} example-disk-2 example-disk-3 --size=6TB
This assumes that original size of each of these disks is 6TB or less.
"""}
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class ResizeBeta(Resize):
  """Set size of a persistent disk."""

  @classmethod
  def Args(cls, parser):
    # NOTE: intentionally rebinds Resize.DISKS_ARG (the attribute read by
    # _CommonArgs and Run), not a ResizeBeta-specific attribute.
    Resize.DISKS_ARG = disks_flags.MakeDiskArg(plural=True)
    _CommonArgs(parser)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class ResizeAlpha(Resize):
  """Set size of a persistent disk."""

  @classmethod
  def Args(cls, parser):
    # NOTE: intentionally rebinds Resize.DISKS_ARG (the attribute read by
    # _CommonArgs and Run), not a ResizeAlpha-specific attribute.
    Resize.DISKS_ARG = disks_flags.MakeDiskArg(plural=True)
    _CommonArgs(parser)


# The Alpha track reuses the GA detailed help verbatim.
ResizeAlpha.detailed_help = Resize.detailed_help

View File

@@ -0,0 +1,37 @@
release_tracks: [ALPHA, BETA, GA]
help_text:
brief: Set the IAM policy for a Compute Engine disk.
description: |
Sets the IAM policy for the given disk as defined in a JSON or YAML file.
examples: |
The following command will read an IAM policy defined in a JSON file
'policy.json' and set it for the disk `my-disk`:
$ {command} my-disk --zone=ZONE policy.json
See https://cloud.google.com/iam/docs/managing-policies for details of the
policy file format and contents.
request:
collection: compute.disks
use_relative_name: false
modify_request_hooks:
- googlecloudsdk.command_lib.iam.hooks:UseMaxRequestedPolicyVersion:api_field=zoneSetPolicyRequest.policy.version
api_version: v1
BETA:
api_version: beta
ALPHA:
api_version: alpha
arguments:
resource:
help_text: The disk to set the IAM policy for.
spec: !REF googlecloudsdk.command_lib.compute.resources:disk
iam:
set_iam_policy_request_path: zoneSetPolicyRequest
message_type_overrides:
policy: Policy
set_iam_policy_request: ComputeDisksSetIamPolicyRequest

View File

@@ -0,0 +1,242 @@
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for snapshotting disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import csek_utils
from googlecloudsdk.api_lib.compute import name_generator
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.command_lib.compute.snapshots import flags as snap_flags
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from six.moves import zip
DETAILED_HELP = {
'brief': 'Create snapshots of Compute Engine persistent disks.',
'DESCRIPTION': """
*{command}* creates snapshots of persistent disks. Snapshots are useful
for backing up data, copying a persistent disk, and even, creating a
custom image. Snapshots can be created from persistent disks even while
they are attached to running instances. Once created, snapshots may be
managed (listed, deleted, etc.) via `gcloud compute snapshots`.
Refer to the Snapshot best practices guide. https://cloud.google.com/compute/docs/disks/snapshot-best-practices
{command} waits until the operation returns a status of `READY` or
`FAILED`, or reaches the maximum timeout, and returns the last known
details of the snapshot.
Note: To create snapshots, the following IAM permissions are necessary
``compute.disks.createSnapshot'', ``compute.snapshots.create'',
``compute.snapshots.get'', and ``compute.zoneOperations.get''.
""",
'EXAMPLES': """
To create a snapshot named `snapshot-test` of a persistent disk named `test`
in zone `us-central1-a`, run:
$ {command} test --zone=us-central1-a --snapshot-names=snapshot-test --description="This is an example snapshot"
"""
}
def _CommonArgs(parser):
  """Add parser arguments common to all tracks.

  NOTE: relies on SnapshotDisks.disks_arg being assigned by the track's
  Args method before this is called.
  """
  SnapshotDisks.disks_arg.AddArgument(parser)
  parser.add_argument(
      '--description',
      help=('Text to describe the snapshots being created.'))
  # --snapshot-names pairs positionally with the disks; when omitted a
  # random name is generated per disk (see the command's Run method).
  parser.add_argument(
      '--snapshot-names',
      type=arg_parsers.ArgList(min_length=1),
      metavar='SNAPSHOT_NAME',
      help="""\
      Names to assign to the created snapshots. Without this option, the
      name of each snapshot will be a random 12-character alphanumeric
      string that starts with a letter. The values of
      this option run parallel to the disks specified. For example,

        {command} my-disk-1 my-disk-2 my-disk-3 --snapshot-names snapshot-1,snapshot-2,snapshot-3

      will result in `my-disk-1` being snapshotted as
      `snapshot-1`, `my-disk-2` as `snapshot-2`, and so on. The name must match
      the `(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)` regular expression, which
      means it must start with an alphabetic character followed by one or more
      alphanumeric characters or dashes. The name must not exceed 63 characters
      and must not contain special symbols. All characters must be lowercase.
      """)
  snap_flags.AddChainArg(parser)
  flags.AddGuestFlushFlag(parser, 'snapshot')
  flags.AddStorageLocationFlag(parser, 'snapshot')
  # CSEK key flags (keys may protect the source disk and/or the snapshot).
  csek_utils.AddCsekKeyArgs(parser, flags_about_creation=False)
  base.ASYNC_FLAG.AddToParser(parser)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class SnapshotDisks(base.SilentCommand):
  """Create snapshots of Google Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    SnapshotDisks.disks_arg = disks_flags.MakeDiskArg(plural=True)
    labels_util.AddCreateLabelsFlags(parser)
    _CommonArgs(parser)

  def Run(self, args):
    return self._Run(args)

  def _Run(self, args):
    """Returns a list of requests necessary for snapshotting disks.

    Resolves the disk references, pairs each disk with an explicit or
    randomly generated snapshot name, issues one CreateSnapshot request per
    disk (zonal or regional), and either returns the operations (--async)
    or waits for the snapshots to be created.

    Raises:
      exceptions.InvalidArgumentException: if --snapshot-names does not have
        the same number of values as the disks being snapshotted.
      core_exceptions.MultiError: if any CreateSnapshot request fails.
    """
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    disk_refs = SnapshotDisks.disks_arg.ResolveAsResource(
        args, holder.resources,
        scope_lister=flags.GetDefaultScopeLister(holder.client))
    if args.snapshot_names:
      if len(disk_refs) != len(args.snapshot_names):
        raise exceptions.InvalidArgumentException(
            '--snapshot-names',
            '[--snapshot-names] must have the same number of values as disks '
            'being snapshotted.')
      snapshot_names = args.snapshot_names
    else:
      # Generates names like "d52jsqy3db4q".
      snapshot_names = [name_generator.GenerateRandomName()
                        for _ in disk_refs]
    snapshot_refs = [
        holder.resources.Parse(
            snapshot_name,
            params={
                'project': properties.VALUES.core.project.GetOrFail,
            },
            collection='compute.snapshots')
        for snapshot_name in snapshot_names]
    client = holder.client.apitools_client
    messages = holder.client.messages

    # The CSEK key store depends only on args, so build it once instead of
    # re-parsing the key file/flags for every disk in the loop.
    csek_keys = csek_utils.CsekKeyStore.FromArgs(args, True)

    requests = []
    for disk_ref, snapshot_ref in zip(disk_refs, snapshot_refs):
      snapshot_key_or_none = csek_utils.MaybeLookupKeyMessage(
          csek_keys, snapshot_ref, client)
      disk_key_or_none = csek_utils.MaybeLookupKeyMessage(
          csek_keys, disk_ref, client)
      snapshot_message = messages.Snapshot(
          name=snapshot_ref.Name(),
          description=args.description,
          snapshotEncryptionKey=snapshot_key_or_none,
          sourceDiskEncryptionKey=disk_key_or_none,
          chainName=args.chain_name)
      if (hasattr(args, 'storage_location') and
          args.IsSpecified('storage_location')):
        snapshot_message.storageLocations = [args.storage_location]
      if (hasattr(args, 'labels') and args.IsSpecified('labels')):
        snapshot_message.labels = labels_util.ParseCreateArgs(
            args, messages.Snapshot.LabelsValue)

      # Zonal and regional disks use different services and request types.
      if disk_ref.Collection() == 'compute.disks':
        request = messages.ComputeDisksCreateSnapshotRequest(
            disk=disk_ref.Name(),
            snapshot=snapshot_message,
            project=disk_ref.project,
            zone=disk_ref.zone,
            guestFlush=args.guest_flush)
        requests.append((client.disks, 'CreateSnapshot', request))
      elif disk_ref.Collection() == 'compute.regionDisks':
        request = messages.ComputeRegionDisksCreateSnapshotRequest(
            disk=disk_ref.Name(),
            snapshot=snapshot_message,
            project=disk_ref.project,
            region=disk_ref.region)
        if hasattr(request, 'guestFlush'):  # only available in alpha API
          guest_flush = getattr(args, 'guest_flush', None)
          if guest_flush is not None:
            request.guestFlush = guest_flush
        requests.append((client.regionDisks, 'CreateSnapshot', request))

    errors_to_collect = []
    responses = holder.client.AsyncRequests(requests, errors_to_collect)
    for r in responses:
      err = getattr(r, 'error', None)
      if err:
        errors_to_collect.append(poller.OperationErrors(err.errors))
    if errors_to_collect:
      raise core_exceptions.MultiError(errors_to_collect)

    operation_refs = [holder.resources.Parse(r.selfLink) for r in responses]

    if args.async_:
      # Report the pending operations and return without waiting.
      for operation_ref in operation_refs:
        log.status.Print('Disk snapshot in progress for [{}].'
                         .format(operation_ref.SelfLink()))
      log.status.Print('Use [gcloud compute operations describe URI] command '
                       'to check the status of the operation(s).')
      return responses

    operation_poller = poller.BatchPoller(
        holder.client, client.snapshots, snapshot_refs)
    return waiter.WaitFor(
        operation_poller, poller.OperationBatch(operation_refs),
        'Creating snapshot(s) {0}'
        .format(', '.join(s.Name() for s in snapshot_refs)),
        max_wait_ms=None
    )
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SnapshotDisksBeta(SnapshotDisks):
  """Create snapshots of Google Compute Engine persistent disks beta."""

  @classmethod
  def Args(cls, parser):
    # Registers the same flags as the GA track; the shared disks_arg is
    # stored on the GA class so _CommonArgs and _Run can find it.
    SnapshotDisks.disks_arg = disks_flags.MakeDiskArg(plural=True)
    labels_util.AddCreateLabelsFlags(parser)
    _CommonArgs(parser)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SnapshotDisksAlpha(SnapshotDisksBeta):
  """Create snapshots of Google Compute Engine persistent disks alpha."""

  @classmethod
  def Args(cls, parser):
    # Same flag surface as beta/GA; disks_arg lives on the base class.
    SnapshotDisks.disks_arg = disks_flags.MakeDiskArg(plural=True)
    labels_util.AddCreateLabelsFlags(parser)
    _CommonArgs(parser)

  def Run(self, args):
    return self._Run(args)
SnapshotDisks.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,153 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for starting async replication on disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
DETAILED_HELP = {
'brief':
'Start asynchronous replication on a Compute Engine persistent disk',
'DESCRIPTION':
"""\
*{command}* starts async replication on a Compute Engine persistent
disk. This command must be invoked on the primary disk and
`--secondary-disk` must be provided.
""",
'EXAMPLES':
"""\
Start replication from the primary disk 'my-disk-1' in zone us-east1-a
to the secondary disk 'my-disk-2' in zone us-west1-a:
$ {command} my-disk-1 --zone=us-east1-a --secondary-disk=my-disk-2 --secondary-disk-zone=us-west1-a
""",
}
def _CommonArgs(parser):
  """Add arguments used for parsing in all command tracks."""
  # Primary disk positional; a scope (--zone/--region) must be supplied.
  StartAsyncReplication.disks_arg.AddArgument(parser, scope_required=True)
  # Secondary-disk flags are grouped under their own help category.
  secondary_disk_category = 'SECONDARY DISK'
  StartAsyncReplication.secondary_disk_arg.AddArgument(
      parser, category=secondary_disk_category, scope_required=True
  )
  disks_flags.AddSecondaryDiskProject(parser, secondary_disk_category)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class StartAsyncReplication(base.Command):
  """Start Async Replication on Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    # Stored on the class so _CommonArgs and _Run can reference them.
    StartAsyncReplication.disks_arg = disks_flags.MakeDiskArg(plural=False)
    StartAsyncReplication.secondary_disk_arg = disks_flags.MakeSecondaryDiskArg(
        required=True)
    _CommonArgs(parser)

  def GetAsyncSecondaryDiskUri(self, args, compute_holder):
    """Returns the self link of --secondary-disk, or None if not resolvable."""
    secondary_disk_ref = None
    if args.secondary_disk:
      secondary_disk_project = getattr(args, 'secondary_disk_project', None)
      secondary_disk_ref = self.secondary_disk_arg.ResolveAsResource(
          args, compute_holder.resources, source_project=secondary_disk_project
      )
    if secondary_disk_ref:
      return secondary_disk_ref.SelfLink()
    return None

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def Run(self, args):
    return self._Run(args)

  def _Run(self, args):
    # Builds and issues the StartAsyncReplication request for the disk.
    compute_holder = self._GetApiHolder()
    client = compute_holder.client
    disk_ref = StartAsyncReplication.disks_arg.ResolveAsResource(
        args,
        compute_holder.resources,
        scope_lister=flags.GetDefaultScopeLister(client))
    request = None
    secondary_disk_uri = self.GetAsyncSecondaryDiskUri(args, compute_holder)
    # Zonal and regional disks use different services and request messages.
    if disk_ref.Collection() == 'compute.disks':
      request = client.messages.ComputeDisksStartAsyncReplicationRequest(
          disk=disk_ref.Name(),
          project=disk_ref.project,
          zone=disk_ref.zone,
          disksStartAsyncReplicationRequest=client.messages
          .DisksStartAsyncReplicationRequest(
              asyncSecondaryDisk=secondary_disk_uri))
      request = (client.apitools_client.disks, 'StartAsyncReplication', request)
    elif disk_ref.Collection() == 'compute.regionDisks':
      request = client.messages.ComputeRegionDisksStartAsyncReplicationRequest(
          disk=disk_ref.Name(),
          project=disk_ref.project,
          region=disk_ref.region,
          regionDisksStartAsyncReplicationRequest=client.messages
          .RegionDisksStartAsyncReplicationRequest(
              asyncSecondaryDisk=secondary_disk_uri))
      request = (client.apitools_client.regionDisks, 'StartAsyncReplication',
                 request)
    return client.MakeRequests([request])
StartAsyncReplication.detailed_help = DETAILED_HELP
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class StartAsyncReplicationBeta(StartAsyncReplication):
  """Start Async Replication on Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    # Same flag surface as GA; args are stored on the base class.
    StartAsyncReplication.disks_arg = disks_flags.MakeDiskArg(plural=False)
    StartAsyncReplication.secondary_disk_arg = disks_flags.MakeSecondaryDiskArg(
        required=True)
    _CommonArgs(parser)

  def Run(self, args):
    return self._Run(args)
StartAsyncReplicationBeta.detailed_help = DETAILED_HELP
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class StartAsyncReplicationAlpha(StartAsyncReplication):
  """Start Async Replication on Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    # Same flag surface as GA; args are stored on the base class.
    StartAsyncReplication.disks_arg = disks_flags.MakeDiskArg(plural=False)
    StartAsyncReplication.secondary_disk_arg = disks_flags.MakeSecondaryDiskArg(
        required=True)
    _CommonArgs(parser)

  def Run(self, args):
    return self._Run(args)
StartAsyncReplicationAlpha.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,127 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for stopping async replication on disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
DETAILED_HELP = {
'brief': 'Stop async replication on a Compute Engine persistent disk',
'DESCRIPTION': """\
*{command}* stops async replication on a Compute Engine persistent
disk. This command can be invoked either on the primary or on the
secondary disk.
""",
'EXAMPLES': """\
Stop replication on the primary disk 'my-disk-1' in zone us-east1-a:
$ {command} my-disk-1 --zone=us-east1-a
Stop replication on the secondary disk 'my-disk-2' in zone us-west1-a:
$ {command} my-disk-2 --zone=us-west1-a
""",
}
def _CommonArgs(parser):
  """Add arguments used for parsing in all command tracks."""
  # Registers the disk positional; operation_type tailors the flag help text.
  StopAsyncReplication.disks_arg.AddArgument(
      parser, operation_type='stop async replication')
@base.ReleaseTracks(base.ReleaseTrack.GA)
class StopAsyncReplication(base.Command):
  """Stop Async Replication on Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    # Stored on the class so _CommonArgs and _Run can reference it.
    StopAsyncReplication.disks_arg = disks_flags.MakeDiskArg(plural=False)
    _CommonArgs(parser)

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def Run(self, args):
    return self._Run(args)

  def _Run(self, args):
    # Builds and issues the StopAsyncReplication request for the disk.
    compute_holder = self._GetApiHolder()
    client = compute_holder.client
    disk_ref = StopAsyncReplication.disks_arg.ResolveAsResource(
        args,
        compute_holder.resources,
        scope_lister=flags.GetDefaultScopeLister(client))
    request = None
    # Zonal and regional disks use different services and request messages.
    if disk_ref.Collection() == 'compute.disks':
      request = client.messages.ComputeDisksStopAsyncReplicationRequest(
          disk=disk_ref.Name(),
          project=disk_ref.project,
          zone=disk_ref.zone,
      )
      request = (client.apitools_client.disks, 'StopAsyncReplication', request)
    elif disk_ref.Collection() == 'compute.regionDisks':
      request = client.messages.ComputeRegionDisksStopAsyncReplicationRequest(
          disk=disk_ref.Name(),
          project=disk_ref.project,
          region=disk_ref.region,
      )
      request = (client.apitools_client.regionDisks, 'StopAsyncReplication',
                 request)
    return client.MakeRequests([request])
StopAsyncReplication.detailed_help = DETAILED_HELP
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class StopAsyncReplicationBeta(StopAsyncReplication):
  """Stop Async Replication on Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    # Same flag surface as GA; disks_arg lives on the base class.
    StopAsyncReplication.disks_arg = disks_flags.MakeDiskArg(plural=False)
    _CommonArgs(parser)

  def Run(self, args):
    return self._Run(args)
StopAsyncReplicationBeta.detailed_help = DETAILED_HELP
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class StopAsyncReplicationAlpha(StopAsyncReplication):
  """Stop Async Replication on Compute Engine persistent disks."""

  @classmethod
  def Args(cls, parser):
    # Same flag surface as GA; disks_arg lives on the base class.
    StopAsyncReplication.disks_arg = disks_flags.MakeDiskArg(plural=False)
    _CommonArgs(parser)

  def Run(self, args):
    return self._Run(args)
StopAsyncReplicationAlpha.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,130 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for stopping group async replication on disks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.core import properties
# Help text attached to the command classes via `detailed_help`;
# '{command}' placeholders are expanded when help is rendered.
DETAILED_HELP = {
    'brief':
        'Consistently stops a group of asynchronously replicating disks',
    'DESCRIPTION':
        """\
        *{command}* consistently stops a group of asynchronously replicating
        disks. This command can be invoked in either the primary or secondary
        scope of the replicating disks.
        """,
    'EXAMPLES':
        """\
        To stop group replication in the primary scope, include the zone or
        region of the primary disks. The URL of the disk consistency group
        resource policy always uses the region of the primary disks:

          $ {command} projects/my-project/regions/us-west1/resourcePolicies/my-policy --zone=us-west1-a

        Alternatively, you can stop replication in the secondary scope. Include
        the region or zone of the secondary disks. The URL of the disk
        consistency group resource policy always uses the region of the primary
        disks:

          $ {command} projects/my-project/regions/us-west1/resourcePolicies/my-policy --zone=us-west2-a
        """,
}
def _CommonArgs(parser):
  """Add arguments used for parsing in all command tracks."""
  # Presumably registers the DISK_CONSISTENCY_GROUP_POLICY positional and
  # the --zone/--region scope flags read in _Run — confirm in disks_flags.
  disks_flags.AddStopGroupAsyncReplicationArgs(parser)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class StopGroupAsyncReplication(base.Command):
  """Stop Group Async Replication for a Consistency Group Resource Policy."""

  @classmethod
  def Args(cls, parser):
    _CommonArgs(parser)

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def Run(self, args):
    return self._Run(args)

  def _Run(self, args):
    # Builds and issues a bulk StopGroupAsyncReplication request for all
    # disks governed by the given consistency-group resource policy.
    compute_holder = self._GetApiHolder()
    client = compute_holder.client
    policy_url = getattr(args, 'DISK_CONSISTENCY_GROUP_POLICY', None)
    project = properties.VALUES.core.project.GetOrFail()
    # --zone selects the zonal disks service; otherwise the regional one.
    if args.IsSpecified('zone'):
      request = client.messages.ComputeDisksStopGroupAsyncReplicationRequest(
          project=project,
          zone=args.zone,
          disksStopGroupAsyncReplicationResource=client.messages
          .DisksStopGroupAsyncReplicationResource(
              resourcePolicy=policy_url))
      request = (client.apitools_client.disks, 'StopGroupAsyncReplication',
                 request)
    else:
      request = client.messages.ComputeRegionDisksStopGroupAsyncReplicationRequest(
          project=project,
          region=args.region,
          disksStopGroupAsyncReplicationResource=client.messages
          .DisksStopGroupAsyncReplicationResource(
              resourcePolicy=policy_url))
      request = (client.apitools_client.regionDisks,
                 'StopGroupAsyncReplication', request)
    # no_followup: the bulk operation has no single target resource to re-GET.
    return client.MakeRequests([request], no_followup=True)
StopGroupAsyncReplication.detailed_help = DETAILED_HELP
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class StopGroupAsyncReplicationBeta(StopGroupAsyncReplication):
  """Stop Group Async Replication for a Consistency Group Resource Policy."""

  @classmethod
  def Args(cls, parser):
    _CommonArgs(parser)

  def Run(self, args):
    return self._Run(args)
StopGroupAsyncReplicationBeta.detailed_help = DETAILED_HELP
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class StopGroupAsyncReplicationAlpha(StopGroupAsyncReplication):
  """Stop Group Async Replication for a Consistency Group Resource Policy."""

  @classmethod
  def Args(cls, parser):
    _CommonArgs(parser)

  def Run(self, args):
    return self._Run(args)
StopGroupAsyncReplicationAlpha.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,598 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for labels update to disks."""
import dataclasses
from typing import List
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import disks_util as api_util
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
# Help text attached to the Update command classes via `detailed_help`;
# '{command}' / '{parent_command}' placeholders are expanded at render time.
DETAILED_HELP = {
    'DESCRIPTION': '*{command}* updates a Compute Engine persistent disk.',
    'EXAMPLES': """\
        To update labels 'k0' and 'k1' and remove label 'k3' of a disk, run:

          $ {command} example-disk --zone=us-central1-a --update-labels=k0=value1,k1=value2 --remove-labels=k3

        ``k0'' and ``k1'' are added as new labels if not already present.

        Labels can be used to identify the disk. To list disks with the 'k1:value2' label, run:

          $ {parent_command} list --filter='labels.k1:value2'

        To list only the labels when describing a resource, use --format to filter the result:

          $ {parent_command} describe example-disk --format="default(labels)"

        To append licenses to the disk, run:

          $ {command} example-disk --zone=us-central1-a --append-licenses=projects/license-project/global/licenses/license-1,projects/license-project/global/licenses/license-2

        To remove licenses from the disk, run:

          $ {command} example-disk --zone=us-central1-a --remove-licenses=projects/license-project/global/licenses/license-1,projects/license-project/global/licenses/license-2

        To replace a license on the disk, run:

          $ {command} example-disk --zone=us-central1-a --replace-license=projects/license-project/global/licenses/old-license,projects/license-project/global/licenses/new-license
        """,
}
def _CommonArgs(
    messages,
    cls,
    parser,
    support_user_licenses=False,
    support_licenses=True,
    support_add_guest_os_features=False,
):
  """Add arguments used for parsing in all command tracks.

  Args:
    messages: The compute API messages module, used for enum choices.
    cls: The command class; the disk resource arg is stored on cls.DISK_ARG.
    parser: The argparse parser to register flags on.
    support_user_licenses: Whether to add --update/--clear-user-licenses.
    support_licenses: Whether to add the license mutation flags.
    support_add_guest_os_features: Whether to add --add-guest-os-features.
  """
  cls.DISK_ARG = disks_flags.MakeDiskArg(plural=False)
  cls.DISK_ARG.AddArgument(parser, operation_type='update')
  labels_util.AddUpdateLabelsFlags(parser)
  if support_user_licenses:
    # Updating and clearing user licenses are mutually exclusive.
    scope = parser.add_mutually_exclusive_group()
    scope.add_argument(
        '--update-user-licenses',
        type=arg_parsers.ArgList(),
        metavar='LICENSE',
        action=arg_parsers.UpdateAction,
        help=(
            'List of user licenses to be updated on a disk. These user licenses'
            ' will replace all existing user licenses. If this flag is not '
            'provided, all existing user licenses will remain unchanged.'))
    scope.add_argument(
        '--clear-user-licenses',
        action='store_true',
        help='Remove all existing user licenses on a disk.')
  if support_licenses:
    scope = parser.add_group()
    scope.add_argument(
        '--append-licenses',
        type=arg_parsers.ArgList(min_length=1),
        metavar='LICENSE',
        action=arg_parsers.UpdateAction,
        help=(
            # Fixed: the help text previously began with a stray '"'.
            'A list of license URIs or license codes. These licenses will'
            ' be appended to the existing licenses on the disk. Provided'
            ' licenses can be either license URIs or license codes but'
            ' not a mix of both.'
        ),
    )
    scope.add_argument(
        '--remove-licenses',
        type=arg_parsers.ArgList(min_length=1),
        metavar='LICENSE',
        action=arg_parsers.UpdateAction,
        help=(
            'A list of license URIs or license codes. If'
            ' present in the set of existing licenses, these licenses will be'
            ' removed. If not present, this is a no-op. Provided licenses can'
            ' be either license URIs or license codes but not a mix of'
            ' both.'
        ),
    )
    scope.add_argument(
        '--replace-license',
        type=arg_parsers.ArgList(min_length=2, max_length=2),
        metavar='LICENSE',
        action=arg_parsers.UpdateAction,
        help=(
            'A list of license URIs or license codes. The first'
            ' license is the license to be replaced and the second license is'
            ' the replacement license. Provided licenses can be either'
            ' license URIs or license codes but not a mix of both.'
        ),
    )
  # Updating and clearing the architecture are mutually exclusive.
  scope = parser.add_mutually_exclusive_group()
  architecture_enum_type = messages.Disk.ArchitectureValueValuesEnum
  excluded_enums = [architecture_enum_type.ARCHITECTURE_UNSPECIFIED.name]
  architecture_choices = sorted(
      [e for e in architecture_enum_type.names() if e not in excluded_enums])
  scope.add_argument(
      '--update-architecture',
      choices=architecture_choices,
      help=(
          'Updates the architecture or processor type that this disk '
          'can support. For available processor types on Compute Engine, '
          'see https://cloud.google.com/compute/docs/cpu-platforms.'
      ))
  scope.add_argument(
      '--clear-architecture',
      action='store_true',
      help=('Removes the architecture or processor '
            'type annotation from the disk.')
  )
  if support_add_guest_os_features:
    disks_flags.AddGuestOsFeatureArgs(parser, messages)
  disks_flags.AddAccessModeFlag(parser, messages)
  parser.add_argument(
      '--provisioned-iops',
      type=arg_parsers.BoundedInt(),
      help=(
          'Provisioned IOPS of disk to update. '
          'Only for use with disks of type '
          'hyperdisk-extreme.'
      ),
  )
  parser.add_argument('--provisioned-throughput',
                      type=arg_parsers.BoundedInt(),
                      help=(
                          'Provisioned throughput of disk to update. '
                          'The throughput unit is MB per sec. '))
  parser.add_argument(
      '--size',
      type=arg_parsers.BinarySize(
          suggested_binary_size_scales=['GB', 'GiB', 'TB', 'TiB', 'PiB', 'PB']),
      help="""\
      Size of the disks. The value must be a whole
      number followed by a size unit of ``GB'' for gigabyte, or ``TB''
      for terabyte. If no size unit is specified, GB is
      assumed. For details about disk size limits, refer to:
      https://cloud.google.com/compute/docs/disks
      """)
def _LabelsFlagsIncluded(args):
return args.IsSpecified('update_labels') or args.IsSpecified(
'clear_labels') or args.IsSpecified('remove_labels')
def _UserLicensesFlagsIncluded(args):
return args.IsSpecified('update_user_licenses') or args.IsSpecified(
'clear_user_licenses')
def _LicensesFlagsIncluded(args):
return (
args.IsSpecified('append_licenses')
or args.IsSpecified('remove_licenses')
or args.IsSpecified('replace_license')
)
def _ArchitectureFlagsIncluded(args):
return args.IsSpecified('update_architecture') or args.IsSpecified(
'clear_architecture')
def _AccessModeFlagsIncluded(args):
return args.IsSpecified('access_mode')
def _ProvisionedIopsIncluded(args):
return args.IsSpecified('provisioned_iops')
def _ProvisionedThroughputIncluded(args):
return args.IsSpecified('provisioned_throughput')
def _SizeIncluded(args):
return args.IsSpecified('size')
def _GuestOsFeatureFlagsIncluded(args):
return args.IsKnownAndSpecified('add_guest_os_features')
@base.DefaultUniverseOnly
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Update(base.UpdateCommand):
  r"""Update a Compute Engine persistent disk."""

  DISK_ARG = None

  @dataclasses.dataclass
  class LicenseUpdateData:
    # Result of merging the disk's current licenses with the requested
    # mutations. Exactly one of `licenses` / `license_codes` is populated,
    # selected by `update_via_license_code`.
    update_via_license_code: bool
    licenses: List[str]
    license_codes: List[int]

  @classmethod
  def Args(cls, parser):
    messages = cls._GetApiHolder(no_http=True).client.messages
    _CommonArgs(
        messages, cls, parser, False)

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def _isInt(self, license_code):
    # True if license_code parses as an integer (i.e. a license code
    # rather than a license URI).
    try:
      int(license_code)
      return True
    except ValueError:
      return False

  def _UpdateRequiresDiskRead(self, args, support_licenses):
    # License and guest-OS-feature updates merge with the disk's current
    # state, so they require fetching the existing disk resource first.
    return (
        support_licenses and _LicensesFlagsIncluded(args)
    ) or _GuestOsFeatureFlagsIncluded(args)

  def _VerifyLicenseArgsDoNotMixLicensesAndLicenseCodes(self, args):
    """Verifies that license args do not mix licenses and license codes.

    Args:
      args: The arguments that were provided by the user, which contains the
        license mutations.

    Raises:
      exceptions.InvalidArgumentException: If the user provided a mix of
        licenses and license codes.
    """
    # Collect every license value across all three mutation flags; the
    # "no mixing" rule applies to the combined set.
    all_licenses = []
    if args.IsSpecified('append_licenses'):
      all_licenses.extend(args.append_licenses)
    if args.IsSpecified('remove_licenses'):
      all_licenses.extend(args.remove_licenses)
    if args.IsSpecified('replace_license'):
      all_licenses.extend(args.replace_license)
    is_mixing_licenses_and_license_codes = any(
        self._isInt(license) for license in all_licenses
    ) and any(not self._isInt(license) for license in all_licenses)
    if is_mixing_licenses_and_license_codes:
      # Report the error against each flag the user actually provided.
      if args.IsSpecified('append_licenses'):
        raise exceptions.InvalidArgumentException(
            '--append-licenses',
            'Values must be either all license codes or all licenses, not a mix'
            ' of both.',
        )
      if args.IsSpecified('remove_licenses'):
        raise exceptions.InvalidArgumentException(
            '--remove-licenses',
            'Values must be either all license codes or all licenses, not a mix'
            ' of both.',
        )
      if args.IsSpecified('replace_license'):
        raise exceptions.InvalidArgumentException(
            '--replace-license',
            'Values must be either all license codes or all licenses, not a mix'
            ' of both.',
        )

  def _LicenseUpdateFormatIsCode(self, appended_licenses, removed_licenses):
    # True when every provided value is an integer license code.
    return all(self._isInt(license) for license in appended_licenses) and all(
        self._isInt(license) for license in removed_licenses
    )

  def _ParseLicenseCodesForUpdate(
      self, current_license_codes, appended_licenses, removed_licenses
  ):
    # Merges the disk's current license codes with the requested
    # additions/removals and returns the resulting code list.
    log.debug('Updating licenses via license codes')
    appended_licenses = [int(license) for license in appended_licenses]
    removed_licenses = [int(license) for license in removed_licenses]
    result_licenses = current_license_codes + appended_licenses
    for removed_license in removed_licenses:
      if removed_license in result_licenses:
        result_licenses.remove(removed_license)
    log.debug('License codes sent to api: ' + str(result_licenses))
    return result_licenses

  def _ParseLicensesForUpdate(
      self, holder, disk_ref, disk, appended_licenses, removed_licenses
  ):
    # Merges the disk's current license URIs with the requested
    # additions/removals and returns the resulting URI list.
    log.debug('Updating licenses via license names')
    # Parse input and existing licenses as relative paths for comparison.
    appended_licenses = [
        holder.resources.Parse(
            license,
            collection='compute.licenses',
            params={'project': disk_ref.project},
        ).RelativeName()
        for license in (disk.licenses + appended_licenses)
    ]
    log.debug(
        'appended_licenses & existing licenses: ' + str(appended_licenses)
    )
    removed_licenses = [
        holder.resources.Parse(
            license,
            collection='compute.licenses',
            params={'project': disk_ref.project},
        ).RelativeName()
        for license in removed_licenses
    ]
    log.debug('removed_licenses: ' + str(removed_licenses))
    for removed_license in removed_licenses:
      if removed_license in appended_licenses:
        appended_licenses.remove(removed_license)
    log.debug('Licenses sent to API: ' + str(appended_licenses))
    return appended_licenses

  def _ConstructLicenseUpdateData(self, args, holder, disk, disk_ref):
    # Translates --append-licenses / --remove-licenses / --replace-license
    # into a LicenseUpdateData describing the disk's final license set.
    appended_licenses = []
    removed_licenses = []
    if args.append_licenses:
      log.debug('Appending licenses: ' + str(args.append_licenses))
      appended_licenses = args.append_licenses
    if args.remove_licenses:
      log.debug('Removing licenses: ' + str(args.remove_licenses))
      removed_licenses = args.remove_licenses
    if args.replace_license:
      # A replace is modeled as remove(old) + append(new).
      log.debug(
          'Replacing license '
          + str(args.replace_license[0])
          + ' with '
          + str(args.replace_license[1])
      )
      appended_licenses.append(args.replace_license[1])
      removed_licenses.append(args.replace_license[0])

    if self._LicenseUpdateFormatIsCode(appended_licenses, removed_licenses):
      license_codes = self._ParseLicenseCodesForUpdate(
          disk.licenseCodes, appended_licenses, removed_licenses
      )
      return self.LicenseUpdateData(
          update_via_license_code=True,
          licenses=[],
          license_codes=license_codes,
      )
    else:
      license_names = self._ParseLicensesForUpdate(
          holder, disk_ref, disk, appended_licenses, removed_licenses
      )
      return self.LicenseUpdateData(
          update_via_license_code=False,
          licenses=license_names,
          license_codes=[],
      )

  def Run(self, args):
    return self._Run(
        args,
        support_user_licenses=False,
        support_licenses=True,
    )

  def _Run(
      self,
      args,
      support_user_licenses=False,
      support_licenses=True,
  ):
    # Issues an Update request (with an explicit field mask in `paths`) for
    # non-label changes, then a separate SetLabels request if label flags
    # were given. Returns the result of the last operation waited on.
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    disk_ref = self.DISK_ARG.ResolveAsResource(
        args, holder.resources,
        scope_lister=flags.GetDefaultScopeLister(holder.client))
    disk_info = api_util.GetDiskInfo(disk_ref, client, messages)
    service = disk_info.GetService()

    if (
        _ProvisionedIopsIncluded(args)
        or _ProvisionedThroughputIncluded(args)
        or _ArchitectureFlagsIncluded(args)
        or _SizeIncluded(args)
        or (support_user_licenses and _UserLicensesFlagsIncluded(args))
        or (support_licenses and _LicensesFlagsIncluded(args))
        or _AccessModeFlagsIncluded(args)
        or _GuestOsFeatureFlagsIncluded(args)
    ):
      disk_res = messages.Disk(name=disk_ref.Name())
      disk_update_request = None
      # Zonal and regional disks use different update request messages.
      if disk_ref.Collection() == 'compute.disks':
        disk_update_request = messages.ComputeDisksUpdateRequest(
            project=disk_ref.project,
            disk=disk_ref.Name(),
            diskResource=disk_res,
            zone=disk_ref.zone,
            paths=[])
      else:
        disk_update_request = messages.ComputeRegionDisksUpdateRequest(
            project=disk_ref.project,
            disk=disk_ref.Name(),
            diskResource=disk_res,
            region=disk_ref.region,
            paths=[])

      # Some updates require the current state of the disk.
      disk = None
      if self._UpdateRequiresDiskRead(args, support_licenses):
        disk = disk_info.GetDiskResource()

      if support_user_licenses and _UserLicensesFlagsIncluded(args):
        # --clear-user-licenses sends the path with no value, which clears.
        if args.update_user_licenses:
          disk_res.userLicenses = args.update_user_licenses
        disk_update_request.paths.append('userLicenses')

      if support_licenses and _LicensesFlagsIncluded(args):
        self._VerifyLicenseArgsDoNotMixLicensesAndLicenseCodes(args)
        license_update_data = self._ConstructLicenseUpdateData(
            args, holder, disk, disk_ref
        )
        if license_update_data.update_via_license_code:
          disk_res.licenseCodes = license_update_data.license_codes
          disk_update_request.paths.append('licenseCodes')
        else:
          disk_res.licenses = license_update_data.licenses
          disk_update_request.paths.append('licenses')

      if _ArchitectureFlagsIncluded(args):
        # --clear-architecture sends the path with no value, which clears.
        if args.update_architecture:
          disk_res.architecture = disk_res.ArchitectureValueValuesEnum(
              args.update_architecture)
        disk_update_request.paths.append('architecture')

      if _AccessModeFlagsIncluded(args):
        disk_res.accessMode = disk_res.AccessModeValueValuesEnum(
            args.access_mode
        )
        disk_update_request.paths.append('accessMode')

      if _ProvisionedIopsIncluded(args):
        if args.provisioned_iops:
          disk_res.provisionedIops = args.provisioned_iops
          disk_update_request.paths.append('provisionedIops')

      if _ProvisionedThroughputIncluded(
          args):
        if args.provisioned_throughput:
          disk_res.provisionedThroughput = args.provisioned_throughput
          disk_update_request.paths.append('provisionedThroughput')

      if _SizeIncluded(args) and args.size:
        disk_res.sizeGb = utils.BytesToGb(args.size)
        disk_update_request.paths.append('sizeGb')

      if _GuestOsFeatureFlagsIncluded(args):
        # Prepends the new feature to the disk's existing feature list.
        if args.add_guest_os_features:
          disk_res.guestOsFeatures = [
              messages.GuestOsFeature(
                  type=messages.GuestOsFeature.TypeValueValuesEnum(
                      args.add_guest_os_features
                  )
              )
          ] + disk.guestOsFeatures
          disk_update_request.paths.append('guestOsFeatures')

      update_operation = service.Update(disk_update_request)
      update_operation_ref = holder.resources.Parse(
          update_operation.selfLink,
          collection=disk_info.GetOperationCollection())
      update_operation_poller = poller.Poller(service)
      result = waiter.WaitFor(
          update_operation_poller, update_operation_ref,
          'Updating fields of disk [{0}]'.format(disk_ref.Name()))
      if not _LabelsFlagsIncluded(args):
        return result

    # Label updates go through the separate SetLabels API.
    labels_diff = labels_util.GetAndValidateOpsFromArgs(args)

    disk = disk_info.GetDiskResource()

    set_label_req = disk_info.GetSetLabelsRequestMessage()
    labels_update = labels_diff.Apply(set_label_req.LabelsValue, disk.labels)
    request = disk_info.GetSetDiskLabelsRequestMessage(
        disk, labels_update.GetOrNone())

    if not labels_update.needs_update:
      # Nothing to change; return the current disk resource.
      return disk

    operation = service.SetLabels(request)
    operation_ref = holder.resources.Parse(
        operation.selfLink, collection=disk_info.GetOperationCollection())

    operation_poller = poller.Poller(service)
    return waiter.WaitFor(
        operation_poller, operation_ref,
        'Updating labels of disk [{0}]'.format(
            disk_ref.Name()))
@base.DefaultUniverseOnly
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class UpdateBeta(Update):
  r"""Update a Compute Engine persistent disk."""

  # Populated lazily; kept for parity with the GA `Update` class.
  DISK_ARG = None

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    # Holder bound to whichever release track this class is registered under.
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  @classmethod
  def Args(cls, parser):
    # The beta track additionally surfaces the user-license flags.
    api_messages = cls._GetApiHolder(no_http=True).client.messages
    _CommonArgs(api_messages, cls, parser, support_user_licenses=True)

  def Run(self, args):
    # Delegate to the shared implementation with beta feature toggles on.
    return self._Run(
        args, support_user_licenses=True, support_licenses=True)
@base.DefaultUniverseOnly
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateAlpha(UpdateBeta):
  r"""Update a Compute Engine persistent disk."""

  # Populated lazily; kept for parity with the GA `Update` class.
  DISK_ARG = None

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    # Holder bound to whichever release track this class is registered under.
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  @classmethod
  def Args(cls, parser):
    # Alpha enables everything beta does, plus adding guest OS features.
    api_messages = cls._GetApiHolder(no_http=True).client.messages
    _CommonArgs(
        api_messages,
        cls,
        parser,
        support_user_licenses=True,
        support_licenses=True,
        support_add_guest_os_features=True,
    )

  def Run(self, args):
    # Delegate to the shared implementation with alpha feature toggles on.
    return self._Run(args, support_user_licenses=True, support_licenses=True)
# Every release track of this command shares the same detailed help text.
for _update_cls in (Update, UpdateBeta, UpdateAlpha):
  _update_cls.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,86 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for updating the KMS key of a persistent disk."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
# Help text rendered by calliope; `{command}` is substituted with the full
# command path, and `*...*` (no inner spaces) renders as emphasis.
DETAILED_HELP = {
    'brief': 'Rotate the KMS key of a persistent disk to the primary version.',
    'DESCRIPTION': """
        *{command}* updates the KMS key of a Compute Engine persistent disk
        by rotating it to the primary version of the key.
        """,
    'EXAMPLES': """
        To rotate the KMS key of a disk named example-disk-1 to the primary version, run:

          $ {command} example-disk-1 --zone us-central1-a
        """,
}
def _CommonArgs(parser):
  """Register the disk positional argument shared by all release tracks."""
  disk_resource_arg = disks_flags.MakeDiskArg(plural=False)
  disk_resource_arg.AddArgument(parser)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
@base.UniverseCompatible
class UpdateKmsKey(base.Command):
  """Rotate the KMS key of a persistent disk to the primary version."""

  @classmethod
  def Args(cls, parser):
    """Adds the command-line arguments for this command."""
    _CommonArgs(parser)

  @classmethod
  def _GetApiHolder(cls, no_http=False):
    """Returns a ComputeApiHolder for this command's release track."""
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def Run(self, args):
    return self._Run(args)

  def _Run(self, args):
    """Issues the UpdateKmsKey request for a zonal or regional disk.

    Args:
      args: The arguments that were provided to this command invocation.

    Returns:
      The responses from the UpdateKmsKey API call.

    Raises:
      ValueError: If the disk reference resolves to an unexpected collection.
    """
    compute_holder = self._GetApiHolder()
    client = compute_holder.client
    messages = client.messages
    resources = compute_holder.resources

    disk_ref = disks_flags.MakeDiskArg(plural=False).ResolveAsResource(
        args, resources
    )

    collection = disk_ref.Collection()
    if collection == 'compute.disks':
      service = client.apitools_client.disks
      request = messages.ComputeDisksUpdateKmsKeyRequest(
          project=disk_ref.project,
          zone=disk_ref.zone,
          disk=disk_ref.Name(),
      )
    elif collection == 'compute.regionDisks':
      service = client.apitools_client.regionDisks
      request = messages.ComputeRegionDisksUpdateKmsKeyRequest(
          project=disk_ref.project,
          region=disk_ref.region,
          disk=disk_ref.Name(),
      )
    else:
      # Previously this case fell through and returned None silently;
      # fail loudly instead so a bad reference is diagnosable.
      raise ValueError(
          'Unexpected disk collection: {0}'.format(collection))
    return client.MakeRequests([(service, 'UpdateKmsKey', request)])
UpdateKmsKey.detailed_help = DETAILED_HELP

View File

@@ -0,0 +1,149 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for waiting for the asynchronous replication of a disk-pair to catch up."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.disks import flags as disks_flags
DETAILED_HELP = {
'brief': (
'Provides the operation id for the asynchronous replication of a'
' Compute Engine persistent disk-pair that can be used to wait for the'
' replication to catch up.'
),
'DESCRIPTION': """\
*{command}* fetches the operation id that can be used to track the
status of async replication for a Compute Engine persistent disk-pair.
The operation id can be used to wait for the replication to catch up.
This command can be invoked only on the primary disk.
""",
'EXAMPLES': """\
Note: The max-wait-duration is optional. If not specified, the default
value would be picked up from the API.
Wait for replication catchup can only be invoked on the primary scope.
To wait for the replication catchup for the primary disk 'my-disk-1' in
zone 'us-east1-a' under project 'my-project1' to catch up with the
secondary disk 'my-disk-2' in zone 'us-west1-a' in any project, the
following command can be used (with custom wait duration of 20s):
$ {command} my-disk-1 --zone=us-east1-a --project=my-project1 --max-wait-duration=20s
""",
}
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
@base.DefaultUniverseOnly
class WaitForReplicationCatchUp(base.Command):
  """Wait for the Asynchronous Replication of Compute Engine persistent disk-pair to complete."""

  # Shared resource-argument instance used by both Args() and Run().
  disks_arg = disks_flags.MakeDiskArg(plural=False)
  detailed_help = DETAILED_HELP

  @classmethod
  def Args(cls, parser) -> None:
    """Set the arguments for this command.

    Args:
      parser: An argument parser object that is used to add arguments that can
        be specified on the command line.
    """
    cls.disks_arg.AddArgument(parser)
    parser.add_argument(
        '--max-wait-duration',
        help='Maximum duration to wait for the replication catchup.',
    )

  @classmethod
  def _GetApiHolder(
      cls, no_http: bool = False
  ) -> base_classes.ComputeApiHolder:
    """Get the compute client API holder instance.

    Args:
      no_http: Whether to disable http.

    Returns:
      A ComputeApiHolder object.
    """
    return base_classes.ComputeApiHolder(cls.ReleaseTrack(), no_http)

  def Run(self, args):
    """Issues the WaitForReplicationCatchUp request for the given disk.

    Validates the arguments passed to the command and triggers the API call.
    Note: the original annotated this as returning None, but the API
    responses from MakeRequests are returned to the caller.

    Args:
      args: The arguments that were provided to this command invocation.

    Returns:
      The responses from the WaitForReplicationCatchUp API call.
    """
    compute_holder = self._GetApiHolder()
    client = compute_holder.client
    messages = client.messages
    disk_ref = self.disks_arg.ResolveAsResource(
        args,
        compute_holder.resources,
        scope_lister=flags.GetDefaultScopeLister(client),
    )

    # --max-wait-duration is optional; when omitted the nested request
    # message is left out entirely so the API default applies.
    max_wait = (
        args.max_wait_duration
        if args.IsSpecified('max_wait_duration')
        else None
    )

    if disk_ref.Collection() == 'compute.disks':
      catchup_request = None
      if max_wait is not None:
        catchup_request = messages.WaitForReplicationCatchUpRequest(
            maxWaitDuration=max_wait
        )
      request = messages.ComputeDisksWaitForReplicationCatchUpRequest(
          disk=disk_ref.Name(),
          project=disk_ref.project,
          zone=disk_ref.zone,
          waitForReplicationCatchUpRequest=catchup_request,
      )
      call = (client.apitools_client.disks, 'WaitForReplicationCatchUp', request)
    else:
      catchup_request = None
      if max_wait is not None:
        catchup_request = messages.RegionWaitForReplicationCatchUpRequest(
            maxWaitDuration=max_wait
        )
      request = messages.ComputeRegionDisksWaitForReplicationCatchUpRequest(
          disk=disk_ref.Name(),
          project=disk_ref.project,
          region=disk_ref.region,
          regionWaitForReplicationCatchUpRequest=catchup_request,
      )
      call = (
          client.apitools_client.regionDisks,
          'WaitForReplicationCatchUp',
          request,
      )
    return client.MakeRequests([call])