feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes related to build settings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from googlecloudsdk.command_lib.code import dataobject
from googlecloudsdk.core import exceptions
class InvalidLocationError(exceptions.Error):
  """Raised when a file lives outside its required location."""
class DockerfileBuilder(dataobject.DataObject):
  """Data for a request to build with an existing Dockerfile.

  Attributes:
    dockerfile: Path to the Dockerfile, either relative to the build context
      directory (Settings.context) or absolute. Note that Settings.context is
      determined later than this instance is made, so it has to be passed into
      the methods below.
  """

  NAMES = ('dockerfile',)

  def DockerfileAbsPath(self, context):
    """Returns the absolute path of the Dockerfile resolved against context."""
    return os.path.abspath(os.path.join(context, self.dockerfile))

  def DockerfileRelPath(self, context):
    """Returns the Dockerfile path relative to the context directory."""
    return os.path.relpath(self.DockerfileAbsPath(context), context)

  def Validate(self, context):
    """Checks that the Dockerfile exists inside the build context.

    Args:
      context: Build context directory (absolute path).

    Raises:
      InvalidLocationError: If the Dockerfile is outside the build context
        directory or does not exist.
    """
    complete_path = self.DockerfileAbsPath(context)
    # Compare against the context path with a trailing separator. A bare
    # os.path.commonprefix([context, complete_path]) check compares
    # character-by-character, so a sibling directory sharing a name prefix
    # (e.g. context '/ctx' vs. Dockerfile under '/ctx2/') would incorrectly
    # pass the containment test.
    if not complete_path.startswith(os.path.join(context, '')):
      raise InvalidLocationError(
          'Invalid Dockerfile path. Dockerfile must be located in the build '
          'context directory.\n'
          'Dockerfile: {0}\n'
          'Build Context Directory: {1}'.format(complete_path, context))
    if not os.path.exists(complete_path):
      raise InvalidLocationError(complete_path + ' does not exist.')
class BuildpackBuilder(dataobject.DataObject):
  """Settings for building with a buildpack.

  Attributes:
    builder: Name of the builder.
    trust: True if the lifecycle should trust this builder.
    devmode: Build with devmode.
  """

  # Field names consumed by the dataobject.DataObject metaclass, which
  # generates a read-only tuple-accessor property for each name.
  NAMES = ('builder', 'trust', 'devmode')

View File

@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for working with artifact registry in gcloud code dev.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.run import artifact_registry as run_ar
def CreateIfNeeded(ar_repo):
  """Creates the Artifact Registry repository unless it already exists.

  Args:
    ar_repo: The Artifact Registry Docker repository resource.
  """
  needs_creation = run_ar.ShouldCreateRepository(ar_repo)
  if needs_creation:
    run_ar.CreateRepository(ar_repo)

View File

@@ -0,0 +1,310 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for configuring cloud-based development."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import os
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import messages as messages_util
from googlecloudsdk.command_lib.artifacts import docker_util
from googlecloudsdk.command_lib.code import builders
from googlecloudsdk.command_lib.code import common
from googlecloudsdk.command_lib.code import dataobject
from googlecloudsdk.command_lib.code import yaml_helper
from googlecloudsdk.command_lib.run import exceptions
from googlecloudsdk.command_lib.run import flags as run_flags
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import yaml
from googlecloudsdk.core.util import files
# Cloud Run v1 protobuf message classes, used throughout this module to build
# Service resources.
RUN_MESSAGES_MODULE = apis.GetMessagesModule('run', 'v1')

# Buildpack builder image used when no --builder is provided.
_DEFAULT_BUILDPACK_BUILDER = 'gcr.io/buildpacks/builder'
class ImageFormatError(core_exceptions.Error):
  """An error thrown when the provided image has a tag or hash."""

  def __init__(self, image, fmt):
    # fmt is the offending component name: 'digest' or 'tag'.
    message = (
        'Image {} has a {} included. To use locally built image, do '
        'not include digest or tag'
    ).format(image, fmt)
    super(ImageFormatError, self).__init__(message=message)
def _IsGcpBaseBuilder(bldr):
  """Return true if the builder is the GCP base builder.

  Args:
    bldr: Name of the builder.

  Returns:
    True if the builder is the GCP base builder.
  """
  return _DEFAULT_BUILDPACK_BUILDER == bldr
def _BuilderFromArg(builder_arg):
  """Builds BuildpackBuilder settings from a --builder argument value.

  The GCP base builder is implicitly trusted by the lifecycle.
  """
  return builders.BuildpackBuilder(
      builder=builder_arg,
      trust=_IsGcpBaseBuilder(builder_arg),
      devmode=False,
  )
class Settings(dataobject.DataObject):
  """Settings for a Cloud dev deployment.

  Attributes:
    image: image to deploy from local sources
    project: the gcp project to deploy to
    region: the Cloud Run region to deploy to
    service_name: the name of the Cloud Run service to deploy
    builder: the build configuration. Docker and Buildpacks are supported.
    context: the folder in which the build will be executed
    service: the base service to build off of. Using this allows any field not
      explicitly supported by code dev --cloud to still propagate
    cpu: the amount of CPU to be used
    memory: the amount of memory to be specified.
    ar_repo: the Artifact Registry Docker repo to deploy to.
    local_port: the local port to forward the request for.
    service_account: the service identity to use for the deployed service.
  """

  # Field names consumed by the dataobject metaclass to generate accessors.
  NAMES = [
      'image',
      'project',
      'region',
      'builder',
      'service_name',
      'service',
      'context',
      'cpu',
      'memory',
      'ar_repo',
      'local_port',
      'service_account',
  ]

  @classmethod
  def Defaults(cls):
    """Returns baseline settings derived from the current working directory."""
    dir_name = os.path.basename(files.GetCWD())
    # Service names may not include space, _ and upper case characters.
    service_name = dir_name.replace('_', '-').replace(' ', '-').lower()
    service = RUN_MESSAGES_MODULE.Service(
        apiVersion='serving.knative.dev/v1', kind='Service'
    )
    dockerfile_arg_default = 'Dockerfile'
    bldr = builders.DockerfileBuilder(dockerfile=dockerfile_arg_default)
    return cls(
        service_name=service_name,
        service=service,
        builder=bldr,
        context=os.path.abspath(files.GetCWD()),
    )

  def WithServiceYaml(self, yaml_path):
    """Use a pre-written service yaml for deployment.

    Args:
      yaml_path: Path to a knative Service yaml file.

    Returns:
      A new Settings with service, image, resource limits, and service name
      taken from the yaml where present.
    """
    # TODO(b/256683239): this is partially
    # copied from surface/run/services/replace.py and
    # should be moved somewhere common to avoid duplication
    service_dict = yaml.load_path(yaml_path)
    # Clear the status to make migration from k8s deployments easier.
    # Since a Deployment status will have several fields that Cloud Run doesn't
    # support, trying to convert it to a message as-is will fail even though
    # status is ignored by the server.
    if 'status' in service_dict:
      del service_dict['status']
    # For cases where YAML contains the project number as metadata.namespace,
    # preemptively convert them to a string to avoid validation failures.
    metadata = yaml_helper.GetOrCreate(service_dict, ['metadata'])
    namespace = metadata.get('namespace', None)
    if namespace is not None and not isinstance(namespace, str):
      service_dict['metadata']['namespace'] = str(namespace)
    try:
      service = messages_util.DictToMessageWithErrorCheck(
          service_dict, RUN_MESSAGES_MODULE.Service
      )
    except messages_util.ScalarTypeMismatchError as e:
      # NOTE(review): this relies on MaybeRaiseCustomFieldMismatch raising;
      # if it returns instead, `service` below would be unbound — confirm.
      exceptions.MaybeRaiseCustomFieldMismatch(
          e,
          help_text=(
              'Please make sure that the YAML file matches the Knative '
              'service definition spec in https://kubernetes.io/docs/'
              'reference/kubernetes-api/service-resources/service-v1/'
              '#Service.'
          ),
      )
    if self.project:
      service.metadata.namespace = str(self.project)
    replacements = {'service': service}
    # assume first image is the one we're replacing.
    container = service.spec.template.spec.containers[0]
    replacements['image'] = container.image
    if container.resources and container.resources.limits:
      for limit in container.resources.limits.additionalProperties:
        replacements[limit.key] = limit.value
    if service.metadata.name:
      replacements['service_name'] = service.metadata.name
    return self.replace(**replacements)

  def WithArgs(self, args):
    """Update parameters based on arguments.

    Args:
      args: Parsed argparse namespace.

    Returns:
      A new Settings with project/region, per-flag overrides, context, and
      builder resolved from the args.
    """
    project = properties.VALUES.core.project.Get()
    region = run_flags.GetRegion(args, prompt=True)
    replacements = {'project': project, 'region': region}
    # Simple scalar overrides taken verbatim from the flags when set.
    for override_arg in [
        'local_port',
        'memory',
        'cpu',
        'image',
        'service_name',
        'service_account',
    ]:
      if args.IsKnownAndSpecified(override_arg):
        replacements[override_arg] = getattr(args, override_arg)
    context = self.context
    if args.source:
      context = os.path.abspath(args.source)
      replacements['context'] = context
    if args.IsKnownAndSpecified('builder'):
      # Explicit --builder wins over any Dockerfile.
      replacements['builder'] = _BuilderFromArg(args.builder)
    elif args.IsKnownAndSpecified('dockerfile'):
      replacements['builder'] = builders.DockerfileBuilder(
          dockerfile=args.dockerfile
      )
    else:
      # No explicit build flag: keep the default Dockerfile builder if it
      # validates, otherwise fall back to the GCP buildpacks builder.
      if isinstance(self.builder, builders.DockerfileBuilder):
        try:
          replacements['builder'] = self.builder
          replacements['builder'].Validate(context)
        except builders.InvalidLocationError:
          log.status.Print(
              'No Dockerfile detected. '
              'Using GCP buildpacks to build the container'
          )
          replacements['builder'] = _BuilderFromArg(_DEFAULT_BUILDPACK_BUILDER)
    return self.replace(**replacements)

  def Build(self):
    """Resolves the image name and Artifact Registry repo if not yet set."""
    replacements = {}
    if not self.image:
      ar_repo = docker_util.DockerRepo(
          project_id=self.project,
          location_id=self.region,
          repo_id='cloud-run-source-deploy',
      )
      replacements['ar_repo'] = ar_repo
      replacements['image'] = _DefaultImageName(ar_repo, self.service_name)
    return self.replace(**replacements)
def AssembleSettings(args):
  """Builds the final Settings for a deploy from parsed CLI arguments.

  Starts from defaults, layers on an existing service yaml when one can be
  found, applies command-line overrides, then resolves image/repo values.

  Args:
    args: Parsed argparse namespace.

  Returns:
    A fully assembled Settings instance.
  """
  context_dir = getattr(args, 'source', None) or os.path.curdir
  yaml_file = common.ChooseExistingServiceYaml(
      context_dir, getattr(args, 'service_config', None))
  settings = Settings.Defaults()
  if yaml_file:
    settings = settings.WithServiceYaml(yaml_file)
  return settings.WithArgs(args).Build()
def GenerateService(settings):
  """Generate a service configuration from a Cloud Settings configuration.

  Args:
    settings: The assembled deployment Settings.

  Returns:
    A knative Service message populated from the settings.
  """
  svc = copy.deepcopy(settings.service)
  meta = svc.metadata or RUN_MESSAGES_MODULE.ObjectMeta()
  meta.name = settings.service_name
  meta.namespace = str(settings.project)
  svc.metadata = meta
  # Make sure spec/template/spec/containers[0] all exist before writing.
  _BuildSpecTemplate(svc)
  revision_spec = svc.spec.template.spec
  if settings.service_account:
    revision_spec.serviceAccountName = settings.service_account
  primary_container = revision_spec.containers[0]
  primary_container.image = settings.image
  _FillContainerRequirements(primary_container, settings)
  return svc
def _BuildSpecTemplate(service):
  """Fills in any missing spec/template/containers layers on the service."""
  service.spec = service.spec or RUN_MESSAGES_MODULE.ServiceSpec()
  service.spec.template = (
      service.spec.template or RUN_MESSAGES_MODULE.RevisionTemplate())
  service.spec.template.spec = (
      service.spec.template.spec or RUN_MESSAGES_MODULE.RevisionSpec())
  if not service.spec.template.spec.containers:
    service.spec.template.spec.containers = [RUN_MESSAGES_MODULE.Container()]
def _DefaultImageName(ar_repo, service_name):
return '{repo}/{service}'.format(
repo=ar_repo.GetDockerString(), service=service_name
)
def _FillContainerRequirements(container, settings):
  """Set the container CPU and memory limits based on settings.

  Mutates `container` in place: existing 'cpu'/'memory' limit entries are
  overwritten with the settings values, and missing entries are appended.

  Args:
    container: Container message to update.
    settings: Settings holding optional cpu and memory values.
  """
  found = set()
  resources = container.resources or RUN_MESSAGES_MODULE.ResourceRequirements()
  limits = (
      resources.limits or RUN_MESSAGES_MODULE.ResourceRequirements.LimitsValue()
  )
  # Overwrite limits that are already present, tracking which keys exist.
  for limit in limits.additionalProperties:
    if limit.key == 'cpu' and settings.cpu:
      limit.value = settings.cpu
    elif limit.key == 'memory' and settings.memory:
      limit.value = settings.memory
    found.add(limit.key)
  # if requirements weren't already specified add them
  if 'cpu' not in found and settings.cpu:
    cpu = (
        RUN_MESSAGES_MODULE.ResourceRequirements.LimitsValue.AdditionalProperty(
            key='cpu', value=str(settings.cpu)
        )
    )
    limits.additionalProperties.append(cpu)
  if 'memory' not in found and settings.memory:
    mem = (
        RUN_MESSAGES_MODULE.ResourceRequirements.LimitsValue.AdditionalProperty(
            key='memory', value=str(settings.memory)
        )
    )
    limits.additionalProperties.append(mem)
  resources.limits = limits
  container.resources = resources
def ValidateSettings(settings):
  """Checks that settings.image is a plain image name (no digest or tag).

  Locally built images must be referenced without '@digest' or ':tag' so the
  freshly built image is used.

  Args:
    settings: Settings whose image field is validated.

  Raises:
    ImageFormatError: If the image contains a digest or a tag.
  """
  image = settings.image
  if '@' in image:
    raise ImageFormatError(image, 'digest')
  # Only a colon after the final '/' denotes a tag; a colon before it is a
  # registry host port (e.g. 'localhost:5000/app'), which is a valid plain
  # image reference and must not be rejected.
  if ':' in image.rpartition('/')[2]:
    raise ImageFormatError(image, 'tag')

View File

@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating cloud-based dev loop configs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from apitools.base.py import encoding
from googlecloudsdk.command_lib.code import builders
from googlecloudsdk.command_lib.code import yaml_helper
from googlecloudsdk.command_lib.code.cloud import cloud
from googlecloudsdk.core import yaml
import six
_SKAFFOLD_TEMPLATE = """
apiVersion: skaffold/v3alpha1
kind: Config
build:
artifacts: []
local:
push: true
manifests:
rawYaml: []
deploy:
cloudrun: {}
"""
class CloudRuntimeFiles(object):
  """Generates the development environment files for a project."""

  def __init__(self, settings):
    self._settings = settings

  def KubernetesConfig(self):
    """Renders the knative Service for the settings as a yaml string."""
    service = cloud.GenerateService(self._settings)
    return yaml.dump(encoding.MessageToDict(service))

  def SkaffoldConfig(self, service_file_path):
    """Generate the Skaffold yaml for the deploy."""
    config = yaml.load(_SKAFFOLD_TEMPLATE)
    yaml_helper.GetOrCreate(
        config, ('manifests', 'rawYaml'), constructor=list).append(
            service_file_path)

    artifact = {'image': self._settings.image}
    builder = self._settings.builder
    if isinstance(builder, builders.BuildpackBuilder):
      artifact['buildpacks'] = {
          'builder': builder.builder,
      }
      # sync is not currently supported for Cloud Run
      artifact['sync'] = {'auto': False}
    else:
      # Macos needs a relative path or else
      # e2e.surface.code.dev_mac_test.MacE2ETest.testNamespace fails.
      rel_path = builder.DockerfileRelPath(self._settings.context)
      artifact['docker'] = {
          'dockerfile': six.ensure_text(rel_path.encode('unicode_escape'))
      }
    yaml_helper.GetOrCreate(
        config, ('build', 'artifacts'), constructor=list).append(artifact)

    config['deploy']['cloudrun']['projectid'] = self._settings.project
    config['deploy']['cloudrun']['region'] = self._settings.region

    if self._settings.local_port:
      config['portForward'] = [{
          'resourceType': 'service',
          'resourceName': self._settings.service_name,
          'port': 8080,
          'localPort': self._settings.local_port,
      }]
    return yaml.dump(config)

View File

@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that directly interact with Cloud Run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import platforms
from googlecloudsdk.command_lib.run import serverless_operations
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.console import console_io
class ServiceAlreadyExistsError(exceptions.Error):
  """Error thrown if the service already exists and overwrite denied."""
class _ServiceResource:
def __init__(self, project, service_name):
self.project = project
self.service_name = service_name
def RelativeName(self):
return 'namespaces/{}/services/{}'.format(self.project, self.service_name)
def ServiceExists(args, project, service_name, region, release_track):
  """Check to see if the service with the given name already exists.

  Args:
    args: Parsed CLI arguments, used to build the connection context.
    project: Project the service would live in.
    service_name: Name of the Cloud Run service.
    region: Cloud Run region to check.
    release_track: calliope release track for the command.

  Returns:
    The result of GetService for the named service — presumably falsy when
    the service does not exist (callers use it as a boolean); confirm
    against serverless_operations.
  """
  conn_context = connection_context.GetConnectionContext(
      args,
      release_track=release_track,
      platform=platforms.PLATFORM_MANAGED,
      region_label=region,
  )
  resource = _ServiceResource(project, service_name)
  with serverless_operations.Connect(conn_context) as client:
    return client.GetService(resource)
def PromptToOverwriteCloud(args, settings, release_track):
  """If the service already exists, prompt the user before overwriting.

  Args:
    args: Parsed CLI arguments.
    settings: Assembled deploy settings (project, service_name, region).
    release_track: calliope release track for the command.

  Raises:
    ServiceAlreadyExistsError: If the service exists and the user either
      cannot be prompted or declines to overwrite it.
  """
  if not ServiceExists(
      args,
      settings.project,
      settings.service_name,
      settings.region,
      release_track,
  ):
    return
  # Fixed user-visible typo: 'Serivce' -> 'Service'.
  if console_io.CanPrompt() and console_io.PromptContinue(
      message='Service {} already exists in project {}'.format(
          settings.service_name, settings.project
      ),
      prompt_string='Do you want to overwrite it?',
  ):
    return
  raise ServiceAlreadyExistsError('Service already exists.')

View File

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities shared between local and cloud code dev implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import os
from googlecloudsdk.core import exceptions
def ChooseExistingServiceYaml(context, arg):
  """Rules for choosing a service.yaml or app.yaml file.

  The rules are meant to discover common filename variants like
  'service.dev.yml' or 'staging-service.yaml'.

  Args:
    context: Build context dir. Could be '.'.
    arg: User's path (relative to context or absolute) to a yaml file with
      service config, or None. The service config could be a knative Service
      description or an appengine app.yaml.

  Returns:
    Absolute path to a yaml file, or None.

  Raises:
    exceptions.Error: If `arg` was provided but no file exists there.
  """
  if arg is not None:
    chosen = os.path.abspath(os.path.join(context, arg))
    if not os.path.exists(chosen):
      raise exceptions.Error("File '{}' not found.".format(chosen))
    return chosen
  # Most specific pattern first; within a pattern, the lexicographically
  # first match wins.
  patterns = (
      '*service.dev.yaml',
      '*service.dev.yml',
      '*service.yaml',
      '*service.yml',
  )
  for pattern in patterns:
    candidates = sorted(glob.glob(os.path.join(context, pattern)))
    if candidates:
      return candidates[0]
  return None

View File

@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Alternate tempfile.NamedTemporaryFile that's easier to use on Windows.
(Windows locks files from simultaneous writes/reads).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import os
import tempfile
class _WindowsNamedTempFile(object):
"""Wrapper around named temporary file for Windows.
NamedTemporaryFiles cannot be read by other processes on windows because
only one process can open a file at a time. This file will be unlinked
at the end of the context.
"""
def __init__(self, *args, **kwargs):
self._requested_delete = kwargs.get('delete', True)
self._args = args
self._kwargs = kwargs.copy()
self._kwargs['delete'] = False
self._f = None
def __enter__(self):
self._f = tempfile.NamedTemporaryFile(*self._args, **self._kwargs)
return self._f
def __exit__(self, exc_type, exc_value, tb):
if self._requested_delete and self._f:
try:
os.unlink(self._f.name)
except OSError:
# File already unlinked. No need to clean up.
pass
@contextlib.contextmanager
def NamedTempFile(contents, prefix='tmp', suffix='', delete=True):
  """Write a named temporary with given contents.

  Args:
    contents: (str) File contents.
    prefix: (str) File base name prefix.
    suffix: (str) Filename suffix.
    delete: (bool) Delete file on __exit__.

  Yields:
    The temporary file object.
  """
  kwargs = dict(mode='w+t', prefix=prefix, suffix=suffix, delete=delete)
  if os.name != 'nt':
    with tempfile.NamedTemporaryFile(**kwargs) as f:
      f.write(contents)
      f.flush()
      yield f
  else:
    # On Windows the handle must be closed before the file can be reopened
    # by name; _WindowsNamedTempFile handles the unlink on exit.
    with _WindowsNamedTempFile(**kwargs) as f:
      f.write(contents)
      f.close()
      yield f

View File

@@ -0,0 +1,62 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple immutable data object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
class _DataType(type):
"""Dumb immutable data type."""
# TODO(b/154131605): This a type that is an immutable data object. Can't use
# attrs because it's not part of googlecloudsdk and can't use namedtuple
# because it's not efficient on python 2 (it generates code, which needs
# to be parsed and interpretted). Remove this code when we get support
# for attrs or another dumb data object in gcloud.
def __new__(cls, classname, bases, class_dict):
class_dict = class_dict.copy()
names = class_dict.get('NAMES', tuple())
class_dict.update(
(name, cls._CreateAccessor(i)) for i, name in enumerate(names))
return super(_DataType, cls).__new__(cls, classname, bases, class_dict)
@staticmethod
def _CreateAccessor(i):
"""Create an tuple accessor property."""
return property(lambda tpl: tpl[i]) # pylint: disable=unused-variable
class DataObject(six.with_metaclass(_DataType, tuple)):
  """Parent class of dumb data object."""

  def __new__(cls, **kwargs):
    names = getattr(cls, 'NAMES', tuple())
    unknown = set(kwargs) - set(names)
    if unknown:
      raise ValueError('Invalid names: ' + repr(unknown))
    # Missing fields default to None; order follows NAMES.
    values = tuple(kwargs.get(name) for name in names)
    return super(DataObject, cls).__new__(cls, values)

  def replace(self, **changes):  # pylint: disable=invalid-name
    # Mirrors dataclasses.replace:
    # https://docs.python.org/3/library/dataclasses.html#dataclasses.replace
    merged = {n: changes.get(n, getattr(self, n, None)) for n in self.NAMES}
    return self.__class__(**merged)

View File

@@ -0,0 +1,305 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for serverless local development setup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.util.args import map_util
from googlecloudsdk.core import exceptions
import six
class FlagDef(object):
  """Object that holds a flag definition and adds it to a parser.

  Equality and hashing are by flag name only, so a set of FlagDef objects
  de-duplicates repeated definitions of the same flag.
  """

  def __init__(self, name, **kwargs):
    self.name = name
    self.kwargs = kwargs

  def __eq__(self, other):
    return self.name == other.name

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(self.name)

  def ConfigureParser(self, parser):
    """Registers this flag on the parser via add_argument."""
    parser.add_argument(self.name, **self.kwargs)
class FlagDefs(object):
  """Base type for all flag builders."""

  def __init__(self):
    # Set semantics drop duplicate flag/group registrations.
    self._operations = set()

  def _AddFlag(self, name, **kwargs):
    self._AddOperation(FlagDef(name, **kwargs))

  def _AddOperation(self, operation):
    self._operations.add(operation)

  def ConfigureParser(self, parser):
    """Applies every recorded operation to the parser."""
    for op in self._operations:
      op.ConfigureParser(parser)
class MutuallyExclusiveGroupDef(FlagDefs):
  """Flag builder where all flags are added to a mutually exclusive group."""

  def ConfigureParser(self, parser):
    """Adds every recorded flag to one optional mutually exclusive group."""
    exclusive_group = parser.add_mutually_exclusive_group(required=False)
    for operation in self._operations:
      operation.ConfigureParser(exclusive_group)
class BuilderFlags(MutuallyExclusiveGroupDef):
  """Flags for builder settings."""

  def AddDockerfile(self):
    """Adds --dockerfile, defaulting to 'Dockerfile'."""
    self._AddFlag(
        '--dockerfile',
        help='Dockerfile for the service image.',
        default='Dockerfile')

  def AddBuilder(self):
    """Adds --builder for CNCF buildpack builds."""
    self._AddFlag(
        '--builder',
        help='Build with a given Cloud Native Computing Foundation Buildpack '
        'builder.')
class CredentialFlags(MutuallyExclusiveGroupDef):
  """Mutually exclusive flags choosing how to authenticate to GCP services."""

  def AddServiceAccount(self):
    """Adds --service-account (authenticate with a service account key)."""
    self._AddFlag(
        '--service-account',
        help='When connecting to Google Cloud Platform services, use a service '
        'account key.')

  def AddApplicationDefaultCredential(self):
    """Adds --application-default-credential (use ADC; defaults to False)."""
    self._AddFlag(
        '--application-default-credential',
        action='store_true',
        default=False,
        help='When connecting to Google Cloud Platform services, use the '
        'application default credential.')
class EnvVarFlags(MutuallyExclusiveGroupDef):
  """Environment variable flags."""

  def AddEnvVars(self):
    """Adds --env-vars, a repeatable KEY=VALUE dict flag."""
    self._AddFlag(
        '--env-vars',
        help='List of key-value pairs to set as environment variables.',
        metavar='KEY=VALUE',
        type=arg_parsers.ArgDict(
            key_type=six.text_type, value_type=six.text_type),
        action=arg_parsers.UpdateAction)

  def AddEnvVarsFile(self):
    """Adds --env-vars-file, a YAML file of environment variables."""
    self._AddFlag(
        '--env-vars-file',
        help='Path to a local YAML file with definitions for all environment '
        'variables.',
        metavar='FILE_PATH',
        type=map_util.ArgDictFile(
            key_type=six.text_type, value_type=six.text_type))
class CommonFlags(FlagDefs):
  """Flags that are common between the gcloud code dev commands."""

  def __init__(self):
    super(CommonFlags, self).__init__()
    # Cache of group-builder instances keyed by class, so each group is
    # created (and registered via _AddOperation) at most once.
    self._group_cache = {}
  def AddLocalPort(self):
    """Adds --local-port for the forwarded service connection."""
    self._AddFlag(
        '--local-port',
        type=int,
        help='Local port to which the service connection is forwarded. If this '
        'flag is not set, then a random port is chosen.')

  def AddSource(self):
    """Adds --source, the directory containing the source to build."""
    self._AddFlag(
        '--source',
        help='The directory containing the source to build. '
        'If not specified, the current directory is used.')

  def AddServiceName(self):
    """Adds the optional --service-name flag."""
    self._AddFlag('--service-name', required=False, help='Name of the service.')

  def AddImage(self):
    """Adds the optional --image flag naming the built image."""
    self._AddFlag('--image', required=False, help='Name for the built image.')
  def AddMemory(self):
    """Adds --memory, the container memory limit (BinarySize in bytes)."""
    self._AddFlag(
        '--memory',
        type=arg_parsers.BinarySize(default_unit='B'),
        help='Container memory limit. Limit is expressed either as an integer '
        'representing the number of bytes or an integer followed by a unit '
        'suffix. Valid unit suffixes are "B", "KB", "MB", "GB", "TB", "KiB", '
        '"MiB", "GiB", "TiB", or "PiB".')

  def AddCpu(self):
    """Adds --cpu, the container CPU limit (non-negative float)."""
    self._AddFlag(
        '--cpu',
        type=arg_parsers.BoundedFloat(lower_bound=0.0),
        help='Container CPU limit. Limit is expressed as a number of CPUs. '
        'Fractional CPU limits are allowed (e.g. 1.5).')
  def AddCloudsqlInstances(self):
    """Adds --cloudsql-instances, a list of instance connection strings."""
    self._AddFlag(
        '--cloudsql-instances',
        type=arg_parsers.ArgList(),
        metavar='CLOUDSQL_INSTANCE',
        help='Cloud SQL instance connection strings. Must be in the form '
        '<project>:<region>:<instance>.')

  def AddReadinessProbe(self):
    """Adds the hidden, default-off --readiness-probe flag."""
    # This flag launches the readiness probe feature. It is currently
    # default off. It will be moved to default on when ready and then
    # the feature will be always on.
    self._AddFlag(
        '--readiness-probe',
        default=False,
        action='store_true',
        hidden=True,
        help='Add a readiness probe to the list of containers that delays '
        'deployment stabilization until the application app has bound to $PORT')
def AddServiceConfigPositionalArg(self, include_app_engine_docs=False):
"""_AddFlag for service_config, which has two possible help strings.
Args:
include_app_engine_docs: Add paragraph that says app.yaml is allowed.
"""
help_text = (
'service.yaml filename override. Defaults to the first file '
'matching ```*service.dev.yaml``` then ```*service.yaml```, if any '
'exist. This path is relative to the --source dir.')
if include_app_engine_docs:
help_text += (
'\n'
'An App Engine config path (typically ```app.yaml```) may also be '
'provided here, and we will build with a Cloud Native Computing '
'Foundation Buildpack builder selected from '
'gcr.io/gae-runtimes/buildpacks, according to the App Engine '
'```runtime``` specified in app.yaml.')
self._AddFlag(
'service_config',
metavar='SERVICE_CONFIG',
nargs='?',
help=help_text,
)
def AddAllowSecretManagerFlag(self):
self._AddFlag(
'--allow-secret-manager',
action=arg_parsers.StoreTrueFalseAction,
help=('Suppress warnings if secrets need to be pulled from secret '
'manager'))
def AddSecrets(self):
self._AddFlag(
'--secrets',
metavar='KEY=VALUE',
action=arg_parsers.UpdateAction,
type=arg_parsers.ArgDict(
key_type=six.text_type, value_type=six.text_type),
help='List of key-value pairs to set as secrets.')
def AddCloud(self):
self._AddFlag(
'--cloud',
default=False,
action='store_true',
hidden=True,
help='deploy code to Cloud Run')
self._AddFlag(
'--region', help='region to deploy the dev service', hidden=True)
def _GetGroup(self, klass):
if klass not in self._group_cache:
group = klass()
self._group_cache[klass] = group
self._AddOperation(group)
return self._group_cache[klass]
def CredentialsGroup(self):
return self._GetGroup(CredentialFlags)
def EnvVarsGroup(self):
return self._GetGroup(EnvVarFlags)
def BuildersGroup(self):
return self._GetGroup(BuilderFlags)
def AddAlphaAndBetaFlags(self, release_track):
self._AddBetaFlags()
if release_track == base.ReleaseTrack.ALPHA:
self._AddAlphaFlags()
# See AssembleSettings for where we decide how to parse service_config args
# based on release track.
appyaml_support = release_track == base.ReleaseTrack.ALPHA
self.AddServiceConfigPositionalArg(include_app_engine_docs=appyaml_support)
def _AddBetaFlags(self):
"""Set up flags that are for alpha and beta tracks."""
self.BuildersGroup().AddDockerfile()
self.AddSource()
self.AddLocalPort()
self.CredentialsGroup().AddServiceAccount()
self.CredentialsGroup().AddApplicationDefaultCredential()
self.AddReadinessProbe()
self.AddAllowSecretManagerFlag()
self.AddSecrets()
self.BuildersGroup().AddBuilder()
def _AddAlphaFlags(self):
"""Set up flags that are for alpha track only."""
self.AddCloudsqlInstances()
self.AddServiceName()
self.AddImage()
self.AddMemory()
self.AddCpu()
self.EnvVarsGroup().AddEnvVars()
self.EnvVarsGroup().AddEnvVarsFile()
self.AddCloud()
class InvalidFlagError(exceptions.Error):
  """Flag settings are illegal (a constraint argparse alone cannot express)."""
def Validate(namespace):
  """Validate flag requirements that cannot be handled by argparse.

  Args:
    namespace: The parsed argument namespace.

  Raises:
    InvalidFlagError: If --cloudsql-instances is set without a credential flag.
  """
  uses_cloudsql = ('cloudsql_instances' in namespace and
                   namespace.IsSpecified('cloudsql_instances'))
  if not uses_cloudsql:
    return
  has_credential = (namespace.IsSpecified('service_account') or
                    namespace.IsSpecified('application_default_credential'))
  if not has_credential:
    raise InvalidFlagError('--cloudsql-instances requires --service-account or '
                           '--application-default-credential to be specified.')

View File

@@ -0,0 +1,72 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read JSON objects from a stream."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import json
import six
def ReadJsonStream(file_obj):
  """Read the events from the skaffold event stream.

  Args:
    file_obj: A File object.

  Yields:
    Event dicts from the JSON payloads.
  """
  # Skip blank lines; every remaining line is one standalone JSON document.
  for raw_line in _ReadStreamingLines(file_obj):
    if raw_line:
      yield json.loads(six.ensure_str(raw_line))
if six.PY3:
  def _ReadStreamingLines(file_obj):
    """Yield lines from file_obj, tolerating a dropped connection."""
    # The peer (e.g. the skaffold events endpoint) may reset the connection
    # mid-stream; treat that the same as a clean end of stream.
    with contextlib.suppress(ConnectionResetError):
      for line in file_obj:
        yield line
elif six.PY2:
  def _ReadStreamingLines(file_obj):
    """Python 2 compatibility with py3's streaming behavior.
    If file_obj is an HTTPResponse, iterating over lines blocks until a buffer
    is full.
    Args:
      file_obj: A file-like object, including HTTPResponse.
    Yields:
      Lines, like iter(file_obj) but without buffering stalls.
    """
    while True:
      line = b''
      while True:
        # Read one byte at a time so we never block waiting to fill a buffer.
        byte = file_obj.read(1)
        if not byte:
          # End of stream; note a trailing unterminated line is discarded.
          return
        if byte == b'\n':
          break
        line += byte
      yield line

View File

@@ -0,0 +1,339 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for generating the files for local development environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
from googlecloudsdk.command_lib.code import run_subprocess
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import platforms
from googlecloudsdk.core.util import times
import six
# Default minikube profile name used for the local development cluster.
DEFAULT_CLUSTER_NAME = 'gcloud-local-dev'
class _KubeCluster(object):
"""A kubernetes cluster.
Attributes:
context_name: Kubernetes context name.
env_vars: Docker env vars.
shared_docker: Whether the kubernetes cluster shares a docker instance with
the developer's machine.
"""
def __init__(self, context_name, shared_docker):
"""Initializes KubeCluster with cluster name.
Args:
context_name: Kubernetes context.
shared_docker: Whether the kubernetes cluster shares a docker instance
with the developer's machine.
"""
self.context_name = context_name
self.shared_docker = shared_docker
@property
def env_vars(self):
return {}
def GetMinikubeVersion():
  """Returns the current version of minikube."""
  # `minikube version` output is returned verbatim, including trailing newline.
  return six.ensure_text(subprocess.check_output([_FindMinikube(), 'version']))
class MinikubeCluster(_KubeCluster):
  """A cluster on minikube.
  Attributes:
    context_name: Kubernetes context name.
    env_vars: Docker environment variables.
    shared_docker: Whether the kubernetes cluster shares a docker instance with
      the developer's machine.
  """
  @property
  def env_vars(self):
    """Docker environment variables pointing at the minikube docker daemon."""
    return _GetMinikubeDockerEnvs(self.context_name)
class Minikube(object):
  """Starts and stops a minikube cluster."""
  def __init__(self,
               cluster_name,
               stop_cluster=True,
               vm_driver=None,
               debug=False):
    """Initialize Minikube.
    Args:
      cluster_name: Name of the minikube profile.
      stop_cluster: If True, stop the cluster when the context exits.
      vm_driver: Minikube VM driver to use (e.g. 'docker'), or None.
      debug: If True, run minikube with verbose logging.
    """
    self._cluster_name = cluster_name
    self._stop_cluster = stop_cluster
    self._vm_driver = vm_driver
    self._debug = debug
  def __enter__(self):
    _StartMinikubeCluster(self._cluster_name, self._vm_driver, self._debug)
    # With the 'docker' driver the cluster shares the host's docker daemon.
    return MinikubeCluster(self._cluster_name, self._vm_driver == 'docker')
  def __exit__(self, exc_type, exc_value, tb):
    if self._stop_cluster:
      _StopMinikube(self._cluster_name, self._debug)
def _FindMinikube():
  """Return the path to minikube, honoring the path-override property."""
  return (properties.VALUES.code.minikube_path_override.Get() or
          run_subprocess.GetGcloudPreferredExecutable('minikube'))
class MinikubeStartError(exceptions.Error):
  """Error if minikube fails to start."""
# Event-type tags appearing in minikube's JSON event stream.
_MINIKUBE_STEP = 'io.k8s.sigs.minikube.step'
_MINIKUBE_DOWNLOAD_PROGRESS = 'io.k8s.sigs.minikube.download.progress'
_MINIKUBE_ERROR = 'io.k8s.sigs.minikube.error'
_MINIKUBE_NOT_ENOUGH_CPU_FRAGMENT = 'The minimum allowed is 2 CPUs.'
# pylint: disable=line-too-long
# See https://github.com/kubernetes/minikube/blob/master/pkg/minikube/reason/exitcodes.go
# pylint: enable=line-too-long
# Friendly replacements for minikube exit codes (keys are string exit codes).
_MINIKUBE_ERROR_MESSAGES = {
    '29': 'Not enough CPUs. Cloud Run Emulator requires 2 CPUs.',
    '69': 'Cannot reach docker daemon.',
}
# Error ids whose minikube-provided 'advice' text is surfaced verbatim.
_MINIKUBE_PASSTHROUGH_ADVICE_IDS = frozenset(['HOST_HOME_PERMISSION'])
# On non-Linux hosts, suggest growing the Docker VM (per the appended advice).
if platforms.OperatingSystem.Current() != platforms.OperatingSystem.LINUX:
  _MINIKUBE_ERROR_MESSAGES['29'] += ' Increase Docker VM CPUs to 2.'
def _StartMinikubeCluster(cluster_name, vm_driver, debug=False):
  """Starts a minikube cluster.
  Args:
    cluster_name: Name of the minikube profile to start.
    vm_driver: VM driver to pass to minikube (e.g. 'docker'), or None.
    debug: If True, run minikube with verbose logging and echo its events.
  Raises:
    MinikubeStartError: If a minikube error event is reported.
  """
  # pylint: disable=broad-except
  try:
    if not _IsMinikubeClusterUp(cluster_name):
      cmd = [
          _FindMinikube(),
          'start',
          '-p',
          cluster_name,
          '--keep-context',
          '--interactive=false',
          '--delete-on-failure',
          '--install-addons=false',
          '--output=json',
      ]
      if vm_driver:
        cmd.append('--vm-driver=' + vm_driver)
        if vm_driver == 'docker':
          cmd.append('--container-runtime=docker')
      if debug:
        cmd.extend(['--alsologtostderr', '-v8'])
      start_msg = "Starting development environment '%s' ..." % cluster_name
      # NOTE(review): .total_seconds is accessed without parentheses --
      # presumably a property/attribute on the parsed duration; confirm
      # against googlecloudsdk.core.util.times.
      event_timeout = times.ParseDuration(
          properties.VALUES.code.minikube_event_timeout.Get(
              required=True)).total_seconds
      with console_io.ProgressBar(start_msg) as progress_bar:
        for json_obj in run_subprocess.StreamOutputJson(
            cmd, event_timeout_sec=event_timeout, show_stderr=debug):
          if debug:
            print('minikube', json_obj)
          _HandleMinikubeStatusEvent(progress_bar, json_obj)
  except Exception as e:
    # NOTE(review): six.reraise(tp, value, tb) re-raises `value` (the original
    # exception) with the traceback; the MinikubeStartError class argument only
    # takes effect when value is None. Confirm the intended exception type.
    six.reraise(MinikubeStartError, e, sys.exc_info()[2])
def _HandleMinikubeStatusEvent(progress_bar, json_obj):
  """Handle a minikube json event.

  Args:
    progress_bar: A console progress bar; updated via SetProgress.
    json_obj: A dict for one decoded minikube JSON event.

  Raises:
    MinikubeStartError: If the event is an error event carrying an exit code.
  """
  event_type = json_obj['type']
  if event_type == _MINIKUBE_STEP:
    data = json_obj['data']
    steps = _GetStepCounts(data)
    if steps:
      current_step, total_steps = steps
      progress_bar.SetProgress(current_step / float(total_steps))
  elif event_type == _MINIKUBE_DOWNLOAD_PROGRESS:
    data = json_obj['data']
    steps = _GetStepCounts(data)
    if steps and 'progress' in data:
      current_step, total_steps = steps
      # Fold the within-step download fraction into the overall progress.
      download_progress = float(data['progress'])
      progress_bar.SetProgress((current_step + download_progress) / total_steps)
  elif event_type == _MINIKUBE_ERROR and 'exitcode' in json_obj['data']:
    data = json_obj['data']
    if ('id' in data and 'advice' in data and
        data['id'] in _MINIKUBE_PASSTHROUGH_ADVICE_IDS):
      # For these error ids, minikube's own advice text is the best message.
      raise MinikubeStartError(data['advice'])
    else:
      exit_code = data['exitcode']
      msg = _MINIKUBE_ERROR_MESSAGES.get(exit_code,
                                         'Unable to start Cloud Run Emulator.')
      raise MinikubeStartError(msg)


def _GetStepCounts(data):
  """Parse 'currentstep'/'totalsteps' from a minikube event's data dict.

  https://github.com/kubernetes/minikube/issues/9754
  currentstep and totalsteps could be:
   missing -> invalid
   '' -> invalid
   '0' -> ok
   0 -> ok

  Args:
    data: The 'data' dict of a minikube event.

  Returns:
    (current_step, total_steps) as ints, or None if either field is missing
    or empty.
  """
  # pylint:disable=g-explicit-bool-comparison
  if data.get('currentstep', '') != '' and data.get('totalsteps', '') != '':
    return int(data['currentstep']), int(data['totalsteps'])
  return None
def _GetMinikubeDockerEnvs(cluster_name):
  """Get the docker environment settings for a given cluster.
  Args:
    cluster_name: Name of the minikube profile.
  Returns:
    Dict of NAME -> value parsed from `minikube docker-env` output.
  """
  cmd = [_FindMinikube(), 'docker-env', '-p', cluster_name, '--shell=none']
  lines = run_subprocess.GetOutputLines(cmd, timeout_sec=20)
  # With --shell=none each non-comment line is NAME=VALUE; split on the first
  # '=' only, since values may themselves contain '='.
  return dict(
      line.split('=', 1) for line in lines if line and not line.startswith('#'))
def _IsMinikubeClusterUp(cluster_name):
  """Checks if a minikube cluster is running.
  Args:
    cluster_name: Name of the minikube profile.
  Returns:
    True if `minikube status` reports the Host as Running.
  """
  cmd = [_FindMinikube(), 'status', '-p', cluster_name, '-o', 'json']
  try:
    status = run_subprocess.GetOutputJson(
        cmd, timeout_sec=20, show_stderr=False)
    return 'Host' in status and status['Host'].strip() == 'Running'
  except (ValueError, subprocess.CalledProcessError):
    # `minikube status` exits non-zero (or emits non-JSON) when the cluster
    # is not running; treat both cases as "not up".
    return False
def _StopMinikube(cluster_name, debug=False):
  """Stop a minikube cluster.
  Args:
    cluster_name: Name of the minikube profile to stop.
    debug: If True, show the subprocess output.
  """
  cmd = [_FindMinikube(), 'stop', '-p', cluster_name]
  print("Stopping development environment '%s' ..." % cluster_name)
  run_subprocess.Run(cmd, timeout_sec=150, show_output=debug)
  print('Development environment stopped.')
def DeleteMinikube(cluster_name):
  """Delete a minikube cluster.

  Args:
    cluster_name: Name of the minikube profile to delete.
  """
  cmd = [_FindMinikube(), 'delete', '-p', cluster_name]
  print("Deleting development environment '%s' ..." % cluster_name)
  run_subprocess.Run(cmd, timeout_sec=150, show_output=False)
  # The cluster is deleted, not merely stopped; report that accurately
  # (the previous message was copy-pasted from _StopMinikube).
  print('Development environment deleted.')
class ExternalCluster(_KubeCluster):
  """A external kubernetes cluster.
  Attributes:
    context_name: Kubernetes context name.
    env_vars: Docker environment variables.
    shared_docker: Whether the kubernetes cluster shares a docker instance with
      the developer's machine.
  """
  def __init__(self, cluster_name):
    """Initializes ExternalCluster with profile name.
    Args:
      cluster_name: Name of the cluster.
    """
    # External clusters never share the developer's docker daemon.
    super(ExternalCluster, self).__init__(cluster_name, False)
class ExternalClusterContext(object):
  """Do nothing context manager for external clusters."""
  def __init__(self, kube_context):
    """Initializes ExternalClusterContext.
    Args:
      kube_context: Kubernetes context name of the external cluster.
    """
    self._kube_context = kube_context
  def __enter__(self):
    return ExternalCluster(self._kube_context)
  def __exit__(self, exc_type, exc_value, tb):
    # External clusters are not owned by us; nothing to tear down.
    pass
def _FindKubectl():
  """Return the path to kubectl, preferring the gcloud-bundled copy."""
  return run_subprocess.GetGcloudPreferredExecutable('kubectl')
def _NamespaceExists(namespace, context_name=None):
  """Return True if the kubernetes namespace already exists in the cluster."""
  context_args = ['--context', context_name] if context_name else []
  cmd = [_FindKubectl()] + context_args + ['get', 'namespaces', '-o', 'name']
  existing = run_subprocess.GetOutputLines(
      cmd, timeout_sec=20, show_stderr=False)
  # `kubectl get -o name` prints each resource as 'namespace/<name>'.
  return 'namespace/' + namespace in existing
def _CreateNamespace(namespace, context_name=None):
  """Create the kubernetes namespace in the cluster via kubectl."""
  context_args = ['--context', context_name] if context_name else []
  cmd = [_FindKubectl()] + context_args + ['create', 'namespace', namespace]
  run_subprocess.Run(cmd, timeout_sec=20, show_output=False)
def _DeleteNamespace(namespace, context_name=None):
  """Delete the kubernetes namespace from the cluster via kubectl."""
  context_args = ['--context', context_name] if context_name else []
  cmd = [_FindKubectl()] + context_args + ['delete', 'namespace', namespace]
  run_subprocess.Run(cmd, timeout_sec=20, show_output=False)
class KubeNamespace(object):
  """Context to create and tear down kubernetes namespace."""
  def __init__(self, namespace, context_name=None):
    """Initialize KubeNamespace.
    Args:
      namespace: (str) Namespace name.
      context_name: (str) Kubernetes context name.
    """
    self._namespace = namespace
    self._context_name = context_name
    # Only delete the namespace on exit if this context manager created it.
    self._delete_namespace = False
  def __enter__(self):
    if not _NamespaceExists(self._namespace, self._context_name):
      _CreateNamespace(self._namespace, self._context_name)
      self._delete_namespace = True
  def __exit__(self, exc_type, exc_value, tb):
    if self._delete_namespace:
      _DeleteNamespace(self._namespace, self._context_name)

View File

@@ -0,0 +1,188 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for generating the files for local development environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import functools
import itertools
from googlecloudsdk.command_lib.code import builders
from googlecloudsdk.command_lib.code import local
from googlecloudsdk.command_lib.code import yaml_helper
from googlecloudsdk.core import yaml
import six
_SKAFFOLD_TEMPLATE = """
apiVersion: skaffold/v2beta5
kind: Config
build:
artifacts: []
deploy:
kubectl:
manifests: []
"""
class LocalRuntimeFiles(object):
  """Generates the development environment files for a project."""
  def __init__(self, settings):
    """Initialize LocalRuntimeFiles.
    Args:
      settings: Local development settings.
    """
    self._settings = settings
  def KubernetesConfig(self):
    """Create a kubernetes config file.
    Returns:
      Text of a kubernetes config file.
    Raises:
      ValueError: If a string cpu limit is not in millicpus, or cloudsql
        instances are configured without a credential setting.
    """
    if self._settings.cpu:
      if isinstance(self._settings.cpu, six.text_type):
        # String cpu limits must be millicpus, e.g. '500m' -> 0.5 CPUs.
        if not self._settings.cpu.endswith('m'):
          raise ValueError('cpu limit must be defined as an integer or as '
                           'millicpus')
        user_cpu = int(self._settings.cpu[:-1]) / 1000.0
      else:
        user_cpu = self._settings.cpu
      # Request the smaller of 0.1 CPU and the user's limit.
      cpu_request = min(0.1, user_cpu)
    else:
      cpu_request = None
    code_generators = [
        local.AppContainerGenerator(
            self._settings.service_name, self._settings.image,
            self._settings.env_vars, self._settings.env_vars_secrets,
            self._settings.memory, self._settings.cpu, cpu_request,
            self._settings.readiness_probe),
        local.SecretsGenerator(self._settings.service_name,
                               self._settings.env_vars_secrets,
                               self._settings.volumes_secrets,
                               self._settings.namespace,
                               self._settings.allow_secret_manager)
    ]
    credential_generator = None
    if isinstance(self._settings.credential, local.ServiceAccountSetting):
      credential_generator = local.CredentialGenerator(
          functools.partial(local.GetServiceAccountSecret,
                            self._settings.credential.name))
      code_generators.append(credential_generator)
    elif isinstance(self._settings.credential,
                    local.ApplicationDefaultCredentialSetting):
      credential_generator = local.CredentialGenerator(local.GetUserCredential)
      code_generators.append(credential_generator)
    if self._settings.cloudsql_instances:
      # The Cloud SQL proxy container needs a credential to connect.
      if not credential_generator:
        raise ValueError('A credential generator must be defined when cloudsql '
                         'instances are defined.')
      cloudsql_proxy = local.CloudSqlProxyGenerator(
          self._settings.cloudsql_instances, credential_generator.GetInfo())
      code_generators.append(cloudsql_proxy)
    return _GenerateKubeConfigs(code_generators)
  def SkaffoldConfig(self, kubernetes_file_path):
    """Create a skaffold yaml file.
    Args:
      kubernetes_file_path: Path to the kubernetes config file.
    Returns:
      Text of the skaffold yaml file.
    """
    skaffold_yaml = yaml.load(_SKAFFOLD_TEMPLATE)
    manifests = yaml_helper.GetOrCreate(
        skaffold_yaml, ('deploy', 'kubectl', 'manifests'), constructor=list)
    manifests.append(kubernetes_file_path)
    artifact = {'image': self._settings.image}
    # Need to escape file paths for the yaml encoder. The yaml encoder will
    # interpret \ as the beginning of an escape character. Windows paths may
    # have backslashes.
    artifact['context'] = six.ensure_text(
        self._settings.context.encode('unicode_escape'))
    if isinstance(self._settings.builder, builders.BuildpackBuilder):
      artifact['buildpacks'] = {
          'builder': self._settings.builder.builder,
      }
      if self._settings.builder.devmode:
        # Dev mode enables skaffold auto-sync of source changes.
        artifact['buildpacks']['env'] = ['GOOGLE_DEVMODE=1']
        artifact['sync'] = {'auto': {}}
      if self._settings.builder.trust:
        artifact['buildpacks']['trustBuilder'] = True
    else:
      # Macos needs a relative path or else
      # e2e.surface.code.dev_mac_test.MacE2ETest.testNamespace fails.
      dockerfile_rel_path = self._settings.builder.DockerfileRelPath(
          self._settings.context)
      artifact['docker'] = {
          'dockerfile':
              six.ensure_text(dockerfile_rel_path.encode('unicode_escape'))
      }
    artifacts = yaml_helper.GetOrCreate(
        skaffold_yaml, ('build', 'artifacts'), constructor=list)
    artifacts.append(artifact)
    if self._settings.local_port:
      port_forward_config = {
          'resourceType': 'service',
          'resourceName': self._settings.service_name,
          'port': 8080,
          'localPort': self._settings.local_port
      }
      if self._settings.namespace:
        port_forward_config['namespace'] = self._settings.namespace
      skaffold_yaml['portForward'] = [port_forward_config]
    return yaml.dump(skaffold_yaml)
def _GenerateKubeConfigs(code_generators):
  """Generate Kubernetes yaml configs.

  Args:
    code_generators: Iterable of KubeConfigGenerator.

  Returns:
    Yaml text containing all generated kubernetes configs.
  """
  kube_configs = []
  for generator in code_generators:
    kube_configs.extend(generator.CreateConfigs())

  deployments = [c for c in kube_configs if c['kind'] == 'Deployment']

  # First let every generator adjust every deployment...
  for deployment in deployments:
    for generator in code_generators:
      generator.ModifyDeployment(deployment)

  # ...then let every generator adjust every container of every deployment.
  for deployment in deployments:
    containers = yaml_helper.GetAll(deployment,
                                    ('spec', 'template', 'spec', 'containers'))
    for container in containers:
      for generator in code_generators:
        generator.ModifyContainer(container)

  return yaml.dump_all(kube_configs)

View File

@@ -0,0 +1,205 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized versions of runners in subprocess.
Some of this is just for python 2 support and can be simplified.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os.path
import subprocess
import threading
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.command_lib.code import json_stream
from googlecloudsdk.core import config
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files as file_utils
import six
def _FindOrInstallComponent(component_name):
  """Finds the path to a component or install it.
  Args:
    component_name: Name of the component.
  Returns:
    Path to the component. Returns None if the component can't be found.
  """
  # EnsureInstalledAndRestart installs the component if missing (and, per its
  # name, may restart the CLI); the binary then lives under <sdk_root>/bin.
  if (config.Paths().sdk_root and
      update_manager.UpdateManager.EnsureInstalledAndRestart([component_name])):
    return os.path.join(config.Paths().sdk_root, 'bin', component_name)
  return None
def GetGcloudPreferredExecutable(exe):
  """Finds the path to an executable, preferring the gcloud packaged version.

  Args:
    exe: Name of the executable.

  Returns:
    Path to the executable.

  Raises:
    EnvironmentError: The executable can't be found.
  """
  # Try the SDK-bundled component first, then fall back to $PATH.
  found = _FindOrInstallComponent(exe)
  if not found:
    found = file_utils.FindExecutableOnPath(exe)
  if not found:
    raise EnvironmentError('Unable to locate %s.' % exe)
  return found
class _TimeoutThread(object):
  """A context manager based on threading.Timer.
  Pass a function to call after the given time has passed. If you exit before
  the timer fires, nothing happens. If you exit after we've had to call the
  timer function, we raise TimeoutError at exit time.
  """
  def __init__(self,
               func,
               timeout_sec,
               error_format='Task ran for more than {timeout_sec} seconds'):
    """Initialize _TimeoutThread.
    Args:
      func: Zero-arg callable invoked when the timeout elapses.
      timeout_sec: Seconds before func is called.
      error_format: TimeoutError message template; may use {timeout_sec}.
    """
    self.func = func
    self.timeout_sec = timeout_sec
    self.error_format = error_format
    self.timer = None
  def __enter__(self):
    self.Reset()
    return self
  def Reset(self):
    """Restart the countdown, canceling any previously started timer."""
    if self.timer is not None:
      self.timer.cancel()
    self.timer = threading.Timer(self.timeout_sec, self.func)
    self.timer.start()
  def __exit__(self, exc_type, exc_value, traceback):
    # Timer sets 'finished' after running func (cancel() also sets it), so
    # read it before calling cancel() to learn whether the timer fired.
    # NOTE(review): if func is executing right at this moment, 'finished' may
    # not be set yet and the timeout would go unreported -- confirm benign.
    timed_out = self.timer.finished.is_set()
    self.timer.cancel()
    if timed_out:
      raise utils.TimeoutError(
          self.error_format.format(timeout_sec=self.timeout_sec))
def Run(cmd, timeout_sec, show_output=True, inpt=None):
  """Run command and optionally send the output to /dev/null or nul.
  Args:
    cmd: List of executable and arg strings.
    timeout_sec: The process is killed if it runs longer than this.
    show_output: If False, stdout and stderr are discarded.
    inpt: Optional bytes/text to send to the process's stdin.
  Raises:
    subprocess.CalledProcessError: The process exited with a non-zero code.
  """
  with file_utils.FileWriter(os.devnull) as devnull:
    stdout = devnull
    stderr = devnull
    stdin = None
    if show_output:
      stdout = None
      stderr = None
    if inpt:
      stdin = subprocess.PIPE
    # [py3 port] Should be able to use subprocess.run (etc) with 'timeout' param
    # here and below. We're only using the Popen API in order to have a process
    # to give to _TimeoutThread.
    p = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, stdin=stdin)
    with _TimeoutThread(p.kill, timeout_sec):
      if inpt:
        p.communicate(six.ensure_binary(inpt))
      else:
        p.wait()
  if p.returncode != 0:
    raise subprocess.CalledProcessError(p.returncode, cmd)
def _GetStdout(cmd, timeout_sec, show_stderr=True):
  """Run cmd and return its stdout as text.
  Args:
    cmd: List of executable and arg strings.
    timeout_sec: The process is killed if it runs longer than this.
    show_stderr: If False, the command's stderr is captured and discarded.
  Returns:
    The decoded stdout of the process.
  Raises:
    subprocess.CalledProcessError: The process exited with a non-zero code.
  """
  p = subprocess.Popen(
      cmd,
      stdout=subprocess.PIPE,
      stderr=None if show_stderr else subprocess.PIPE)
  with _TimeoutThread(p.kill, timeout_sec):
    stdout, _ = p.communicate()
  if p.returncode != 0:
    raise subprocess.CalledProcessError(p.returncode, cmd)
  return six.ensure_text(stdout)
def GetOutputLines(cmd, timeout_sec, show_stderr=True, strip_output=False):
  """Run command and get its stdout as a list of lines.

  Args:
    cmd: List of executable and arg strings.
    timeout_sec: Command will be killed if it exceeds this.
    show_stderr: False to suppress stderr from the command.
    strip_output: Strip head/tail whitespace before splitting into lines.

  Returns:
    List of lines (without newlines).
  """
  text = _GetStdout(cmd, timeout_sec, show_stderr=show_stderr)
  return (text.strip() if strip_output else text).splitlines()
def GetOutputJson(cmd, timeout_sec, show_stderr=True):
  """Run command and get its JSON stdout as a parsed dict.

  Args:
    cmd: List of executable and arg strings.
    timeout_sec: Command will be killed if it exceeds this.
    show_stderr: False to suppress stderr from the command.

  Returns:
    Parsed JSON.
  """
  raw = _GetStdout(cmd, timeout_sec, show_stderr=show_stderr)
  return json.loads(raw.strip())
def StreamOutputJson(cmd, event_timeout_sec, show_stderr=True):
  """Run command and get its output streamed as an iterable of dicts.
  Args:
    cmd: List of executable and arg strings.
    event_timeout_sec: Command will be killed if we don't get a JSON line for
      this long. (This is not the same as timeout_sec above).
    show_stderr: False to suppress stderr from the command.
  Yields:
    Parsed JSON.
  Raises:
    CalledProcessError: cmd returned with a non-zero exit code.
    TimeoutError: cmd has timed out.
  """
  p = subprocess.Popen(
      cmd,
      stdout=subprocess.PIPE,
      stderr=None if show_stderr else subprocess.PIPE)
  with _TimeoutThread(
      p.kill,
      event_timeout_sec,
      error_format='No subprocess output for {timeout_sec} seconds') as timer:
    for obj in json_stream.ReadJsonStream(p.stdout):
      # Each received event resets the inactivity timeout.
      timer.Reset()
      yield obj
    p.wait()
  if p.returncode != 0:
    raise subprocess.CalledProcessError(p.returncode, cmd)

View File

@@ -0,0 +1,123 @@
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for the Secret Manager integration in the local environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import re
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.code import kubernetes
from googlecloudsdk.command_lib.run import secrets_mapping
# Message classes for the Secret Manager v1 API, resolved once at import time.
SECRETS_MESSAGE_MODULE = apis.GetMessagesModule('secretmanager', 'v1')
class SecretManagerSecret(object):
  """A secret to be fetched from Secret Manager.

  Attributes:
    name: Name of the secret (also used for the k8s secret resource).
    versions: Collection of secret version names to fetch.
    mapped_secret: Optional Secret Manager secret (short name or full
      'projects/...' resource path) that this name maps to.
  """

  def __init__(self, name, versions, mapped_secret=None):
    self.name = name
    self.versions = versions
    self.mapped_secret = mapped_secret

  def __eq__(self, other):
    # Previously comparing against a non-SecretManagerSecret raised
    # AttributeError; return NotImplemented so Python falls back gracefully.
    if not isinstance(other, SecretManagerSecret):
      return NotImplemented
    return (self.name == other.name and self.versions == other.versions and
            self.mapped_secret == other.mapped_secret)

  def __ne__(self, other):
    # Python 2 does not derive != from __eq__; keep the pair consistent.
    result = self.__eq__(other)
    if result is NotImplemented:
      return result
    return not result

  def __repr__(self):
    return '<Secret: (name="{}", versions={}, mapped_secret="{}")>'.format(
        self.name, self.versions, self.mapped_secret)

  def __hash__(self):
    # tuple() makes hashing work even when versions is passed as a list
    # (hash of a raw list would raise TypeError); identical for tuple input.
    return hash((self.name, tuple(self.versions), self.mapped_secret))
def BuildSecrets(project_name, secret_list, namespace, client=None):
  """Fetch secrets from Secret Manager and create k8s secrets with the data.

  Args:
    project_name: Project from which the secrets are read.
    secret_list: Iterable of SecretManagerSecret to fetch.
    namespace: Optional kubernetes namespace for the generated resources.
    client: Secrets client to use; a _SecretsClient is created when None.

  Returns:
    List of kubernetes Secret resource dicts.
  """
  secrets_client = client if client is not None else _SecretsClient()
  return [
      _BuildSecret(secrets_client, project_name, secret.name,
                   secret.mapped_secret, secret.versions, namespace)
      for secret in secret_list
  ]
def _BuildSecret(client, project, secret_name, mapped_secret, versions,
                 namespace):
  """Build the k8s secret resource for minikube from Secret Manager data.
  Args:
    client: Client used to fetch secret version payloads.
    project: Project id that owns the secrets.
    secret_name: Name of the secret (and of the k8s resource).
    mapped_secret: Optional Secret Manager secret this name maps to.
    versions: Collection of version names to fetch.
    namespace: Optional kubernetes namespace for the resource.
  Returns:
    Dict representing a kubernetes Secret resource.
  Raises:
    ValueError: If the special mount-all version marker is present.
  """
  if secrets_mapping.SpecialVersion.MOUNT_ALL in versions:
    # TODO(b/187972361): Do we need to load all secret versions for the secret?
    raise ValueError('local development requires you to specify all secret '
                     'versions that you need to use.')
  secrets = {}
  for version in versions:
    secrets[version] = client.GetSecretData(project, secret_name, mapped_secret,
                                            version)
  return _BuildK8sSecret(secret_name, secrets, namespace)
def _BuildK8sSecret(secret_name, secrets, namespace):
"""Turn a map of SecretManager responses into a k8s secret."""
data = {}
for version, secret in secrets.items():
data[version] = base64.b64encode(secret.payload.data).decode('ascii')
metadata = {'name': secret_name}
if namespace:
metadata['namespace'] = namespace
d = {'metadata': metadata, 'data': data}
d['apiVersion'] = 'v1'
d['kind'] = 'Secret'
return d
def _DeleteSecrets(secret_map, namespace, context_name):
  """Delete the k8s secrets named by secret_map's keys from the cluster."""
  kubernetes.DeleteResources('secret', list(secret_map.keys()), namespace,
                             context_name)
class _SecretsClient(object):
  """Client implementation for calling Secret Manager to fetch secrets."""
  def __init__(self):
    self.secrets_client = apis.GetClientInstance('secretmanager', 'v1')
  def GetSecretData(self, project, secret_name, mapped_secret, version):
    """Retrieve secret from secret manager.
    Args:
      project: Project id used when the secret is addressed by short name.
      secret_name: Short name of the secret.
      mapped_secret: Optional secret the name maps to; either a short name or
        a full 'projects/...' resource path.
      version: Secret version to access.
    Returns:
      The Secret Manager access response message.
    """
    if mapped_secret:
      if mapped_secret.startswith('projects/'):
        # mapping a cross-project secret.
        resource_name = '{}/versions/{}'.format(mapped_secret, version)
      else:
        # if we're mapping a local secret to a valid k8s name
        resource_name = 'projects/{}/secrets/{}/versions/{}'.format(
            project, mapped_secret, version)
    else:
      resource_name = 'projects/{}/secrets/{}/versions/{}'.format(
          project, secret_name, version)
    return self.secrets_client.projects_secrets_versions.Access(
        SECRETS_MESSAGE_MODULE
        .SecretmanagerProjectsSecretsVersionsAccessRequest(name=resource_name))
def IsValidK8sName(name):
  """Check name against k8s naming rules.

  k8s names must start and end with an alphanumeric, contain only lowercase
  alphanumerics, '-', and '.', and be at most 253 characters long.

  Args:
    name: Candidate kubernetes object name.

  Returns:
    A truthy match object if name is valid, otherwise None.
  """
  valid_name = r'[a-z0-9]([a-z0-9\-\.]{0,251}[a-z0-9])?$'
  return re.match(valid_name, name)

View File

@@ -0,0 +1,273 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reading the skaffold events stream."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import datetime
import os.path
import signal
import subprocess
import sys
import threading
from googlecloudsdk.command_lib.code import json_stream
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files as file_utils
import six
class StopThreadError(BaseException):
  """The thread has been stopped by a ThreadEvent.

  NOTE: derives from BaseException rather than Exception, presumably so that
  blanket `except Exception` handlers in watched code do not swallow it.
  """
# In integration tests SIGINT doesn't generate KeyboardInterrupt. Create a
# signal handler that forces the generation of KeyboardInterrupt.
def _KeyboardInterruptHandler(unused_signum, unused_stack):
  """Signal handler that unconditionally raises KeyboardInterrupt."""
  raise KeyboardInterrupt
class _SigInterruptedHandler(object):
  """Context manager to capture SIGINT and send it to a handler.

  On entry, saves the current SIGINT handler and installs the given one; on
  exit, reinstalls the saved handler.
  """

  def __init__(self, handler):
    # Handler to install while the context is active.
    self._handler = handler
    # Previous SIGINT handler; saved on __enter__, restored on __exit__.
    self._orig_handler = None

  def __enter__(self):
    self._orig_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, self._handler)

  def __exit__(self, exc_type, exc_value, tb):
    signal.signal(signal.SIGINT, self._orig_handler)
def _FindOrInstallSkaffoldComponent():
  """Return the path to the skaffold gcloud component, installing if needed.

  Returns:
    Path to the skaffold binary under the SDK root, or None when the SDK
    root is unknown or the component could not be ensured.
  """
  sdk_root = config.Paths().sdk_root
  if not sdk_root:
    return None
  if not update_manager.UpdateManager.EnsureInstalledAndRestart(['skaffold']):
    return None
  return os.path.join(sdk_root, 'bin', 'skaffold')
def _FindSkaffold():
  """Find the path to the skaffold executable.

  Checks, in order: the skaffold path override property, the bundled gcloud
  component, and finally the system PATH.

  Returns:
    Path to the skaffold executable.

  Raises:
    EnvironmentError: If no skaffold executable could be located.
  """
  skaffold_path = properties.VALUES.code.skaffold_path_override.Get()
  if not skaffold_path:
    skaffold_path = _FindOrInstallSkaffoldComponent()
  if not skaffold_path:
    skaffold_path = file_utils.FindExecutableOnPath('skaffold')
  if not skaffold_path:
    raise EnvironmentError('Unable to locate skaffold.')
  return skaffold_path
def GetVersion():
  """Get skaffold version string."""
  version_output = subprocess.check_output([_FindSkaffold(), 'version'])
  return six.ensure_text(version_output)
@contextlib.contextmanager
def Skaffold(skaffold_config,
             context_name=None,
             namespace=None,
             env_vars=None,
             debug=False,
             events_port=None):
  """Run skaffold and catch keyboard interrupts to kill the process.

  Runs `skaffold dev` with port forwarding enabled and yields the child
  process. A SIGINT received while the context is active is converted to
  KeyboardInterrupt, which terminates the child process and waits for it.

  Args:
    skaffold_config: Path to skaffold configuration yaml file.
    context_name: Kubernetes context name.
    namespace: Kubernetes namespace name.
    env_vars: Additional environment variables with which to run skaffold.
    debug: If true, turn on debugging output.
    events_port: If set, turn on the events api and expose it on this port.

  Raises:
    EnvironmentError: If no skaffold executable could be located.

  Yields:
    The skaffold subprocess.Popen process.
  """
  cmd = [_FindSkaffold(), 'dev', '-f', skaffold_config, '--port-forward']
  if context_name:
    cmd += ['--kube-context=%s' % context_name]
  if namespace:
    cmd += ['--namespace=%s' % namespace]
  if debug:
    # -vdebug raises skaffold's log verbosity to the "debug" level.
    cmd += ['-vdebug']
  if events_port:
    cmd += ['--rpc-http-port=%s' % events_port]
  # Suppress the current Ctrl-C handler and pass the signal to the child
  # process (the installed handler re-raises it as KeyboardInterrupt,
  # which is caught below).
  with _SigInterruptedHandler(_KeyboardInterruptHandler):
    # Skaffold needs to be able to run minikube and kind. Those tools
    # may live in the SDK root as installed gcloud components. Place the
    # SDK root in the path for skaffold.
    env = os.environ.copy()
    if env_vars:
      env.update((six.ensure_str(name), six.ensure_str(value))
                 for name, value in env_vars.items())
    if config.Paths().sdk_root:
      env['PATH'] = six.ensure_str(env['PATH'] + os.pathsep +
                                   config.Paths().sdk_root)
    try:
      p = subprocess.Popen(cmd, env=env)
      yield p
    except KeyboardInterrupt:
      # Ctrl-C: stop skaffold and wait for it to shut down.
      p.terminate()
      p.wait()
    # Emit anything still buffered on stdout/stderr before returning.
    sys.stdout.flush()
    sys.stderr.flush()
class PrintUrlThreadContext(object):
  """Context manager that prints out local urls from a background thread.

  On entry, starts a thread that watches the skaffold events stream api for
  portForward events and prints out the local urls for a service. On exit,
  the thread is signaled to stop.
  """

  def __init__(self, service_name, events_port):
    """Initialize PrintUrlThreadContext.

    Args:
      service_name: Name of the service.
      events_port: Port number of the skaffold events stream api.
    """
    stop_event = threading.Event()
    self._stop = stop_event
    self._thread = threading.Thread(
        target=_PrintUrl, args=(service_name, events_port, stop_event))

  def __enter__(self):
    self._thread.start()

  def __exit__(self, *args):
    self._stop.set()
def _PrintUrl(service_name, events_port, stop):
  """Read the local url of a service from the event stream and print it.

  Watches the skaffold event stream for portForward events and prints the
  local url derived from each one. Continues until either the event stream
  connection closes or the stop event is set.

  Args:
    service_name: Name of the service.
    events_port: Port number of the skaffold events stream api.
    stop: threading.Event event.
  """
  try:
    response = _OpenEventStreamRetry(events_port, stop)
    with contextlib.closing(response):
      for local_port in GetServiceLocalPort(response, service_name):
        # Bail out if the context signaled shutdown while we were waiting.
        if stop.is_set():
          return
        con = console_attr.GetConsoleAttr()
        msg = 'Service URL: {bold}{url}{normal}'.format(
            bold=con.GetFontCode(bold=True),
            url='http://localhost:%s/' % local_port,
            normal=con.GetFontCode())
        # Wait a moment so the URL lands below skaffold's startup logs.
        stop.wait(1)
        log.status.Print(con.Colorize(msg, color='blue'))
  except StopThreadError:
    return
def OpenEventsStream(events_port):
  """Open a connection to the skaffold events api output."""
  events_url = _GetEventsUrl(events_port)
  return six.moves.urllib.request.urlopen(events_url)
def GetServiceLocalPort(response, service_name):
  """Get the local port for a service.

  Yields the new local port every time a new port forwarding connection is
  created for the service.

  Args:
    response: urlopen response.
    service_name: Name of the service.

  Yields:
    Local port number.
  """
  for event in ReadEventStream(response):
    if not _IsPortEventForService(event, service_name):
      continue
    yield event['portEvent']['localPort']
def ReadEventStream(response):
  """Read the events from the skaffold event stream.

  Args:
    response: urlopen response.

  Yields:
    Events from the JSON payloads.
  """
  for payload in json_stream.ReadJsonStream(response):
    # Skip anything that is not a JSON object (e.g. stray primitives).
    if isinstance(payload, dict):
      yield payload['result']['event']
def _OpenEventStreamRetry(events_port,
                          stop_event,
                          retry_interval=datetime.timedelta(seconds=1)):
  """Open a connection to the skaffold events api output.

  Retries opening the connection until opening succeeds or stop_event is
  set.

  Args:
    events_port: Port of the events api.
    stop_event: A threading.Event object.
    retry_interval: Interval for which to sleep between tries.

  Returns:
    urlopen response.

  Raises:
    StopThreadError: The stop_event was set before a connection was
      established.
  """
  delay_seconds = retry_interval.total_seconds()
  while not stop_event.is_set():
    try:
      return OpenEventsStream(events_port)
    except six.moves.urllib.error.URLError:
      # Events api not up yet; back off, but wake immediately on stop.
      stop_event.wait(delay_seconds)
  raise StopThreadError()
def _GetEventsUrl(events_port):
  """Return the url of the skaffold events api on the given local port."""
  port_text = six.text_type(events_port)
  return 'http://localhost:{events_port}/v1/events'.format(
      events_port=port_text)
def _IsPortEventForService(event, service_name):
  """Return True if event is a portForward event for the given service."""
  port_event = event.get('portEvent', {})
  return port_event.get('resourceName') == service_name

View File

@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for working with dictionaries representing yaml files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
def GetOrCreate(obj, path, constructor=dict):
  """Get or create the object by following the field names in the path.

  Follow the field names in the path, descending into the dictionary. If a
  field does not exist, create the appropriate value: intermediate fields
  are created as dictionaries, and the final field is created with the given
  constructor.

  Args:
    obj: A dictionary representing a yaml dictionary
    path: A list of strings representing fields to follow.
    constructor: If the object at the end of the path does not exist, create
      an object using the constructor given.

  Returns:
    An object found by following the path.
  """
  first, rest = path[0], path[1:]
  if rest:
    # Intermediate levels must be dictionaries so descent can continue;
    # only the leaf value uses the caller-supplied constructor.
    if first not in obj:
      obj[first] = dict()
    return GetOrCreate(obj[first], rest, constructor)
  else:
    if first not in obj:
      obj[first] = constructor()
    return obj[first]
def GetAll(obj, path):
  """Given a yaml object, yield all objects found by following a path.

  Given a yaml object, read each field in the path and return the object
  found at the end. If a field has a list value, follow the path for each
  object in the list.

  E.g.
  >>> X = {'A': {'B': [{'C': {'D': 1}}, {'C': {'D': 2}}]}}
  >>> sorted(list(GetAll(X, path=('A', 'B', 'C', 'D'))))
  [1, 2]

  Args:
    obj: A dictionary representing a yaml dictionary
    path: A list of strings representing fields to follow.

  Yields:
    Values that are found by following the given path.

  Raises:
    ValueError: If a non-terminal field holds a value that is neither a
      dictionary nor a list.
  """
  if not path:
    yield obj
    return
  first, rest = path[0], path[1:]
  if first not in obj:
    return
  value = obj[first]
  if isinstance(value, dict):
    for found in GetAll(value, rest):
      yield found
  elif isinstance(value, list):
    for element in value:
      for found in GetAll(element, rest):
        yield found
  elif rest:
    raise ValueError(first + ' is not a dictionary or a list')
  else:
    yield value