feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,50 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys

# Re-export the underscore-suffixed implementation modules of
# containerregistry.client.v2 under their public (unsuffixed) names.
_v2 = sys.modules['containerregistry.client.v2']

# NOTE: each alias must be installed before the next import runs, because
# later modules resolve earlier ones through their public names.
from containerregistry.client.v2 import docker_creds_
_v2.docker_creds = docker_creds_
from containerregistry.client.v2 import docker_http_
_v2.docker_http = docker_http_
from containerregistry.client.v2 import util_
_v2.util = util_
from containerregistry.client.v2 import docker_digest_
_v2.docker_digest = docker_digest_
from containerregistry.client.v2 import docker_image_
_v2.docker_image = docker_image_
from containerregistry.client.v2 import v1_compat_
_v2.v1_compat = v1_compat_
from containerregistry.client.v2 import docker_session_
_v2.docker_session = docker_session_
from containerregistry.client.v2 import append_
_v2.append = append_

View File

@@ -0,0 +1,106 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package provides DockerImage for examining docker_build outputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import json
import os
from containerregistry.client.v2 import docker_digest
from containerregistry.client.v2 import docker_image
from containerregistry.client.v2 import util
# _EMPTY_LAYER_TAR_ID is the sha256 of an empty tarball.
_EMPTY_LAYER_TAR_ID = 'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4'  # pylint: disable=line-too-long


class Layer(docker_image.DockerImage):
  """Appends a new layer on top of a base image.

  This augments a base docker image with new files from a gzipped tarball,
  adds environment variables and exposes a port.
  """

  def __init__(self, base, tar_gz, port, *envs):
    """Creates a new layer on top of a base with optional tar.gz, port or envs.

    Args:
      base: a base DockerImage for a new layer.
      tar_gz: an optional gzipped tarball passed as a string with filesystem
          changeset.
      port: an optional port to be exposed, passed as a string. For example:
          '8080/tcp'.
      *envs: environment variables passed as strings in the format:
          'ENV_ONE=val', 'ENV_TWO=val2'.
    """
    self._base = base

    # Strip the signatures so we can edit the underlying v2 manifest.
    unsigned_manifest, _ = util.DetachSignatures(self._base.manifest())
    manifest = json.loads(unsigned_manifest)
    v1_compat = json.loads(manifest['history'][0]['v1Compatibility'])

    if tar_gz:
      self._blob = tar_gz
      self._blob_sum = docker_digest.SHA256(self._blob)
      v1_compat['throwaway'] = False
    else:
      # No filesystem changes: reference the well-known empty-tarball layer.
      self._blob = b''
      self._blob_sum = _EMPTY_LAYER_TAR_ID
      v1_compat['throwaway'] = True

    manifest['fsLayers'].insert(0, {'blobSum': self._blob_sum})
    v1_compat['parent'] = v1_compat['id']
    # Mint a fresh random id for the new top layer.
    v1_compat['id'] = binascii.hexlify(os.urandom(32)).decode('utf8')

    config = v1_compat.get('config', {}) or {}
    extra_envs = list(envs)
    if extra_envs:
      # New variables override any existing variable with the same key.
      overridden = set(env.split('=')[0] for env in extra_envs)
      kept = [env for env in (config.get('Env', []) or [])
              if env.split('=')[0] not in overridden]
      config['Env'] = kept + extra_envs

    if port is not None:
      exposed = config.get('ExposedPorts', {}) or {}
      exposed[port] = {}
      config['ExposedPorts'] = exposed

    v1_compat['config'] = config

    manifest['history'].insert(
        0, {'v1Compatibility': json.dumps(v1_compat, sort_keys=True)})
    self._manifest = util.Sign(json.dumps(manifest, sort_keys=True))

  def manifest(self):
    """Override."""
    return self._manifest

  def blob(self, digest):
    """Override."""
    # Serve the appended layer ourselves; delegate everything else.
    if digest == self._blob_sum:
      return self._blob
    return self._base.blob(digest)

  # __enter__ and __exit__ allow use as a context manager.
  def __enter__(self):
    """Override."""
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    """Override."""
    return

View File

@@ -0,0 +1,28 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package exposes credentials for talking to a Docker registry."""
from containerregistry.client import docker_creds
class Bearer(docker_creds.SchemeProvider):
  """Implementation for providing a transaction's Bearer token as creds."""

  def __init__(self, bearer_token):
    """Args:
      bearer_token: the token to present under the 'Bearer' scheme.
    """
    super(Bearer, self).__init__('Bearer')
    self._token = bearer_token

  @property
  def suffix(self):
    # The portion of the Authorization header following the scheme.
    return self._token

View File

@@ -0,0 +1,34 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package holds a handful of utilities for calculating digests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
from containerregistry.client.v2 import util
def SHA256(content, prefix='sha256:'):
  """Return 'sha256:' + hex(sha256(content)).

  Args:
    content: the bytes to hash.
    prefix: the string prepended to the hex digest (default 'sha256:').

  Returns:
    The prefixed lowercase hex sha256 digest of content.
  """
  hasher = hashlib.sha256()
  hasher.update(content)
  return '%s%s' % (prefix, hasher.hexdigest())
def SignedManifestToSHA256(manifest):
  """Return 'sha256:' + hex(sha256(manifest - signatures)).

  The v2 digest is computed over the manifest payload with any JWS
  signatures detached.
  """
  payload, _ = util.DetachSignatures(manifest)
  return SHA256(payload.encode('utf8'))

View File

@@ -0,0 +1,415 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package facilitates HTTP/REST requests to the registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import re
import threading
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2 import docker_creds as v2_creds
import httplib2
import six.moves.http_client
import six.moves.urllib.parse
# Options for docker_http.Transport actions: the capability scopes
# requested during the Bearer-token exchange.
PULL = 'pull'
PUSH = 'push,pull'
# For now DELETE is PUSH, which is the read/write ACL.
DELETE = PUSH
CATALOG = 'catalog'
# The complete set of actions a Transport may be constructed with.
ACTIONS = [PULL, PUSH, DELETE, CATALOG]
class Diagnostic(object):
"""Diagnostic encapsulates a Registry v2 diagnostic message.
This captures one of the "errors" from a v2 Registry error response
message, as outlined here:
https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
Args:
error: the decoded JSON of the "errors" array element.
"""
def __init__(self, error):
self._error = error
def __eq__(self, other):
return (self.code == other.code and self.message == other.message and
self.detail == other.detail)
@property
def code(self):
return self._error.get('code', 'UNKNOWN')
@property
def message(self):
return self._error.get('message', '<no message specified>')
@property
def detail(self):
return self._error.get('detail', '<no details provided>')
def _DiagnosticsFromContent(content):
"""Extract and return the diagnostics from content."""
try:
content = content.decode('utf8')
except: # pylint: disable=bare-except
# Assume it's already decoded. Defensive coding for old py2 habits that
# are hard to break. Passing does not make the problem worse.
pass
try:
o = json.loads(content)
return [Diagnostic(d) for d in o.get('errors', [])]
except: # pylint: disable=bare-except
return [Diagnostic({
'code': 'UNKNOWN',
'message': content,
})]
class V2DiagnosticException(Exception):
  """Exceptions when an unexpected HTTP status is returned."""

  def __init__(self, resp, content):
    self._resp = resp
    self._diagnostics = _DiagnosticsFromContent(content)
    # Build a message summarizing the response plus every diagnostic.
    lines = ['response: %s' % resp]
    for diag in self._diagnostics:
      lines.append('%s: %s' % (diag.message, diag.detail))
    super(V2DiagnosticException, self).__init__('\n'.join(lines))

  @property
  def diagnostics(self):
    return self._diagnostics

  @property
  def response(self):
    return self._resp

  @property
  def status(self):
    return self._resp.status
class BadStateException(Exception):
"""Exceptions when we have entered an unexpected state."""
class TokenRefreshException(BadStateException):
"""Exception when token refresh fails."""
def _CheckState(predicate, message = None):
if not predicate:
raise BadStateException(message if message else 'Unknown')
_ANONYMOUS = ''
_BASIC = 'Basic'
_BEARER = 'Bearer'
_REALM_PFX = 'realm='
_SERVICE_PFX = 'service='
class Transport(object):
  """HTTP Transport abstraction to handle automatic v2 reauthentication.

  In the v2 Registry protocol, all of the API endpoints expect to receive
  'Bearer' authentication. These Bearer tokens are generated by exchanging
  'Basic' or 'Anonymous' authentication with an authentication endpoint
  designated by the opening ping request.

  The Bearer tokens are scoped to a resource (typically repository), and
  are generated with a set of capabilities embedded (e.g. push, pull).

  The Docker client has a baked in 60-second expiration for Bearer tokens,
  and upon expiration, registries can reject any request with a 401. The
  transport should automatically refresh the Bearer token and reissue the
  request.

  Args:
    name: the structured name of the docker resource being referenced.
    creds: the basic authentication credentials to use for authentication
      challenge exchanges.
    transport: the HTTP transport to use under the hood.
    action: One of docker_http.ACTIONS, for which we plan to use this transport
  """

  def __init__(self, name, creds, transport, action):
    self._name = name
    self._basic_creds = creds
    self._transport = transport
    self._action = action
    # Guards the assignment of self._creds in _Refresh.
    self._lock = threading.Lock()

    _CheckState(action in ACTIONS,
                'Invalid action supplied to docker_http.Transport: %s' % action)

    # Ping once to establish realm, and then get a good credential
    # for use with this transport.
    self._Ping()
    if self._authentication == _BEARER:
      self._Refresh()
    elif self._authentication == _BASIC:
      self._creds = self._basic_creds
    else:
      self._creds = docker_creds.Anonymous()

  def _Ping(self):
    """Ping the v2 Registry.

    Only called during transport construction, this pings the listed
    v2 registry. The point of this ping is to establish the "realm"
    and "service" to use for Basic for Bearer-Token exchanges.

    Sets self._authentication, self._service and (when challenged)
    self._realm as side effects.
    """
    # This initiates the pull by issuing a v2 ping:
    #   GET H:P/v2/
    headers = {
        'content-type': 'application/json',
        'user-agent': docker_name.USER_AGENT,
    }

    resp, content = self._transport.request(
        '{scheme}://{registry}/v2/'.format(
            scheme=Scheme(self._name.registry), registry=self._name.registry),
        'GET',
        body=None,
        headers=headers)

    # We expect a www-authenticate challenge.
    _CheckState(
        resp.status in [
            six.moves.http_client.OK, six.moves.http_client.UNAUTHORIZED
        ], 'Unexpected response pinging the registry: {}\nBody: {}'.format(
            resp.status, content or '<empty>'))

    # The registry is authenticated iff we have an authentication challenge.
    if resp.status == six.moves.http_client.OK:
      self._authentication = _ANONYMOUS
      self._service = 'none'
      self._realm = 'none'
      return

    challenge = resp['www-authenticate']
    _CheckState(' ' in challenge,
                'Unexpected "www-authenticate" header form: %s' % challenge)

    (self._authentication, remainder) = challenge.split(' ', 1)

    # Normalize the authentication scheme to have exactly the first letter
    # capitalized. Scheme matching is required to be case insensitive:
    # https://tools.ietf.org/html/rfc7235#section-2.1
    self._authentication = self._authentication.capitalize()

    _CheckState(self._authentication in [_BASIC, _BEARER],
                'Unexpected "www-authenticate" challenge type: %s' %
                self._authentication)

    # Default "_service" to the registry
    self._service = self._name.registry

    tokens = remainder.split(',')
    for t in tokens:
      if t.startswith(_REALM_PFX):
        self._realm = t[len(_REALM_PFX):].strip('"')
      elif t.startswith(_SERVICE_PFX):
        self._service = t[len(_SERVICE_PFX):].strip('"')

    # Make sure these got set.
    # NOTE(review): if the challenge contained no 'realm=' token, self._realm
    # was never assigned, so this raises AttributeError rather than the
    # intended BadStateException — confirm whether callers care.
    _CheckState(self._realm, 'Expected a "%s" in "www-authenticate" '
                'header: %s' % (_REALM_PFX, challenge))

  def _Scope(self):
    """Construct the resource scope to pass to a v2 auth endpoint."""
    return self._name.scope(self._action)

  def _Refresh(self):
    """Refreshes the Bearer token credentials underlying this transport.

    This utilizes the "realm" and "service" established during _Ping to
    set up _creds with up-to-date credentials, by passing the
    client-provided _basic_creds to the authorization realm.

    This is generally called under two circumstances:
      1) When the transport is created (eagerly)
      2) When a request fails on a 401 Unauthorized

    Raises:
      TokenRefreshException: Error during token exchange.
    """
    headers = {
        'content-type': 'application/json',
        'user-agent': docker_name.USER_AGENT,
        'Authorization': self._basic_creds.Get()
    }
    parameters = {
        'scope': self._Scope(),
        'service': self._service,
    }
    resp, content = self._transport.request(
        # 'realm' includes scheme and path
        '{realm}?{query}'.format(
            realm=self._realm,
            query=six.moves.urllib.parse.urlencode(parameters)),
        'GET',
        body=None,
        headers=headers)

    if resp.status != six.moves.http_client.OK:
      raise TokenRefreshException('Bad status during token exchange: %d\n%s' %
                                  (resp.status, content))

    try:
      content = content.decode('utf8')
    except:  # pylint: disable=bare-except
      # Assume it's already decoded. Defensive coding for old py2 habits that
      # are hard to break. Passing does not make the problem worse.
      pass

    wrapper_object = json.loads(content)
    # Some registries return the token under 'token', others 'access_token'.
    token = wrapper_object.get('token') or wrapper_object.get('access_token')
    _CheckState(token is not None,
                'Malformed JSON response: %s' % content)

    with self._lock:
      # We have successfully reauthenticated.
      self._creds = v2_creds.Bearer(token)

  # pylint: disable=invalid-name
  def Request(self,
              url,
              accepted_codes=None,
              method=None,
              body=None,
              content_type=None):
    """Wrapper containing much of the boilerplate REST logic for Registry calls.

    Args:
      url: the URL to which to talk
      accepted_codes: the list of acceptable http status codes
      method: the HTTP method to use (defaults to GET/PUT depending on
        whether body is provided)
      body: the body to pass into the PUT request (or None for GET)
      content_type: the mime-type of the request (or None for JSON).
        content_type is ignored when body is None.

    Raises:
      BadStateException: an unexpected internal state has been encountered.
      V2DiagnosticException: an error has occurred interacting with v2.

    Returns:
      The response of the HTTP request, and its contents.
    """
    if not method:
      method = 'GET' if not body else 'PUT'

    # NOTE(review): accepted_codes defaults to None, but the membership test
    # below requires a container; call sites in this package always pass a
    # list — confirm before relying on the default.

    # If the first request fails on a 401 Unauthorized, then refresh the
    # Bearer token and retry, if the authentication mode is bearer.
    for retry in [self._authentication == _BEARER, False]:
      # self._creds may be changed by self._Refresh(), so do
      # not hoist this.
      headers = {
          'user-agent': docker_name.USER_AGENT,
      }
      auth = self._creds.Get()
      if auth:
        headers['Authorization'] = auth

      if body:  # Requests w/ bodies should have content-type.
        headers['content-type'] = (
            content_type if content_type else 'application/json')

      # POST/PUT require a content-length, when no body is supplied.
      if method in ('POST', 'PUT') and not body:
        headers['content-length'] = '0'

      resp, content = self._transport.request(
          url, method, body=body, headers=headers)
      if resp.status != six.moves.http_client.UNAUTHORIZED:
        break
      elif retry:
        # On Unauthorized, refresh the credential and retry.
        self._Refresh()

    if resp.status not in accepted_codes:
      # Use the content returned by GCR as the error message.
      raise V2DiagnosticException(resp, content)

    return resp, content

  def PaginatedRequest(self,
                       url,
                       accepted_codes=None,
                       method=None,
                       body=None,
                       content_type=None):
    """Wrapper around Request that follows Link headers if they exist.

    Args:
      url: the URL to which to talk
      accepted_codes: the list of acceptable http status codes
      method: the HTTP method to use (defaults to GET/PUT depending on
        whether body is provided)
      body: the body to pass into the PUT request (or None for GET)
      content_type: the mime-type of the request (or None for JSON)

    Yields:
      The return value of calling Request for each page of results.
    """
    next_page = url

    while next_page:
      resp, content = self.Request(next_page, accepted_codes, method, body,
                                   content_type)
      yield resp, content

      next_page = ParseNextLinkHeader(resp)
def ParseNextLinkHeader(resp):
  """Returns "next" link from RFC 5988 Link header or None if not present."""
  header = resp.get('link')
  if header:
    match = re.match(r'.*<(.+)>;\s*rel="next".*', header)
    if match:
      return match.group(1)
  return None
def Scheme(endpoint):
  """Returns https scheme for all the endpoints except localhost."""
  # Local registries ('localhost:port', '*.local', '*.localhost', with an
  # optional port) are reached over plain http; everything else uses https.
  is_local = (endpoint.startswith('localhost:') or
              re.match(r'.*\.local(?:host)?(?::\d{1,5})?$', endpoint))
  return 'http' if is_local else 'https'

View File

@@ -0,0 +1,319 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package provides DockerImage for examining docker_build outputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import gzip
import io
import json
import os
import tarfile
from typing import Any, Dict, Iterator, List, Set, Text, Union # pylint: disable=g-multiple-import,unused-import
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2 import docker_digest
from containerregistry.client.v2 import docker_http
import httplib2
import six
import six.moves.http_client
class DigestMismatchedError(Exception):
  """Exception raised when a digest mismatch is encountered.

  Raised when content fetched from a registry does not hash to the digest
  under which it was addressed (manifest or blob).
  """
class DockerImage(six.with_metaclass(abc.ABCMeta, object)):
  """Interface for implementations that interact with Docker images."""

  def fs_layers(self):
    """The ordered collection of filesystem layers that comprise this image."""
    parsed = json.loads(self.manifest())
    return [entry['blobSum'] for entry in parsed['fsLayers']]

  def blob_set(self):
    """The unique set of blobs that compose to create the filesystem."""
    return {blob_sum for blob_sum in self.fs_layers()}

  def digest(self):
    """The digest of the manifest."""
    return docker_digest.SignedManifestToSHA256(self.manifest())

  # pytype: disable=bad-return-type
  @abc.abstractmethod
  def manifest(self):
    """The JSON manifest referenced by the tag/digest.

    Returns:
      The raw json manifest
    """
  # pytype: enable=bad-return-type

  def blob_size(self, digest):
    """The byte size of the raw blob."""
    return len(self.blob(digest))

  # pytype: disable=bad-return-type
  @abc.abstractmethod
  def blob(self, digest):
    """The raw blob of the layer.

    Args:
      digest: the 'algo:digest' of the layer being addressed.

    Returns:
      The raw blob bytes of the layer.
    """
  # pytype: enable=bad-return-type

  def uncompressed_blob(self, digest):
    """Same as blob() but uncompressed."""
    compressed = io.BytesIO(self.blob(digest))
    reader = gzip.GzipFile(mode='rb', fileobj=compressed)
    return reader.read()

  def diff_id(self, digest):
    """diff_id only exist in schema v22."""
    return None

  # __enter__ and __exit__ allow use as a context manager.
  @abc.abstractmethod
  def __enter__(self):
    """Open the image for reading."""

  @abc.abstractmethod
  def __exit__(self, unused_type, unused_value, unused_traceback):
    """Close the image."""

  def __str__(self):
    """A human-readable representation of the image."""
    return str(type(self))
class FromRegistry(DockerImage):
  """This accesses a docker image hosted on a registry (non-local).

  Args:
    name: the docker_name (Tag, Digest or Repository) identifying what to
      fetch.
    basic_creds: credentials used to mint the authenticated v2 transport.
    transport: the raw http transport wrapped in __enter__.
  """

  def __init__(self, name, basic_creds, transport):
    super().__init__()
    self._name = name
    self._creds = basic_creds
    self._original_transport = transport
    # Cache of raw response bodies, keyed by the v2 URL suffix (see _content).
    self._response = {}

  def _content(self, suffix, cache=True):
    """Fetches content of the resources from registry by http calls."""
    if isinstance(self._name, docker_name.Repository):
      suffix = '{repository}/{suffix}'.format(
          repository=self._name.repository, suffix=suffix)

    if suffix in self._response:
      return self._response[suffix]

    # NOTE(review): self._transport is only created in __enter__; calling
    # any fetching method before entering the context manager raises
    # AttributeError — confirm this is the intended usage contract.
    _, content = self._transport.Request(
        '{scheme}://{registry}/v2/{suffix}'.format(
            scheme=docker_http.Scheme(self._name.registry),
            registry=self._name.registry,
            suffix=suffix),
        accepted_codes=[six.moves.http_client.OK])
    if cache:
      self._response[suffix] = content
    return content

  def _tags(self):
    """Returns the decoded tags/list payload for this repository."""
    # See //cloud/containers/registry/proto/v2/tags.proto
    # for the full response structure.
    return json.loads(self._content('tags/list').decode('utf8'))

  def tags(self):
    """Returns the list of tags in this repository (empty if none)."""
    return self._tags().get('tags', [])

  def digest(self):
    """The digest of the manifest."""
    # When addressed by digest, trust the name rather than re-hashing.
    if isinstance(self._name, docker_name.Digest):
      return self._name.digest
    return super().digest()

  def manifests(self):
    """Returns the digest-keyed manifest map from tags/list, if present."""
    payload = self._tags()
    if 'manifest' not in payload:
      # Only GCR supports this schema.
      return {}
    return payload['manifest']

  def children(self):
    """Returns the child repositories from tags/list, if present."""
    payload = self._tags()
    if 'child' not in payload:
      # Only GCR supports this schema.
      return []
    return payload['child']

  def exists(self):
    """Returns True if the manifest can be fetched, False on 404."""
    try:
      self.manifest(validate=False)
      return True
    except docker_http.V2DiagnosticException as err:
      if err.status == six.moves.http_client.NOT_FOUND:
        return False
      raise

  def manifest(self, validate=True):
    """Override.

    Args:
      validate: whether to check that a digest-addressed manifest actually
        hashes to the requested digest.

    Raises:
      DigestMismatchedError: validate was True and the digests differ.
    """
    # GET server1/v2/<name>/manifests/<tag_or_digest>
    if isinstance(self._name, docker_name.Tag):
      return self._content('manifests/' + self._name.tag).decode('utf8')
    else:
      assert isinstance(self._name, docker_name.Digest)
      c = self._content('manifests/' + self._name.digest).decode('utf8')
      # v2 removes signatures to compute the manifest digest, this is hard.
      computed = docker_digest.SignedManifestToSHA256(c)
      if validate and computed != self._name.digest:
        raise DigestMismatchedError(
            'The returned manifest\'s digest did not match requested digest, '
            '%s vs. %s' % (self._name.digest, computed))
      return c

  def blob_size(self, digest):
    """The byte size of the raw blob."""
    # Uses a HEAD request so the blob body is never transferred.
    suffix = 'blobs/' + digest
    if isinstance(self._name, docker_name.Repository):
      suffix = '{repository}/{suffix}'.format(
          repository=self._name.repository, suffix=suffix)
    resp, unused_content = self._transport.Request(
        '{scheme}://{registry}/v2/{suffix}'.format(
            scheme=docker_http.Scheme(self._name.registry),
            registry=self._name.registry,
            suffix=suffix),
        method='HEAD',
        accepted_codes=[six.moves.http_client.OK])
    return int(resp['content-length'])

  # Large, do not memoize.
  def blob(self, digest):
    """Override.

    Raises:
      DigestMismatchedError: the fetched bytes do not hash to digest.
    """
    # GET server1/v2/<name>/blobs/<digest>
    c = self._content('blobs/' + digest, cache=False)
    computed = docker_digest.SHA256(c)
    if digest != computed:
      raise DigestMismatchedError(
          'The returned content\'s digest did not match its content-address, '
          '%s vs. %s' % (digest, computed if c else '(content was empty)'))
    return c

  def catalog(self, page_size=100):
    """Yields the repository names in this registry's catalog, paginated."""
    # TODO(user): Handle docker_name.Repository for /v2/<name>/_catalog
    if isinstance(self._name, docker_name.Repository):
      raise ValueError('Expected docker_name.Registry for "name"')

    url = '{scheme}://{registry}/v2/_catalog?n={page_size}'.format(
        scheme=docker_http.Scheme(self._name.registry),
        registry=self._name.registry,
        page_size=page_size)

    for _, content in self._transport.PaginatedRequest(
        url, accepted_codes=[six.moves.http_client.OK]):
      wrapper_object = json.loads(content)

      if 'repositories' not in wrapper_object:
        raise docker_http.BadStateException(
            'Malformed JSON response: %s' % content)

      for repo in wrapper_object['repositories']:
        # TODO(user): This should return docker_name.Repository instead.
        yield repo

  # __enter__ and __exit__ allow use as a context manager.
  def __enter__(self):
    # Create a v2 transport to use for making authenticated requests.
    self._transport = docker_http.Transport(
        self._name, self._creds, self._original_transport, docker_http.PULL)
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    pass

  def __str__(self):
    return '<docker_image.FromRegistry name: {}>'.format(str(self._name))
def _in_whiteout_dir(fs, name):
while name:
dirname = os.path.dirname(name)
if name == dirname:
break
if fs.get(dirname):
return True
name = dirname
return False
_WHITEOUT_PREFIX = '.wh.'
def extract(image, tar):
"""Extract the final filesystem from the image into tar.
Args:
image: a docker image whose final filesystem to construct.
tar: the open tarfile into which we are writing the final filesystem.
"""
# Maps all of the files we have already added (and should never add again)
# to whether they are a tombstone or not.
fs = {}
# Walk the layers, topmost first and add files. If we've seen them in a
# higher layer then we skip them.
for layer in image.fs_layers():
buf = io.BytesIO(image.blob(layer))
with tarfile.open(mode='r:gz', fileobj=buf) as layer_tar:
for member in layer_tar.getmembers():
# If we see a whiteout file, then don't add anything to the tarball
# but ensure that any lower layers don't add a file with the whited
# out name.
basename = os.path.basename(member.name)
dirname = os.path.dirname(member.name)
tombstone = basename.startswith(_WHITEOUT_PREFIX)
if tombstone:
basename = basename[len(_WHITEOUT_PREFIX):]
# Before adding a file, check to see whether it (or its whiteout) have
# been seen before.
name = os.path.normpath(os.path.join('.', dirname, basename))
if name in fs:
continue
# Check for a whited out parent directory
if _in_whiteout_dir(fs, name):
continue
# Mark this file as handled by adding its name.
# A non-directory implicitly tombstones any entries with
# a matching (or child) name.
fs[name] = tombstone or not member.isdir()
if not tombstone:
if member.isfile():
tar.addfile(member, fileobj=layer_tar.extractfile(member.name))
else:
tar.addfile(member, fileobj=None)

View File

@@ -0,0 +1,335 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manages pushes to and deletes from a v2 docker registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import concurrent.futures
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2 import docker_http
from containerregistry.client.v2 import docker_image
import httplib2
import six.moves.http_client
import six.moves.urllib.parse
def _tag_or_digest(name):
  """Return the tag or digest component of a Tag/Digest docker name."""
  if isinstance(name, docker_name.Tag):
    return name.tag
  # Only Tag and Digest names are valid here.
  assert isinstance(name, docker_name.Digest)
  return name.digest
class Push(object):
"""Push encapsulates a Registry v2 Docker push session."""
def __init__(self,
name,
creds,
transport,
mount = None,
threads = 1):
"""Constructor.
If multiple threads are used, the caller *must* ensure that the provided
transport is thread-safe, as well as the image that is being uploaded.
It is notable that tarfile and httplib2.Http in Python are NOT threadsafe.
Args:
name: the fully-qualified name of the tag to push
creds: provider for authorizing requests
transport: the http transport to use for sending requests
mount: list of repos from which to mount blobs.
threads: the number of threads to use for uploads.
Raises:
ValueError: an incorrectly typed argument was supplied.
"""
self._name = name
self._transport = docker_http.Transport(name, creds, transport,
docker_http.PUSH)
self._mount = mount
self._threads = threads
def name(self):
return self._name
def _scheme_and_host(self):
return '{scheme}://{registry}'.format(
scheme=docker_http.Scheme(self._name.registry),
registry=self._name.registry)
def _base_url(self):
return self._scheme_and_host() + '/v2/{repository}'.format(
repository=self._name.repository)
def _get_absolute_url(self, location):
# If 'location' is an absolute URL (includes host), this will be a no-op.
return six.moves.urllib.parse.urljoin(
base=self._scheme_and_host(), url=location)
def blob_exists(self, digest):
"""Check the remote for the given layer."""
# HEAD the blob, and check for a 200
resp, unused_content = self._transport.Request(
'{base_url}/blobs/{digest}'.format(
base_url=self._base_url(), digest=digest),
method='HEAD',
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
])
return resp.status == six.moves.http_client.OK # pytype: disable=attribute-error
def manifest_exists(self, image):
"""Check the remote for the given manifest by digest."""
# GET the manifest by digest, and check for 200
resp, unused_content = self._transport.Request(
'{base_url}/manifests/{digest}'.format(
base_url=self._base_url(), digest=image.digest()),
method='GET',
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
])
return resp.status == six.moves.http_client.OK # pytype: disable=attribute-error
def _monolithic_upload(self, image,
digest):
self._transport.Request(
'{base_url}/blobs/uploads/?digest={digest}'.format(
base_url=self._base_url(), digest=digest),
method='POST',
body=image.blob(digest),
accepted_codes=[six.moves.http_client.CREATED])
def _add_digest(self, url, digest):
scheme, netloc, path, query_string, fragment = (
six.moves.urllib.parse.urlsplit(url))
qs = six.moves.urllib.parse.parse_qs(query_string)
qs['digest'] = [digest]
query_string = six.moves.urllib.parse.urlencode(qs, doseq=True)
return six.moves.urllib.parse.urlunsplit((scheme, netloc, path, # pytype: disable=bad-return-type
query_string, fragment))
def _put_upload(self, image, digest):
mounted, location = self._start_upload(digest, self._mount)
if mounted:
logging.info('Layer %s mounted.', digest)
return
location = self._add_digest(location, digest)
self._transport.Request(
location,
method='PUT',
body=image.blob(digest),
accepted_codes=[six.moves.http_client.CREATED])
  # pylint: disable=missing-docstring
  def patch_upload(self, source,
                   digest):
    """Upload a blob using the three-step POST/PATCH/PUT protocol.

    This is the 3-stage pattern used by docker clients since 1.8:
      POST  begin the upload (optionally cross-repo mounting the blob);
      PATCH send the full blob body to the returned upload location;
      PUT   commit the upload (no body) with the digest as a query param.

    Args:
      source: either a DockerImage (from which the blob is read) or the raw
        blob contents themselves.
      digest: the digest of the blob being uploaded.
    """
    mounted, location = self._start_upload(digest, self._mount)
    if mounted:
      # The registry copied the blob from another repository; nothing to send.
      logging.info('Layer %s mounted.', digest)
      return
    # The returned location may be relative; resolve against the registry host.
    location = self._get_absolute_url(location)
    blob = source
    if isinstance(source, docker_image.DockerImage):
      blob = source.blob(digest)
    resp, unused_content = self._transport.Request(
        location,
        method='PATCH',
        body=blob,
        content_type='application/octet-stream',
        accepted_codes=[
            six.moves.http_client.NO_CONTENT, six.moves.http_client.ACCEPTED,
            six.moves.http_client.CREATED
        ])
    # Commit: PUT with no body to the location the PATCH returned, with the
    # digest appended to its query string.
    location = self._add_digest(resp['location'], digest)
    location = self._get_absolute_url(location)
    self._transport.Request(
        location,
        method='PUT',
        body=None,
        accepted_codes=[six.moves.http_client.CREATED])
def _put_blob(self, image, digest):
"""Upload the aufs .tgz for a single layer."""
# We have a few choices for unchunked uploading:
# POST to /v2/<name>/blobs/uploads/?digest=<digest>
# Fastest, but not supported by many registries.
# self._monolithic_upload(image, digest)
#
# or:
# POST /v2/<name>/blobs/uploads/ (no body*)
# PUT /v2/<name>/blobs/uploads/<uuid> (full body)
# Next fastest, but there is a mysterious bad interaction
# with Bintray. This pattern also hasn't been used in
# clients since 1.8, when they switched to the 3-stage
# method below.
# self._put_upload(image, digest)
# or:
# POST /v2/<name>/blobs/uploads/ (no body*)
# PATCH /v2/<name>/blobs/uploads/<uuid> (full body)
# PUT /v2/<name>/blobs/uploads/<uuid> (no body)
#
# * We attempt to perform a cross-repo mount if any repositories are
# specified in the "mount" parameter. This does a fast copy from a
# repository that is known to contain this blob and skips the upload.
self.patch_upload(image, digest)
def _remote_tag_digest(self):
"""Check the remote for the given manifest by digest."""
# GET the tag we're pushing
resp, unused_content = self._transport.Request(
'{base_url}/manifests/{tag}'.format(
base_url=self._base_url(),
tag=self._name.tag), # pytype: disable=attribute-error
method='GET',
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
])
if resp.status == six.moves.http_client.NOT_FOUND: # pytype: disable=attribute-error
return None
return resp.get('docker-content-digest')
def put_manifest(self, image):
"""Upload the manifest for this image."""
self._transport.Request(
'{base_url}/manifests/{tag_or_digest}'.format(
base_url=self._base_url(),
tag_or_digest=_tag_or_digest(self._name)),
method='PUT',
body=image.manifest().encode('utf8'),
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.CREATED,
six.moves.http_client.ACCEPTED
])
  def _start_upload(self,
                    digest,
                    mount = None
                   ):
    """POST to begin the upload process with optional cross-repo mount param.

    Args:
      digest: the digest of the blob about to be uploaded.
      mount: optional list of repositories known to already contain this
        blob; when supplied the registry is asked to mount the blob from one
        of them instead of receiving a fresh upload.

    Returns:
      A (mounted, location) pair, where 'mounted' is True when the registry
      performed a cross-repo mount (201 CREATED, no upload needed) and
      'location' is the upload URL from the response's 'location' header.
    """
    if not mount:
      # Do a normal POST to initiate an upload if mount is missing.
      url = '{base_url}/blobs/uploads/'.format(base_url=self._base_url())
      accepted_codes = [six.moves.http_client.ACCEPTED]
    else:
      # If we have a mount parameter, try to mount the blob from another repo.
      # NOTE(review): this iterates self._mount rather than the 'mount'
      # argument; identical for current callers, which always pass
      # self._mount — confirm before relying on the parameter.
      mount_from = '&'.join([
          'from=' + six.moves.urllib.parse.quote(repo.repository, '')
          for repo in self._mount
      ])
      url = '{base_url}/blobs/uploads/?mount={digest}&{mount_from}'.format(
          base_url=self._base_url(), digest=digest, mount_from=mount_from)
      accepted_codes = [
          six.moves.http_client.CREATED, six.moves.http_client.ACCEPTED
      ]
    resp, unused_content = self._transport.Request(
        url, method='POST', body=None, accepted_codes=accepted_codes)
    # 201 CREATED signals that the cross-repo mount succeeded outright.
    # pytype: disable=attribute-error,bad-return-type
    return resp.status == six.moves.http_client.CREATED, resp.get('location')
    # pytype: enable=attribute-error,bad-return-type
def _upload_one(self, image, digest):
"""Upload a single layer, after checking whether it exists already."""
if self.blob_exists(digest):
logging.info('Layer %s exists, skipping', digest)
return
self._put_blob(image, digest)
logging.info('Layer %s pushed.', digest)
  def upload(self, image):
    """Upload the layers of the given image, then its manifest.

    Args:
      image: the image to upload.
    """
    # If the manifest (by digest) exists, then avoid N layer existence
    # checks (they must exist).
    if self.manifest_exists(image):
      if isinstance(self._name, docker_name.Tag):
        if self._remote_tag_digest() == image.digest():
          logging.info('Tag points to the right manifest, skipping push.')
          return
        logging.info('Manifest exists, skipping blob uploads and pushing tag.')
      else:
        logging.info('Manifest exists, skipping upload.')
    elif self._threads == 1:
      # Sequential upload of each blob.
      for digest in image.blob_set():
        self._upload_one(image, digest)
    else:
      # Concurrent upload; future.result() re-raises any worker exception.
      with concurrent.futures.ThreadPoolExecutor(
          max_workers=self._threads) as executor:
        future_to_params = {
            executor.submit(self._upload_one, image, digest): (image, digest)
            for digest in image.blob_set()
        }
        for future in concurrent.futures.as_completed(future_to_params):
          future.result()
    # This should complete the upload by uploading the manifest.
    self.put_manifest(image)
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, exception_type, unused_value, unused_traceback):
if exception_type:
logging.error('Error during upload of: %s', self._name)
return
logging.info('Finished upload of: %s', self._name)
# pylint: disable=invalid-name
def Delete(name, creds, transport):
  """Delete a tag or digest.

  Args:
    name: a tag or digest to be deleted.
    creds: the credentials to use for deletion.
    transport: the transport to use to contact the registry.
  """
  session = docker_http.Transport(name, creds, transport, docker_http.DELETE)
  url = '{scheme}://{registry}/v2/{repository}/manifests/{entity}'.format(
      scheme=docker_http.Scheme(name.registry),
      registry=name.registry,
      repository=name.repository,
      entity=_tag_or_digest(name))
  session.Request(
      url,
      method='DELETE',
      accepted_codes=[six.moves.http_client.OK, six.moves.http_client.ACCEPTED])

View File

@@ -0,0 +1,141 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package holds a handful of utilities for manipulating manifests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import json
import os
import subprocess
from containerregistry.client import docker_name
class BadManifestException(Exception):
  """Raised when a malformed manifest is encountered."""
def _JoseBase64UrlDecode(message):
  """Perform a JOSE-style base64 decoding of the supplied message.

  This is based on the docker/libtrust version of the similarly named
  function found here:
    https://github.com/docker/libtrust/blob/master/util.go

  Args:
    message: a JOSE-style base64 url-encoded message.

  Raises:
    BadManifestException: a malformed message was supplied.

  Returns:
    The decoded message.
  """
  raw = message.encode('utf8')
  # JOSE strips '=' padding; restore it based on the length modulo 4.
  # A remainder of 1 is impossible in valid base64.
  pad = {0: b'', 2: b'==', 3: b'='}.get(len(raw) % 4)
  if pad is None:
    raise BadManifestException('Malformed JOSE Base64 encoding.')
  return base64.urlsafe_b64decode(raw + pad).decode('utf8')
def _ExtractProtectedRegion(signature):
  """Extract the length and encoded suffix denoting the protected region."""
  decoded = _JoseBase64UrlDecode(signature['protected'])
  fields = json.loads(decoded)
  return (fields['formatLength'], fields['formatTail'])
def _ExtractCommonProtectedRegion(
    signatures):
  """Verify that the signatures agree on the protected region and return one."""
  regions = [_ExtractProtectedRegion(sig) for sig in signatures]
  first = regions[0]
  if any(region != first for region in regions[1:]):
    raise BadManifestException('Signatures disagree on protected region')
  return first
def DetachSignatures(manifest):
  """Detach the signatures from the signed manifest and return the two halves.

  Args:
    manifest: a signed JSON manifest.

  Raises:
    BadManifestException: the provided manifest was improperly signed.

  Returns:
    a pair consisting of the manifest with the signature removed and a list of
    the removed signatures.
  """
  # Decode the manifest and pull out its signature list.
  signatures = json.loads(manifest)['signatures']
  # Basic validation of the signature input.
  if not signatures:
    raise BadManifestException('Expected a signed manifest.')
  for sig in signatures:
    if 'protected' not in sig:
      raise BadManifestException('Signature is missing "protected" key')
  # Recover the protected region and splice it back out of the raw string.
  (format_length, format_tail) = _ExtractCommonProtectedRegion(signatures)
  suffix = _JoseBase64UrlDecode(format_tail)
  return (manifest[:format_length] + suffix, signatures)
def Sign(unsigned_manifest):
  """Sign the provided manifest; currently a pass-through no-op.

  TODO(user): Implement v2 signing in Python.
  """
  return unsigned_manifest
def _AttachSignatures(manifest,
                      signatures):
  """Attach the provided signatures to the provided naked manifest."""
  (format_length, format_tail) = _ExtractCommonProtectedRegion(signatures)
  prefix = manifest[:format_length]
  suffix = _JoseBase64UrlDecode(format_tail)
  encoded_sigs = json.dumps(signatures, sort_keys=True)
  return '%s,"signatures":%s%s' % (prefix, encoded_sigs, suffix)
def Rename(manifest, name):
  """Rename this signed manifest to the provided name, and resign it."""
  unsigned, _ = DetachSignatures(manifest)
  parsed = json.loads(unsigned)
  # Point the manifest at its new repository and tag.
  parsed['name'] = name.repository
  parsed['tag'] = name.tag
  # Reserialize and re-sign the updated manifest.
  return Sign(json.dumps(parsed, sort_keys=True, indent=2))

View File

@@ -0,0 +1,188 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package provides compatibility interfaces for v1/v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from containerregistry.client.v1 import docker_image as v1_image
from containerregistry.client.v2 import docker_digest
from containerregistry.client.v2 import docker_image as v2_image
from containerregistry.client.v2 import util
from six.moves import zip # pylint: disable=redefined-builtin
class V1FromV2(v1_image.DockerImage):
  """This compatibility interface serves the v1 interface from a v2 image."""

  def __init__(self, v2_img):
    """Constructor.

    Args:
      v2_img: a v2 DockerImage on which __enter__ has already been called.
    """
    self._v2_image = v2_img
    self._ComputeLayerMapping()

  def _ComputeLayerMapping(self):
    """Parse the v2 manifest and extract indices to efficiently answer v1 apis.

    This reads the v2 manifest, corrolating the v1 compatibility and v2 fsLayer
    arrays and creating three indices for efficiently answering v1 queries:
      self._v1_to_v2: dict, maps from v1 layer id to v2 digest
      self._v1_json: dict, maps from v1 layer id to v1 json
      self._v1_ancestry: list, the order of the v1 layers
    """
    manifest = json.loads(self._v2_image.manifest())
    blob_sums = [layer['blobSum'] for layer in manifest['fsLayers']]
    compat_jsons = [entry['v1Compatibility'] for entry in manifest['history']]

    self._v1_to_v2 = {}
    self._v1_json = {}
    self._v1_ancestry = []
    # Walk the two parallel arrays once, deduplicating repeated v1 ids.
    for (compat_json, blob_sum) in zip(compat_jsons, blob_sums):
      v1_id = json.loads(compat_json)['id']
      if v1_id in self._v1_to_v2:
        # A duplicate id must describe exactly the same layer.
        assert self._v1_to_v2[v1_id] == blob_sum
        assert self._v1_json[v1_id] == compat_json
        continue
      self._v1_to_v2[v1_id] = blob_sum
      self._v1_json[v1_id] = compat_json
      self._v1_ancestry.append(v1_id)

  # Already effectively memoized.
  def top(self):
    """Override."""
    return self._v1_ancestry[0]

  def repositories(self):
    """Override."""
    # TODO(user): This is only used in v1-specific test code.
    pass

  def parent(self, layer_id):
    """Override."""
    chain = self.ancestry(layer_id)
    return chain[1] if len(chain) > 1 else None

  # Already effectively memoized.
  def json(self, layer_id):
    """Override."""
    return self._v1_json.get(layer_id, '{}')

  # Large, don't memoize
  def uncompressed_layer(self, layer_id):
    """Override."""
    return self._v2_image.uncompressed_blob(self._v1_to_v2.get(layer_id))

  # Large, don't memoize
  def layer(self, layer_id):
    """Override."""
    return self._v2_image.blob(self._v1_to_v2.get(layer_id))

  def diff_id(self, digest):  # pytype: disable=signature-mismatch # overriding-return-type-checks
    """Override."""
    return self._v2_image.diff_id(self._v1_to_v2.get(digest))

  def ancestry(self, layer_id):
    """Override."""
    start = self._v1_ancestry.index(layer_id)
    return self._v1_ancestry[start:]

  # __enter__ and __exit__ allow use as a context manager.
  def __enter__(self):
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    pass
class V2FromV1(v2_image.DockerImage):
  """This compatibility interface serves the v2 interface from a v1 image."""

  def __init__(self, v1_img):
    """Constructor.

    Args:
      v1_img: a v1 DockerImage on which __enter__ has already been called.

    Raises:
      ValueError: an incorrectly typed argument was supplied.
    """
    self._v1_image = v1_img
    # Construct a manifest from the v1 image, including establishing mappings
    # from v2 layer digests to v1 layer ids.
    self._ProcessImage()

  def _ProcessImage(self):
    """Build the signed v2 manifest and the v2 digest -> v1 layer id index."""
    self._layer_map = {}
    fs_layers = []
    for layer_id in self._v1_image.ancestry(self._v1_image.top()):
      digest = docker_digest.SHA256(self._v1_image.layer(layer_id))
      fs_layers.append({'blobSum': digest})
      self._layer_map[digest] = layer_id
    history = [{
        'v1Compatibility': self._v1_image.json(layer_id)
    } for layer_id in self._v1_image.ancestry(self._v1_image.top())]
    unsigned = json.dumps(
        {
            'schemaVersion': 1,
            'name': 'unused',
            'tag': 'unused',
            'architecture': 'amd64',
            'fsLayers': fs_layers,
            'history': history,
        },
        sort_keys=True)
    self._manifest = util.Sign(unsigned)

  def manifest(self):
    """Override."""
    return self._manifest

  def uncompressed_blob(self, digest):
    """Override."""
    return self._v1_image.uncompressed_layer(self._layer_map[digest])

  def blob(self, digest):
    """Override."""
    return self._v1_image.layer(self._layer_map[digest])

  # __enter__ and __exit__ allow use as a context manager.
  def __enter__(self):
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    pass