feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add, replace or delete the cached resource URIs from a single collection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
from googlecloudsdk.core.cache import resource_cache
import six
class _TableRows(object):
"""An _UpdateCacheOp._GetTablesFromUris dict entry."""
def __init__(self, table):
self.table = table
self.rows = []
@six.add_metaclass(abc.ABCMeta)
class _UpdateCacheOp(object):
  """The cache update operation base class.

  Attributes:
    _completer_class: The Completer class used to parse URIs into rows and
      to map each row to its cache table.
  """

  def __init__(self, completer):
    self._completer_class = completer

  def Update(self, uris):
    """Applies UpdateRows() to tables that contain the resources uris.

    Args:
      uris: The list of resource URI strings to apply to the cache.
    """
    try:
      with resource_cache.ResourceCache() as cache:
        completer = self._completer_class(cache=cache)
        # Group rows by target table so each table gets one UpdateRows()
        # call; tables maps table name -> _TableRows(table, rows).
        tables = {}
        for uri in uris:
          row = completer.StringToRow(uri)
          table = completer.GetTableForRow(row)
          entry = tables.get(table.name)
          if not entry:
            entry = _TableRows(table)
            tables[table.name] = entry
          entry.rows.append(row)
        # BUG FIX: iterate the _TableRows entries and pass the Table object
        # and its row list. The previous code iterated (name, entry) pairs
        # and passed the table *name* string and the _TableRows container to
        # UpdateRows(), so every update raised AttributeError which the
        # broad except below silently swallowed -- the cache never updated.
        for entry in six.itervalues(tables):
          self.UpdateRows(entry.table, entry.rows)
    except Exception:  # pylint: disable=broad-except
      # Cache updates are best-effort; never break the command on a cache
      # failure.
      pass

  @abc.abstractmethod
  def UpdateRows(self, table, rows):
    """Updates table with rows."""
    pass
class AddToCacheOp(_UpdateCacheOp):
  """An AddToCache operation: inserts resource rows into the cache."""

  def UpdateRows(self, table, rows):
    """Adds rows to table; rows with existing primary keys are overwritten."""
    table.AddRows(rows)
class DeleteFromCacheOp(_UpdateCacheOp):
  """A DeleteFromCache operation: removes resource rows from the cache."""

  def UpdateRows(self, table, rows):
    """Deletes the rows matching rows from table."""
    table.DeleteRows(rows)
class ReplaceCacheOp(_UpdateCacheOp):
  """A ReplaceCache operation: rows become the table's entire contents."""

  def UpdateRows(self, table, rows):
    """Replaces table contents with rows."""
    # DeleteRows() with no templates drops every existing row first.
    table.DeleteRows()
    table.AddRows(rows)
class NoCacheUpdater(resource_cache.BaseUpdater):
  """A cache updater that performs no updates (completion caching disabled)."""

View File

@@ -0,0 +1,127 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Cloud SDK completion cache.
A completion cache is a persistent cache that stores the current list of names
for resources visible to the caller. The cache generates lists of resources
that match prefixes and/or patterns, suitable for command line completers. The
name representation is resource specific. See core.resource.resource_style for
details.
Refer to the resource_cache module for a detailed description of resource
parsing and representation.
+---------------------------+
| completion cache |
| +-----------------------+ |
| | completer | |
| +-----------------------+ |
| ... |
+---------------------------+
A completion cache is implemented as an extended ResourceCache object that
contains Completer objects. A Completer object:
* has a Complete() method that returns resource strings matching a pattern
* has methods to convert between strings and parameter tuples
* has an underlying ResourceCache Collection object that holds parameter tuples
* derives from resource_cache.Updater to update the collection parameter tuples
This module is resource agnostic. All resource specific information is
encapsulated in resource specific Completer objects.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
from googlecloudsdk.core.cache import resource_cache
import six
@six.add_metaclass(abc.ABCMeta)
class Completer(resource_cache.Updater):
  """A completion cache resource string completer.

  Along with the Complete() method, a completer has two main functions, each
  handled by a mixin:
  (1) Convert between resource string and parameter tuple representations.
  (2) Retrieve the current list of resources for the collection. See
      resource_cache.Updater for details.
  """

  @abc.abstractmethod
  def StringToRow(self, string):
    """Returns the row representation of string.

    May fill in some column values.

    Args:
      string: The resource string representation.

    Returns:
      The row representation of string.
    """
    pass

  def RowToTemplate(self, row):
    """Returns the row template of row for the Resource.Complete method.

    By default all parameters are treated as prefixes.

    Args:
      row: The resource parameter tuple.

    Returns:
      The row template of row for the Resource.Complete method.
    """
    # Pad short rows out to the full column count with empty strings, then
    # turn every column into a prefix pattern unless it already contains a
    # wildcard.
    padded = list(row)
    if len(padded) < self.columns:
      padded.extend([''] * (self.columns - len(padded)))
    return [col if '*' in col else col + '*' for col in padded]

  @abc.abstractmethod
  def RowToString(self, row, parameter_info=None):
    """Returns the string representation of row.

    Args:
      row: The resource parameter tuple.
      parameter_info: A ParameterInfo object for accessing parameter values in
        the program state.

    Returns:
      The string representation of row.
    """
    pass

  def Complete(self, prefix, parameter_info):
    """Returns the list of strings matching prefix.

    Args:
      prefix: The resource prefix string to match.
      parameter_info: A ParameterInfo object for accessing parameter values in
        the program state.

    Returns:
      The list of strings matching prefix.
    """
    template = self.RowToTemplate(self.StringToRow(prefix))
    return [self.RowToString(match, parameter_info)
            for match in self.Select(template, parameter_info)]

View File

@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions for the Cloud SDK persistent cache module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
class Error(Exception):
  """Base for all persistent cache exceptions; catch this to handle any."""
class CacheVersionMismatch(Error):
  """Raised when the stored cache version differs from the requested one.

  Attributes:
    actual: The version string found in the cache.
    requested: The version string the caller expected.
  """

  def __init__(self, message, actual, requested):
    self.actual = actual
    self.requested = requested
    super(CacheVersionMismatch, self).__init__(message)
class CacheInvalid(Error):
  """Cache object is invalid."""
class CacheNameInvalid(Error):
  """Raised when a name is not a valid cache name."""
class CacheNotFound(Error):
  """Raised when the requested cache does not exist."""
class CacheTableDeleted(Error):
  """Raised on access to a cache table that has been deleted."""
class CacheTableExpired(Error):
  """Raised on read access to a cache table whose data has expired."""
class CacheTableRestricted(Error):
  """Raised on user access to a restricted (implementation-only) table."""
class CacheTableNameInvalid(Error):
  """Raised when a cache table name is invalid."""
class CacheTableColumnsInvalid(Error):
  """Raised when a cache table column count is invalid."""
class CacheTableKeysInvalid(Error):
  """Raised when a cache table primary key count is invalid."""
class CacheTableNotFound(Error):
  """Raised when the requested cache table does not exist."""
class CacheTableRowSizeInvalid(Error):
  """Raised when a cache table row or row template has an incorrect size."""

View File

@@ -0,0 +1,261 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A persistent cache implementation using files.
See the persistent_cache module for a detailed description.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import errno
import fnmatch
import json
import os
from googlecloudsdk.core.cache import exceptions
from googlecloudsdk.core.cache import metadata_table
from googlecloudsdk.core.cache import persistent_cache_base
from googlecloudsdk.core.util import files
import six
from six.moves import range # pylint: disable=redefined-builtin
class _Table(persistent_cache_base.Table):
  """A persistent cache table backed by one JSON file per table.

  The file holds a JSON list of row lists and is rewritten in full by
  _Commit().

  Attributes:
    name: The table name.
    deleted: Table was deleted if True.
    restricted: Table is restricted if True.
    modified: Table modify timestamp.
    timeout: Tables older than timeout are invalid.
    _cache: The parent cache object.
    _rows: The list of rows in the table.
  """

  def __init__(self, cache, name, columns=1, keys=1, timeout=0, modified=0,
               restricted=False):
    """Initializes the table, loading existing rows from its file if any."""
    self._rows = None
    super(_Table, self).__init__(cache, name, columns=columns, keys=keys,
                                 timeout=timeout, modified=modified,
                                 restricted=restricted)
    if restricted:
      self._cache._restricted.add(name)  # pylint: disable=protected-access
    self.deleted = False
    try:
      contents = files.ReadFileContents(
          os.path.join(self._cache.name, self.EncodeName(name)))
    except files.MissingFileError:
      # New table -- no backing file yet; mark changed so _Commit() writes it.
      contents = None
      self.changed = True
    except files.Error:
      # NOTE(review): this re-raise is effectively a no-op -- other
      # files.Error subtypes propagate unchanged. Kept for explicitness.
      raise
    if contents:
      # JSON has no tuple type; restore each row list to the tuple form the
      # rest of the cache code expects.
      self._rows = [tuple(r) for r in json.loads(contents)]
    else:
      self._rows = []
    # pylint: disable=protected-access
    if self._cache._metadata:
      # Register with the cache only once metadata exists; the metadata table
      # itself is constructed before _cache._metadata is set.
      self._cache._tables[name] = self

  def Delete(self):
    """Deletes the table: expires it, drops its rows and its metadata row."""
    self.Invalidate()
    self.DeleteRows()
    # pylint: disable=protected-access
    self._cache._metadata.DeleteRows([(self.name,)])
    self.deleted = True

  def _Commit(self):
    """Commits changed/deleted table data to the table file."""
    if self.changed:
      self.changed = False
      path = os.path.join(self._cache.name, self.EncodeName(self.name))
      # pylint: disable=protected-access
      if self.deleted:
        self.deleted = False
        self._cache._metadata.DeleteRows([(self.name,)])
        del self._cache._tables[self.name]
        try:
          os.remove(path)
        except OSError as e:
          # The deleted table might have never been committed.
          if e.errno != errno.ENOENT:
            raise
      else:
        # Add or overwrite this table's metadata row, then rewrite the whole
        # data file.
        self._cache._metadata.AddRows(
            [metadata_table.Metadata.Row(
                name=self.name,
                columns=self.columns,
                keys=self.keys,
                timeout=self.timeout,
                modified=self.modified,
                restricted=self.restricted,
                version=self._cache.version)])
        files.WriteFileContents(path, json.dumps(self._rows))

  def _RowEqual(self, a, b):
    """Returns True if rows a and b have the same primary key."""
    return a[:self.keys] == b[:self.keys]

  def _RowMatch(self, row_template, row):
    """Returns True if row_template matches row.

    A None template column matches any value; string columns match via
    fnmatch ('*'/'?' wildcards); other columns must compare equal.
    """
    if row_template:
      for i in range(len(row_template)):
        if row_template[i] is not None:
          if (isinstance(row_template[i], six.string_types) and
              isinstance(row[i], six.string_types)):
            if not fnmatch.fnmatch(row[i], row_template[i]):
              return False
          elif row_template[i] != row[i]:
            return False
    return True

  def _AnyRowMatch(self, row_templates, row):
    """Returns True if any template in row_templates matches row."""
    for row_template in row_templates:
      if self._RowMatch(row_template, row):
        return True
    return False

  def AddRows(self, rows):
    """Adds each row in rows to the table; existing keys are overwritten."""
    self._CheckRows(rows)
    self.changed = True
    # Stable-sort old+new rows by primary key so duplicate keys become
    # adjacent, with the new row after the old one it replaces.
    rows = sorted(self._rows + list(rows), key=lambda x: x[:self.keys])
    self._rows = []
    i = 0
    while i < len(rows):
      # Skip over dup keys, keep the last, which is a new entry thanks to the
      # stable sort above.
      while i < len(rows) - 1 and self._RowEqual(rows[i], rows[i + 1]):
        i += 1
      self._rows.append(rows[i])
      i += 1

  def DeleteRows(self, row_templates=None):
    """Deletes each row matching any of row_templates; all rows if None."""
    self.changed = True
    if row_templates:
      self._CheckRowTemplates(row_templates)
      keep = []
      for row in self._rows:
        if not self._AnyRowMatch(row_templates, row):
          keep.append(row)
      self._rows = keep
    else:
      self._rows = []

  def Select(self, row_template=None, ignore_expiration=False):
    """Returns the list of rows that match row_template, None for all.

    Raises:
      CacheTableExpired: If the table has expired and expiry isn't ignored.
    """
    if row_template is not None:
      self._CheckRowTemplates([row_template])
    # modified == 0 marks an expired table; restricted tables never expire.
    if not ignore_expiration and not self.restricted and not self.modified:
      raise exceptions.CacheTableExpired(
          '[{}] cache table [{}] has expired.'.format(
              self._cache.name, self.name))
    matched = []
    for row in self._rows:
      if row and self._RowMatch(row_template, row):
        matched.append(row)
    return matched
class Cache(metadata_table.CacheUsingMetadataTable):
  """A file-backed persistent cache object.

  Attributes:
    name: The db path name. Created/removed by this object. In this
      implementation it is a directory holding one file per table plus the
      lock meta file.
    timeout: The default table timeout.
    version: A caller defined version string that must match the version string
      stored when the persistent object was created.
    _lock: The cache lock object. None if no files have been committed yet.
    _lock_path: The cache lock meta file.
    _metadata: The metadata restricted _Table.
    _persistent: True if the persistent object has been committed at least once.
    _restricted: The set of restricted table names.
    _start: The cache instance start time.
    _tables: The map of open table objects.
  """

  def __init__(self, name, create=True, timeout=None, version=None):
    """Opens (or lazily creates) the cache directory at name.

    Args:
      name: The directory path of the cache.
      create: If True a missing cache may be created (deferred to the first
        Commit()); if False the cache must already exist.
      timeout: The default table timeout in seconds.
      version: Version string that must match the stored cache version.

    Raises:
      CacheNotFound: If create=False and the cache does not exist.
      CacheInvalid: If name exists but is not a cache directory.
    """
    super(Cache, self).__init__(
        _Table, name, create=create, timeout=timeout, version=version)
    lock_name = '__lock__'
    self._restricted = set([lock_name])
    self._tables = {}
    self._metadata = None
    self._start = persistent_cache_base.Now()
    self._lock_path = os.path.join(self.name, lock_name)
    self._lock = None
    self._persistent = False
    if not os.path.exists(self.name):
      # Directory creation is deferred to the first Commit().
      if not create:
        raise exceptions.CacheNotFound(
            'Persistent cache [{}] not found.'.format(self.name))
    elif not os.path.exists(self._lock_path):
      # An existing directory without the lock file was not created by us.
      raise exceptions.CacheInvalid(
          '[{}] is not a persistent cache.'.format(self.name))
    else:
      # self.name exists and is a directory, and self._lock_path exists.
      self._persistent = True
      self._lock = files.FileLock(self._lock_path, timeout_secs=2)
      self._lock.Lock()
    try:
      self.InitializeMetadata()
    except exceptions.Error:
      # Make sure we clean up any dangling resources.
      self.Close(commit=False)
      raise

  def Delete(self):
    """Permanently deletes the persistent cache."""
    self.Close(commit=False)
    if self._persistent:
      files.RmTree(self.name)
      self._persistent = False

  def Commit(self):
    """Commits all operations up to this point."""
    if not self._lock:
      # First commit of a newly created cache: materialize the directory
      # (owner-only permissions) and take the lock.
      os.mkdir(self.name, 0o700)
      self._persistent = True
      self._lock = files.FileLock(self._lock_path, timeout_secs=2)
      self._lock.Lock()
    # Update the changed tables.
    for table in list([x for x in self._tables.values() if x.changed]):
      table._Commit()  # pylint: disable=protected-access
    if self._metadata.changed:
      self._metadata._Commit()  # pylint: disable=protected-access

  def Close(self, commit=True):
    """Closes the cache, optionally committing any changes.

    Args:
      commit: Commits any changes before closing if True.
    """
    if commit:
      self.Commit()
    if self._lock:
      self._lock.Unlock()
      self._lock = None
    self._metadata = None
    self._tables = None

View File

@@ -0,0 +1,58 @@
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for caching the result of function calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import functools
class FakeLruCache:
  """No-op stand-in exposing the functools.lru_cache wrapper interface.

  Used on Python 2, where functools.lru_cache does not exist: every call
  passes straight through to the wrapped function and nothing is cached.
  """

  def __init__(self, function):
    self._function = function

  def cache_clear(self):
    """No-op; present so callers of the real LRU interface don't break."""

  def __call__(self, *args, **kwargs):
    # Always delegate -- there is no cache to consult.
    return self._function(*args, **kwargs)
def lru(maxsize=128):
  """Returns cached result if function was run with same args before.

  Wraps functools.lru_cache so the attribute is not referenced at import
  time on Python 2 and on unsupported Python 3 distributions.

  Args:
    maxsize (int|None): From Python functools docs: "...saves up to the
      maxsize most recent calls... If maxsize is set to None, the LRU
      feature is disabled and the cache can grow without bound."

  Returns:
    Wrapped functools.lru_cache.
  """

  def _wrapper(function):
    real_lru_cache = getattr(functools, 'lru_cache', None)
    if real_lru_cache is None:
      # Python 2: fall back to the pass-through fake.
      return FakeLruCache(function)
    return real_lru_cache(maxsize=maxsize)(function)

  return _wrapper

View File

@@ -0,0 +1,266 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A persistent cache metadata table implementation layer.
Used by persistent cache implementations that maintain a metadata table to keep
track of cache tables.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
from googlecloudsdk.core.cache import exceptions
from googlecloudsdk.core.cache import persistent_cache_base
import six
class Metadata(object):
  """Metadata table row container.

  This object encapsulates the persistent metadata table row layout.

  Attributes:
    name: The table name.
    columns: The number of columns in the table. Must be >= 1.
    keys: The number of columns, starting from 0, that form the primary
      row key. Must be 1 <= keys <= columns. The primary key is used to
      differentiate rows in the AddRows and DeleteRows methods.
    timeout: A float number of seconds. Tables older than (modified+timeout)
      are invalid. 0 means no timeout.
    modified: Table modify Now() (time.time()) value. 0 for expired tables.
    restricted: True if Table is restricted.
    version: A caller defined version string that must match the version string
      stored when the persistent object was created. '' for all but the
      metadata table itself.
  """

  # Number of columns in a metadata row; must match Row() below.
  COLUMNS = 7

  def __init__(self, row):
    """Unpacks a metadata table row into named attributes."""
    name, columns, keys, timeout, modified, restricted, version = row
    self.name = name
    self.columns = columns
    self.keys = keys
    self.timeout = timeout
    self.modified = modified
    # Stored as an int flag; expose it as a bool.
    self.restricted = bool(restricted)
    self.version = version

  @classmethod
  def Row(cls, name=None, columns=None, keys=None, timeout=None,
          modified=None, restricted=None, version=None):
    """Constructs and returns a metadata table row from the args."""
    # None means "unspecified" (a match-all template column); only encode the
    # restricted flag as an int when it was actually supplied.
    encoded_restricted = None if restricted is None else int(restricted)
    return (name, columns, keys, timeout, modified, encoded_restricted,
            version)
@six.add_metaclass(abc.ABCMeta)
class CacheUsingMetadataTable(persistent_cache_base.Cache):
  """A persistent cache metadata table implementation layer.

  Keeps one bookkeeping row per cache table in a restricted '__metadata__'
  table (see Metadata for the row layout).

  Attributes:
    _metadata: A table containing a row for each table.
    _table_class: The cache Table class.
    _restricted: The set of restricted table names.
    _tables: The map of open table objects.
  """

  def __init__(self, table, name, create=True, timeout=0, version=None):
    """Initializes the metadata layer.

    Args:
      table: The implementation-specific Table class.
      name: The persistent cache name.
      create: Creates the cache if it doesn't exist if True.
      timeout: The default table timeout in seconds.
      version: The caller defined cache version string.
    """
    super(CacheUsingMetadataTable, self).__init__(
        name, create=create, timeout=timeout, version=version)
    self._metadata = None
    self._table_class = table
    self._restricted = None
    self._tables = None

  @abc.abstractmethod
  def Delete(self):
    """Permanently deletes the cache; implemented by the backend."""
    pass

  @abc.abstractmethod
  def Commit(self):
    """Commits pending changes; implemented by the backend."""
    pass

  @abc.abstractmethod
  def Close(self, commit=True):
    """Closes the cache; implemented by the backend.

    Args:
      commit: Commits any changes before closing if True.
    """
    pass

  def _ImplementationCreateTable(self, name, columns, keys):
    """Implementation layer _CreateTable.

    The cache implementation object can override this method to do
    implementation specific table initialization.

    Args:
      name: The table name.
      columns: The number of columns in each row.
      keys: The number of columns, left to right, that are primary keys. 0 for
        all columns.
    """
    pass

  def _CreateTable(self, name, restricted, columns, keys, timeout):
    """Creates and returns a table object for name.

    NOTE: This code is conditioned on self._metadata. If self._metadata is None
    then we are initializing/updating the metadata table. The table name is
    relaxed, in particular '_' is allowed in the table name. This avoids user
    table name conflicts. Finally, self._metadata is set and the metadata
    table row is updated to reflect any changes in the default timeout.

    Args:
      name: The table name.
      restricted: Return a restricted table object.
      columns: The number of columns in each row.
      keys: The number of columns, left to right, that are primary keys. 0 for
        all columns.
      timeout: The number of seconds after last modification when the table
        becomes invalid. 0 for no timeout.

    Raises:
      CacheTableNameInvalid: If name is invalid.

    Returns:
      A table object for name.
    """
    # Default and validate the column and primary key counts.
    if columns is None:
      columns = 1
    if columns < 1:
      raise exceptions.CacheTableColumnsInvalid(
          '[{}] table [{}] column count [{}] must be >= 1.'.format(
              self.name, name, columns))
    if keys is None:
      keys = columns
    if keys < 1 or keys > columns:
      raise exceptions.CacheTableKeysInvalid(
          '[{}] table [{}] primary key count [{}] must be >= 1 and <= {}.'
          .format(self.name, name, keys, columns))
    if timeout is None:
      timeout = self.timeout
    self._ImplementationCreateTable(name, columns, keys)
    table = self._table_class(self,
                              name=name,
                              columns=columns,
                              keys=keys,
                              timeout=timeout,
                              modified=0,
                              restricted=restricted)
    if self._metadata:
      version = None
    else:
      # Initializing the metadata table -- get its Table object.
      self._metadata = table
      table.Validate()
      rows = table.Select(Metadata.Row(name=name))
      row = rows[0] if rows else None
      if row:
        # The metadata table already existed: reconcile the caller's version
        # and default timeout with the stored values.
        metadata = Metadata(row)
        if self.version is None:
          self.version = metadata.version or ''
        elif self.version != metadata.version:
          raise exceptions.CacheVersionMismatch(
              '[{}] cache version [{}] does not match [{}].'.format(
                  self.name, metadata.version, self.version),
              metadata.version, self.version)
        if self.timeout is None:
          self.timeout = metadata.timeout
        table.modified = 0
      version = self.version
    # Add (or overwrite) this table's row in the metadata table.
    self._metadata.AddRows([Metadata.Row(
        name=table.name,
        columns=table.columns,
        keys=table.keys,
        timeout=table.timeout,
        modified=table.modified,
        restricted=table.restricted,
        version=version)])
    return table

  def Table(self, name, create=True, restricted=False, columns=None, keys=None,
            timeout=None):
    """Returns the Table object for existing table name.

    Args:
      name: The table name.
      create: If True creates the table if it does not exist.
      restricted: Return a restricted table object.
      columns: The number of columns in each row.
      keys: The number of columns, left to right, that are primary keys. 0 for
        all columns.
      timeout: The number of seconds after last modification when the table
        becomes invalid. 0 for no timeout. If None then the default cache
        timeout is assumed.

    Raises:
      CacheTableNameInvalid: name is not a valid table name.
      CacheTableNotFound: If the table does not exist.

    Returns:
      A Table object for name.
    """
    if name in self._restricted:
      raise exceptions.CacheTableRestricted(
          '[{}] cache table [{}] is restricted.'.format(self.name, name))
    table = self._tables.get(name, None)
    if table:
      if not table.deleted:
        # An already-open table must agree with the requested shape.
        if columns is not None and columns != table.columns:
          raise exceptions.CacheTableColumnsInvalid(
              '[{}] cache table [{}] columns [{}] does not match existing {}.'
              .format(self.name, name, columns, table.columns))
        if keys is not None and keys != table.keys:
          raise exceptions.CacheTableKeysInvalid(
              '[{}] cache table [{}] keys [{}] does not match existing {}.'
              .format(self.name, name, keys, table.keys))
        return table
      if not create:
        raise exceptions.CacheTableNotFound(
            '[{}] cache table [{}] not found.'.format(self.name, name))
    if self._metadata:
      # Not open yet -- try to instantiate it from its metadata row.
      rows = self._metadata.Select(Metadata.Row(name=name))
      row = rows[0] if rows else None
      if row:
        metadata = Metadata(row)
        return self._table_class(self,
                                 name=metadata.name,
                                 columns=metadata.columns,
                                 keys=metadata.keys,
                                 timeout=metadata.timeout,
                                 modified=metadata.modified,
                                 restricted=metadata.restricted)
    if not create:
      raise exceptions.CacheTableNotFound(
          '[{}] cache table [{}] not found.'.format(self.name, name))
    return self._CreateTable(name, restricted, columns, keys, timeout)

  def InitializeMetadata(self):
    """Initializes the metadata table and self._metadata."""
    self.Table('__metadata__', restricted=True, columns=Metadata.COLUMNS,
               keys=1, timeout=0)

  def Select(self, name=None):
    """Returns the list of unrestricted table names matching name.

    Args:
      name: The table name pattern. None for all unrestricted tables. May
        contain the * and ? pattern match characters.

    Returns:
      The list of unrestricted table names matching name.
    """
    rows = self._metadata.Select(Metadata.Row(name=name, restricted=False))
    return [Metadata(row).name for row in rows]

View File

@@ -0,0 +1,361 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Cloud SDK persistent cache abstract base classes.
A persistent cache is a long-lived object that contains cache data and metadata.
Cache data is organized into zero or more named tables. Table data is an
unordered list of row tuples of fixed length. Column value types within a row
are fixed and may be one of string (basestring or unicode), floating point, or
integer.
+-----------------------+
| persistent cache |
| +-------------------+ |
| | table | |
| | (key,...,col,...) | |
| | ... | |
| +-------------------+ |
| ... |
+-----------------------+
A persistent cache is implemented as a Cache object that contains Table objects.
Each table has a timeout and last modified time attribute. Read access on a
table that has expired is an error. The rows in a table have a fixed number of
columns specified by the columns attribute. The keys attribute is the count of
columns in a table row, left to right, that forms the primary key. The primary
key is used to differentiate rows. Adding a row that already exists is not an
error. The row is simply replaced by the new data.
A Table object can be restricted and hidden from cache users. These tables
must be instantiated when the Cache object is instantiated, before the first
user access to the cache. This allows a cache implementation layer to have
tables that are hidden from the layers above it.
The table select and delete methods match against a row template. A template may
have fewer columns than the number of columns in the table. Omitted template
columns or columns with value None match all values for that column. '*' and '?'
matching operators are supported for string columns. It is not an error to
select or delete a row that does not exist.
HINTS for IMPLEMENTERS
By default the Cache and Table constructors create the objects if they don't
exist. The create=False kwarg disables this and raises an exception if the
object does not exist. In addition, the Select ignore_expiration=True kwarg
disables expiry check. These can be used by meta commands/functions to view
and debug cache data without modifying the underlying persistent data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import time
from googlecloudsdk.core.cache import exceptions
import six
import six.moves.urllib.parse
def Now():
  """Returns the current time as float seconds since the epoch (time.time())."""
  return time.time()
@six.add_metaclass(abc.ABCMeta)
class Table(object):
"""A persistent cache table object.
This object should only be instantiated by a Cache object.
The AddRows and DeleteRows methods operate on lists of rows rather than a
  single row. This accommodates sqlite3 (and possibly other implementation
layers) that batch rows ops. Restricting to a single row would rule out
batching.
Attributes:
cache: The parent cache object.
changed: Table data or metadata changed if True.
name: The table name.
modified: Table modify Now() (time.time()) value. 0 for expired tables.
restricted: True if Table is restricted.
timeout: A float number of seconds. Tables older than (modified+timeout)
are invalid. 0 means no timeout.
"""
def __init__(self, cache, name, columns=1, keys=1, timeout=0, modified=0,
restricted=False):
self._cache = cache
self.name = name
self.restricted = restricted
self.modified = modified
self.changed = False
self.timeout = timeout or 0
self.columns = columns
self.keys = keys
# Determine is the table has expired once at initialization time. We expect
# callers to keep cache or table objects open for a few seconds at most.
# Given that it doesn't make sense to do a few operations in that window
# only to have the last one expire.
if timeout and modified and (modified + timeout) < Now():
self.Invalidate()
  @property
  def is_expired(self):
    """True if the table data has expired.

    Expired tables have a self.modified value of 0. Expiry is currently
    computed once when the table object is instantiated. This property shields
    callers from that implementation detail.

    Returns:
      True if the table data has expired.
    """
    return not self.modified
@classmethod
def EncodeName(cls, name):
r"""Returns name encoded for file system path compatibility.
A table name may be a file name. alnum and '_.-' are not encoded.
Args:
name: The cache name string to encode.
Raises:
CacheTableNameInvalid: For invalid table names.
Returns:
Name encoded for portability.
"""
if not name:
raise exceptions.CacheTableNameInvalid(
'Cache table name [{}] is invalid.'.format(name))
return six.moves.urllib.parse.quote(name, '!@+,')
def _CheckRows(self, rows):
"""Raise an exception if the size of any row in rows is invalid.
Each row size must be equal to the number of columns in the table.
Args:
rows: The list of rows to check.
Raises:
CacheTableRowSizeInvalid: If any row has an invalid size.
"""
for row in rows:
if len(row) != self.columns:
raise exceptions.CacheTableRowSizeInvalid(
'Cache table [{}] row size [{}] is invalid. Must be {}.'.format(
self.name, len(row), self.columns))
def _CheckRowTemplates(self, rows):
"""Raise an exception if the size of any row template in rows is invalid.
Each row template must have at least 1 column and no more than the number
of columns in the table.
Args:
rows: The list of rows to check.
Raises:
CacheTableRowSizeInvalid: If any row template size is invalid.
"""
for row in rows:
if not 1 <= len(row) <= self.columns:
if self.columns == 1:
limits = '1'
else:
limits = '>= 1 and <= {}'.format(self.columns)
raise exceptions.CacheTableRowSizeInvalid(
'Cache table [{}] row size [{}] is invalid. Must be {}.'.format(
self.name, len(row), limits))
def Invalidate(self):
"""Invalidates the table by marking it expired."""
self.changed = True
self.modified = 0
def Validate(self, timeout=None):
"""Validates the table and resets the TTL."""
if timeout is not None:
self.timeout = timeout or 0
self.modified = Now()
self.changed = True
  @abc.abstractmethod
  def Delete(self):
    """Deletes the table.

    Implementations drop the table data and remove its metadata entry.
    """
    pass
  @abc.abstractmethod
  def AddRows(self, rows):
    """Adds each row in rows to the table. Existing rows are overwritten.

    The number of columns in each row must be equal to the number of columns
    in the table.

    Args:
      rows: A list of rows to add. Existing rows are overwritten.
    """
    pass
  @abc.abstractmethod
  def DeleteRows(self, row_templates=None):
    """Deletes each row in the table matching any of the row_templates.

    Args:
      row_templates: A list of row templates. See Select() below for a
        detailed description of templates. None deletes all rows and is
        allowed for expired tables.
    """
    pass
  @abc.abstractmethod
  def Select(self, row_template=None, ignore_expiration=False):
    """Returns the list of rows that match row_template.

    Args:
      row_template: A row template. The number of columns in the template
        must not exceed the number of columns in the table. An omitted column
        or column with value None matches all values for the column. A None
        value for row_template matches all rows. Each string column may
        contain these wildcard characters:
          * - match zero or more characters
          ? - match any character
      ignore_expiration: Disable table expiration checks if True.

    Raises:
      CacheTableExpired: If the table has expired.

    Returns:
      The list of rows that match row_template.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class Cache(object):
  r"""A persistent cache object.

  This class is also a context manager: changes are automatically committed
  when the context exits without an exception in flight. For example:

    with CacheImplementation('my-cache-name') as c:
      ...

  Attributes:
    name: The persistent cache name. Created/removed by this object.
      Internally encoded by Cache.EncodeName().
    timeout: The default table timeout in seconds. 0 for no timeout.
    version: A caller defined version string that must match the version
      string stored when the persistent object was created.
  """

  def __init__(self, name, create=True, timeout=None, version=None):
    self.name = Cache.EncodeName(name)
    del create  # Unused in __init__. Subclass constructors may use this.
    self.timeout = timeout
    self.version = version

  def __enter__(self):
    return self

  def __exit__(self, typ, value, traceback):
    # Commit only on a clean exit (no exception in flight).
    self.Close(commit=typ is None)

  @classmethod
  def EncodeName(cls, name):
    r"""Returns name encoded for filesystem portability.

    A cache name may be a file path. The part after the rightmost of
    ('/', '\\') is encoded with Table.EncodeName().

    Args:
      name: The cache name string to encode.

    Raises:
      CacheNameInvalid: For invalid cache names.

    Returns:
      Name encoded for filesystem portability.
    """
    cut = max(name.rfind('/'), name.rfind('\\')) + 1
    directory, base = name[:cut], name[cut:]
    if not base:
      raise exceptions.CacheNameInvalid(
          'Cache name [{}] is invalid.'.format(name))
    return directory + Table.EncodeName(base)

  @abc.abstractmethod
  def Delete(self):
    """Permanently deletes the cache."""
    pass

  def Invalidate(self):
    """Invalidates the cache by invalidating all of its tables."""
    for table_name in self.Select():
      self.Table(table_name).Invalidate()

  @abc.abstractmethod
  def Commit(self):
    """Commits all changes up to this point."""
    pass

  @abc.abstractmethod
  def Close(self, commit=True):
    """Closes the cache, optionally committing any changes.

    Args:
      commit: Commits any changes before closing if True.
    """
    pass

  @abc.abstractmethod
  def Table(self, name, create=True, columns=1, keys=1, timeout=None):
    """Returns the Table object for existing table name.

    Args:
      name: The table name.
      create: If True creates the table if it does not exist.
      columns: The number of columns in the table. Must be >= 1.
      keys: The number of columns, starting from 0, that form the primary
        row key. Must be 1 <= keys <= columns. The primary key is used to
        differentiate rows in the AddRows and DeleteRows methods.
      timeout: The table timeout in seconds, 0 for no timeout.

    Raises:
      CacheTableNameInvalid: name is not a valid table name.
      CacheTableNotFound: If the table does not exist.

    Returns:
      A Table object for name.
    """
    pass

  @abc.abstractmethod
  def Select(self, name=None):
    """Returns the list of table names matching name.

    Args:
      name: The table name pattern to match, None for all tables. The pattern
        may contain these wildcard characters:
          * - match zero or more characters
          ? - match any character
    """
    pass

View File

@@ -0,0 +1,550 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Cloud SDK resource cache.
A resource is an object maintained by a service. Each resource has a
corresponding URI. A URI is composed of one or more parameters. A
service-specific resource parser extracts the parameter tuple from a URI. A
corresponding resource formatter reconstructs the URI from the parameter tuple.
Each service has an API List request that returns the list of resource URIs
visible to the caller. Some APIs are aggregated and return the list of all URIs
for all parameter values. Other APIs are not aggregated and require one or more
of the parsed parameter tuple values to be specified in the list request. This
means that getting the list of all URIs for a non-aggregated resource requires
multiple List requests, ranging over the combination of all values for all
aggregate parameters.
A collection is a list of resource URIs in a service visible to the caller. The
collection name uniquely identifies the collection and the service.
A resource cache is a persistent cache that stores parsed resource parameter
tuples for multiple collections. The data for a collection is in one or more
tables.
+---------------------------+
| resource cache |
| +-----------------------+ |
| | collection | |
| | +-------------------+ | |
| | | table | | |
| | | (key,...,col,...) | | |
| | | ... | | |
| | +-------------------+ | |
| | ... | |
| +-----------------------+ |
| ... |
+---------------------------+
A resource cache is implemented as a ResourceCache object that contains
Collection objects. A Collection is a virtual table that contains one or more
persistent cache tables. Each Collection is also an Updater that handles
resource parsing and updates. Updates are typically done by service List or
Query requests that populate the tables.
The Updater objects make this module resource agnostic. For example, there
could be updater objects that are not associated with a URI. The ResourceCache
doesn't care.
If the List request API for a collection aggregates then its parsed parameter
tuples are contained in one table. Otherwise the collection is stored in
multiple tables. The total number of tables is determined by the number of
aggregate parameters for the List API, and the number of values each aggregate
parameter can take on.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import os
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import module_util
from googlecloudsdk.core import properties
from googlecloudsdk.core.cache import exceptions
from googlecloudsdk.core.cache import file_cache
from googlecloudsdk.core.util import encoding
from googlecloudsdk.core.util import files
import six
# Rollout hedge just in case a cache implementation causes problems.
try:
from googlecloudsdk.core.cache import sqlite_cache # pylint: disable=g-import-not-at-top, sqlite3 is not ubiquitous
except ImportError:
sqlite_cache = None
if (sqlite_cache and
'sql' in encoding.GetEncodedValue(
os.environ, 'CLOUDSDK_CACHE_IMPLEMENTATION', 'sqlite')):
PERSISTENT_CACHE_IMPLEMENTATION = sqlite_cache
else:
PERSISTENT_CACHE_IMPLEMENTATION = file_cache
DEFAULT_TIMEOUT = 1*60*60
VERSION = 'googlecloudsdk.resource-1.0'
class ParameterInfo(object):
  """An object for accessing parameter values in the program state.

  "program state" is defined by this class. It could include parsed command
  line arguments and properties. The class can also map between resource and
  program parameter names.

  Attributes:
    _additional_params: The list of parameter names not in the parsed
      resource.
    _updaters: A parameter_name => (Updater, aggregator) dict.
  """

  def __init__(self, additional_params=None, updaters=None):
    self._additional_params = additional_params or []
    self._updaters = updaters or {}

  def GetValue(self, parameter_name, check_properties=True):
    """Returns the program state string value for parameter_name.

    This base implementation knows no program state and always returns None.

    Args:
      parameter_name: The Parameter name.
      check_properties: Check the property value if True.

    Returns:
      The parameter value from the program state.
    """
    del parameter_name, check_properties  # Unused by the base class.
    return None

  def GetAdditionalParams(self):
    """Returns the list of parameter names not in the parsed resource.

    These names are associated with the resource but not a specific parameter
    in the resource. For example a global resource might not have a global
    Boolean parameter in the parsed resource, but its command line
    specification might require a --global flag to completely qualify the
    resource.

    Returns:
      The list of parameter names not in the parsed resource.
    """
    return self._additional_params

  def GetUpdater(self, parameter_name):
    """Returns the updater and aggregator property for parameter_name.

    Args:
      parameter_name: The Parameter name.

    Returns:
      An (updater, aggregator) tuple where updater is the Updater class and
      aggregator is True if this updater must be used to aggregate all
      resource values.
    """
    updater, aggregator = self._updaters.get(parameter_name, (None, None))
    return updater, aggregator
class Parameter(object):
  """A parsed resource tuple parameter descriptor.

  A parameter tuple has one or more columns. Each has a Parameter descriptor.

  Attributes:
    column: The parameter tuple column index.
    name: The parameter name.
  """

  def __init__(self, column=0, name=None):
    self.name = name
    self.column = column
class _RuntimeParameter(Parameter):
  """A runtime Parameter.

  Attributes:
    aggregator: True if parameter is an aggregator (not aggregated by the
      updater).
    generate: True if values must be generated for this parameter.
    updater_class: The updater class.
    value: A default value from the program state.
  """

  def __init__(self, parameter, updater_class, value, aggregator):
    super(_RuntimeParameter, self).__init__(
        parameter.column, name=parameter.name)
    self.aggregator = aggregator
    self.value = value
    self.updater_class = updater_class
    self.generate = False
# A minimal common ancestor so callers can type-check updater wrappers without
# pulling in the full Updater machinery.
class BaseUpdater(object):
  """A base object for thin updater wrappers."""
@six.add_metaclass(abc.ABCMeta)
class Updater(BaseUpdater):
  """A resource cache table updater.

  An updater returns a list of parsed parameter tuples that replaces the rows
  in one cache table. It can also adjust the table timeout.

  The parameters may have their own updaters. These objects are organized as
  a tree with one resource at the root.

  Attributes:
    cache: The persistent cache object.
    collection: The resource collection name.
    columns: The number of columns in the parsed resource parameter tuple.
    parameters: A list of Parameter objects.
    timeout: The resource table timeout in seconds, 0 for no timeout (0 is
      easy to represent in a persistent cache tuple which holds strings and
      numbers).
  """

  def __init__(self,
               cache=None,
               collection=None,
               columns=0,
               column=0,
               parameters=None,
               timeout=DEFAULT_TIMEOUT):
    """Updater constructor.

    Args:
      cache: The persistent cache object.
      collection: The resource collection name that (1) uniquely names the
        table(s) for the parsed resource parameters (2) is the lookup name of
        the resource URI parser. Resource collection names are unique by
        definition. Non-resource collection names must not clash with
        resource collections names. Prepending a '.' to non-resource
        collections names will avoid the clash.
      columns: The number of columns in the parsed resource parameter tuple.
        Must be >= 1.
      column: If this is an updater for an aggregate parameter then the
        updater produces a table of aggregate_resource tuples. The parent
        collection copies aggregate_resource[column] to a column in its own
        resource parameter tuple.
      parameters: A list of Parameter objects.
      timeout: The resource table timeout in seconds, 0 for no timeout.
    """
    super(Updater, self).__init__()
    self.cache = cache
    self.collection = collection
    # Updaters with no collection produce a single aggregate value column.
    self.columns = columns if collection else 1
    self.column = column
    self.parameters = parameters or []
    self.timeout = timeout or 0

  def _GetTableName(self, suffix_list=None):
    """Returns the table name; the module path if no collection.

    Args:
      suffix_list: a list of values to attach to the end of the table name.
        Typically, these will be aggregator values, like project ID.

    Returns: a name to use for the table in the cache DB.
    """
    if self.collection:
      name = [self.collection]
    else:
      name = [module_util.GetModulePath(self)]
    if suffix_list:
      name.extend(suffix_list)
    return '.'.join(name)

  def _GetRuntimeParameters(self, parameter_info):
    """Constructs and returns the _RuntimeParameter list.

    This method constructs a mutable shadow of self.parameters with
    updater_class and table instantiations. Each runtime parameter can be:

    (1) A static value derived from parameter_info.
    (2) A parameter with its own updater_class. The updater is used to list
        all of the possible values for the parameter.
    (3) An unknown value (None). The possible values are contained in the
        resource cache for self.

    The Select method combines the caller supplied row template and the
    runtime parameters to filter the list of parsed resources in the
    resource cache.

    Args:
      parameter_info: A ParameterInfo object for accessing parameter values
        in the program state.

    Returns:
      The runtime parameters shadow of the immutable self.parameters.
    """
    runtime_parameters = []
    for parameter in self.parameters:
      updater_class, aggregator = parameter_info.GetUpdater(parameter.name)
      value = parameter_info.GetValue(
          parameter.name, check_properties=aggregator)
      runtime_parameter = _RuntimeParameter(
          parameter, updater_class, value, aggregator)
      runtime_parameters.append(runtime_parameter)
    return runtime_parameters

  def ParameterInfo(self):
    """Returns the parameter info object."""
    return ParameterInfo()

  def SelectTable(self, table, row_template, parameter_info, aggregations=None):
    """Returns the list of rows matching row_template in table.

    Refreshes expired tables by calling the updater.

    Args:
      table: The persistent table object.
      row_template: A row template to match in Select().
      parameter_info: A ParameterInfo object for accessing parameter values
        in the program state.
      aggregations: A list of aggregation Parameter objects.

    Returns:
      The list of rows matching row_template in table.
    """
    if not aggregations:
      aggregations = []
    log.info('cache table=%s aggregations=[%s]',
             table.name,
             ' '.join(['{}={}'.format(x.name, x.value) for x in aggregations]))
    try:
      return table.Select(row_template)
    except exceptions.CacheTableExpired:
      # Expired: refresh the table from the updater, then retry the select
      # with expiration checks disabled.
      rows = self.Update(parameter_info, aggregations)
      if rows is not None:
        table.DeleteRows()
        table.AddRows(rows)
        table.Validate()
      return table.Select(row_template, ignore_expiration=True)

  def Select(self, row_template, parameter_info=None):
    """Returns the list of rows matching row_template in the collection.

    All tables in the collection are in play. The row matching done by the
    cache layer conveniently prunes the number of tables accessed.

    Args:
      row_template: A row template tuple. The number of columns in the
        template must match the number of columns in the collection. A
        column with value None means match all values for the column. Each
        column may contain these wildcard characters:
          * - match any string of zero or more characters
          ? - match any character
        The matching is anchored on the left.
      parameter_info: A ParameterInfo object for accessing parameter values
        in the program state.

    Returns:
      The list of rows that match the template row.
    """
    template = list(row_template)
    if self.columns > len(template):
      template += [None] * (self.columns - len(template))
    log.info(
        'cache template=[%s]', ', '.join(["'{}'".format(t) for t in template]))
    # Values keeps track of all valid permutations of values to select from
    # cache tables. The nth item in each permutation corresponds to the nth
    # parameter for which generate is True. The list of aggregations (which
    # is a list of runtime parameters that are aggregators) must also be the
    # same length as these permutations.
    values = [[]]
    aggregations = []
    parameters = self._GetRuntimeParameters(parameter_info)
    for i, parameter in enumerate(parameters):
      parameter.generate = False
      if parameter.value and template[parameter.column] in (None, '*'):
        template[parameter.column] = parameter.value
        log.info('cache parameter=%s column=%s value=%s aggregate=%s',
                 parameter.name, parameter.column, parameter.value,
                 parameter.aggregator)
        if parameter.aggregator:
          aggregations.append(parameter)
          parameter.generate = True
          for v in values:
            v.append(parameter.value)
      elif parameter.aggregator:
        aggregations.append(parameter)
        parameter.generate = True
        log.info('cache parameter=%s column=%s value=%s aggregate=%s',
                 parameter.name, parameter.column, parameter.value,
                 parameter.aggregator)
        # Updater object instantiation is on demand so they don't have to be
        # instantiated at import time in the static CLI tree. It also makes
        # it easier to serialize in the static CLI tree JSON object.
        updater = parameter.updater_class(cache=self.cache)
        sub_template = [None] * updater.columns
        sub_template[updater.column] = template[parameter.column]
        log.info('cache parameter=%s column=%s aggregate=%s',
                 parameter.name, parameter.column, parameter.aggregator)
        new_values = []
        for perm, selected in updater.YieldSelectTableFromPermutations(
            parameters[:i], values, sub_template, parameter_info):
          updater.ExtendValues(new_values, perm, selected)
        values = new_values
    if not values:
      aggregation_values = [x.value for x in aggregations]
      # Given that values is essentially a reduced crossproduct of all
      # results from the parameter updaters, it collapses to [] if any
      # intermediate update finds no results. We only want to keep going
      # here if no aggregators needed to be updated in the first place.
      if None in aggregation_values:
        return []
      table_name = self._GetTableName(suffix_list=aggregation_values)
      table = self.cache.Table(
          table_name,
          columns=self.columns,
          keys=self.columns,
          timeout=self.timeout)
      return self.SelectTable(table, template, parameter_info, aggregations)
    rows = []
    for _, selected in self.YieldSelectTableFromPermutations(
        parameters, values, template, parameter_info):
      rows.extend(selected)
    # Lazy logging args: rows is only rendered if INFO logging is enabled.
    log.info('cache rows=%s', rows)
    return rows

  def _GetParameterColumn(self, parameter_info, parameter_name):
    """Returns this updater's column number for parameter_name, or None."""
    updater_parameters = self._GetRuntimeParameters(parameter_info)
    for parameter in updater_parameters:
      if parameter.name == parameter_name:
        return parameter.column
    return None

  def ExtendValues(self, values, perm, selected):
    """Adds selected values to a template and extends the selected rows.

    Args:
      values: The list of value permutations to extend.
      perm: The permutation prefix prepended to each selected value.
      selected: The selected rows; column self.column of each row supplies
        the new value.
    """
    vals = [row[self.column] for row in selected]
    # Lazy logging args: only rendered if INFO logging is enabled.
    log.info('cache collection=%s adding values=%s', self.collection, vals)
    v = [perm + [val] for val in vals]
    values.extend(v)

  def YieldSelectTableFromPermutations(self, parameters, values, template,
                                       parameter_info):
    """Selects completions from tables using multiple permutations of values.

    For each vector in values, e.g. ['my-project', 'my-zone'], this method
    selects rows matching the template from a leaf table corresponding to
    the vector (e.g. 'my.collection.my-project.my-zone') and yields a
    2-tuple containing that vector and the selected rows.

    Args:
      parameters: [Parameter], the list of parameters up through the
        current updater belonging to the parent. These will be used to
        iterate through each permutation contained in values.
      values: list(list()), a list of lists of valid values. Each item in
        values corresponds to a single permutation of values for which
        item[n] is a possible value for the nth generator in
        parent_parameters.
      template: list(str), the template to use to select new values.
      parameter_info: ParameterInfo, the object that is used to get runtime
        values.

    Yields:
      (perm, list(list)): a 2-tuple where the first value is the permutation
      currently being used to select values and the second value is the
      result of selecting to match the permutation.
    """
    for perm in values:
      temp_perm = [val for val in perm]
      table = self.cache.Table(
          self._GetTableName(suffix_list=perm),
          columns=self.columns,
          keys=self.columns,
          timeout=self.timeout)
      aggregations = []
      for parameter in parameters:
        if parameter.generate:
          # Find the matching parameter from current updater. If the
          # parameter isn't found, the value is discarded.
          column = self._GetParameterColumn(parameter_info, parameter.name)
          if column is None:
            continue
          template[column] = temp_perm.pop(0)
          parameter.value = template[column]
        if parameter.value:
          aggregations.append(parameter)
      selected = self.SelectTable(table, template, parameter_info, aggregations)
      yield perm, selected

  def GetTableForRow(self, row, parameter_info=None, create=True):
    """Returns the table for row.

    Args:
      row: The fully populated resource row.
      parameter_info: A ParameterInfo object for accessing parameter values
        in the program state.
      create: Create the table if it doesn't exist if True.

    Returns:
      The table for row.
    """
    parameters = self._GetRuntimeParameters(parameter_info)
    values = [row[p.column] for p in parameters if p.aggregator]
    return self.cache.Table(
        self._GetTableName(suffix_list=values),
        columns=self.columns,
        keys=self.columns,
        timeout=self.timeout,
        create=create)

  @abc.abstractmethod
  def Update(self, parameter_info=None, aggregations=None):
    """Returns the list of all current parsed resource parameters."""
    del parameter_info, aggregations
class ResourceCache(PERSISTENT_CACHE_IMPLEMENTATION.Cache):
  """A resource cache object."""

  def __init__(self, name=None, create=True):
    """ResourceCache constructor.

    Args:
      name: The persistent cache object name. If None then a default name
        conditioned on the account name is used.
          <GLOBAL_CONFIG_DIR>/cache/<ACCOUNT>/resource.cache
      create: Create the cache if it doesn't exist if True.
    """
    super(ResourceCache, self).__init__(
        name=name or self.GetDefaultName(), create=create, version=VERSION)

  @staticmethod
  def GetDefaultName():
    """Returns the default resource cache file path for the active account."""
    parts = [config.Paths().cache_dir]
    account = properties.VALUES.core.account.Get(required=False)
    if account:
      parts.append(account)
    # Ensure the per-account cache directory exists before naming the file.
    files.MakeDir(os.path.join(*parts))
    parts.append('resource.cache')
    return os.path.join(*parts)
def Delete(name=None):
  """Deletes the current persistent resource cache however it's implemented."""
  cache_name = name or ResourceCache.GetDefaultName()
  # Keep trying implementations until one recognizes the cache file; a
  # CacheInvalid error means "not mine, try the next implementation".
  for implementation in (sqlite_cache, file_cache):
    if not implementation:
      continue
    try:
      implementation.Cache(
          name=cache_name, create=False, version=VERSION).Delete()
    except exceptions.CacheInvalid:
      continue
    return

View File

@@ -0,0 +1,281 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A persistent cache implementation using sqlite3.
See the persistent_cache module for a detailed description.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import errno
import gc
import os
from googlecloudsdk.core.cache import exceptions
from googlecloudsdk.core.cache import metadata_table
from googlecloudsdk.core.cache import persistent_cache_base
from googlecloudsdk.core.util import files
import six
from six.moves import range # pylint: disable=redefined-builtin
import sqlite3
def _FieldRef(column):
"""Returns a field reference name.
Args:
column: The field column number counting from 0.
Returns:
A field reference name.
"""
return 'f{column}'.format(column=column)
def _Where(row_template=None):
  """Returns a WHERE clause for the row template.

  Column string matching supports the cache match ops * and ?, translated to
  the SQL LIKE wildcards % (zero or more characters) and _ (exactly one
  character) respectively.

  Args:
    row_template: A template row tuple. A column value None means match all
      values for this column. A None value for row means all rows.

  Returns:
    A WHERE clause for the row template or the empty string if there is none.
  """
  terms = []
  if row_template:
    for index in range(len(row_template)):
      term = row_template[index]
      if term is None:
        # None matches all values for this column -- no constraint.
        continue
      if isinstance(term, six.string_types):
        # Map cache wildcards to SQL LIKE wildcards and escape embedded
        # double quotes. BUGFIX: previously '.' (instead of '?') was
        # translated to '_', which made the documented '?' match op a
        # literal and turned '.' into an unintended single-char wildcard.
        pattern = term.replace('*', '%').replace('?', '_').replace('"', '""')
        terms.append('{field} LIKE "{pattern}"'.format(
            field=_FieldRef(index), pattern=pattern))
      else:
        terms.append('{field} = {term}'.format(
            field=_FieldRef(index), term=term))
  if not terms:
    return ''
  return ' WHERE ' + ' AND '.join(terms)
class _Table(persistent_cache_base.Table):
  """A persistent cache table.

  Attributes:
    name: The table name.
    deleted: Table was deleted if True.
    modified: Table modify timestamp.
    timeout: Tables older than timeout are invalid.
    _cache: The parent cache object.
    _fields: The 'f0, f1, ...' field name string for SELECT/INSERT clauses.
    _values: The '?, ...' parameter replacement string for INSERT.
  """

  def __init__(self, cache, name, columns=1, keys=1, timeout=0, modified=0,
               restricted=False):
    self._rows = None
    super(_Table, self).__init__(cache, name, columns=columns, keys=keys,
                                 timeout=timeout, modified=modified,
                                 restricted=restricted)
    if restricted:
      # Restricted tables are hidden from cache users.
      self._cache._restricted.add(name)  # pylint: disable=protected-access
    self._fields = ', '.join([_FieldRef(i) for i in range(columns)])
    self._values = ', '.join(['?'] * columns)
    self.deleted = False
    # Register with the cache so Commit() can find changed tables.
    # pylint: disable=protected-access
    if self._cache._metadata:
      self._cache._tables[name] = self

  def Delete(self):
    """Deletes the table data and removes its metadata entry."""
    self.Invalidate()
    self._cache.cursor.execute(
        'DROP TABLE "{table}"'.format(table=self.name))
    # pylint: disable=protected-access
    self._cache._db.commit()
    self._cache._metadata.DeleteRows([(self.name,)])
    self.deleted = True

  def _Commit(self):
    """Commits changed/deleted table metadata to the metadata table."""
    if self.changed:
      self.changed = False
      # pylint: disable=protected-access
      if self.deleted:
        self.deleted = False
        # Drop the metadata row and forget the open table object.
        self._cache._metadata.DeleteRows([(self.name,)])
        del self._cache._tables[self.name]
      else:
        # AddRows overwrites any existing metadata row for this table.
        self._cache._metadata.AddRows(
            [metadata_table.Metadata.Row(
                name=self.name,
                columns=self.columns,
                keys=self.keys,
                timeout=self.timeout,
                modified=self.modified,
                restricted=self.restricted,
                version=self._cache.version)])

  def AddRows(self, rows):
    """Adds each row in rows to the table. Existing rows are overwritten."""
    self._CheckRows(rows)
    self._cache.cursor.executemany(
        'INSERT OR REPLACE INTO "{table}" ({fields}) VALUES ({values})'.
        format(
            table=self.name, fields=self._fields, values=self._values),
        rows)
    self._cache._db.commit()  # pylint: disable=protected-access

  def DeleteRows(self, row_templates=None):
    """Deletes each row in the table matching any of the row_templates."""
    if row_templates:
      self._CheckRowTemplates(row_templates)
      for template in row_templates:
        self._cache.cursor.execute(
            'DELETE FROM "{table}"{where}'.format(
                table=self.name, where=_Where(template)))
    else:
      # No templates: delete every row; allowed even for expired tables.
      self._cache.cursor.execute(
          'DELETE FROM "{table}" WHERE 1'.format(table=self.name))
    self._cache._db.commit()  # pylint: disable=protected-access

  def Select(self, row_template=None, ignore_expiration=False):
    """Returns the list of rows that match row_template, None for all."""
    if row_template is not None:
      self._CheckRowTemplates([row_template])
    # Restricted (internal) tables never expire; modified == 0 marks expiry.
    if not ignore_expiration and not self.restricted and not self.modified:
      raise exceptions.CacheTableExpired(
          '[{}] cache table [{}] has expired.'.format(
              self._cache.name, self.name))
    self._cache.cursor.execute(
        'SELECT {fields} FROM "{table}"{where}'.format(
            fields=self._fields, table=self.name, where=_Where(row_template)))
    return self._cache.cursor.fetchall()
class Cache(metadata_table.CacheUsingMetadataTable):
"""A persistent cache object.
Attributes:
cursor: The _db operations cursor.
name: The db path name. Created/removed by this object. May be a file or
directory. In this implementation its a file.
timeout: The default table timeout.
version: A caller defined version string that must match the version string
stored when the persistent object was created.
_db: The db connection.
_metadata: The metadata restricted _Table.
_persistent: True if the persistent object has been committed at least once.
_restricted: The set of restricted table names.
_start: The cache instance start time.
_tables: The map of open table objects.
"""
_EXPECTED_MAGIC = b'SQLite format 3'
  def __init__(self, name, create=True, timeout=None, version=None):
    super(Cache, self).__init__(
        _Table, name, create=create, timeout=timeout, version=version)
    # True once the db file has been committed to disk at least once.
    self._persistent = False
    # Check if the db file exists and is an sqlite3 db by reading the magic
    # header bytes ourselves; sqlite3.connect() would happily create or open
    # a bogus file.
    try:
      with files.BinaryFileReader(name) as f:
        actual_magic = f.read(len(self._EXPECTED_MAGIC))
        if actual_magic != self._EXPECTED_MAGIC:
          raise exceptions.CacheInvalid(
              '[{}] is not a persistent cache.'.format(self.name))
      self._persistent = True
    except files.MissingFileError:
      # No file yet: OK when create=True, sqlite3.connect will create it.
      if not create:
        raise exceptions.CacheNotFound(
            'Persistent cache [{}] not found.'.format(self.name))
    except files.Error:
      raise exceptions.CacheInvalid(
          '[{}] is not a persistent cache.'.format(self.name))
    self._db = sqlite3.connect(name)
    self.cursor = self._db.cursor()
    # '__lock__' is reserved for cache-internal use.
    self._restricted = set(['__lock__'])
    self._tables = {}
    self._metadata = None
    self._start = persistent_cache_base.Now()
    try:
      self.InitializeMetadata()
    except exceptions.Error:
      # Make sure we clean up any dangling resources.
      self.Close(commit=False)
      raise
def _DeleteCacheFile(self):
"""Permanently deletes the persistent cache file."""
try:
os.remove(self.name)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.EISDIR):
raise
  def Delete(self):
    """Closes and permanently deletes the persistent cache."""
    # Close without committing so pending changes can't recreate the file.
    self.Close(commit=False)
    self._DeleteCacheFile()
def Commit(self):
"""Commits all operations up to this point."""
# Update the changed tables.
for table in [x for x in self._tables.values() if x.changed]:
table._Commit() # pylint: disable=protected-access
if self._metadata.changed:
self._metadata._Commit() # pylint: disable=protected-access
self._db.commit()
self._persistent = True
  def Close(self, commit=True):
    """Closes the cache, optionally committing any changes.

    Args:
      commit: Commits any changes before closing if True.
    """
    if self._db:
      if commit:
        self.Commit()
      # Drop the cursor before closing the connection.
      del self.cursor
      self._db.close()
      self._db = None
      gc.collect(2)  # On Windows, connection refs sometimes remain in memory
      # and prevent the db file from being deleted. This gets rid of them.
      self._tables = None
      if not commit and not self._persistent:
        # Need this because sqlite3 creates a filesystem artifact even if
        # there were no commits.
        self._DeleteCacheFile()
def _ImplementationCreateTable(self, name, columns, keys):
"""sqlite3 implementation specific _CreateTable."""
field_list = [_FieldRef(i) for i in range(columns)]
key_list = [_FieldRef(i) for i in range(keys or 1)]
field_list.append('PRIMARY KEY ({keys})'.format(keys=', '.join(key_list)))
fields = '({fields})'.format(fields=', '.join(field_list))
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS "{name}" {fields}'.format(
name=name, fields=fields))