feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,111 @@
#!/usr/bin/env python
"""Provides Logic for Fetching and Storing Discovery Documents from an on-disc cache."""
import hashlib
import os
import pathlib
import tempfile
from typing import Optional
from absl import logging
from pyglib import stringutil
_DISCOVERY_CACHE_FILE = 'api_discovery.json'
def _get_cache_file_name(
cache_root: str, discovery_url: str, api_name: str, api_version: str
) -> pathlib.Path:
"""Returns the cache file name for the given api and version."""
# Use the sha1 hash as this is not security-related, just need a stable hash.
url_hash = hashlib.sha1(discovery_url.encode('utf-8')).hexdigest()
return pathlib.Path(
cache_root,
url_hash,
api_name,
api_version,
_DISCOVERY_CACHE_FILE,
)
def get_from_cache(
    cache_root: str, discovery_url: str, api_name: str, api_version: str
) -> Optional[str]:
  """Loads a discovery document from the on-disk cache using key `api` and `version`.

  Args:
    cache_root: [str], a directory where all cache files are stored.
    discovery_url: [str], URL where the discovery document was fetched from.
    api_name: [str], Name of api `discovery_document` to be saved.
    api_version: [str], Version of document to get.

  Returns:
    The discovery document as a JSON string, or None if the document is not
    cached or any error occurs while reading or decoding it (the cache is
    best-effort, so failures degrade to a cache miss).
  """
  file = _get_cache_file_name(cache_root, discovery_url, api_name, api_version)
  if not os.path.isfile(file):
    logging.info('Discovery doc not in cache. %s', file)
    return None
  try:
    # Read raw bytes and decode explicitly so the cached contents are returned
    # verbatim, without platform-dependent newline translation.
    with open(file, 'rb') as f:
      contents = f.read()
      return contents.decode('utf-8')
  except Exception as e:  # pylint: disable=broad-except
    # Deliberately broad: a corrupt/unreadable cache entry must never break the
    # caller, it just forces a re-fetch.
    logging.warning('Error loading discovery document %s: %s', file, e)
    return None
def save_to_cache(
    cache_root: str,
    discovery_url: str,
    api_name: str,
    api_version: str,
    discovery_document: str,
) -> None:
  """Saves a discovery document to the on-disk cache with key `api` and `version`.

  Args:
    cache_root: [str], a directory where all cache files are stored.
    discovery_url: [str], URL where the discovery document was fetched from.
    api_name: [str], Name of api `discovery_document` to be saved.
    api_version: [str], Version of `discovery_document`.
    discovery_document: [str]. Discovery document as a json string.

  Raises:
    OSError: If an error occurs when the file is written.
  """
  file = _get_cache_file_name(cache_root, discovery_url, api_name, api_version)
  directory = file.parent
  # Return. File already cached.
  if file.exists():
    return
  directory.mkdir(parents=True, exist_ok=True)
  # Here we will write the discovery doc to a temp file and then rename that
  # temp file to our destination cache file. This is to ensure we have an
  # atomic file operation. Without this it could be possible to have a bq
  # client see the cached discovery file and load it although it is empty.
  # The temporary file needs to be in a unique path so that different
  # invocations don't conflict; both will be able to write to their temp
  # file, and the last one will move to final place.
  with tempfile.TemporaryDirectory(dir=directory) as tmpdir:
    temp_file_path = pathlib.Path(tmpdir) / 'tmp.json'
    with temp_file_path.open('wb') as f:
      f.write(stringutil.ensure_binary(discovery_document, 'utf8'))
      # Flush followed by fsync to ensure all data is written to temp file
      # before our rename operation.
      f.flush()
      os.fsync(f.fileno())
    # Atomically create (via rename) the 'real' cache file. This must happen
    # inside the TemporaryDirectory context, before cleanup deletes tmp.json.
    temp_file_path.rename(file)

View File

@@ -0,0 +1,133 @@
#!/usr/bin/env python
"""Methods for loading discovery documents for Google Cloud APIs.
Discovery Documents are used to create API Clients.
"""
import pkgutil
from typing import Optional, Union
from absl import logging
from utils import bq_consts
# Python package whose package data bundles the local discovery documents
# (resolved via pkgutil.get_data below).
PKG_NAME = 'bigquery_client'
# Latest version of the BigQuery API discovery_document from discovery_next.
DISCOVERY_NEXT_BIGQUERY = 'discovery_next/bigquery.json'
# Latest version of the IAM Policy API discovery_document from discovery_next.
DISCOVERY_NEXT_IAM_POLICY = 'discovery_next/iam-policy.json'
# Latest version of the Reservations discovery_document from discovery_next.
DISCOVERY_NEXT_RESERVATIONS = (
    'discovery_next/bigqueryreservation_google_rest_v1.json'
)
# API endpoints for which a bundled (local) discovery document may be used in
# place of a server fetch.
SUPPORTED_BIGQUERY_APIS = frozenset([
    'https://www.googleapis.com',
    'https://bigquery.googleapis.com',
    'https://bigqueryreservation.googleapis.com',
])
# Maps each service that supports local loading to its bundled discovery
# document resource path inside PKG_NAME.
SERVICES_TO_LOCAL_DISCOVERY_DOC_MAP = {
    bq_consts.Service.BIGQUERY: DISCOVERY_NEXT_BIGQUERY,
    bq_consts.Service.CONNECTIONS: DISCOVERY_NEXT_BIGQUERY,
    bq_consts.Service.RESERVATIONS: DISCOVERY_NEXT_RESERVATIONS,
    bq_consts.Service.BQ_IAM: DISCOVERY_NEXT_IAM_POLICY,
}
# TODO(b/318711380): Local discovery load for different APIs.
def load_local_discovery_doc_from_service(
    service: bq_consts.Service,
    api: str,
    api_version: str,
) -> Union[None, bytes]:
  """Returns the bundled discovery doc for `service`, or None to fetch remotely."""
  # Unknown services have no bundled document; fall back to the server.
  doc_path = SERVICES_TO_LOCAL_DISCOVERY_DOC_MAP.get(service)
  if doc_path is None:
    logging.info(
        'Skipping local %s discovery document load since the service is not yet'
        ' supported',
        service,
    )
    return None
  endpoint_is_supported = api in SUPPORTED_BIGQUERY_APIS
  if service == bq_consts.Service.BIGQUERY and not (
      endpoint_is_supported and api_version == 'v2'
  ):
    # For now, align this strictly with the default flag values. We can loosen
    # this but for a first pass I'm keeping the current code flow.
    logging.info(
        'Loading the "%s" discovery doc from the server since this is not'
        ' v2 (%s) and the API endpoint (%s) is not one of (%s).',
        service,
        api_version,
        api,
        ', '.join(SUPPORTED_BIGQUERY_APIS),
    )
    return None
  if service != bq_consts.Service.BQ_IAM and not endpoint_is_supported:
    # For non-IAM APIs, we only support local discovery docs for selected API
    # endpoints.
    logging.info(
        'Loading the "%s" discovery doc from the server since the API endpoint'
        ' (%s) is not one of (%s).',
        service,
        api,
        ', '.join(SUPPORTED_BIGQUERY_APIS),
    )
    return None
  return load_local_discovery_doc(doc_path)
def load_local_discovery_doc(doc_filename: str) -> bytes:
  """Loads the discovery document for `doc_filename` from package files.

  Example:
    bq_disc_doc = discovery_document_loader
      .load_local_discovery_doc('discovery_next/bigquery.json')

  Args:
    doc_filename: [str], The filename of the discovery document to be loaded.

  Raises:
    FileNotFoundError: If no discovery doc could be loaded.

  Returns:
    `bytes`, the contents of the discovery document. Never returns None;
    failure to load raises FileNotFoundError instead.
  """
  doc = _fetch_discovery_doc_from_pkg(PKG_NAME, doc_filename)
  if not doc:
    raise FileNotFoundError(
        'Failed to load discovery doc from resource path: %s.%s'
        % (PKG_NAME, doc_filename)
    )
  return doc
def _fetch_discovery_doc_from_pkg(
package: str, resource: str
) -> Optional[bytes]:
"""Loads a discovery doc as `bytes` specified by `package` and `resource` returning None on error."""
try:
raw_doc = pkgutil.get_data(package, resource)
# TODO(b/286571605) Ideally this would be ModuleNotFoundError but it's not
# supported before python3.6 so we need to be less specific for now.
except ImportError:
raw_doc = None
if not raw_doc:
logging.warning(
'Failed to load discovery doc from (package, resource): %s, %s',
package,
resource,
)
else:
logging.info(
'Successfully loaded discovery doc from (package, resource): %s, %s',
package,
resource,
)
return raw_doc