feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,44 @@
{
"components": [
{
"data": {
"contents_checksum": "239d13b0bd2ce8c978287c3a07407946f459b829fda560a93665230e0b6f7338",
"source": "",
"type": "tar"
},
"dependencies": [
"bq",
"core"
],
"details": {
"description": "Provides the bq tool for interacting with the BigQuery service.",
"display_name": "BigQuery Command Line Tool (Platform Specific)"
},
"gdu_only": false,
"id": "bq-nix",
"is_configuration": false,
"is_hidden": true,
"is_required": false,
"platform": {
"operating_systems": [
"CYGWIN",
"LINUX",
"MACOSX",
"MSYS"
]
},
"platform_required": false,
"version": {
"build_number": 20251031202127,
"version_string": "2.1.25"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,598 @@
bin/bootstrapping/bq.py
data/cli/bq.json
platform/bq/LICENSE.txt
platform/bq/MANIFEST.in
platform/bq/README.txt
platform/bq/VERSION
platform/bq/auth/gcloud_credential_loader.py
platform/bq/auth/main_credential_loader.py
platform/bq/auth/utils.py
platform/bq/bigquery_client.py
platform/bq/bq.py
platform/bq/bq_auth_flags.py
platform/bq/bq_flags.py
platform/bq/bq_utils.py
platform/bq/clients/bigquery_client.py
platform/bq/clients/bigquery_client_extended.py
platform/bq/clients/bigquery_http.py
platform/bq/clients/client_connection.py
platform/bq/clients/client_data_transfer.py
platform/bq/clients/client_dataset.py
platform/bq/clients/client_deprecated.py
platform/bq/clients/client_job.py
platform/bq/clients/client_model.py
platform/bq/clients/client_project.py
platform/bq/clients/client_reservation.py
platform/bq/clients/client_routine.py
platform/bq/clients/client_row_access_policy.py
platform/bq/clients/client_table.py
platform/bq/clients/table_reader.py
platform/bq/clients/utils.py
platform/bq/clients/wait_printer.py
platform/bq/credential_loader.py
platform/bq/discovery_documents/discovery_document_cache.py
platform/bq/discovery_documents/discovery_document_loader.py
platform/bq/discovery_next/bigquery.json
platform/bq/discovery_next/bigqueryreservation_google_rest_v1.json
platform/bq/discovery_next/iam-policy.json
platform/bq/frontend/bigquery_command.py
platform/bq/frontend/bq_cached_client.py
platform/bq/frontend/command_cancel.py
platform/bq/frontend/command_copy.py
platform/bq/frontend/command_delete.py
platform/bq/frontend/command_extract.py
platform/bq/frontend/command_head.py
platform/bq/frontend/command_info.py
platform/bq/frontend/command_init.py
platform/bq/frontend/command_insert.py
platform/bq/frontend/command_list.py
platform/bq/frontend/command_load.py
platform/bq/frontend/command_make.py
platform/bq/frontend/command_mkdef.py
platform/bq/frontend/command_partition.py
platform/bq/frontend/command_query.py
platform/bq/frontend/command_repl.py
platform/bq/frontend/command_show.py
platform/bq/frontend/command_snapshot.py
platform/bq/frontend/command_truncate.py
platform/bq/frontend/command_undelete.py
platform/bq/frontend/command_update.py
platform/bq/frontend/command_version.py
platform/bq/frontend/command_wait.py
platform/bq/frontend/commands_iam.py
platform/bq/frontend/flags.py
platform/bq/frontend/utils.py
platform/bq/frontend/utils_data_transfer.py
platform/bq/frontend/utils_flags.py
platform/bq/frontend/utils_formatting.py
platform/bq/frontend/utils_id.py
platform/bq/gcloud_wrapper/bq_to_gcloud_adapter.py
platform/bq/gcloud_wrapper/bq_to_gcloud_command_executor.py
platform/bq/gcloud_wrapper/bq_to_gcloud_config_classes.py
platform/bq/gcloud_wrapper/gcloud_runner.py
platform/bq/gcloud_wrapper/supported_commands/supported_commands_dataset.py
platform/bq/gcloud_wrapper/supported_commands/supported_commands_migration_workflow.py
platform/bq/gcloud_wrapper/supported_commands/supported_commands_project.py
platform/bq/gcloud_wrapper/supported_gcloud_commands.py
platform/bq/remove_pyreadline.py
platform/bq/table_formatter.py
platform/bq/third_party/absl/LICENSE
platform/bq/third_party/absl/__init__.py
platform/bq/third_party/absl/_collections_abc.py
platform/bq/third_party/absl/app.py
platform/bq/third_party/absl/app.pyi
platform/bq/third_party/absl/command_name.py
platform/bq/third_party/absl/flags/__init__.py
platform/bq/third_party/absl/flags/__init__.pyi
platform/bq/third_party/absl/flags/_argument_parser.py
platform/bq/third_party/absl/flags/_argument_parser.pyi
platform/bq/third_party/absl/flags/_defines.py
platform/bq/third_party/absl/flags/_defines.pyi
platform/bq/third_party/absl/flags/_exceptions.py
platform/bq/third_party/absl/flags/_flag.py
platform/bq/third_party/absl/flags/_flag.pyi
platform/bq/third_party/absl/flags/_flagvalues.py
platform/bq/third_party/absl/flags/_flagvalues.pyi
platform/bq/third_party/absl/flags/_helpers.py
platform/bq/third_party/absl/flags/_validators.py
platform/bq/third_party/absl/flags/_validators_classes.py
platform/bq/third_party/absl/flags/argparse_flags.py
platform/bq/third_party/absl/logging/__init__.py
platform/bq/third_party/absl/logging/converter.py
platform/bq/third_party/appdirs/LICENSE
platform/bq/third_party/bazel_platforms/LICENSE
platform/bq/third_party/cachetools/__init__.py
platform/bq/third_party/cachetools/_decorators.py
platform/bq/third_party/cachetools/func.py
platform/bq/third_party/cachetools/keys.py
platform/bq/third_party/certifi/LICENSE
platform/bq/third_party/certifi/__init__.py
platform/bq/third_party/certifi/cacert.pem
platform/bq/third_party/certifi/core.py
platform/bq/third_party/cffi/LICENSE
platform/bq/third_party/charset_normalizer/LICENSE
platform/bq/third_party/charset_normalizer/__init__.py
platform/bq/third_party/charset_normalizer/api.py
platform/bq/third_party/charset_normalizer/assets/__init__.py
platform/bq/third_party/charset_normalizer/cd.py
platform/bq/third_party/charset_normalizer/constant.py
platform/bq/third_party/charset_normalizer/legacy.py
platform/bq/third_party/charset_normalizer/md.py
platform/bq/third_party/charset_normalizer/models.py
platform/bq/third_party/charset_normalizer/utils.py
platform/bq/third_party/charset_normalizer/version.py
platform/bq/third_party/fasteners/LICENSE
platform/bq/third_party/fasteners/__init__.py
platform/bq/third_party/fasteners/_utils.py
platform/bq/third_party/fasteners/lock.py
platform/bq/third_party/fasteners/process_lock.py
platform/bq/third_party/fasteners/version.py
platform/bq/third_party/gflags/AUTHORS
platform/bq/third_party/gflags/COPYING
platform/bq/third_party/gflags/ChangeLog
platform/bq/third_party/gflags/LICENSE
platform/bq/third_party/gflags/MANIFEST.in
platform/bq/third_party/gflags/Makefile
platform/bq/third_party/gflags/NEWS
platform/bq/third_party/gflags/PKG-INFO
platform/bq/third_party/gflags/README
platform/bq/third_party/gflags/__init__.py
platform/bq/third_party/gflags/gflags2man.py
platform/bq/third_party/gflags/gflags_validators.py
platform/bq/third_party/gflags/setup.cfg
platform/bq/third_party/gflags/setup.py
platform/bq/third_party/google/__init__.py
platform/bq/third_party/google/api_core/LICENSE
platform/bq/third_party/google/api_core/__init__.py
platform/bq/third_party/google/api_core/client_options.py
platform/bq/third_party/google/api_core/general_helpers.py
platform/bq/third_party/google/api_core/iam.py
platform/bq/third_party/google/apputils/LICENSE
platform/bq/third_party/google/apputils/__init__.py
platform/bq/third_party/google/apputils/app.py
platform/bq/third_party/google/apputils/appcommands.py
platform/bq/third_party/google/apputils/basetest.py
platform/bq/third_party/google/apputils/datelib.py
platform/bq/third_party/google/apputils/debug.py
platform/bq/third_party/google/apputils/file_util.py
platform/bq/third_party/google/apputils/resources.py
platform/bq/third_party/google/apputils/run_script_module.py
platform/bq/third_party/google/apputils/setup_command.py
platform/bq/third_party/google/apputils/shellutil.py
platform/bq/third_party/google/apputils/stopwatch.py
platform/bq/third_party/google/auth/LICENSE
platform/bq/third_party/google/auth/__init__.py
platform/bq/third_party/google/auth/_cloud_sdk.py
platform/bq/third_party/google/auth/_default.py
platform/bq/third_party/google/auth/_exponential_backoff.py
platform/bq/third_party/google/auth/_helpers.py
platform/bq/third_party/google/auth/_refresh_worker.py
platform/bq/third_party/google/auth/_service_account_info.py
platform/bq/third_party/google/auth/aws.py
platform/bq/third_party/google/auth/compute_engine/__init__.py
platform/bq/third_party/google/auth/compute_engine/_metadata.py
platform/bq/third_party/google/auth/compute_engine/credentials.py
platform/bq/third_party/google/auth/credentials.py
platform/bq/third_party/google/auth/crypt/__init__.py
platform/bq/third_party/google/auth/crypt/_cryptography_rsa.py
platform/bq/third_party/google/auth/crypt/_python_rsa.py
platform/bq/third_party/google/auth/crypt/base.py
platform/bq/third_party/google/auth/crypt/rsa.py
platform/bq/third_party/google/auth/environment_vars.py
platform/bq/third_party/google/auth/exceptions.py
platform/bq/third_party/google/auth/external_account.py
platform/bq/third_party/google/auth/external_account_authorized_user.py
platform/bq/third_party/google/auth/iam.py
platform/bq/third_party/google/auth/identity_pool.py
platform/bq/third_party/google/auth/impersonated_credentials.py
platform/bq/third_party/google/auth/jwt.py
platform/bq/third_party/google/auth/metrics.py
platform/bq/third_party/google/auth/pluggable.py
platform/bq/third_party/google/auth/transport/__init__.py
platform/bq/third_party/google/auth/transport/_http_client.py
platform/bq/third_party/google/auth/transport/_mtls_helper.py
platform/bq/third_party/google/auth/transport/mtls.py
platform/bq/third_party/google/auth/transport/requests.py
platform/bq/third_party/google/auth/version.py
platform/bq/third_party/google/oauth2/LICENSE
platform/bq/third_party/google/oauth2/__init__.py
platform/bq/third_party/google/oauth2/_client.py
platform/bq/third_party/google/oauth2/challenges.py
platform/bq/third_party/google/oauth2/credentials.py
platform/bq/third_party/google/oauth2/reauth.py
platform/bq/third_party/google/oauth2/service_account.py
platform/bq/third_party/google/oauth2/sts.py
platform/bq/third_party/google/oauth2/utils.py
platform/bq/third_party/google_auth_httplib2/LICENSE
platform/bq/third_party/google_auth_httplib2/__init__.py
platform/bq/third_party/google_reauth/LICENSE
platform/bq/third_party/google_reauth/__init__.py
platform/bq/third_party/google_reauth/_helpers.py
platform/bq/third_party/google_reauth/_reauth_client.py
platform/bq/third_party/google_reauth/all_tests.py
platform/bq/third_party/google_reauth/challenges.py
platform/bq/third_party/google_reauth/errors.py
platform/bq/third_party/google_reauth/reauth.py
platform/bq/third_party/google_reauth/reauth_creds.py
platform/bq/third_party/googleapiclient/LICENSE
platform/bq/third_party/googleapiclient/__init__.py
platform/bq/third_party/googleapiclient/_auth.py
platform/bq/third_party/googleapiclient/_helpers.py
platform/bq/third_party/googleapiclient/channel.py
platform/bq/third_party/googleapiclient/discovery.py
platform/bq/third_party/googleapiclient/discovery_cache/__init__.py
platform/bq/third_party/googleapiclient/discovery_cache/appengine_memcache.py
platform/bq/third_party/googleapiclient/discovery_cache/base.py
platform/bq/third_party/googleapiclient/discovery_cache/file_cache.py
platform/bq/third_party/googleapiclient/errors.py
platform/bq/third_party/googleapiclient/http.py
platform/bq/third_party/googleapiclient/mimeparse.py
platform/bq/third_party/googleapiclient/model.py
platform/bq/third_party/googleapiclient/schema.py
platform/bq/third_party/httplib2/LICENSE
platform/bq/third_party/httplib2/__init__.py
platform/bq/third_party/httplib2/python2/__init__.py
platform/bq/third_party/httplib2/python2/auth.py
platform/bq/third_party/httplib2/python2/ca_certs_locater.py
platform/bq/third_party/httplib2/python2/cacerts.txt
platform/bq/third_party/httplib2/python2/certs.py
platform/bq/third_party/httplib2/python2/error.py
platform/bq/third_party/httplib2/python2/iri2uri.py
platform/bq/third_party/httplib2/python2/socks.py
platform/bq/third_party/httplib2/python3/__init__.py
platform/bq/third_party/httplib2/python3/auth.py
platform/bq/third_party/httplib2/python3/ca_certs_locater.py
platform/bq/third_party/httplib2/python3/cacerts.txt
platform/bq/third_party/httplib2/python3/certs.py
platform/bq/third_party/httplib2/python3/error.py
platform/bq/third_party/httplib2/python3/iri2uri.py
platform/bq/third_party/httplib2/python3/socks.py
platform/bq/third_party/idna/LICENSE
platform/bq/third_party/idna/__init__.py
platform/bq/third_party/idna/codec.py
platform/bq/third_party/idna/compat.py
platform/bq/third_party/idna/core.py
platform/bq/third_party/idna/idnadata.py
platform/bq/third_party/idna/intranges.py
platform/bq/third_party/idna/package_data.py
platform/bq/third_party/idna/uts46data.py
platform/bq/third_party/inflection/LICENSE
platform/bq/third_party/inflection/__init__.py
platform/bq/third_party/inflection/inflection.py
platform/bq/third_party/ipaddr/LICENSE
platform/bq/third_party/libffi/LICENSE
platform/bq/third_party/libunwind/LICENSE
platform/bq/third_party/monotonic/LICENSE
platform/bq/third_party/monotonic/__init__.py
platform/bq/third_party/oauth2client_4_0/LICENSE
platform/bq/third_party/oauth2client_4_0/__init__.py
platform/bq/third_party/oauth2client_4_0/_helpers.py
platform/bq/third_party/oauth2client_4_0/_openssl_crypt.py
platform/bq/third_party/oauth2client_4_0/_pkce.py
platform/bq/third_party/oauth2client_4_0/_pure_python_crypt.py
platform/bq/third_party/oauth2client_4_0/_pycrypto_crypt.py
platform/bq/third_party/oauth2client_4_0/client.py
platform/bq/third_party/oauth2client_4_0/clientsecrets.py
platform/bq/third_party/oauth2client_4_0/contrib/__init__.py
platform/bq/third_party/oauth2client_4_0/contrib/_metadata.py
platform/bq/third_party/oauth2client_4_0/contrib/devshell.py
platform/bq/third_party/oauth2client_4_0/contrib/gce.py
platform/bq/third_party/oauth2client_4_0/contrib/multiprocess_file_storage.py
platform/bq/third_party/oauth2client_4_0/crypt.py
platform/bq/third_party/oauth2client_4_0/file.py
platform/bq/third_party/oauth2client_4_0/service_account.py
platform/bq/third_party/oauth2client_4_0/tools.py
platform/bq/third_party/oauth2client_4_0/transport.py
platform/bq/third_party/openssl/LICENSE
platform/bq/third_party/packaging/LICENSE
platform/bq/third_party/pkg_resources/LICENSE
platform/bq/third_party/ply/LICENSE
platform/bq/third_party/program_image_remapper/LICENSE
platform/bq/third_party/protobuf/LICENSE
platform/bq/third_party/pyasn1/LICENSE
platform/bq/third_party/pyasn1/__init__.py
platform/bq/third_party/pyasn1/codec/__init__.py
platform/bq/third_party/pyasn1/codec/ber/__init__.py
platform/bq/third_party/pyasn1/codec/ber/decoder.py
platform/bq/third_party/pyasn1/codec/ber/encoder.py
platform/bq/third_party/pyasn1/codec/ber/eoo.py
platform/bq/third_party/pyasn1/codec/cer/__init__.py
platform/bq/third_party/pyasn1/codec/cer/decoder.py
platform/bq/third_party/pyasn1/codec/cer/encoder.py
platform/bq/third_party/pyasn1/codec/der/__init__.py
platform/bq/third_party/pyasn1/codec/der/decoder.py
platform/bq/third_party/pyasn1/codec/der/encoder.py
platform/bq/third_party/pyasn1/codec/native/__init__.py
platform/bq/third_party/pyasn1/codec/native/decoder.py
platform/bq/third_party/pyasn1/codec/native/encoder.py
platform/bq/third_party/pyasn1/compat/__init__.py
platform/bq/third_party/pyasn1/compat/binary.py
platform/bq/third_party/pyasn1/compat/calling.py
platform/bq/third_party/pyasn1/compat/dateandtime.py
platform/bq/third_party/pyasn1/compat/integer.py
platform/bq/third_party/pyasn1/compat/octets.py
platform/bq/third_party/pyasn1/compat/string.py
platform/bq/third_party/pyasn1/debug.py
platform/bq/third_party/pyasn1/error.py
platform/bq/third_party/pyasn1/type/__init__.py
platform/bq/third_party/pyasn1/type/base.py
platform/bq/third_party/pyasn1/type/char.py
platform/bq/third_party/pyasn1/type/constraint.py
platform/bq/third_party/pyasn1/type/error.py
platform/bq/third_party/pyasn1/type/namedtype.py
platform/bq/third_party/pyasn1/type/namedval.py
platform/bq/third_party/pyasn1/type/opentype.py
platform/bq/third_party/pyasn1/type/tag.py
platform/bq/third_party/pyasn1/type/tagmap.py
platform/bq/third_party/pyasn1/type/univ.py
platform/bq/third_party/pyasn1/type/useful.py
platform/bq/third_party/pyasn1_modules/LICENSE
platform/bq/third_party/pyasn1_modules/__init__.py
platform/bq/third_party/pyasn1_modules/pem.py
platform/bq/third_party/pyasn1_modules/rfc1155.py
platform/bq/third_party/pyasn1_modules/rfc1157.py
platform/bq/third_party/pyasn1_modules/rfc1901.py
platform/bq/third_party/pyasn1_modules/rfc1902.py
platform/bq/third_party/pyasn1_modules/rfc1905.py
platform/bq/third_party/pyasn1_modules/rfc2251.py
platform/bq/third_party/pyasn1_modules/rfc2314.py
platform/bq/third_party/pyasn1_modules/rfc2315.py
platform/bq/third_party/pyasn1_modules/rfc2437.py
platform/bq/third_party/pyasn1_modules/rfc2459.py
platform/bq/third_party/pyasn1_modules/rfc2511.py
platform/bq/third_party/pyasn1_modules/rfc2560.py
platform/bq/third_party/pyasn1_modules/rfc2631.py
platform/bq/third_party/pyasn1_modules/rfc2634.py
platform/bq/third_party/pyasn1_modules/rfc2985.py
platform/bq/third_party/pyasn1_modules/rfc2986.py
platform/bq/third_party/pyasn1_modules/rfc3114.py
platform/bq/third_party/pyasn1_modules/rfc3161.py
platform/bq/third_party/pyasn1_modules/rfc3274.py
platform/bq/third_party/pyasn1_modules/rfc3279.py
platform/bq/third_party/pyasn1_modules/rfc3280.py
platform/bq/third_party/pyasn1_modules/rfc3281.py
platform/bq/third_party/pyasn1_modules/rfc3412.py
platform/bq/third_party/pyasn1_modules/rfc3414.py
platform/bq/third_party/pyasn1_modules/rfc3447.py
platform/bq/third_party/pyasn1_modules/rfc3560.py
platform/bq/third_party/pyasn1_modules/rfc3565.py
platform/bq/third_party/pyasn1_modules/rfc3709.py
platform/bq/third_party/pyasn1_modules/rfc3770.py
platform/bq/third_party/pyasn1_modules/rfc3779.py
platform/bq/third_party/pyasn1_modules/rfc3852.py
platform/bq/third_party/pyasn1_modules/rfc4043.py
platform/bq/third_party/pyasn1_modules/rfc4055.py
platform/bq/third_party/pyasn1_modules/rfc4073.py
platform/bq/third_party/pyasn1_modules/rfc4108.py
platform/bq/third_party/pyasn1_modules/rfc4210.py
platform/bq/third_party/pyasn1_modules/rfc4211.py
platform/bq/third_party/pyasn1_modules/rfc4334.py
platform/bq/third_party/pyasn1_modules/rfc4985.py
platform/bq/third_party/pyasn1_modules/rfc5035.py
platform/bq/third_party/pyasn1_modules/rfc5083.py
platform/bq/third_party/pyasn1_modules/rfc5084.py
platform/bq/third_party/pyasn1_modules/rfc5208.py
platform/bq/third_party/pyasn1_modules/rfc5280.py
platform/bq/third_party/pyasn1_modules/rfc5480.py
platform/bq/third_party/pyasn1_modules/rfc5649.py
platform/bq/third_party/pyasn1_modules/rfc5652.py
platform/bq/third_party/pyasn1_modules/rfc5751.py
platform/bq/third_party/pyasn1_modules/rfc5755.py
platform/bq/third_party/pyasn1_modules/rfc5913.py
platform/bq/third_party/pyasn1_modules/rfc5914.py
platform/bq/third_party/pyasn1_modules/rfc5915.py
platform/bq/third_party/pyasn1_modules/rfc5916.py
platform/bq/third_party/pyasn1_modules/rfc5917.py
platform/bq/third_party/pyasn1_modules/rfc5924.py
platform/bq/third_party/pyasn1_modules/rfc5934.py
platform/bq/third_party/pyasn1_modules/rfc5940.py
platform/bq/third_party/pyasn1_modules/rfc5958.py
platform/bq/third_party/pyasn1_modules/rfc5990.py
platform/bq/third_party/pyasn1_modules/rfc6010.py
platform/bq/third_party/pyasn1_modules/rfc6019.py
platform/bq/third_party/pyasn1_modules/rfc6031.py
platform/bq/third_party/pyasn1_modules/rfc6032.py
platform/bq/third_party/pyasn1_modules/rfc6120.py
platform/bq/third_party/pyasn1_modules/rfc6170.py
platform/bq/third_party/pyasn1_modules/rfc6187.py
platform/bq/third_party/pyasn1_modules/rfc6210.py
platform/bq/third_party/pyasn1_modules/rfc6211.py
platform/bq/third_party/pyasn1_modules/rfc6402.py
platform/bq/third_party/pyasn1_modules/rfc6482.py
platform/bq/third_party/pyasn1_modules/rfc6486.py
platform/bq/third_party/pyasn1_modules/rfc6487.py
platform/bq/third_party/pyasn1_modules/rfc6664.py
platform/bq/third_party/pyasn1_modules/rfc6955.py
platform/bq/third_party/pyasn1_modules/rfc6960.py
platform/bq/third_party/pyasn1_modules/rfc7030.py
platform/bq/third_party/pyasn1_modules/rfc7191.py
platform/bq/third_party/pyasn1_modules/rfc7229.py
platform/bq/third_party/pyasn1_modules/rfc7292.py
platform/bq/third_party/pyasn1_modules/rfc7296.py
platform/bq/third_party/pyasn1_modules/rfc7508.py
platform/bq/third_party/pyasn1_modules/rfc7585.py
platform/bq/third_party/pyasn1_modules/rfc7633.py
platform/bq/third_party/pyasn1_modules/rfc7773.py
platform/bq/third_party/pyasn1_modules/rfc7894.py
platform/bq/third_party/pyasn1_modules/rfc7906.py
platform/bq/third_party/pyasn1_modules/rfc7914.py
platform/bq/third_party/pyasn1_modules/rfc8017.py
platform/bq/third_party/pyasn1_modules/rfc8018.py
platform/bq/third_party/pyasn1_modules/rfc8103.py
platform/bq/third_party/pyasn1_modules/rfc8209.py
platform/bq/third_party/pyasn1_modules/rfc8226.py
platform/bq/third_party/pyasn1_modules/rfc8358.py
platform/bq/third_party/pyasn1_modules/rfc8360.py
platform/bq/third_party/pyasn1_modules/rfc8398.py
platform/bq/third_party/pyasn1_modules/rfc8410.py
platform/bq/third_party/pyasn1_modules/rfc8418.py
platform/bq/third_party/pyasn1_modules/rfc8419.py
platform/bq/third_party/pyasn1_modules/rfc8479.py
platform/bq/third_party/pyasn1_modules/rfc8494.py
platform/bq/third_party/pyasn1_modules/rfc8520.py
platform/bq/third_party/pyasn1_modules/rfc8619.py
platform/bq/third_party/pyasn1_modules/rfc8649.py
platform/bq/third_party/pycparser/LICENSE
platform/bq/third_party/pyglib/__init__.py
platform/bq/third_party/pyglib/appcommands.py
platform/bq/third_party/pyglib/resources.py
platform/bq/third_party/pyglib/stringutil.py
platform/bq/third_party/pyparsing/LICENSE
platform/bq/third_party/pyparsing/__init__.py
platform/bq/third_party/python_runtime/LICENSE
platform/bq/third_party/pyu2f/LICENSE
platform/bq/third_party/pyu2f/__init__.py
platform/bq/third_party/pyu2f/apdu.py
platform/bq/third_party/pyu2f/convenience/__init__.py
platform/bq/third_party/pyu2f/convenience/authenticator.py
platform/bq/third_party/pyu2f/convenience/baseauthenticator.py
platform/bq/third_party/pyu2f/convenience/customauthenticator.py
platform/bq/third_party/pyu2f/convenience/localauthenticator.py
platform/bq/third_party/pyu2f/errors.py
platform/bq/third_party/pyu2f/hardware.py
platform/bq/third_party/pyu2f/hid/__init__.py
platform/bq/third_party/pyu2f/hid/base.py
platform/bq/third_party/pyu2f/hid/linux.py
platform/bq/third_party/pyu2f/hid/macos.py
platform/bq/third_party/pyu2f/hid/windows.py
platform/bq/third_party/pyu2f/hidtransport.py
platform/bq/third_party/pyu2f/model.py
platform/bq/third_party/pyu2f/u2f.py
platform/bq/third_party/requests/LICENSE
platform/bq/third_party/requests/__init__.py
platform/bq/third_party/requests/__version__.py
platform/bq/third_party/requests/_internal_utils.py
platform/bq/third_party/requests/adapters.py
platform/bq/third_party/requests/api.py
platform/bq/third_party/requests/auth.py
platform/bq/third_party/requests/certs.py
platform/bq/third_party/requests/compat.py
platform/bq/third_party/requests/cookies.py
platform/bq/third_party/requests/exceptions.py
platform/bq/third_party/requests/help.py
platform/bq/third_party/requests/hooks.py
platform/bq/third_party/requests/models.py
platform/bq/third_party/requests/packages.py
platform/bq/third_party/requests/sessions.py
platform/bq/third_party/requests/status_codes.py
platform/bq/third_party/requests/structures.py
platform/bq/third_party/requests/utils.py
platform/bq/third_party/rsa/LICENSE
platform/bq/third_party/rsa/__init__.py
platform/bq/third_party/rsa/asn1.py
platform/bq/third_party/rsa/common.py
platform/bq/third_party/rsa/core.py
platform/bq/third_party/rsa/key.py
platform/bq/third_party/rsa/pem.py
platform/bq/third_party/rsa/pkcs1.py
platform/bq/third_party/rsa/pkcs1_v2.py
platform/bq/third_party/rsa/prime.py
platform/bq/third_party/rsa/randnum.py
platform/bq/third_party/rsa/transform.py
platform/bq/third_party/six/LICENSE
platform/bq/third_party/six/__init__.py
platform/bq/third_party/socks/LICENSE
platform/bq/third_party/socks/__init__.py
platform/bq/third_party/socks/sockshandler.py
platform/bq/third_party/tcmalloc/LICENSE
platform/bq/third_party/termcolor/LICENSE
platform/bq/third_party/termcolor/__init__.py
platform/bq/third_party/typing_extensions/LICENSE
platform/bq/third_party/typing_extensions/__init__.py
platform/bq/third_party/tz/LICENSE
platform/bq/third_party/uritemplate/LICENSE
platform/bq/third_party/uritemplate/__init__.py
platform/bq/third_party/uritemplate/api.py
platform/bq/third_party/uritemplate/orderedset.py
platform/bq/third_party/uritemplate/template.py
platform/bq/third_party/uritemplate/variable.py
platform/bq/third_party/urllib3/LICENSE
platform/bq/third_party/urllib3/__init__.py
platform/bq/third_party/urllib3/_collections.py
platform/bq/third_party/urllib3/_version.py
platform/bq/third_party/urllib3/connection.py
platform/bq/third_party/urllib3/connectionpool.py
platform/bq/third_party/urllib3/contrib/__init__.py
platform/bq/third_party/urllib3/contrib/_appengine_environ.py
platform/bq/third_party/urllib3/contrib/_securetransport/__init__.py
platform/bq/third_party/urllib3/contrib/_securetransport/bindings.py
platform/bq/third_party/urllib3/contrib/_securetransport/low_level.py
platform/bq/third_party/urllib3/contrib/appengine.py
platform/bq/third_party/urllib3/contrib/ntlmpool.py
platform/bq/third_party/urllib3/contrib/securetransport.py
platform/bq/third_party/urllib3/contrib/socks.py
platform/bq/third_party/urllib3/exceptions.py
platform/bq/third_party/urllib3/fields.py
platform/bq/third_party/urllib3/filepost.py
platform/bq/third_party/urllib3/packages/__init__.py
platform/bq/third_party/urllib3/packages/backports/__init__.py
platform/bq/third_party/urllib3/packages/backports/finalize.py
platform/bq/third_party/urllib3/packages/backports/makefile.py
platform/bq/third_party/urllib3/packages/six.py
platform/bq/third_party/urllib3/poolmanager.py
platform/bq/third_party/urllib3/request.py
platform/bq/third_party/urllib3/response.py
platform/bq/third_party/urllib3/util/__init__.py
platform/bq/third_party/urllib3/util/connection.py
platform/bq/third_party/urllib3/util/proxy.py
platform/bq/third_party/urllib3/util/queue.py
platform/bq/third_party/urllib3/util/request.py
platform/bq/third_party/urllib3/util/response.py
platform/bq/third_party/urllib3/util/retry.py
platform/bq/third_party/urllib3/util/ssl_.py
platform/bq/third_party/urllib3/util/ssl_match_hostname.py
platform/bq/third_party/urllib3/util/ssltransport.py
platform/bq/third_party/urllib3/util/timeout.py
platform/bq/third_party/urllib3/util/url.py
platform/bq/third_party/urllib3/util/wait.py
platform/bq/third_party/wcwidth/LICENSE
platform/bq/third_party/wcwidth/__init__.py
platform/bq/third_party/wcwidth/table_wide.py
platform/bq/third_party/wcwidth/table_zero.py
platform/bq/third_party/wcwidth/unicode_versions.py
platform/bq/third_party/wcwidth/wcwidth.py
platform/bq/third_party/yaml/LICENSE
platform/bq/third_party/yaml/__init__.py
platform/bq/third_party/yaml/lib2/__init__.py
platform/bq/third_party/yaml/lib2/composer.py
platform/bq/third_party/yaml/lib2/constructor.py
platform/bq/third_party/yaml/lib2/cyaml.py
platform/bq/third_party/yaml/lib2/dumper.py
platform/bq/third_party/yaml/lib2/emitter.py
platform/bq/third_party/yaml/lib2/error.py
platform/bq/third_party/yaml/lib2/events.py
platform/bq/third_party/yaml/lib2/loader.py
platform/bq/third_party/yaml/lib2/nodes.py
platform/bq/third_party/yaml/lib2/parser.py
platform/bq/third_party/yaml/lib2/reader.py
platform/bq/third_party/yaml/lib2/representer.py
platform/bq/third_party/yaml/lib2/resolver.py
platform/bq/third_party/yaml/lib2/scanner.py
platform/bq/third_party/yaml/lib2/serializer.py
platform/bq/third_party/yaml/lib2/tokens.py
platform/bq/third_party/yaml/lib3/__init__.py
platform/bq/third_party/yaml/lib3/composer.py
platform/bq/third_party/yaml/lib3/constructor.py
platform/bq/third_party/yaml/lib3/cyaml.py
platform/bq/third_party/yaml/lib3/dumper.py
platform/bq/third_party/yaml/lib3/emitter.py
platform/bq/third_party/yaml/lib3/error.py
platform/bq/third_party/yaml/lib3/events.py
platform/bq/third_party/yaml/lib3/loader.py
platform/bq/third_party/yaml/lib3/nodes.py
platform/bq/third_party/yaml/lib3/parser.py
platform/bq/third_party/yaml/lib3/reader.py
platform/bq/third_party/yaml/lib3/representer.py
platform/bq/third_party/yaml/lib3/resolver.py
platform/bq/third_party/yaml/lib3/scanner.py
platform/bq/third_party/yaml/lib3/serializer.py
platform/bq/third_party/yaml/lib3/tokens.py
platform/bq/utils/bq_api_utils.py
platform/bq/utils/bq_consts.py
platform/bq/utils/bq_error.py
platform/bq/utils/bq_error_utils.py
platform/bq/utils/bq_gcloud_utils.py
platform/bq/utils/bq_id_utils.py
platform/bq/utils/bq_logging.py
platform/bq/utils/bq_processor_utils.py
platform/bq/wrapped_credentials.py

View File

@@ -0,0 +1,38 @@
{
"components": [
{
"data": {
"contents_checksum": "7b6f3a8e589c77a004c6c793ff5bb704d2e0e18c7359e0b79d8790e85071947c",
"source": "",
"type": "tar"
},
"dependencies": [
"bq-nix",
"bq-win",
"core"
],
"details": {
"description": "Provides the bq tool for interacting with the BigQuery service.",
"display_name": "BigQuery Command Line Tool"
},
"gdu_only": false,
"id": "bq",
"is_configuration": false,
"is_hidden": false,
"is_required": false,
"platform": {},
"platform_required": false,
"version": {
"build_number": 20251212160114,
"version_string": "2.1.26"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,2 @@
bin/docker-credential-gcloud
bin/git-credential-gcloud.sh

View File

@@ -0,0 +1,46 @@
{
"components": [
{
"data": {
"contents_checksum": "03936b9be628bdb92a2c94609da221957750bc1c9e7b5ceeb9a531b57a8424ce",
"source": "",
"type": "tar"
},
"dependencies": [
"bundled-python3-unix",
"core",
"gcloud-deps",
"ssh-tools"
],
"details": {
"description": "Handles all core functionality for the Google Cloud CLI.",
"display_name": "Google Cloud CLI Core Libraries (Platform Specific)"
},
"gdu_only": false,
"id": "core-nix",
"is_configuration": false,
"is_hidden": true,
"is_required": true,
"platform": {
"operating_systems": [
"CYGWIN",
"LINUX",
"MACOSX",
"MSYS"
]
},
"platform_required": false,
"version": {
"build_number": 20251031202127,
"version_string": "2025.10.31"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,40 @@
{
"components": [
{
"data": {
"contents_checksum": "e5ca824830504d9ec1ea26c2100da2eaed3b952a84e373ba426f6a78d6a57fb4",
"source": "",
"type": "tar"
},
"dependencies": [
"bundled-python3-unix",
"core-nix",
"core-win",
"gcloud-deps",
"ssh-tools"
],
"details": {
"description": "Handles all core functionality for the Google Cloud CLI.",
"display_name": "Google Cloud CLI Core Libraries"
},
"gdu_only": false,
"id": "core",
"is_configuration": false,
"is_hidden": false,
"is_required": true,
"platform": {},
"platform_required": false,
"version": {
"build_number": 20251212160114,
"version_string": "2025.12.12"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,2 @@
bin/gcloud-crc32c
platform/gcloud-crc32c_licenses/LICENSES.txt

View File

@@ -0,0 +1,43 @@
{
"components": [
{
"data": {
"contents_checksum": "81916b45eaeacf3fbfe45ce1da0c26d97bdf6137fbf033ff9e3734b185ca4485",
"source": "",
"type": "tar"
},
"dependencies": [
"gcloud-crc32c"
],
"details": {
"description": "Command line tool that calculates CRC32C hashes on local files.",
"display_name": "Google Cloud CRC32C Hash Tool (Platform Specific)"
},
"gdu_only": false,
"id": "gcloud-crc32c-darwin-arm",
"is_configuration": false,
"is_hidden": true,
"is_required": false,
"platform": {
"architectures": [
"arm"
],
"operating_systems": [
"MACOSX"
]
},
"platform_required": false,
"version": {
"build_number": 20251031202127,
"version_string": "1.0.0"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,48 @@
{
"components": [
{
"dependencies": [
"gcloud-crc32c-darwin-arm",
"gcloud-crc32c-darwin-x86_64",
"gcloud-crc32c-linux-arm",
"gcloud-crc32c-linux-x86",
"gcloud-crc32c-linux-x86_64",
"gcloud-crc32c-windows-x86",
"gcloud-crc32c-windows-x86_64"
],
"details": {
"description": "Command line tool that calculates CRC32C hashes on local files.",
"display_name": "Google Cloud CRC32C Hash Tool"
},
"gdu_only": false,
"id": "gcloud-crc32c",
"is_configuration": false,
"is_hidden": false,
"is_required": false,
"platform": {
"architectures": [
"arm",
"x86",
"x86_64"
],
"operating_systems": [
"LINUX",
"MACOSX",
"WINDOWS"
]
},
"platform_required": false,
"version": {
"build_number": 0,
"version_string": "1.0.0"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,44 @@
{
"components": [
{
"data": {
"contents_checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"source": "",
"type": "tar"
},
"dependencies": [
"core",
"gcloud-deps"
],
"details": {
"description": "Set of third_party gcloud cli dependencies.",
"display_name": "gcloud cli dependencies (Platform Specific)"
},
"gdu_only": false,
"id": "gcloud-deps-darwin-x86_64",
"is_configuration": false,
"is_hidden": true,
"is_required": false,
"platform": {
"architectures": [
"x86_64"
],
"operating_systems": [
"MACOSX"
]
},
"platform_required": false,
"version": {
"build_number": 20210416153011,
"version_string": "2021.04.16"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,42 @@
{
"components": [
{
"data": {
"contents_checksum": "b3c03ddd545fef26a38d709f717330041494e1167f96302b73b69b97078dffeb",
"source": "",
"type": "tar"
},
"dependencies": [
"core",
"gcloud-deps-darwin-x86",
"gcloud-deps-darwin-x86_64",
"gcloud-deps-linux-x86",
"gcloud-deps-linux-x86_64",
"gcloud-deps-windows-x86",
"gcloud-deps-windows-x86_64"
],
"details": {
"description": "Set of third_party gcloud cli dependencies.",
"display_name": "gcloud cli dependencies"
},
"gdu_only": false,
"id": "gcloud-deps",
"is_configuration": false,
"is_hidden": true,
"is_required": false,
"platform": {},
"platform_required": false,
"version": {
"build_number": 20251212160114,
"version_string": "2025.12.12"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,31 @@
{
"components": [
{
"dependencies": [
"core"
],
"details": {
"description": "Default set of gcloud commands.",
"display_name": "Default set of gcloud commands"
},
"gdu_only": false,
"id": "gcloud",
"is_configuration": false,
"is_hidden": true,
"is_required": false,
"platform": {},
"platform_required": false,
"version": {
"build_number": 0,
"version_string": ""
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,44 @@
{
"components": [
{
"data": {
"contents_checksum": "e93774d79da5cfd4b40a623ce538cd687d7e5697e21cae518d927f00051ae1d0",
"source": "",
"type": "tar"
},
"dependencies": [
"core",
"gsutil"
],
"details": {
"description": "Provides the gsutil tool for interacting with Google Cloud Storage.",
"display_name": "Cloud Storage Command Line Tool (Platform Specific)"
},
"gdu_only": true,
"id": "gsutil-nix",
"is_configuration": false,
"is_hidden": true,
"is_required": false,
"platform": {
"operating_systems": [
"CYGWIN",
"LINUX",
"MACOSX",
"MSYS"
]
},
"platform_required": false,
"version": {
"build_number": 20251031202127,
"version_string": "5.35"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,38 @@
{
"components": [
{
"data": {
"contents_checksum": "134cd0b20104c6b46a48c7906c57e2e4f57c6c73370966a21d0226cc3bcfaca2",
"source": "",
"type": "tar"
},
"dependencies": [
"core",
"gsutil-nix",
"gsutil-win"
],
"details": {
"description": "Provides the gsutil tool for interacting with Google Cloud Storage.",
"display_name": "Cloud Storage Command Line Tool"
},
"gdu_only": true,
"id": "gsutil",
"is_configuration": false,
"is_hidden": false,
"is_required": false,
"platform": {},
"platform_required": false,
"version": {
"build_number": 20250627154417,
"version_string": "5.35"
}
}
],
"revision": 20251212160114,
"schema_version": {
"no_update": false,
"url": "https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz",
"version": 3
},
"version": "550.0.0"
}

View File

@@ -0,0 +1,20 @@
The Google Cloud CLI and its source code are licensed under Apache
License v. 2.0 (the "License"), unless otherwise specified by an alternate
license file.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Note that if you use the Google Cloud CLI with any Google Cloud Platform
products, your use is additionally going to be governed by the license agreement
or terms of service, as applicable, of the underlying Google Cloud Platform
product with which you are using the Google Cloud CLI. For example, if you are
using the Google Cloud CLI with Google App Engine, your use would additionally
be governed by the Google App Engine Terms of Service.
This also means that if you were to create works that call Google APIs, you
would still need to agree to the terms of service (usually, Google's
Developer Terms of Service at https://developers.google.com/terms) for those
APIs separately, as this code does not grant you any special rights to use
the services.

View File

@@ -0,0 +1,9 @@
Google Cloud CLI
Copyright 2013 Google LLC. All Rights Reserved.
The Google Cloud CLI contains tools and libraries that allow you to create and
manage resources on Google's Cloud Platform, including App Engine, Compute
Engine, Cloud Storage, Cloud SQL, and BigQuery.
For more information on how to set up and use the Google Cloud CLI, please see:
https://cloud.google.com/cli

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1 @@
550.0.0

View File

@@ -0,0 +1 @@
["core", "gcloud-deps", "bq", "gcloud", "gcloud-crc32c", "gsutil"]

View File

@@ -0,0 +1,321 @@
# Copyright 2013 Google Inc. All Rights Reserved.
"""Common bootstrapping functionality used by the wrapper scripts."""
# Disables import order warning and unused import. Setup changes the python
# path so cloud sdk imports will actually work, so it must come first.
# pylint: disable=C6203
# pylint: disable=W0611
from __future__ import absolute_import
from __future__ import unicode_literals
# Python 3 is strict about imports and we use this file in different ways, which
# makes sub-imports difficult. In general, when a script is executed, that
# directory is put on the PYTHONPATH. The issue is that some of the wrapper
# scripts are executed from within the bootstrapping/ directory and some are
# executed from within the bin/ directory.
# pylint: disable=g-statement-before-imports
if '.' in __name__:
# Here, __name__ will be bootstrapping.bootstrapping. This indicates that this
# file was loaded as a member of package bootstrapping. This in turn indicates
# that the main file that was executed was not in the bootstrapping directory,
# so bin/ is on the path and bootstrapping is considered a python package.
# Do an import of setup from this current package.
from . import setup # pylint:disable=g-import-not-at-top
else:
# In this case, __name__ is bootstrapping, which indicates that the main
# script was executed from within this directory meaning that Python doesn't
# consider this a package but rather the root of the PYTHONPATH. We can't do
# the above import because since we are not in a package, the '.' doesn't
# refer to anything. Just do a direct import which will find setup on the
# PYTHONPATH (which is just this directory).
import setup # pylint:disable=g-import-not-at-top
import gcloud
import sys
# Reorder sys.path if needed right now before more modules loaded and cached
sys.path = gcloud.reorder_sys_path(sys.path)
# pylint: disable=g-import-not-at-top
import json
# pylint: enable=g-import-not-at-top
import os
import platform
from googlecloudsdk.core import config
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.credentials import store as c_store
from googlecloudsdk.core.updater import local_state
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import encoding
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
from six.moves import input
# Directory containing this bootstrapping package (bin/bootstrapping/).
BOOTSTRAPPING_DIR = os.path.dirname(os.path.realpath(__file__))
# The SDK's bin/ directory (parent of bootstrapping/).
BIN_DIR = os.path.dirname(BOOTSTRAPPING_DIR)
# Root of the Cloud SDK installation (parent of bin/).
SDK_ROOT = os.path.dirname(BIN_DIR)
def DisallowIncompatiblePythonVersions():
  """Exits with status 1 when the running Python version is unsupported."""
  if platforms.PythonVersion().IsCompatible():
    return
  sys.exit(1)
def GetDecodedArgv():
  """Returns sys.argv with every argument decoded to text."""
  return list(map(console_attr.Decode, sys.argv))
def _FullPath(tool_dir, exec_name):
  """Returns the absolute path of exec_name under tool_dir in the SDK root."""
  path_parts = (SDK_ROOT, tool_dir, exec_name)
  return os.path.join(*path_parts)
def ExecutePythonTool(tool_dir, exec_name, *args):
  """Execute the given python script with the given args and command line.

  Args:
    tool_dir: the directory the tool is located in
    exec_name: additional path to the executable under the tool_dir
    *args: args for the command
  """
  py_path = None  # Let execution_utils resolve the interpreter by default.
  extra_popen_kwargs = {}
  # Gsutil allows users to set the desired Python interpreter using a separate
  # environment variable, so as to allow users to run gsutil using Python 3
  # without forcing the rest of Google Cloud CLI to use Python 3 (as it would
  # likely break at the time this comment was written).
  if exec_name == 'gsutil':
    # Since PY3, Python closes open FDs in child processes, since we need them
    # open for completions to work we set the close_fds kwarg to Popen.
    extra_popen_kwargs['close_fds'] = False
    gsutil_py = encoding.GetEncodedValue(os.environ, 'CLOUDSDK_GSUTIL_PYTHON')
    if gsutil_py:
      py_path = gsutil_py
  elif exec_name == 'bq.py':
    # bq likewise honors a dedicated interpreter override.
    bq_py = encoding.GetEncodedValue(os.environ, 'CLOUDSDK_BQ_PYTHON')
    if bq_py:
      py_path = bq_py
  _ExecuteTool(
      execution_utils.ArgsForPythonTool(
          _FullPath(tool_dir, exec_name), *args, python=py_path),
      **extra_popen_kwargs)
def ExecuteJarTool(java_bin, jar_dir, jar_name, classname, flags=None, *args):
  """Execute a given jar with the given args and command line.

  Args:
    java_bin: str, path to the system Java binary
    jar_dir: str, the directory the jar is located in
    jar_name: str, file name of the jar under tool_dir
    classname: str, name of the main class in the jar
    flags: [str], flags for the java binary
    *args: args for the command
  """
  jar_path = _FullPath(jar_dir, jar_name)
  # Assemble: -cp <jar> [java flags] [main class] [tool args].
  java_args = ['-cp', jar_path]
  java_args.extend(flags or [])
  if classname:
    java_args.append(classname)
  java_args.extend(args)
  _ExecuteTool(
      execution_utils.ArgsForExecutableTool(java_bin, *java_args))
def ExecuteJavaClass(java_bin,
                     jar_dir,
                     main_jar,
                     main_class,
                     java_flags=None,
                     main_args=None):
  """Execute a given java class within a directory of jars.

  Args:
    java_bin: str, path to the system Java binary
    jar_dir: str, directory of jars to put on class path
    main_jar: str, main jar (placed first on class path)
    main_class: str, name of the main class in the jar
    java_flags: [str], flags for the java binary
    main_args: args for the command
  """
  # The main jar goes first on the class path, followed by a glob over every
  # other jar in the directory.
  main_jar_path = os.path.join(SDK_ROOT, jar_dir, main_jar)
  jar_dir_path = os.path.join(SDK_ROOT, jar_dir, '*')
  classpath = os.pathsep.join([main_jar_path, jar_dir_path])
  java_args = ['-cp', classpath]
  java_args.extend(java_flags or [])
  java_args.append(main_class)
  java_args.extend(main_args or [])
  _ExecuteTool(execution_utils.ArgsForExecutableTool(java_bin, *java_args))
def ExecuteShellTool(tool_dir, exec_name, *args):
  """Execute the given bash script with the given args.

  Args:
    tool_dir: the directory the tool is located in
    exec_name: additional path to the executable under the tool_dir
    *args: args for the command
  """
  executable_path = _FullPath(tool_dir, exec_name)
  _ExecuteTool(
      execution_utils.ArgsForExecutableTool(executable_path, *args))
def ExecuteCMDTool(tool_dir, exec_name, *args):
  """Execute the given batch file with the given args.

  Args:
    tool_dir: the directory the tool is located in
    exec_name: additional path to the executable under the tool_dir
    *args: args for the command
  """
  batch_path = _FullPath(tool_dir, exec_name)
  _ExecuteTool(execution_utils.ArgsForCMDTool(batch_path, *args))
def _GetToolEnv():
  """Returns a copy of os.environ annotated with wrapper metadata.

  Marks the child process as launched by a Cloud SDK wrapper and records the
  SDK version and the Python interpreter used.
  """
  env = dict(os.environ)
  for name, value in (
      ('CLOUDSDK_WRAPPER', '1'),
      ('CLOUDSDK_VERSION', config.CLOUD_SDK_VERSION),
      ('CLOUDSDK_PYTHON', execution_utils.GetPythonExecutable()),
  ):
    encoding.SetEncodedValue(env, name, value)
  return env
def _ExecuteTool(args, **extra_popen_kwargs):
  """Executes a new tool with the given args, plus the args from the cmdline.

  Args:
    args: [str], The args of the command to execute.
    **extra_popen_kwargs: [dict], kwargs to be unpacked in Popen call for tool.
  """
  # The user's original command-line arguments are appended after ours.
  full_args = args + sys.argv[1:]
  execution_utils.Exec(full_args, env=_GetToolEnv(), **extra_popen_kwargs)
def GetDefaultInstalledComponents():
  """Gets the list of components to install by default.

  Returns:
    list(str), The component ids that should be installed. It will return []
    if there are no default components, or if there is any error in reading
    the file with the defaults.
  """
  try:
    default_components_file = os.path.join(BOOTSTRAPPING_DIR,
                                           '.default_components')
    with open(default_components_file) as f:
      return json.load(f)
  # If the file does not exist or is malformed, we don't want to expose this
  # as an error. Setup will just continue without installing any components by
  # default and will tell the user how to install the components they want
  # manually. Catching Exception (rather than a bare except) keeps
  # KeyboardInterrupt and SystemExit propagating as they should.
  except Exception:  # pylint:disable=broad-except
    pass
  return []
def WarnAndExitOnBlockedCommand(args, blocked_commands):
  """Block certain subcommands, warn the user, and exit.

  Args:
    args: the command line arguments, including the 0th argument which is
      the program name.
    blocked_commands: a map of blocked commands to the messages that should be
      printed when they're run.
  """
  # Flags are skipped and --flag=value are skipped. It is possible for
  # '--flag value' to result in a false positive if value happens to be a
  # blocked command.
  bad_arg = next(
      (candidate for candidate in args[1:]
       if candidate[:1] != '-' and candidate in blocked_commands),
      None)
  if bad_arg is None:
    return
  sys.stderr.write('It looks like you are trying to run "%s %s".\n'
                   % (args[0], bad_arg))
  sys.stderr.write('The "%s" command is no longer needed with '
                   'Google Cloud CLI.\n' % bad_arg)
  sys.stderr.write(blocked_commands[bad_arg] + '\n')
  answer = input('Really run this command? (y/N) ')
  if answer not in ['y', 'Y']:
    sys.exit(1)
def CheckUpdates(command_path):
  """Check for updates and inform the user.

  Args:
    command_path: str, The '.' separated path of the command that is currently
      being run (i.e. gcloud.foo.bar).
  """
  try:
    update_manager.UpdateManager.PerformUpdateCheck(command_path=command_path)
  except Exception:  # pylint:disable=broad-except
    # Deliberately swallow everything: a failed update check must never break
    # the command being run; only its informational messages should surface.
    pass
def CommandStart(command_name, component_id=None, version=None):
  """Logs that the given command is being executed.

  Args:
    command_name: str, The name of the command being executed.
    component_id: str, The component id that this command belongs to. Used for
      version information if version was not specified.
    version: str, Directly use this version instead of deriving it from
      component.
  """
  resolved_version = version
  if resolved_version is None and component_id:
    # Fall back to the installed component's recorded version.
    resolved_version = (
        local_state.InstallationState.VersionForInstalledComponent(
            component_id))
  metrics.Executions(command_name, resolved_version)
def GetActiveProjectAndAccount():
  """Get the active project name and account for the active credentials.

  For use with wrapping legacy tools that take projects and credentials on
  the command line.

  Returns:
    (str, str), A tuple whose first element is the project, and whose second
    element is the account.
  """
  return (properties.VALUES.core.project.Get(validate=False),
          properties.VALUES.core.account.Get(validate=False))
def GetActiveImpersonateServiceAccount():
  """Get the active impersonate_service_account property.

  For use with wrapping legacy tools that take impersonate_service_account on
  the command line.

  Returns:
    str, The name of the service account to impersonate.
  """
  impersonation_property = properties.VALUES.auth.impersonate_service_account
  return impersonation_property.Get(validate=False)
def ReadFileContents(*path_parts):
  """Returns file content at specified relative path wrt SDK root path."""
  full_path = os.path.join(SDK_ROOT, *path_parts)
  return files.ReadFileContents(full_path).strip()
# Register some other sources for credentials and project.
# Module-level side effect: registers the GCE metadata server as a credential
# provider so tools launched through this bootstrapping layer can pick up the
# instance's service-account credentials when running on GCE.
c_store.GceCredentialProvider().Register()

View File

@@ -0,0 +1,167 @@
#!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
"""A convenience wrapper for starting bq."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import bootstrapping
from googlecloudsdk.api_lib.iamcredentials import util as iamcred_util
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
from googlecloudsdk.core.credentials import gce
from googlecloudsdk.core.credentials import store
def _MaybeAddOption(args, name, value):
if value is None:
return
args.append('--{name}={value}'.format(name=name, value=value))
def _GetGoogleAuthFlagValue(argv):
for arg in argv[1:]:
if re.fullmatch(r'--use_google_auth(=(T|t)rue)*', arg):
return True
if re.fullmatch(r'(--nouse_google_auth|--use_google_auth=(F|f)alse)', arg):
return False
return None
def _IsOAuthAccessTokenFlagPresent(argv):
for arg in argv[1:]:
if re.fullmatch(r'--oauth_access_token=.+', arg):
return True
return False
def main():
  """Launches bq.

  Resolves which credential flags to pass to the bundled bq tool based on
  gcloud's active configuration (google-auth flag, impersonation, GCE service
  account, ADC file, or legacy P12 key, in that order), adds proxy/SSL
  settings, then execs platform/bq/bq.py with the assembled flags followed by
  the user's original arguments.
  """
  version = bootstrapping.ReadFileContents('platform/bq', 'VERSION')
  bootstrapping.CommandStart('bq', version=version)
  blocked_commands = {
      'init': 'To authenticate, run gcloud auth.',
  }
  argv = bootstrapping.GetDecodedArgv()
  bootstrapping.WarnAndExitOnBlockedCommand(argv, blocked_commands)
  # Positional (non-flag) arguments, i.e. the bq subcommand being run.
  cmd_args = [arg for arg in argv[1:] if not arg.startswith('-')]
  # Tri-state: True/False when the flag is explicit, None when unspecified.
  use_google_auth = _GetGoogleAuthFlagValue(argv)
  use_google_auth_unspecified = use_google_auth is None
  nouse_google_auth = not use_google_auth and not use_google_auth_unspecified
  args = []
  print_logging = False
  # 'bq info' doubles as a diagnostic mode: echo the auth decisions made here.
  if len(cmd_args) == 1 and cmd_args[0] == 'info':
    print_logging = True
  # Check for credentials only if they are needed.
  if (
      cmd_args
      and cmd_args[0] not in ('version', 'help')
      and not _IsOAuthAccessTokenFlagPresent(argv)
  ):
    store.IMPERSONATION_TOKEN_PROVIDER = (
        iamcred_util.ImpersonationAccessTokenProvider()
    )
    creds = store.Load()  # Checks if there are active credentials
    project, account = bootstrapping.GetActiveProjectAndAccount()
    if print_logging:
      print('Project:', project)
      print('Account:', account)
    adc_path = config.Paths().LegacyCredentialsAdcPath(account)
    single_store_path = config.Paths().LegacyCredentialsBqPath(account)
    if use_google_auth:
      if print_logging:
        print('Using Google auth')
      args = ['--use_google_auth']
    elif bootstrapping.GetActiveImpersonateServiceAccount():
      # Impersonation: hand bq the short-lived access token directly.
      if print_logging:
        print('Using Oauth')
      args = ['--oauth_access_token', creds.token]
    elif gce.Metadata() and account in gce.Metadata().Accounts():
      if print_logging:
        print('Using a GCE service account')
      args = ['--use_gce_service_account']
    elif os.path.isfile(adc_path) and nouse_google_auth:
      if print_logging:
        print('Using an ADC path')
      args = [
          '--nouse_google_auth',
          '--application_default_credential_file',
          adc_path,
          '--credential_file',
          single_store_path,
      ]
    else:
      # Last resort: legacy P12 service-account key, only usable with the
      # legacy (non-google-auth) code path.
      p12_key_path = config.Paths().LegacyCredentialsP12KeyPath(account)
      if os.path.isfile(p12_key_path):
        if nouse_google_auth:
          if print_logging:
            print(
                'Falling back to p12 credentials. '
                'WARNING these are being deprecated.'
            )
          print(
              'Using the deprecated P12 service account key format with legacy'
              ' auth may introduce security vulnerabilities and will soon be'
              ' unsupported. If you are unable to migrate to using the newer'
              ' JSON key format, file a report to inform the BQ CLI team of'
              ' your use case.'
          )
          args = [
              '--nouse_google_auth',
              '--service_account',
              account,
              '--service_account_credential_file',
              single_store_path,
              '--service_account_private_key_file',
              p12_key_path,
          ]
    use_client_cert = (
        os.getenv('GOOGLE_API_USE_CLIENT_CERTIFICATE', 'false').upper()
        == 'TRUE'
    )
    if use_client_cert:
      if print_logging:
        print('Using MTLS')
      args.append('--mtls')
    _MaybeAddOption(args, 'project_id', project)
  bootstrapping.CheckUpdates('bq')
  # Mirror gcloud's proxy and SSL configuration into bq flags.
  proxy_params = properties.VALUES.proxy
  _MaybeAddOption(args, 'proxy_address', proxy_params.address.Get())
  _MaybeAddOption(args, 'proxy_port', proxy_params.port.Get())
  _MaybeAddOption(args, 'proxy_username', proxy_params.username.Get())
  _MaybeAddOption(args, 'proxy_password', proxy_params.password.Get())
  _MaybeAddOption(
      args,
      'disable_ssl_validation',
      properties.VALUES.auth.disable_ssl_validation.GetBool(),
  )
  _MaybeAddOption(
      args,
      'ca_certificates_file',
      properties.VALUES.core.custom_ca_certs_file.Get(),
  )
  if print_logging:
    print('Args passed from gcloud:', args)
  bootstrapping.ExecutePythonTool('platform/bq', 'bq.py', *args)
if __name__ == '__main__':
  # Refuse to run on unsupported Python versions before doing anything else.
  bootstrapping.DisallowIncompatiblePythonVersions()
  try:
    main()
  except Exception as e:  # pylint: disable=broad-except
    # Route any failure through gcloud's standard error handler so users see
    # consistent formatting and the process exits nonzero.
    exceptions.HandleError(e, 'bq')

View File

@@ -0,0 +1,174 @@
#!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
"""A convenience wrapper for starting gsutil."""
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import os
import bootstrapping
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import config
from googlecloudsdk.core import context_aware
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.credentials import gce as c_gce
from googlecloudsdk.core.util import encoding
from googlecloudsdk.core.util import files
def _MaybeAddBotoOption(args, section, name, value):
if value is None:
return
args.append('-o')
args.append('{section}:{name}={value}'.format(
section=section, name=name, value=value))
def _GetCertProviderCommand(context_config):
"""Returns the cert provider command from the context config."""
# TODO(b/190102217) - Cleanup code that handles both version of context_config
if hasattr(context_config, 'cert_provider_command'):
return context_config.cert_provider_command
try:
contents = files.ReadFileContents(context_config.config_path)
json_out = json.loads(contents)
if 'cert_provider_command' in json_out:
return json_out['cert_provider_command']
except files.Error as e:
log.debug('context aware settings discovery file %s - %s',
context_config.config_path, e)
def _AddContextAwareOptions(args):
  """Adds device certificate settings for mTLS."""
  context_config = context_aware.Config()
  # Enterprise certificate is not yet supported for gsutil.
  is_enterprise_cert = (
      context_config
      and context_config.config_type
      == context_aware.ConfigType.ENTERPRISE_CERTIFICATE)
  if is_enterprise_cert:
    return
  # TODO(b/190102217) - Cleanup code that handles both version of context_config
  # Note: when context_config is falsy this expression yields that falsy object
  # itself (None), so _MaybeAddBotoOption skips the option entirely.
  use_client_certificate = (
      context_config and
      getattr(context_config, 'use_client_certificate', True))
  _MaybeAddBotoOption(args, 'Credentials', 'use_client_certificate',
                      use_client_certificate)
  if not context_config:
    return
  cert_provider_command = _GetCertProviderCommand(context_config)
  if isinstance(cert_provider_command, list):
    # e.g. cert_provider_command = ['*/apihelper', '--print_certificate']
    cert_provider_command = ' '.join(cert_provider_command)
  # Don't need to pass mTLS data if gsutil shouldn't be using it.
  _MaybeAddBotoOption(args, 'Credentials', 'cert_provider_command',
                      cert_provider_command)
def main():
  """Launches gsutil.

  Translates gcloud's active configuration (project, credentials, proxy, SSL,
  mTLS) into boto '-o' overrides and environment variables, then execs the
  bundled gsutil with those overrides followed by the user's own arguments.
  """
  args = []
  project, account = bootstrapping.GetActiveProjectAndAccount()
  pass_credentials = (
      properties.VALUES.core.pass_credentials_to_gsutil.GetBool() and
      not properties.VALUES.auth.disable_credentials.GetBool())
  _MaybeAddBotoOption(args, 'GSUtil', 'default_project_id', project)
  if pass_credentials:
    # Allow gsutil to only check for the '1' string value, as is done
    # with regard to the 'CLOUDSDK_WRAPPER' environment variable.
    encoding.SetEncodedValue(
        os.environ, 'CLOUDSDK_CORE_PASS_CREDENTIALS_TO_GSUTIL', '1')
    if account in c_gce.Metadata().Accounts():
      # Tell gsutil that it should obtain credentials from the GCE metadata
      # server for the instance's configured service account.
      _MaybeAddBotoOption(args, 'GoogleCompute', 'service_account', 'default')
      # For auth'n debugging purposes, allow gsutil to reason about whether the
      # configured service account was set in a boto file or passed from here.
      encoding.SetEncodedValue(
          os.environ, 'CLOUDSDK_PASSED_GCE_SERVICE_ACCOUNT_TO_GSUTIL', '1')
    else:
      legacy_config_path = config.Paths().LegacyCredentialsGSUtilPath(account)
      # We construct a BOTO_PATH that tacks the config containing our
      # credentials options onto the end of the list of config paths. We ensure
      # the other credential options are loaded first so that ours will take
      # precedence and overwrite them.
      boto_config = encoding.GetEncodedValue(os.environ, 'BOTO_CONFIG', '')
      boto_path = encoding.GetEncodedValue(os.environ, 'BOTO_PATH', '')
      if boto_config:
        boto_path = os.pathsep.join([boto_config, legacy_config_path])
      elif boto_path:
        boto_path = os.pathsep.join([boto_path, legacy_config_path])
      else:
        path_parts = ['/etc/boto.cfg',
                      os.path.expanduser(os.path.join('~', '.boto')),
                      legacy_config_path]
        boto_path = os.pathsep.join(path_parts)
      # BOTO_CONFIG wins over BOTO_PATH if both are set, so clear it.
      encoding.SetEncodedValue(os.environ, 'BOTO_CONFIG', None)
      encoding.SetEncodedValue(os.environ, 'BOTO_PATH', boto_path)
  # Tell gsutil whether gcloud analytics collection is enabled.
  encoding.SetEncodedValue(
      os.environ, 'GA_CID', metrics.GetCIDIfMetricsEnabled())
  # Set proxy settings. Note that if these proxy settings are configured in a
  # boto config file, the options here will be loaded afterward, overriding
  # them.
  proxy_params = properties.VALUES.proxy
  proxy_address = proxy_params.address.Get()
  if proxy_address:
    _MaybeAddBotoOption(args, 'Boto', 'proxy', proxy_address)
    _MaybeAddBotoOption(args, 'Boto', 'proxy_port', proxy_params.port.Get())
    _MaybeAddBotoOption(args, 'Boto', 'proxy_rdns', proxy_params.rdns.GetBool())
    _MaybeAddBotoOption(args, 'Boto', 'proxy_user', proxy_params.username.Get())
    _MaybeAddBotoOption(args, 'Boto', 'proxy_pass', proxy_params.password.Get())
  # Set SSL-related settings.
  disable_ssl = properties.VALUES.auth.disable_ssl_validation.GetBool()
  _MaybeAddBotoOption(args, 'Boto', 'https_validate_certificates',
                      None if disable_ssl is None else not disable_ssl)
  _MaybeAddBotoOption(args, 'Boto', 'ca_certificates_file',
                      properties.VALUES.core.custom_ca_certs_file.Get())
  # Sync device certificate settings for mTLS.
  _AddContextAwareOptions(args)
  # Note that the original args to gsutil will be appended after the args we've
  # supplied here.
  bootstrapping.ExecutePythonTool('platform/gsutil', 'gsutil', *args)
if __name__ == '__main__':
  try:
    version = bootstrapping.ReadFileContents('platform/gsutil', 'VERSION')
    bootstrapping.CommandStart('gsutil', version=version)
    # gsutil's built-in self-update is disabled; components update owns it.
    blocked_commands = {
        'update': 'To update, run: gcloud components update',
    }
    argv = bootstrapping.GetDecodedArgv()
    bootstrapping.WarnAndExitOnBlockedCommand(argv, blocked_commands)
    # Don't call bootstrapping.PreRunChecks because anonymous access is
    # supported for some endpoints. gsutil will output the appropriate
    # error message upon receiving an authentication error.
    bootstrapping.CheckUpdates('gsutil')
    main()
  except Exception as e:  # pylint: disable=broad-except
    # Route any failure through gcloud's standard error handler.
    exceptions.HandleError(e, 'gsutil')

View File

@@ -0,0 +1,358 @@
#!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
"""Do initial setup for the Cloud CLI."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import bootstrapping
# pylint:disable=g-bad-import-order
import argparse
import os
import sys
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import config
from googlecloudsdk.core import platforms_install
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.updater import python_manager
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import encoding
from googlecloudsdk.core.util import platforms
from googlecloudsdk import gcloud_main
# pylint:disable=superfluous-parens
# Module-level side effect: build a gcloud CLI object up front so the installer
# can invoke gcloud commands (e.g. component installs) in-process.
_CLI = gcloud_main.CreateCLI([])
def ParseArgs():
  """Parse args for the installer, so interactive prompts can be avoided.

  Returns:
    argparse.Namespace, the parsed installer arguments.
  """

  def Bool(s):
    # argparse 'type' callable: treat 'true'/'1' (case-insensitive) as True,
    # anything else as False.
    return s.lower() in ['true', '1']

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--usage-reporting',
      default=None,
      type=Bool,
      help='(true/false) Enable anonymous usage reporting.',
  )
  parser.add_argument(
      '--screen-reader',
      default=None,
      type=Bool,
      help='(true/false) Enable screen reader mode.',
  )
  parser.add_argument(
      '--universe-domain',
      default=None,
      help=(
          'Universe domain to default to. If specified, sets the'
          ' [core/universe_domain] property installation-wide.'
      ),
  )
  parser.add_argument(
      '--rc-path',
      help=(
          'Profile to update with PATH and completion. If'
          ' given without --command-completion or'
          ' --path-update in "quiet" mode, a line will be'
          ' added to this profile for both command completion'
          ' and path updating.'
      ),
  )
  parser.add_argument(
      '--command-completion',
      '--bash-completion',
      default=None,
      type=Bool,
      help=(
          # Fixed missing space: 'provide--rc-path' -> 'provide --rc-path'
          # (matches the parallel --path-update help text below).
          '(true/false) Add a line for command completion in'
          ' the profile. In "quiet" mode, if True and you do'
          ' not provide --rc-path, the default profile'
          ' will be updated.'
      ),
  )
  parser.add_argument(
      '--path-update',
      default=None,
      type=Bool,
      help=(
          '(true/false) Add a line for path updating in the'
          ' profile. In "quiet" mode, if True and you do not'
          ' provide --rc-path, the default profile will be'
          ' updated.'
      ),
  )
  parser.add_argument(
      '--disable-installation-options',
      action='store_true',
      help='DEPRECATED. This flag is no longer used.',
  )
  parser.add_argument(
      '--override-components',
      nargs='*',
      help=(
          'Override the components that would be installed by '
          'default and install these instead.'
      ),
  )
  parser.add_argument(
      '--additional-components',
      nargs='+',
      help=(
          'Additional components to install by default. These'
          ' components will either be added to the default install '
          'list, or to the override-components (if provided).'
      ),
  )
  parser.add_argument(
      '--update-installed-components',
      action='store_true',
      help=(
          'Update previously installed components. Checks the install folder '
          'to make sure that there are no installed components that need '
          'to be updated. This is currently just used for updating gcloud '
          'via homebrew.'
      )
  )
  # Must have a None default so properties are not always overridden when the
  # arg is not provided.
  parser.add_argument(
      '--quiet',
      '-q',
      default=None,
      action=actions.StoreConstProperty(
          properties.VALUES.core.disable_prompts, True
      ),
      help=(
          'Disable all interactive prompts. If input is '
          'required, defaults will be used or an error will be '
          'raised'
      ),
  )
  parser.add_argument(
      '--install-python',
      default=True,
      type=Bool,
      help='(true/false) Attempt to install Python. MacOS only.',
  )
  parser.add_argument(
      '--no-compile-python',
      action='store_false',
      help=(
          'False. If set, skips python compilation after component'
          ' installation.'
      ),
  )
  # Installer args come from the wrapper script; skip the program name.
  return parser.parse_args(bootstrapping.GetDecodedArgv()[1:])
def Prompts(usage_reporting, universe_domain):
  """Resolves the usage-reporting setting and persists it.

  Args:
    usage_reporting: bool or None. True enables usage reporting. When None,
      the CLOUDSDK_CORE_DISABLE_USAGE_REPORTING environment variable is
      consulted; failing that, a non-default universe domain disables
      reporting, alternate release channels force it on, and otherwise the
      user is prompted interactively.
    universe_domain: str or None. When set and different from the default
      universe domain, usage reporting is turned off.
  """
  if usage_reporting is None:
    env_override = encoding.GetEncodedValue(
        os.environ, 'CLOUDSDK_CORE_DISABLE_USAGE_REPORTING')
    if env_override is not None:
      # Any non-empty value in the disable var turns reporting off.
      usage_reporting = not env_override
    elif (universe_domain is not None and
          universe_domain != properties.VALUES.core.universe_domain.default):
      usage_reporting = False
    elif config.InstallationConfig.Load().IsAlternateReleaseChannel():
      usage_reporting = True
      print("""
Usage reporting is always on for alternate release channels.
""")
    else:
      print("""
To help improve the quality of this product, we collect anonymized usage data
and anonymized stacktraces when crashes are encountered; additional information
is available at <https://cloud.google.com/sdk/usage-statistics>. This data is
handled in accordance with our privacy policy
<https://cloud.google.com/terms/cloud-privacy-notice>. You may choose to opt in this
collection now (by choosing 'Y' at the below prompt), or at any time in the
future by running the following command:
gcloud config set disable_usage_reporting false
""")
      usage_reporting = console_io.PromptContinue(
          prompt_string='Do you want to help improve the Google Cloud CLI',
          default=False)
  # The stored property is the inverse of the resolved choice.
  properties.PersistProperty(
      properties.VALUES.core.disable_usage_reporting,
      not usage_reporting,
      scope=properties.Scope.INSTALLATION)
def GetInstalledComponents():
  """Returns the ids of the components present in the .install folder."""
  current_platform = platforms.Platform.Current()
  manager = update_manager.UpdateManager(
      platform_filter=current_platform, warn=False)
  # Iterating the mapping yields its keys: the installed component ids.
  return list(manager.GetCurrentVersionsInformation())
def Install(
    override_components, update_installed_components,
    additional_components, compile_python):
  """Performs the normal installation of the Cloud CLI.

  Args:
    override_components: [str] or None, components to install instead of the
      pre-configured defaults.
    update_installed_components: bool, True to also refresh components that a
      previous installation left behind (used by e.g. homebrew upgrades).
    additional_components: [str] or None, extra components to install on top
      of the default (or override) list.
    compile_python: bool, False to skip python byte-compilation.
  """
  # Install the OS specific wrapper scripts for gcloud and any pre-configured
  # components for the CLI.
  if override_components is not None:
    to_install = override_components
  else:
    to_install = bootstrapping.GetDefaultInstalledComponents()
  # A non-empty default list means this is an incomplete Cloud CLI package
  # (curl installer, Windows installer, or a raw seed), so run in update mode
  # to bring everything to the latest version. A fully packaged CLI has no
  # defaults; extra components are then installed without a version update.
  run_update = bool(to_install)
  # If gcloud was previously installed, there may still be some old installed
  # components; make sure those are brought up to date as well.
  if update_installed_components:
    previously_installed = GetInstalledComponents()
    if previously_installed:
      to_install.extend(previously_installed)
  if additional_components:
    to_install.extend(additional_components)
  InstallOrUpdateComponents(to_install, compile_python, update=run_update)
  # Show the list of components if there were no pre-configured ones.
  if not to_install:
    _CLI.Execute(['--quiet', 'components', 'list'])
def ReInstall(component_ids, compile_python):
  """Does a forced reinstallation of the Google Cloud CLI.

  Args:
    component_ids: [str], The components that should be automatically
      installed, on top of the pre-configured defaults.
    compile_python: bool, False if we skip compile python
  """
  to_install = bootstrapping.GetDefaultInstalledComponents()
  to_install.extend(component_ids)
  # We always run in update mode here because we are reinstalling and trying
  # to get the latest version anyway.
  # Fix: pass the combined list. Previously only component_ids was forwarded,
  # which silently dropped the default components gathered into to_install.
  InstallOrUpdateComponents(to_install, compile_python, update=True)
def InstallOrUpdateComponents(component_ids, compile_python, update):
  """Installs or updates the given components.

  Args:
    component_ids: [str], The components to install or update.
    compile_python: bool, False if we skip compile python
    update: bool, True if we should run update, False to run install. If there
      are no components to install, this does nothing unless in update mode
      (in which case everything gets updated).
  """
  # In install mode with nothing requested there is nothing to do; update
  # mode with no components still brings everything to the latest version.
  if not update and not component_ids:
    return
  print("""
This will install all the core command line tools necessary for working with
the Google Cloud Platform.
""")
  verb = 'update' if update else 'install'
  cli_args = ['--quiet', 'components', verb]
  cli_args.append('--compile-python' if compile_python
                  else '--no-compile-python')
  _CLI.Execute(cli_args + component_ids)
def main():
  """Installer entry point: parse args, persist settings, install components."""
  # The installer itself never needs mTLS client certificates.
  properties.VALUES.context_aware.use_client_certificate.Set(False)
  pargs = ParseArgs()
  # Persist installation-scoped properties first so that a restarted process
  # (see RestartIfUsingBundledPython below) observes them.
  if pargs.screen_reader is not None:
    properties.PersistProperty(properties.VALUES.accessibility.screen_reader,
                               pargs.screen_reader,
                               scope=properties.Scope.INSTALLATION)
  if pargs.universe_domain is not None:
    properties.PersistProperty(properties.VALUES.core.universe_domain,
                               pargs.universe_domain,
                               scope=properties.Scope.INSTALLATION)
  # May re-exec this script under the bundled Python interpreter; must run
  # before any real installation work starts.
  update_manager.RestartIfUsingBundledPython(sdk_root=config.Paths().sdk_root,
                                             command=__file__)
  # Set when repairing an existing installation; carries a comma-separated
  # list of component ids to reinstall.
  reinstall_components = encoding.GetEncodedValue(
      os.environ, 'CLOUDSDK_REINSTALL_COMPONENTS')
  try:
    if reinstall_components:
      ReInstall(reinstall_components.split(','), pargs.no_compile_python)
    else:
      Prompts(pargs.usage_reporting, pargs.universe_domain)
      bootstrapping.CommandStart('INSTALL', component_id='core')
      if not config.INSTALLATION_CONFIG.disable_updater:
        Install(
            pargs.override_components,
            pargs.update_installed_components,
            pargs.additional_components,
            pargs.no_compile_python,
        )
      # Offer to update the user's shell rc file for completion and PATH.
      platforms_install.UpdateRC(
          completion_update=pargs.command_completion,
          path_update=pargs.path_update,
          rc_path=pargs.rc_path,
          bin_path=bootstrapping.BIN_DIR,
          sdk_root=bootstrapping.SDK_ROOT,
      )
      if pargs.install_python:
        # MacOS only (see the --install-python flag help).
        python_manager.PromptAndInstallPythonOnMac()
      print("""\
For more information on how to get started, please visit:
https://cloud.google.com/sdk/docs/quickstarts
""")
  except exceptions.ToolException as e:
    print(e)
    sys.exit(1)
# Script entry point when executed directly by the installer wrappers.
if __name__ == '__main__':
  main()

View File

@@ -0,0 +1,50 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
"""A convenience wrapper for starting dev_appserver for appengine for Java."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import bootstrapping
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core.updater import update_manager
# Path to the jar's directory relative to the SDK root.
_JAR_DIR = os.path.join('platform', 'google_appengine', 'google', 'appengine',
                        'tools', 'java', 'lib')
# Filename of the jar that bootstraps the App Engine Java tools.
_JAR_NAME = 'appengine-tools-api.jar'
# JVM flags (enable assertions).
_FLAGS = ['-ea']
# Name of the main class handed to the JVM.
_CLASSNAME = 'com.google.appengine.tools.KickStart'
# Additional arguments, comes before sys.argv.
# The KickStart main class accepts this classname as its first arg.
_ARGS = [
    'com.google.appengine.tools.development.DevAppServerMain',
    '--promote_yaml'
]
def main():
  """Runs dev_appserver 1 for Java via the KickStart jar."""
  # Ensure the Java App Engine component is present; this call may restart
  # the process after installing it.
  update_manager.UpdateManager.EnsureInstalledAndRestart(
      ['app-engine-java'], command=__file__)
  java_path = java.RequireJavaInstalled('Java local development server')
  bootstrapping.ExecuteJarTool(
      java_path, _JAR_DIR, _JAR_NAME, _CLASSNAME, _FLAGS, *_ARGS)
if __name__ == '__main__':
  # Refuse unsupported Python versions, record command metrics, and surface
  # pending component updates before launching the server.
  bootstrapping.DisallowIncompatiblePythonVersions()
  bootstrapping.CommandStart('dev_appserver_java', component_id='core')
  bootstrapping.CheckUpdates('dev_appserver_java')
  main()

View File

@@ -0,0 +1,66 @@
# Copyright 2013 Google Inc. All Rights Reserved.
"""Does some initial setup and checks for all the bootstrapping scripts."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
# We don't want to import any libraries at this point so we handle py2/3
# manually.
# Environment variable names consulted below; encoded to bytes on Python 2
# because os.environ keys are byte strings there.
SITE_PACKAGES = 'CLOUDSDK_PYTHON_SITEPACKAGES'
VIRTUAL_ENV = 'VIRTUAL_ENV'
if sys.version_info[0] == 2:
  SITE_PACKAGES = SITE_PACKAGES.encode('utf-8')
  VIRTUAL_ENV = VIRTUAL_ENV.encode('utf-8')

# If we're in a virtualenv, always import site packages. Also, upon request.
# We can't import anything from googlecloudsdk here so we are just going to
# assume no one has done anything as silly as to put any unicode in either of
# these env vars.
import_site_packages = (os.environ.get(SITE_PACKAGES) or
                        os.environ.get(VIRTUAL_ENV))

if import_site_packages:
  # pylint:disable=unused-import
  # pylint:disable=g-import-not-at-top
  import site

# Put Google Cloud CLI libs on the path, ahead of anything already there.
root_dir = os.path.normpath(os.path.join(
    os.path.dirname(os.path.realpath(__file__)), '..', '..'))
lib_dir = os.path.join(root_dir, 'lib')
third_party_dir = os.path.join(lib_dir, 'third_party')
sys.path = [lib_dir, third_party_dir] + sys.path

# When python is not invoked with the -S option, it can preload google module
# via .pth file setting its __path__. After this happens, our vendored google
# package may not be in the __path__. After our vendored dependency directory
# is put at the first place in the sys.path, google module should be reloaded,
# so that our vendored copy can be preferred.
if 'google' in sys.modules:
  import google  # pylint: disable=g-import-not-at-top
  try:
    # Python 2: reload is a builtin.
    reload(google)
  except NameError:
    # Python 3: reload lives in importlib.
    import importlib  # pylint: disable=g-import-not-at-top
    importlib.reload(google)

# Safe to import SDK code only after the path manipulation above.
# pylint: disable=g-import-not-at-top
from googlecloudsdk.core.util import platforms


# Add more methods to this list for universal checks that need to be performed
def DoAllRequiredChecks():
  """Runs every startup check; exits the process if any check fails."""
  if not platforms.PythonVersion().IsCompatible():
    sys.exit(1)


DoAllRequiredChecks()

View File

@@ -0,0 +1,199 @@
#!/bin/sh
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# <cloud-sdk-sh-preamble>
#
# CLOUDSDK_ROOT_DIR (a) installation root dir
# CLOUDSDK_PYTHON (u) python interpreter path
# CLOUDSDK_GSUTIL_PYTHON (u) python interpreter path for gsutil
# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments
# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages
# CLOUDSDK_BQ_PYTHON (u) python interpreter for bq
# CLOUDSDK_ENCODING (u) python io encoding for gcloud
#
# (a) always defined by the preamble
# (u) user definition overrides preamble
# Wrapper around 'which' and 'command -v', tries which first, then falls back
# to command -v
_cloudsdk_which() {
  which "$1" 2>/dev/null || command -v "$1" 2>/dev/null
}

# Echoes the first of the given interpreter names found on PATH, without any
# version check; falls back to plain "python" when none are found.
order_python_no_check() {
  selected_version=""
  for python_version in "$@"
  do
    if [ -z "$selected_version" ]; then
      if _cloudsdk_which $python_version > /dev/null; then
        selected_version=$python_version
      fi
    fi
  done
  if [ -z "$selected_version" ]; then
    selected_version=python
  fi
  echo $selected_version
}

# Echoes the first of the given interpreters whose version lies in the
# supported range (3.8 through 3.13); echoes nothing when none qualify.
order_python() {
  selected_version=""
  for python_version in "$@"
  do
    if [ -z "$selected_version" ]; then
      if "$python_version" -c "import sys; sys.exit(0 if ((3,8) <= (sys.version_info.major, sys.version_info.minor) <= (3,13)) else 1)" > /dev/null 2>&1; then
        selected_version=$python_version
      fi
    fi
  done
  echo $selected_version
}
# Determines the real cloud sdk root dir given the script path.
# Would be easier with a portable "readlink -f".
_cloudsdk_root_dir() {
  # Make $1 absolute: absolute paths pass through, relative paths containing
  # a slash are anchored at $PWD, bare names are resolved via PATH.
  case $1 in
  /*)   _cloudsdk_path=$1
        ;;
  */*)  _cloudsdk_path=$PWD/$1
        ;;
  *)    _cloudsdk_path=$(_cloudsdk_which $1)
        case $_cloudsdk_path in
        /*) ;;
        *)  _cloudsdk_path=$PWD/$_cloudsdk_path ;;
        esac
        ;;
  esac
  _cloudsdk_dir=0
  # Follow symlink chains; if the final target is not a directory, retry
  # once on its parent directory (the flag limits this to a single retry).
  while :
  do
    while _cloudsdk_link=$(readlink "$_cloudsdk_path")
    do
      case $_cloudsdk_link in
      /*) _cloudsdk_path=$_cloudsdk_link ;;
      *)  _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;;
      esac
    done
    case $_cloudsdk_dir in
    1)  break ;;
    esac
    if [ -d "${_cloudsdk_path}" ]; then
      break
    fi
    _cloudsdk_dir=1
    _cloudsdk_path=$(dirname "$_cloudsdk_path")
  done
  # Normalize trailing "/" and "/." and strip a trailing "/bin" so that the
  # echoed result is the SDK root itself.
  while :
  do  case $_cloudsdk_path in
      */)     _cloudsdk_path=$(dirname "$_cloudsdk_path/.")
              ;;
      */.)    _cloudsdk_path=$(dirname "$_cloudsdk_path")
              ;;
      */bin)  dirname "$_cloudsdk_path"
              break
              ;;
      *)      echo "$_cloudsdk_path"
              break
              ;;
      esac
  done
}

CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
setup_cloudsdk_python() {
  # if $CLOUDSDK_PYTHON is not set, look for bundled python else
  # prefer python3 over python
  if [ -z "$CLOUDSDK_PYTHON" ]; then
    # Is bundled python present and working? (Only used on x86_64.)
    ARCH=$(uname -m 2>/dev/null)
    if [ -x "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" ] && \
       [ "$ARCH" = "x86_64" ] && \
       "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" --version > /dev/null 2>&1;
    then
      CLOUDSDK_PYTHON="$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3"
      CLOUDSDK_PYTHON_SITEPACKAGES=1
    else
      # CLOUDSDK_CONFIG overrides the default per-user config location.
      GLOBAL_CONFIG="$HOME/.config/gcloud"
      if [ "$CLOUDSDK_CONFIG" ];
      then
        GLOBAL_CONFIG="$CLOUDSDK_CONFIG"
      fi
      # If there is an enabled virtualenv activate it
      if [ -f "$GLOBAL_CONFIG/virtenv/bin/activate" ];
      then
        if [ -f "$GLOBAL_CONFIG/virtenv/enabled" ];
        then
          . "$GLOBAL_CONFIG/virtenv/bin/activate"
        fi
      fi
      # Prefer an interpreter that passes the version check; otherwise fall
      # back to whatever python exists on PATH.
      primary_python=python3.13
      CLOUDSDK_PYTHON=$(order_python python3 "$primary_python" python3.12 python3.14 python3.11 python3.10 python3.9 python)
      if [ -z "$CLOUDSDK_PYTHON" ]; then
        CLOUDSDK_PYTHON=$(order_python_no_check python3 python)
      fi
    fi
  fi
}
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use
# CLOUDSDK_PYTHON to configure which python gcloud uses.
unset PYTHONHOME

# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty
case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in
:::)  # add -S to CLOUDSDK_PYTHON_ARGS if not already there
      case " $CLOUDSDK_PYTHON_ARGS " in
      *" -S "*) ;;
      " ")      CLOUDSDK_PYTHON_ARGS="-S"
                ;;
      *)        CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S"
                ;;
      esac
      unset CLOUDSDK_PYTHON_SITEPACKAGES
      ;;
*)    # remove -S from CLOUDSDK_PYTHON_ARGS if already there
      while :; do
        case " $CLOUDSDK_PYTHON_ARGS " in
        *" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;;
        *) break ;;
        esac
      done
      # if CLOUDSDK_PYTHON_SITEPACKAGES is empty
      [ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] &&
          CLOUDSDK_PYTHON_SITEPACKAGES=1
      export CLOUDSDK_PYTHON_SITEPACKAGES
      ;;
esac

# Allow users to set the Python interpreter used to launch gsutil, falling
# back to the CLOUDSDK_PYTHON interpreter otherwise.
if [ -z "$CLOUDSDK_GSUTIL_PYTHON" ]; then
  CLOUDSDK_GSUTIL_PYTHON="$CLOUDSDK_PYTHON"
fi

# bq gets the same treatment: its own override, defaulting to gcloud's.
if [ -z "$CLOUDSDK_BQ_PYTHON" ]; then
  CLOUDSDK_BQ_PYTHON="$CLOUDSDK_PYTHON"
fi

# Default the Python IO encoding to UTF-8 unless already configured.
if [ -z "$CLOUDSDK_ENCODING" ]; then
  if [ -z "$PYTHONIOENCODING" ]; then
    CLOUDSDK_ENCODING=UTF-8
  else
    CLOUDSDK_ENCODING="$PYTHONIOENCODING"
  fi
fi

export CLOUDSDK_ROOT_DIR
export CLOUDSDK_PYTHON_ARGS
export CLOUDSDK_GSUTIL_PYTHON
export CLOUDSDK_BQ_PYTHON
export CLOUDSDK_ENCODING
export PYTHONIOENCODING="$CLOUDSDK_ENCODING"

# </cloud-sdk-sh-preamble>

# Launch the bq bootstrapper under the selected interpreter.
"$CLOUDSDK_BQ_PYTHON" $CLOUDSDK_PYTHON_ARGS "${CLOUDSDK_ROOT_DIR}/bin/bootstrapping/bq.py" "$@"

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
"""A convenience wrapper for starting dev_appserver for appengine for python."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
from bootstrapping import bootstrapping
from googlecloudsdk.api_lib.app import wrapper_util
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.emulators import datastore_util
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core import metrics
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import platforms
def main():
  """Launches dev_appserver.py."""
  argv = bootstrapping.GetDecodedArgv()
  # Determine which runtimes the app uses so the matching SDK components can
  # be ensured before the server starts.
  runtimes = wrapper_util.GetRuntimes(argv[1:])
  options = wrapper_util.ParseDevAppserverFlags(sys.argv[1:])
  if options.support_datastore_emulator:
    # The Cloud Datastore emulator requires a JRE (version 8+).
    java.RequireJavaInstalled(datastore_util.DATASTORE_TITLE, min_version=8)
  components = wrapper_util.GetComponents(runtimes)
  components.append('cloud-datastore-emulator')
  # May restart this process after installing missing components.
  update_manager.UpdateManager.EnsureInstalledAndRestart(
      components,
      command=__file__)
  args = [
      '--skip_sdk_update_check=True'
  ]
  # Forward anonymous metrics identifiers only when metrics are enabled.
  google_analytics_client_id = metrics.GetCIDIfMetricsEnabled()
  google_analytics_user_agent = metrics.GetUserAgentIfMetricsEnabled()
  if google_analytics_client_id:
    args.extend([
        '--google_analytics_client_id={}'.format(google_analytics_client_id),
        '--google_analytics_user_agent={}'.format(google_analytics_user_agent)
    ])
  # Pass the path to cloud datastore emulator to dev_appserver.
  # realpath is needed in the case where __file__ is a path containing symlinks.
  sdk_root = os.path.dirname(
      os.path.dirname(os.path.abspath(os.path.realpath(__file__))))
  emulator_dir = os.path.join(sdk_root, 'platform', 'cloud-datastore-emulator')
  emulator_script = (
      'cloud_datastore_emulator.cmd' if platforms.OperatingSystem.IsWindows()
      else 'cloud_datastore_emulator')
  args.append('--datastore_emulator_cmd={}'.format(
      os.path.join(emulator_dir, emulator_script)))
  bootstrapping.ExecutePythonTool(
      os.path.join('platform', 'google_appengine'), 'dev_appserver.py', *args)
if __name__ == '__main__':
  bootstrapping.DisallowIncompatiblePythonVersions()
  try:
    bootstrapping.CommandStart('dev_appserver', component_id='core')
    bootstrapping.CheckUpdates('dev_appserver')
    main()
  except Exception as e:  # pylint: disable=broad-except
    # Route every failure through gcloud's standard error reporting.
    exceptions.HandleError(e, 'dev_appserver')

View File

@@ -0,0 +1,201 @@
#!/bin/sh
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# <cloud-sdk-sh-preamble>
#
# CLOUDSDK_ROOT_DIR (a) installation root dir
# CLOUDSDK_PYTHON (u) python interpreter path
# CLOUDSDK_GSUTIL_PYTHON (u) python interpreter path for gsutil
# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments
# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages
# CLOUDSDK_BQ_PYTHON (u) python interpreter for bq
# CLOUDSDK_ENCODING (u) python io encoding for gcloud
#
# (a) always defined by the preamble
# (u) user definition overrides preamble
# Wrapper around 'which' and 'command -v', tries which first, then falls back
# to command -v
_cloudsdk_which() {
which "$1" 2>/dev/null || command -v "$1" 2>/dev/null
}
order_python_no_check() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if _cloudsdk_which $python_version > /dev/null; then
selected_version=$python_version
fi
fi
done
if [ -z "$selected_version" ]; then
selected_version=python
fi
echo $selected_version
}
order_python() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if "$python_version" -c "import sys; sys.exit(0 if ((3,8) <= (sys.version_info.major, sys.version_info.minor) <= (3,13)) else 1)" > /dev/null 2>&1; then
selected_version=$python_version
fi
fi
done
echo $selected_version
}
# Determines the real cloud sdk root dir given the script path.
# Would be easier with a portable "readlink -f".
_cloudsdk_root_dir() {
case $1 in
/*) _cloudsdk_path=$1
;;
*/*) _cloudsdk_path=$PWD/$1
;;
*) _cloudsdk_path=$(_cloudsdk_which $1)
case $_cloudsdk_path in
/*) ;;
*) _cloudsdk_path=$PWD/$_cloudsdk_path ;;
esac
;;
esac
_cloudsdk_dir=0
while :
do
while _cloudsdk_link=$(readlink "$_cloudsdk_path")
do
case $_cloudsdk_link in
/*) _cloudsdk_path=$_cloudsdk_link ;;
*) _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;;
esac
done
case $_cloudsdk_dir in
1) break ;;
esac
if [ -d "${_cloudsdk_path}" ]; then
break
fi
_cloudsdk_dir=1
_cloudsdk_path=$(dirname "$_cloudsdk_path")
done
while :
do case $_cloudsdk_path in
*/) _cloudsdk_path=$(dirname "$_cloudsdk_path/.")
;;
*/.) _cloudsdk_path=$(dirname "$_cloudsdk_path")
;;
*/bin) dirname "$_cloudsdk_path"
break
;;
*) echo "$_cloudsdk_path"
break
;;
esac
done
}
CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
setup_cloudsdk_python() {
# if $CLOUDSDK_PYTHON is not set, look for bundled python else
# prefer python3 over python
if [ -z "$CLOUDSDK_PYTHON" ]; then
# Is bundled python present and working?
ARCH=$(uname -m 2>/dev/null)
if [ -x "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" ] && \
[ "$ARCH" = "x86_64" ] && \
"$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" --version > /dev/null 2>&1;
then
CLOUDSDK_PYTHON="$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3"
CLOUDSDK_PYTHON_SITEPACKAGES=1
else
GLOBAL_CONFIG="$HOME/.config/gcloud"
if [ "$CLOUDSDK_CONFIG" ];
then
GLOBAL_CONFIG="$CLOUDSDK_CONFIG"
fi
# If there is an enabled virtualenv activate it
if [ -f "$GLOBAL_CONFIG/virtenv/bin/activate" ];
then
if [ -f "$GLOBAL_CONFIG/virtenv/enabled" ];
then
. "$GLOBAL_CONFIG/virtenv/bin/activate"
fi
fi
primary_python=python3.13
CLOUDSDK_PYTHON=$(order_python python3 "$primary_python" python3.12 python3.14 python3.11 python3.10 python3.9 python)
if [ -z "$CLOUDSDK_PYTHON" ]; then
CLOUDSDK_PYTHON=$(order_python_no_check python3 python)
fi
fi
fi
}
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use
# CLOUDSDK_PYTHON to configure which python gcloud uses.
unset PYTHONHOME
# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty
case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in
:::) # add -S to CLOUDSDK_PYTHON_ARGS if not already there
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) ;;
" ") CLOUDSDK_PYTHON_ARGS="-S"
;;
*) CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S"
;;
esac
unset CLOUDSDK_PYTHON_SITEPACKAGES
;;
*) # remove -S from CLOUDSDK_PYTHON_ARGS if already there
while :; do
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;;
*) break ;;
esac
done
# if CLOUDSDK_PYTHON_SITEPACKAGES is empty
[ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] &&
CLOUDSDK_PYTHON_SITEPACKAGES=1
export CLOUDSDK_PYTHON_SITEPACKAGES
;;
esac
# Allow users to set the Python interpreter used to launch gsutil, falling
# back to the CLOUDSDK_PYTHON interpreter otherwise.
if [ -z "$CLOUDSDK_GSUTIL_PYTHON" ]; then
CLOUDSDK_GSUTIL_PYTHON="$CLOUDSDK_PYTHON"
fi
if [ -z "$CLOUDSDK_BQ_PYTHON" ]; then
CLOUDSDK_BQ_PYTHON="$CLOUDSDK_PYTHON"
fi
if [ -z "$CLOUDSDK_ENCODING" ]; then
if [ -z "$PYTHONIOENCODING" ]; then
CLOUDSDK_ENCODING=UTF-8
else
CLOUDSDK_ENCODING="$PYTHONIOENCODING"
fi
fi
export CLOUDSDK_ROOT_DIR
export CLOUDSDK_PYTHON_ARGS
export CLOUDSDK_GSUTIL_PYTHON
export CLOUDSDK_BQ_PYTHON
export CLOUDSDK_ENCODING
export PYTHONIOENCODING="$CLOUDSDK_ENCODING"
# </cloud-sdk-sh-preamble>
# Credential helpers run on every docker invocation; disable the component
# update check so they stay fast and produce no extra output.
export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=1
"${CLOUDSDK_ROOT_DIR}/bin/gcloud" auth docker-helper "$@"

View File

@@ -0,0 +1,199 @@
#!/bin/sh
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# <cloud-sdk-sh-preamble>
#
# CLOUDSDK_ROOT_DIR (a) installation root dir
# CLOUDSDK_PYTHON (u) python interpreter path
# CLOUDSDK_GSUTIL_PYTHON (u) python interpreter path for gsutil
# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments
# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages
# CLOUDSDK_BQ_PYTHON (u) python interpreter for bq
# CLOUDSDK_ENCODING (u) python io encoding for gcloud
#
# (a) always defined by the preamble
# (u) user definition overrides preamble
# Wrapper around 'which' and 'command -v', tries which first, then falls back
# to command -v
_cloudsdk_which() {
which "$1" 2>/dev/null || command -v "$1" 2>/dev/null
}
order_python_no_check() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if _cloudsdk_which $python_version > /dev/null; then
selected_version=$python_version
fi
fi
done
if [ -z "$selected_version" ]; then
selected_version=python
fi
echo $selected_version
}
order_python() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if "$python_version" -c "import sys; sys.exit(0 if ((3,8) <= (sys.version_info.major, sys.version_info.minor) <= (3,13)) else 1)" > /dev/null 2>&1; then
selected_version=$python_version
fi
fi
done
echo $selected_version
}
# Determines the real cloud sdk root dir given the script path.
# Would be easier with a portable "readlink -f".
_cloudsdk_root_dir() {
case $1 in
/*) _cloudsdk_path=$1
;;
*/*) _cloudsdk_path=$PWD/$1
;;
*) _cloudsdk_path=$(_cloudsdk_which $1)
case $_cloudsdk_path in
/*) ;;
*) _cloudsdk_path=$PWD/$_cloudsdk_path ;;
esac
;;
esac
_cloudsdk_dir=0
while :
do
while _cloudsdk_link=$(readlink "$_cloudsdk_path")
do
case $_cloudsdk_link in
/*) _cloudsdk_path=$_cloudsdk_link ;;
*) _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;;
esac
done
case $_cloudsdk_dir in
1) break ;;
esac
if [ -d "${_cloudsdk_path}" ]; then
break
fi
_cloudsdk_dir=1
_cloudsdk_path=$(dirname "$_cloudsdk_path")
done
while :
do case $_cloudsdk_path in
*/) _cloudsdk_path=$(dirname "$_cloudsdk_path/.")
;;
*/.) _cloudsdk_path=$(dirname "$_cloudsdk_path")
;;
*/bin) dirname "$_cloudsdk_path"
break
;;
*) echo "$_cloudsdk_path"
break
;;
esac
done
}
CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
setup_cloudsdk_python() {
# if $CLOUDSDK_PYTHON is not set, look for bundled python else
# prefer python3 over python
if [ -z "$CLOUDSDK_PYTHON" ]; then
# Is bundled python present and working?
ARCH=$(uname -m 2>/dev/null)
if [ -x "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" ] && \
[ "$ARCH" = "x86_64" ] && \
"$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" --version > /dev/null 2>&1;
then
CLOUDSDK_PYTHON="$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3"
CLOUDSDK_PYTHON_SITEPACKAGES=1
else
GLOBAL_CONFIG="$HOME/.config/gcloud"
if [ "$CLOUDSDK_CONFIG" ];
then
GLOBAL_CONFIG="$CLOUDSDK_CONFIG"
fi
# If there is an enabled virtualenv activate it
if [ -f "$GLOBAL_CONFIG/virtenv/bin/activate" ];
then
if [ -f "$GLOBAL_CONFIG/virtenv/enabled" ];
then
. "$GLOBAL_CONFIG/virtenv/bin/activate"
fi
fi
primary_python=python3.13
CLOUDSDK_PYTHON=$(order_python python3 "$primary_python" python3.12 python3.14 python3.11 python3.10 python3.9 python)
if [ -z "$CLOUDSDK_PYTHON" ]; then
CLOUDSDK_PYTHON=$(order_python_no_check python3 python)
fi
fi
fi
}
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use
# CLOUDSDK_PYTHON to configure which python gcloud uses.
unset PYTHONHOME
# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty
case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in
:::) # add -S to CLOUDSDK_PYTHON_ARGS if not already there
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) ;;
" ") CLOUDSDK_PYTHON_ARGS="-S"
;;
*) CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S"
;;
esac
unset CLOUDSDK_PYTHON_SITEPACKAGES
;;
*) # remove -S from CLOUDSDK_PYTHON_ARGS if already there
while :; do
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;;
*) break ;;
esac
done
# if CLOUDSDK_PYTHON_SITEPACKAGES is empty
[ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] &&
CLOUDSDK_PYTHON_SITEPACKAGES=1
export CLOUDSDK_PYTHON_SITEPACKAGES
;;
esac
# Allow users to set the Python interpreter used to launch gsutil, falling
# back to the CLOUDSDK_PYTHON interpreter otherwise.
if [ -z "$CLOUDSDK_GSUTIL_PYTHON" ]; then
CLOUDSDK_GSUTIL_PYTHON="$CLOUDSDK_PYTHON"
fi
if [ -z "$CLOUDSDK_BQ_PYTHON" ]; then
CLOUDSDK_BQ_PYTHON="$CLOUDSDK_PYTHON"
fi
if [ -z "$CLOUDSDK_ENCODING" ]; then
if [ -z "$PYTHONIOENCODING" ]; then
CLOUDSDK_ENCODING=UTF-8
else
CLOUDSDK_ENCODING="$PYTHONIOENCODING"
fi
fi
export CLOUDSDK_ROOT_DIR
export CLOUDSDK_PYTHON_ARGS
export CLOUDSDK_GSUTIL_PYTHON
export CLOUDSDK_BQ_PYTHON
export CLOUDSDK_ENCODING
export PYTHONIOENCODING="$CLOUDSDK_ENCODING"
# </cloud-sdk-sh-preamble>
# Replace this shell with gcloud's Python entry point (no child process).
exec "$CLOUDSDK_PYTHON" $CLOUDSDK_PYTHON_ARGS "${CLOUDSDK_ROOT_DIR}/lib/gcloud.py" "$@"

View File

@@ -0,0 +1,210 @@
#!/bin/sh
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This Git credential helper (see gitcredentials(7)) can be used for accessing
# Google Cloud Source Repositories:
# https://cloud.google.com/source-repositories/docs/adding-repositories-as-remotes
#
# It is also possible to configure Git to use gcloud as a credential helper
# directly, as "gcloud source repos clone" does:
#
# [credential "https://source.developers.google.com"]
# helper = !gcloud auth git-helper --ignore-unknown \"$@\"
# <cloud-sdk-sh-preamble>
#
# CLOUDSDK_ROOT_DIR (a) installation root dir
# CLOUDSDK_PYTHON (u) python interpreter path
# CLOUDSDK_GSUTIL_PYTHON (u) python interpreter path for gsutil
# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments
# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages
# CLOUDSDK_BQ_PYTHON (u) python interpreter for bq
# CLOUDSDK_ENCODING (u) python io encoding for gcloud
#
# (a) always defined by the preamble
# (u) user definition overrides preamble
# Wrapper around 'which' and 'command -v', tries which first, then falls back
# to command -v
_cloudsdk_which() {
which "$1" 2>/dev/null || command -v "$1" 2>/dev/null
}
order_python_no_check() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if _cloudsdk_which $python_version > /dev/null; then
selected_version=$python_version
fi
fi
done
if [ -z "$selected_version" ]; then
selected_version=python
fi
echo $selected_version
}
order_python() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if "$python_version" -c "import sys; sys.exit(0 if ((3,8) <= (sys.version_info.major, sys.version_info.minor) <= (3,13)) else 1)" > /dev/null 2>&1; then
selected_version=$python_version
fi
fi
done
echo $selected_version
}
# Determines the real cloud sdk root dir given the script path.
# Would be easier with a portable "readlink -f".
_cloudsdk_root_dir() {
case $1 in
/*) _cloudsdk_path=$1
;;
*/*) _cloudsdk_path=$PWD/$1
;;
*) _cloudsdk_path=$(_cloudsdk_which $1)
case $_cloudsdk_path in
/*) ;;
*) _cloudsdk_path=$PWD/$_cloudsdk_path ;;
esac
;;
esac
_cloudsdk_dir=0
while :
do
while _cloudsdk_link=$(readlink "$_cloudsdk_path")
do
case $_cloudsdk_link in
/*) _cloudsdk_path=$_cloudsdk_link ;;
*) _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;;
esac
done
case $_cloudsdk_dir in
1) break ;;
esac
if [ -d "${_cloudsdk_path}" ]; then
break
fi
_cloudsdk_dir=1
_cloudsdk_path=$(dirname "$_cloudsdk_path")
done
while :
do case $_cloudsdk_path in
*/) _cloudsdk_path=$(dirname "$_cloudsdk_path/.")
;;
*/.) _cloudsdk_path=$(dirname "$_cloudsdk_path")
;;
*/bin) dirname "$_cloudsdk_path"
break
;;
*) echo "$_cloudsdk_path"
break
;;
esac
done
}
CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
setup_cloudsdk_python() {
# if $CLOUDSDK_PYTHON is not set, look for bundled python else
# prefer python3 over python
if [ -z "$CLOUDSDK_PYTHON" ]; then
# Is bundled python present and working?
ARCH=$(uname -m 2>/dev/null)
if [ -x "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" ] && \
[ "$ARCH" = "x86_64" ] && \
"$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" --version > /dev/null 2>&1;
then
CLOUDSDK_PYTHON="$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3"
CLOUDSDK_PYTHON_SITEPACKAGES=1
else
GLOBAL_CONFIG="$HOME/.config/gcloud"
if [ "$CLOUDSDK_CONFIG" ];
then
GLOBAL_CONFIG="$CLOUDSDK_CONFIG"
fi
# If there is an enabled virtualenv activate it
if [ -f "$GLOBAL_CONFIG/virtenv/bin/activate" ];
then
if [ -f "$GLOBAL_CONFIG/virtenv/enabled" ];
then
. "$GLOBAL_CONFIG/virtenv/bin/activate"
fi
fi
primary_python=python3.13
CLOUDSDK_PYTHON=$(order_python python3 "$primary_python" python3.12 python3.14 python3.11 python3.10 python3.9 python)
if [ -z "$CLOUDSDK_PYTHON" ]; then
CLOUDSDK_PYTHON=$(order_python_no_check python3 python)
fi
fi
fi
}
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use
# CLOUDSDK_PYTHON to configure which python gcloud uses.
unset PYTHONHOME
# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty
case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in
:::) # add -S to CLOUDSDK_PYTHON_ARGS if not already there
# -S suppresses site-packages so the SDK's vendored libraries win.
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) ;;
" ") CLOUDSDK_PYTHON_ARGS="-S"
;;
*) CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S"
;;
esac
unset CLOUDSDK_PYTHON_SITEPACKAGES
;;
*) # remove -S from CLOUDSDK_PYTHON_ARGS if already there
while :; do
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;;
*) break ;;
esac
done
# if CLOUDSDK_PYTHON_SITEPACKAGES is empty
[ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] &&
CLOUDSDK_PYTHON_SITEPACKAGES=1
export CLOUDSDK_PYTHON_SITEPACKAGES
;;
esac
# Allow users to set the Python interpreter used to launch gsutil, falling
# back to the CLOUDSDK_PYTHON interpreter otherwise.
if [ -z "$CLOUDSDK_GSUTIL_PYTHON" ]; then
CLOUDSDK_GSUTIL_PYTHON="$CLOUDSDK_PYTHON"
fi
# Same override hook for bq.
if [ -z "$CLOUDSDK_BQ_PYTHON" ]; then
CLOUDSDK_BQ_PYTHON="$CLOUDSDK_PYTHON"
fi
# Default the IO encoding to UTF-8 unless the user already chose one.
if [ -z "$CLOUDSDK_ENCODING" ]; then
if [ -z "$PYTHONIOENCODING" ]; then
CLOUDSDK_ENCODING=UTF-8
else
CLOUDSDK_ENCODING="$PYTHONIOENCODING"
fi
fi
export CLOUDSDK_ROOT_DIR
export CLOUDSDK_PYTHON_ARGS
export CLOUDSDK_GSUTIL_PYTHON
export CLOUDSDK_BQ_PYTHON
export CLOUDSDK_ENCODING
export PYTHONIOENCODING="$CLOUDSDK_ENCODING"
# </cloud-sdk-sh-preamble>
# Suppress update checks, then delegate git credential requests to gcloud.
export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=1
"${CLOUDSDK_ROOT_DIR}/bin/gcloud" auth git-helper --ignore-unknown "$@"

View File

@@ -0,0 +1,201 @@
#!/bin/sh
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
CLOUDSDK_PYTHON_SITEPACKAGES=1
# <cloud-sdk-sh-preamble>
#
# CLOUDSDK_ROOT_DIR (a) installation root dir
# CLOUDSDK_PYTHON (u) python interpreter path
# CLOUDSDK_GSUTIL_PYTHON (u) python interpreter path for gsutil
# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments
# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages
# CLOUDSDK_BQ_PYTHON (u) python interpreter for bq
# CLOUDSDK_ENCODING (u) python io encoding for gcloud
#
# (a) always defined by the preamble
# (u) user definition overrides preamble
# Wrapper around 'which' and 'command -v', tries which first, then falls back
# to command -v
# Portable command lookup: try `which` first; when it is missing or finds
# nothing, fall back to `command -v`. Prints the resolved path on stdout.
_cloudsdk_which() {
if ! which "$1" 2>/dev/null; then
command -v "$1" 2>/dev/null
fi
}
# Print the first of the given interpreter names that resolves on PATH,
# without any version check; defaults to plain "python" when none resolve.
order_python_no_check() {
selected_version=""
for candidate in "$@"
do
[ -n "$selected_version" ] && continue
if _cloudsdk_which "$candidate" > /dev/null; then
selected_version=$candidate
fi
done
[ -z "$selected_version" ] && selected_version=python
echo "$selected_version"
}
# Print the first of the given interpreter names whose version lies in the
# supported window (3.8 through 3.13 inclusive); prints an empty line when
# no candidate qualifies.
order_python() {
selected_version=""
for candidate in "$@"
do
[ -n "$selected_version" ] && continue
if "$candidate" -c "import sys; sys.exit(0 if ((3,8) <= (sys.version_info.major, sys.version_info.minor) <= (3,13)) else 1)" > /dev/null 2>&1; then
selected_version=$candidate
fi
done
echo "$selected_version"
}
# Determines the real cloud sdk root dir given the script path.
# Would be easier with a portable "readlink -f".
# Determine the real Cloud SDK root directory from the invoked script path
# ($1), resolving symlinks by hand (a portable "readlink -f" substitute).
# Prints the root on stdout.
_cloudsdk_root_dir() {
# Make $1 absolute: already absolute, relative-with-slash, or a bare
# command name resolved through PATH.
case $1 in
/*) _cloudsdk_path=$1
;;
*/*) _cloudsdk_path=$PWD/$1
;;
*) _cloudsdk_path=$(_cloudsdk_which $1)
case $_cloudsdk_path in
/*) ;;
*) _cloudsdk_path=$PWD/$_cloudsdk_path ;;
esac
;;
esac
# Follow symlink chains on the path; if the result is not a directory,
# retry once on its parent (_cloudsdk_dir=1 marks the second pass).
_cloudsdk_dir=0
while :
do
while _cloudsdk_link=$(readlink "$_cloudsdk_path")
do
case $_cloudsdk_link in
/*) _cloudsdk_path=$_cloudsdk_link ;;
*) _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;;
esac
done
case $_cloudsdk_dir in
1) break ;;
esac
if [ -d "${_cloudsdk_path}" ]; then
break
fi
_cloudsdk_dir=1
_cloudsdk_path=$(dirname "$_cloudsdk_path")
done
# Normalize trailing "/" and "/."; when the script lives under .../bin the
# SDK root is that directory's parent, otherwise the resolved path itself.
while :
do case $_cloudsdk_path in
*/) _cloudsdk_path=$(dirname "$_cloudsdk_path/.")
;;
*/.) _cloudsdk_path=$(dirname "$_cloudsdk_path")
;;
*/bin) dirname "$_cloudsdk_path"
break
;;
*) echo "$_cloudsdk_path"
break
;;
esac
done
}
# Absolute, symlink-resolved Cloud SDK install root, derived from $0.
CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
# Choose the Python interpreter for the SDK. A user-set $CLOUDSDK_PYTHON
# always wins. Otherwise: bundled python (x86_64 only, if it runs), then an
# enabled virtualenv under the gcloud config dir, then the first system
# python accepted by order_python, finally an unchecked PATH lookup.
setup_cloudsdk_python() {
# if $CLOUDSDK_PYTHON is not set, look for bundled python else
# prefer python3 over python
if [ -z "$CLOUDSDK_PYTHON" ]; then
# Is bundled python present and working?
ARCH=$(uname -m 2>/dev/null)
if [ -x "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" ] && \
     [ "$ARCH" = "x86_64" ] && \
     "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" --version > /dev/null 2>&1;
then
CLOUDSDK_PYTHON="$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3"
# Bundled python ships its own site-packages; keep them enabled.
CLOUDSDK_PYTHON_SITEPACKAGES=1
else
GLOBAL_CONFIG="$HOME/.config/gcloud"
# $CLOUDSDK_CONFIG overrides the default config directory.
if [ "$CLOUDSDK_CONFIG" ];
then
GLOBAL_CONFIG="$CLOUDSDK_CONFIG"
fi
# If there is an enabled virtualenv activate it
if [ -f "$GLOBAL_CONFIG/virtenv/bin/activate" ];
then
if [ -f "$GLOBAL_CONFIG/virtenv/enabled" ];
then
. "$GLOBAL_CONFIG/virtenv/bin/activate"
fi
fi
# order_python only accepts interpreters in the supported version range;
# fall back to an unchecked lookup when none qualify.
# NOTE(review): python3.14 is listed but order_python's range check tops
# out at 3.13, so it can never be selected here — confirm intended.
primary_python=python3.13
CLOUDSDK_PYTHON=$(order_python python3 "$primary_python" python3.12 python3.14 python3.11 python3.10 python3.9 python)
if [ -z "$CLOUDSDK_PYTHON" ]; then
CLOUDSDK_PYTHON=$(order_python_no_check python3 python)
fi
fi
fi
}
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use
# CLOUDSDK_PYTHON to configure which python gcloud uses.
unset PYTHONHOME
# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty
case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in
:::) # add -S to CLOUDSDK_PYTHON_ARGS if not already there
# -S suppresses site-packages so the SDK's vendored libraries win.
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) ;;
" ") CLOUDSDK_PYTHON_ARGS="-S"
;;
*) CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S"
;;
esac
unset CLOUDSDK_PYTHON_SITEPACKAGES
;;
*) # remove -S from CLOUDSDK_PYTHON_ARGS if already there
while :; do
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;;
*) break ;;
esac
done
# if CLOUDSDK_PYTHON_SITEPACKAGES is empty
[ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] &&
CLOUDSDK_PYTHON_SITEPACKAGES=1
export CLOUDSDK_PYTHON_SITEPACKAGES
;;
esac
# Allow users to set the Python interpreter used to launch gsutil, falling
# back to the CLOUDSDK_PYTHON interpreter otherwise.
if [ -z "$CLOUDSDK_GSUTIL_PYTHON" ]; then
CLOUDSDK_GSUTIL_PYTHON="$CLOUDSDK_PYTHON"
fi
# Same override hook for bq.
if [ -z "$CLOUDSDK_BQ_PYTHON" ]; then
CLOUDSDK_BQ_PYTHON="$CLOUDSDK_PYTHON"
fi
# Default the IO encoding to UTF-8 unless the user already chose one.
if [ -z "$CLOUDSDK_ENCODING" ]; then
if [ -z "$PYTHONIOENCODING" ]; then
CLOUDSDK_ENCODING=UTF-8
else
CLOUDSDK_ENCODING="$PYTHONIOENCODING"
fi
fi
export CLOUDSDK_ROOT_DIR
export CLOUDSDK_PYTHON_ARGS
export CLOUDSDK_GSUTIL_PYTHON
export CLOUDSDK_BQ_PYTHON
export CLOUDSDK_ENCODING
export PYTHONIOENCODING="$CLOUDSDK_ENCODING"
# </cloud-sdk-sh-preamble>
# Launch gsutil's bootstrap under the chosen interpreter.
# NOTE(review): unlike the java_dev_appserver launcher this does not `exec`,
# so the wrapper shell stays resident — confirm intended.
"$CLOUDSDK_GSUTIL_PYTHON" $CLOUDSDK_PYTHON_ARGS "${CLOUDSDK_ROOT_DIR}/bin/bootstrapping/gsutil.py" "$@"

View File

@@ -0,0 +1,199 @@
#!/bin/sh
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# <cloud-sdk-sh-preamble>
#
# CLOUDSDK_ROOT_DIR (a) installation root dir
# CLOUDSDK_PYTHON (u) python interpreter path
# CLOUDSDK_GSUTIL_PYTHON (u) python interpreter path for gsutil
# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments
# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages
# CLOUDSDK_BQ_PYTHON (u) python interpreter for bq
# CLOUDSDK_ENCODING (u) python io encoding for gcloud
#
# (a) always defined by the preamble
# (u) user definition overrides preamble
# Wrapper around 'which' and 'command -v', tries which first, then falls back
# to command -v
# Resolve $1 to a command path: prefer `which`, fall back to `command -v`.
_cloudsdk_which() {
which "$1" 2>/dev/null || command -v "$1" 2>/dev/null
}
# Echo the first candidate from "$@" found on PATH, without any version
# check; defaults to plain "python" when none resolve.
order_python_no_check() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if _cloudsdk_which $python_version > /dev/null; then
selected_version=$python_version
fi
fi
done
if [ -z "$selected_version" ]; then
selected_version=python
fi
echo $selected_version
}
# Echo the first candidate from "$@" whose version lies in the supported
# window (3.8 through 3.13 inclusive); echoes nothing when none qualify.
order_python() {
selected_version=""
for python_version in "$@"
do
if [ -z "$selected_version" ]; then
if "$python_version" -c "import sys; sys.exit(0 if ((3,8) <= (sys.version_info.major, sys.version_info.minor) <= (3,13)) else 1)" > /dev/null 2>&1; then
selected_version=$python_version
fi
fi
done
echo $selected_version
}
# Determines the real cloud sdk root dir given the script path.
# Would be easier with a portable "readlink -f".
# Determine the real Cloud SDK root directory from the invoked script path
# ($1), resolving symlinks by hand (a portable "readlink -f" substitute).
# Prints the root on stdout.
_cloudsdk_root_dir() {
# Make $1 absolute: already absolute, relative-with-slash, or a bare
# command name resolved through PATH.
case $1 in
/*) _cloudsdk_path=$1
;;
*/*) _cloudsdk_path=$PWD/$1
;;
*) _cloudsdk_path=$(_cloudsdk_which $1)
case $_cloudsdk_path in
/*) ;;
*) _cloudsdk_path=$PWD/$_cloudsdk_path ;;
esac
;;
esac
# Follow symlink chains on the path; if the result is not a directory,
# retry once on its parent (_cloudsdk_dir=1 marks the second pass).
_cloudsdk_dir=0
while :
do
while _cloudsdk_link=$(readlink "$_cloudsdk_path")
do
case $_cloudsdk_link in
/*) _cloudsdk_path=$_cloudsdk_link ;;
*) _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;;
esac
done
case $_cloudsdk_dir in
1) break ;;
esac
if [ -d "${_cloudsdk_path}" ]; then
break
fi
_cloudsdk_dir=1
_cloudsdk_path=$(dirname "$_cloudsdk_path")
done
# Normalize trailing "/" and "/."; when the script lives under .../bin the
# SDK root is that directory's parent, otherwise the resolved path itself.
while :
do case $_cloudsdk_path in
*/) _cloudsdk_path=$(dirname "$_cloudsdk_path/.")
;;
*/.) _cloudsdk_path=$(dirname "$_cloudsdk_path")
;;
*/bin) dirname "$_cloudsdk_path"
break
;;
*) echo "$_cloudsdk_path"
break
;;
esac
done
}
# Absolute, symlink-resolved Cloud SDK install root, derived from $0.
CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
# Choose the Python interpreter for the SDK. A user-set $CLOUDSDK_PYTHON
# always wins. Otherwise: bundled python (x86_64 only, if it runs), then an
# enabled virtualenv under the gcloud config dir, then the first system
# python accepted by order_python, finally an unchecked PATH lookup.
setup_cloudsdk_python() {
# if $CLOUDSDK_PYTHON is not set, look for bundled python else
# prefer python3 over python
if [ -z "$CLOUDSDK_PYTHON" ]; then
# Is bundled python present and working?
ARCH=$(uname -m 2>/dev/null)
if [ -x "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" ] && \
     [ "$ARCH" = "x86_64" ] && \
     "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" --version > /dev/null 2>&1;
then
CLOUDSDK_PYTHON="$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3"
# Bundled python ships its own site-packages; keep them enabled.
CLOUDSDK_PYTHON_SITEPACKAGES=1
else
GLOBAL_CONFIG="$HOME/.config/gcloud"
# $CLOUDSDK_CONFIG overrides the default config directory.
if [ "$CLOUDSDK_CONFIG" ];
then
GLOBAL_CONFIG="$CLOUDSDK_CONFIG"
fi
# If there is an enabled virtualenv activate it
if [ -f "$GLOBAL_CONFIG/virtenv/bin/activate" ];
then
if [ -f "$GLOBAL_CONFIG/virtenv/enabled" ];
then
. "$GLOBAL_CONFIG/virtenv/bin/activate"
fi
fi
# order_python only accepts interpreters in the supported version range;
# fall back to an unchecked lookup when none qualify.
# NOTE(review): python3.14 is listed but order_python's range check tops
# out at 3.13, so it can never be selected here — confirm intended.
primary_python=python3.13
CLOUDSDK_PYTHON=$(order_python python3 "$primary_python" python3.12 python3.14 python3.11 python3.10 python3.9 python)
if [ -z "$CLOUDSDK_PYTHON" ]; then
CLOUDSDK_PYTHON=$(order_python_no_check python3 python)
fi
fi
fi
}
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use
# CLOUDSDK_PYTHON to configure which python gcloud uses.
unset PYTHONHOME
# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty
case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in
:::) # add -S to CLOUDSDK_PYTHON_ARGS if not already there
# -S suppresses site-packages so the SDK's vendored libraries win.
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) ;;
" ") CLOUDSDK_PYTHON_ARGS="-S"
;;
*) CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S"
;;
esac
unset CLOUDSDK_PYTHON_SITEPACKAGES
;;
*) # remove -S from CLOUDSDK_PYTHON_ARGS if already there
while :; do
case " $CLOUDSDK_PYTHON_ARGS " in
*" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;;
*) break ;;
esac
done
# if CLOUDSDK_PYTHON_SITEPACKAGES is empty
[ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] &&
CLOUDSDK_PYTHON_SITEPACKAGES=1
export CLOUDSDK_PYTHON_SITEPACKAGES
;;
esac
# Allow users to set the Python interpreter used to launch gsutil, falling
# back to the CLOUDSDK_PYTHON interpreter otherwise.
if [ -z "$CLOUDSDK_GSUTIL_PYTHON" ]; then
CLOUDSDK_GSUTIL_PYTHON="$CLOUDSDK_PYTHON"
fi
# Same override hook for bq.
if [ -z "$CLOUDSDK_BQ_PYTHON" ]; then
CLOUDSDK_BQ_PYTHON="$CLOUDSDK_PYTHON"
fi
# Default the IO encoding to UTF-8 unless the user already chose one.
if [ -z "$CLOUDSDK_ENCODING" ]; then
if [ -z "$PYTHONIOENCODING" ]; then
CLOUDSDK_ENCODING=UTF-8
else
CLOUDSDK_ENCODING="$PYTHONIOENCODING"
fi
fi
export CLOUDSDK_ROOT_DIR
export CLOUDSDK_PYTHON_ARGS
export CLOUDSDK_GSUTIL_PYTHON
export CLOUDSDK_BQ_PYTHON
export CLOUDSDK_ENCODING
export PYTHONIOENCODING="$CLOUDSDK_ENCODING"
# </cloud-sdk-sh-preamble>
# Replace this shell with the dev appserver bootstrap.
exec "$CLOUDSDK_PYTHON" $CLOUDSDK_PYTHON_ARGS "${CLOUDSDK_ROOT_DIR}/bin/bootstrapping/java_dev_appserver.py" "$@"

View File

@@ -0,0 +1,82 @@
# Bash completion driver for gcloud: normalizes COMP_LINE/COMP_WORDS for
# argcomplete, invokes the command with _ARGCOMPLETE=1 (replies arrive on
# fd 8), and post-processes COMPREPLY. Handles --flag=value, ssh user@host,
# and comma-separated list arguments specially.
_python_argcomplete() {
local IFS=' '
local prefix=
typeset -i n
(( lastw=${#COMP_WORDS[@]} -1))
if [[ ${COMP_WORDS[lastw]} == --*=* ]]; then
# for bash version 3.2
flag=${COMP_WORDS[lastw]%%=*}
set -- "$1" "$2" '='
elif [[ $3 == '=' ]]; then
flag=${COMP_WORDS[-3]}
fi
if [[ $3 == ssh && $2 == *@* ]] ;then
# handle ssh user@instance specially
prefix=${2%@*}@
COMP_LINE=${COMP_LINE%$2}"${2#*@}"
elif [[ $3 == '=' ]] ; then
# handle --flag=value
prefix=$flag=$2
line=${COMP_LINE%$prefix};
COMP_LINE=$line${prefix/=/ };
prefix=
fi
if [[ $2 == *,* ]]; then
# handle , separated list
prefix=${2%,*},
set -- "$1" "${2#$prefix}" "$3"
# Fix: was `COMP_LINE==...`, which assigned a spurious leading '=' into
# COMP_LINE (VAR==v sets VAR to "=v") and corrupted the line passed to
# argcomplete for comma-separated values.
COMP_LINE=${COMP_LINE%$prefix*}$2
fi
# Treat --flag=<TAB> as --flag <TAB> to work around bash 4.x bug
if [[ ${COMP_LINE} == *= && ${COMP_WORDS[-2]} == --* ]]; then
COMP_LINE=${COMP_LINE%=}' '
fi
COMPREPLY=( $(IFS="$IFS" COMP_LINE="$COMP_LINE" COMP_POINT="$COMP_POINT" _ARGCOMPLETE_COMP_WORDBREAKS="$COMP_WORDBREAKS" _ARGCOMPLETE=1 "$1" 8>&1 9>&2 1>/dev/null 2>/dev/null) )
if [[ $? != 0 ]]; then
unset COMPREPLY
return
fi
# Re-attach any prefix stripped above to every reply.
if [[ $prefix != '' ]]; then
for ((n=0; n < ${#COMPREPLY[@]}; n++)); do
COMPREPLY[$n]=$prefix${COMPREPLY[$n]}
done
fi
for ((n=0; n < ${#COMPREPLY[@]}; n++)); do
match=${COMPREPLY[$n]%' '}
if [[ $match != '' ]]; then
COMPREPLY[$n]=${match//? /' '}' '
fi
done
# if flags argument has a single completion and ends in '= ', delete ' '
if [[ ${#COMPREPLY[@]} == 1 && ${COMPREPLY[0]} == -* &&
${COMPREPLY[0]} == *'= ' ]]; then
COMPREPLY[0]=${COMPREPLY[0]%' '}
fi
}
complete -o nospace -F _python_argcomplete "gcloud"
# Generic first-token completer: runs `$1` once, caches its output in the
# dynamically named variable ${2}_COMMANDS (via eval), and completes the
# first non-flag word of COMP_LINE against that list.
_completer() {
command=$1
name=$2
# Populate the cache variable only on first use.
eval '[[ "$'"${name}"'_COMMANDS" ]] || '"${name}"'_COMMANDS="$('"${command}"')"'
# Re-split COMP_LINE; drop the program name and any leading flags.
set -- $COMP_LINE
shift
while [[ $1 == -* ]]; do
shift
done
# A subcommand is already present: nothing left to complete.
[[ $2 ]] && return
grep -q "${name}\s*$" <<< $COMP_LINE &&
eval 'COMPREPLY=($'"${name}"'_COMMANDS)' &&
return
[[ "$COMP_LINE" == *" " ]] && return
# Partial word typed: offer cached commands matching that prefix.
[[ $1 ]] &&
eval 'COMPREPLY=($(echo "$'"${name}"'_COMMANDS" | grep ^'"$1"'))'
}
# Clear any stale cache, then wire up bq and gsutil completion.
unset bq_COMMANDS
# Complete bq subcommands by scraping the first column of `bq help` output
# (result cached by _completer in $bq_COMMANDS).
_bq_completer() {
_completer "CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=1 bq help | grep '^[^ ][^ ]* ' | sed 's/ .*//'" bq
}
complete -F _bq_completer bq
complete -o nospace -F _python_argcomplete gsutil

View File

@@ -0,0 +1,64 @@
# Zsh support: load bashcompinit so bash-style `complete` calls work, and
# initialize compinit only if compdef is not already defined.
autoload -U +X bashcompinit && bashcompinit
zmodload -i zsh/parameter
if ! (( $+functions[compdef] )) ; then
autoload -U +X compinit && compinit
fi
# Zsh (via bashcompinit) completion driver for gcloud: rewrites COMP_LINE
# for `ssh user@host` and `--flag=value` arguments, then asks the command
# itself for completions with _ARGCOMPLETE=1 (replies arrive on fd 8).
_python_argcomplete() {
local prefix=
if [[ $COMP_LINE == 'gcloud '* ]]; then
if [[ $3 == ssh && $2 == *@* ]] ;then
# handle ssh user@instance specially
prefix=${2%@*}@
COMP_LINE=${COMP_LINE%$2}"${2#*@}"
elif [[ $2 == *'='* ]] ; then
# handle --flag=value
prefix=${2%=*}'='
COMP_LINE=${COMP_LINE%$2}${2/'='/' '}
fi
fi
local IFS=' '
COMPREPLY=( $(IFS="$IFS" COMP_LINE="$COMP_LINE" COMP_POINT="$COMP_POINT" _ARGCOMPLETE_COMP_WORDBREAKS="$COMP_WORDBREAKS" _ARGCOMPLETE=1 "$1" 8>&1 9>&2 1>/dev/null 2>/dev/null) )
if [[ $? != 0 ]]; then
unset COMPREPLY
return
fi
# if one completion without a trailing space, add the space
if [[ ${#COMPREPLY[@]} == 1 && $COMPREPLY != *[=' '] ]]; then
COMPREPLY+=' '
fi
# Re-attach any prefix stripped above to every reply.
if [[ $prefix != '' ]]; then
typeset -i n
for ((n=0; n < ${#COMPREPLY[@]}; n++));do
COMPREPLY[$n]=$prefix${COMPREPLY[$n]}
done
fi
}
complete -o nospace -o default -F _python_argcomplete "gcloud"
# Generic first-token completer (zsh variant): runs `$1` once, caches its
# output in the dynamically named variable ${2}_COMMANDS (via eval), and
# completes the first non-flag word of COMP_LINE against that list.
_completer() {
command=$1
name=$2
# Populate the cache variable only on first use.
eval '[[ -n "$'"${name}"'_COMMANDS" ]] || '"${name}"'_COMMANDS="$('"${command}"')"'
# Re-split COMP_LINE; drop the program name and any leading flags.
set -- $COMP_LINE
shift
while [[ $1 == -* ]]; do
shift
done
# A subcommand is already present: nothing left to complete.
[[ -n "$2" ]] && return
grep -q "${name}\s*$" <<< $COMP_LINE &&
eval 'COMPREPLY=($'"${name}"'_COMMANDS)' &&
return
[[ "$COMP_LINE" == *" " ]] && return
# Partial word typed: offer cached commands matching that prefix.
[[ -n "$1" ]] &&
eval 'COMPREPLY=($(echo "$'"${name}"'_COMMANDS" | grep ^'"$1"'))'
}
# Clear any stale cache, then wire up bq and gsutil completion.
unset bq_COMMANDS
# Complete bq subcommands by scraping the first column of `bq help` output
# (result cached by _completer in $bq_COMMANDS).
_bq_completer() {
_completer "CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=1 bq help | grep '^[^ ][^ ]* ' | sed 's/ .*//'" bq
}
complete -o default -F _bq_completer bq
complete -o nospace -F _python_argcomplete gsutil

View File

@@ -0,0 +1,985 @@
{
"CLI_VERSION": "2.1.26",
"VERSION": "1",
"capsule": "",
"commands": {
"add-iam-policy-binding": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"add-iam-policy-binding"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Add a binding to a BigQuery resource's policy in IAM.\n\nUsage:\nadd-iam-policy-binding --member=<member>\n--role=<role> <identifier>\n\nOne binding consists of a member and a role, which\nare specified with (required) flags.\n",
"EXAMPLES": "\nbq add-iam-policy-binding \\\n--member='user:myaccount@gmail.com' \\\n--role='roles/bigquery.dataViewer' \\\ntable1\n\nbq add-iam-policy-binding \\\n--member='serviceAccount:my.service.account@my-\ndomain.com' \\\n--role='roles/bigquery.dataEditor' \\\nproject1:dataset1.table1\n\nbq add-iam-policy-binding \\\n--member='allAuthenticatedUsers' \\\n--role='roles/bigquery.dataViewer' \\\n--project_id=proj -t ds.table1\n"
}
},
"cancel": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"cancel"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Request a cancel and waits for the job to be\ncancelled.\n\nRequests a cancel and then either: a) waits until the\njob is done if the sync flag is set [default], or b)\nreturns immediately if the sync flag is not set. Not\nall job types support a cancel, an error is returned\nif it cannot be cancelled. Even for jobs that support\na cancel, success is not guaranteed, the job may have\ncompleted by the time the cancel request is noticed,\nor the job may be in a stage where it cannot be\ncancelled.\n",
"EXAMPLES": "bq cancel job_id # Requests a cancel and waits until\nthe job is done.\nbq --nosync cancel job_id # Requests a cancel and\nreturns immediately.\n"
}
},
"cp": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"cp"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Copies one table to another.\n",
"EXAMPLES": "bq cp dataset.old_table dataset2.new_table\nbq cp --destination_kms_key=kms_key dataset.old_table\ndataset2.new_table\n"
}
},
"extract": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"extract"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Perform an extract operation of source into\ndestination_uris.\n\nUsage:\nextract <source_table> <destination_uris>\n\nUse -m option to extract a source_model.\n",
"EXAMPLES": "bq extract ds.table gs://mybucket/table.csv\nbq extract -m ds.model gs://mybucket/model\n"
}
},
"get-iam-policy": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"get-iam-policy"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Get the IAM policy for a resource.\n\nGets the IAM policy for a dataset, table, routine,\nconnection, or reservation resource, and prints it to\nstdout. The policy is in JSON format.\n\nUsage: get-iam-policy <identifier>\n",
"EXAMPLES": "bq get-iam-policy ds.table1\nbq get-iam-policy --project_id=proj -t ds.table1\nbq get-iam-policy proj:ds.table1\nbq get-iam-policy --reservation proj:ds.reservation1\n"
}
},
"head": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"head"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Displays rows in a table.\n",
"EXAMPLES": "bq head dataset.table\nbq head -j job\nbq head -n 10 dataset.table\nbq head -s 5 -n 10 dataset.table\n"
}
},
"help": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"help"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Help for all or selected command:\nbq help [<command>]\n\nTo retrieve help with global flags:\nbq --help\n\nTo retrieve help with flags only from the main\nmodule:\nbq --helpshort [<command>]\n"
}
},
"info": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"info"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Return the execution information of bq.\n"
}
},
"init": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"init"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Authenticate and create a default .bigqueryrc file.\n"
}
},
"insert": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"insert"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Inserts rows in a table.\n\nInserts the records formatted as newline delimited\nJSON from file into the specified table. If file is\nnot specified, reads from stdin. If there were any\ninsert errors it prints the errors to stdout.\n",
"EXAMPLES": "bq insert dataset.table /tmp/mydata.json\necho '{\"a\":1, \"b\":2}' | bq insert dataset.table\n\nTemplate table examples: Insert to\ndataset.table_suffix table using dataset.table table\nas its template.\nbq insert -x=_suffix dataset.table /tmp/mydata.json\n"
}
},
"load": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"load"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Perform a load operation of source into\ndestination_table.\n\nUsage:\nload <destination_table> <source> [<schema>]\n[--session_id=[session]]\n\nThe <destination_table> is the fully-qualified table\nname of table to create, or append to if the table\nalready exists.\n\nTo load to a temporary table, specify the table name\nin <destination_table> without a dataset and specify\nthe session id with --session_id.\n\nThe <source> argument can be a path to a single local\nfile, or a comma-separated list of URIs.\n\nThe <schema> argument should be either the name of a\nJSON file or a text schema. This schema should be\nomitted if the table already has one.\n\nIn the case that the schema is provided in text form,\nit should be a comma-separated list of entries of the\nform name[:type], where type will default to string\nif not specified.\n\nIn the case that <schema> is a filename, it should be\na JSON file containing a single array, each entry of\nwhich should be an object with properties 'name',\n'type', and (optionally) 'mode'. For more detail: htt\nps://cloud.google.com/bigquery/docs/schemas#specifyin\ng_a_json_schema_file\n\nNote: the case of a single-entry schema with no type\nspecified is ambiguous; one can use name:string to\nforce interpretation as a text schema.\n",
"EXAMPLES": "bq load ds.new_tbl ./info.csv ./info_schema.json\nbq load ds.new_tbl gs://mybucket/info.csv\n./info_schema.json\nbq load ds.small gs://mybucket/small.csv\nname:integer,value:string\nbq load ds.small gs://mybucket/small.csv\nfield1,field2,field3\nbq load temp_tbl --session_id=my_session ./info.csv\n./info_schema.json\n"
}
},
"ls": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"ls"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "List the objects contained in the named collection.\n\nList the objects in the named project or dataset. A\ntrailing : or . can be used to signify a project or\ndataset.\n* With -j, show the jobs in the named project.\n* With -p, show all projects.\n",
"EXAMPLES": "bq ls\nbq ls -j proj\nbq ls -j --filter:'states:RUNNING,PENDING' proj\nbq ls -p -n 1000\nbq ls mydataset\nbq ls -a\nbq ls -m mydataset\nbq ls --routines mydataset\nbq ls --row_access_policies mytable (requires\nwhitelisting)\nbq ls --filter labels.color:red\nbq ls --filter 'labels.color:red labels.size:*'\nbq ls --transfer_config --transfer_location='us'\n--filter='dataSourceIds:play,adwords'\nbq ls --transfer_run\n--filter='states:SUCCEEDED,PENDING'\n--run_attempt='LATEST'\nprojects/p/locations/l/transferConfigs/c\nbq ls --transfer_log\n--message_type='messageTypes:INFO,ERROR'\nprojects/p/locations/l/transferConfigs/c/runs/r\nbq ls --capacity_commitment --project_id=proj\n--location='us'\nbq ls --reservation --project_id=proj --location='us'\nbq ls --reservation_assignment --project_id=proj\n--location='us'\nbq ls --reservation_assignment --project_id=proj\n--location='us'\n<reservation_id>\nbq ls --reservation_group --project_id=proj\n--location='us'\nbq ls --connection --project_id=proj --location=us\nbq ls --migration_workflow --project_id=proj\n--location=us\n"
}
},
"mk": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"mk"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Create a dataset, table, view, or transfer\nconfiguration with this name.\n\nSee 'bq help mk' for more information.\n",
"EXAMPLES": "bq mk new_dataset\nbq mk new_dataset.new_table\nbq --dataset_id=new_dataset mk table\nbq mk -t new_dataset.newtable\nname:integer,value:string\nbq mk --view='select 1 as num' new_dataset.newview\n(--view_udf_resource=path/to/file.js)\nbq mk --materialized_view='select sum(x) as sum_x\nfrom dataset.table'\nnew_dataset.newview\nbq mk -d --data_location=EU new_dataset\nbq mk -d --source_dataset=src_dataset new_dataset\n(requires allowlisting)\nbq mk -d\n--external_source=aws-\nglue://<aws_arn_of_glue_database>\n--connection_id=<connection>\nnew_dataset\nbq mk --transfer_config --target_dataset=dataset\n--display_name=name\n-p='{\"param\":\"value\"}' --data_source=source\n--schedule_start_time={schedule_start_time}\n--schedule_end_time={schedule_end_time}\nbq mk --transfer_run --start_time={start_time}\n--end_time={end_time}\nprojects/p/locations/l/transferConfigs/c\nbq mk --transfer_run --run_time={run_time}\nprojects/p/locations/l/transferConfigs/c\nbq mk --reservation --project_id=project\n--location=us reservation_name\nbq mk --reservation_assignment\n--reservation_id=project:us.dev\n--job_type=QUERY --assignee_type=PROJECT\n--assignee_id=myproject\nbq mk --reservation_assignment\n--reservation_id=project:us.dev\n--job_type=QUERY --assignee_type=FOLDER\n--assignee_id=123\nbq mk --reservation_assignment\n--reservation_id=project:us.dev\n--job_type=QUERY --assignee_type=ORGANIZATION\n--assignee_id=456\nbq mk --reservation_group --project_id=project\n--location=us\nreservation_group_name\nbq mk --connection --connection_type='CLOUD_SQL'\n--properties='{\"instanceId\" : \"instance\",\n\"database\" : \"db\", \"type\" : \"MYSQL\" }'\n--connection_credential='{\"username\":\"u\",\n\"password\":\"p\"}'\n--project_id=proj --location=us --display_name=name\nnew_connection\nbq mk --row_access_policy 
--policy_id=new_policy\n--target_table='existing_dataset.existing_table'\n--grantees='user:user1@google.com,group:group1@google\n.com'\n--filter_predicate='Region=\"US\"'\nbq mk --source=file.json\nbq mk --migration_workflow --location=us\n--config_file=file.json\n"
}
},
"mkdef": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"mkdef"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Emits a definition in JSON for an external table,\nsuch as GCS.\n\nThe output of this command can be redirected to a\nfile and used for the external_table_definition flag\nwith the \"bq query\" and \"bq mk\" commands. It produces\na definition with the most commonly used values for\noptions. You can modify the output to override option\nvalues.\n\nThe <source_uris> argument is a comma-separated list\nof URIs indicating the data referenced by this\nexternal table.\n\nThe <schema> argument should be either the name of a\nJSON file or a text schema.\n\nIn the case that the schema is provided in text form,\nit should be a comma-separated list of entries of the\nform name[:type], where type will default to string\nif not specified.\n\nIn the case that <schema> is a filename, it should be\na JSON file containing a single array, each entry of\nwhich should be an object with properties 'name',\n'type', and (optionally) 'mode'. For more detail: htt\nps://cloud.google.com/bigquery/docs/schemas#specifyin\ng_a_json_schema_file\n\nNote: the case of a single-entry schema with no type\nspecified is ambiguous; one can use name:string to\nforce interpretation as a text schema.\n\nUsage:\nmkdef <source_uris> [<schema>]\n",
"EXAMPLES": "bq mkdef 'gs://bucket/file.csv'\nfield1:integer,field2:string\n"
}
},
"partition": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"partition"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Copies source tables into partitioned tables.\n\nUsage: bq partition <source_table_prefix>\n<destination_partitioned_table>\n\nCopies tables of the format\n<source_table_prefix><time_unit_suffix> to a\ndestination partitioned table, with the\n<time_unit_suffix> of the source tables becoming the\npartition ID of the destination table partitions. The\nsuffix is <YYYYmmdd> by default, <YYYY> if the\ntime_partitioning_type flag is set to YEAR, <YYYYmm>\nif set to MONTH, and <YYYYmmddHH> if set to HOUR.\n\nIf the destination table does not exist, one will be\ncreated with a schema and that matches the last table\nthat matches the supplied prefix.\n",
"EXAMPLES": "bq partition dataset1.sharded_\ndataset2.partitioned_table\n"
}
},
"query": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"query"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Execute a query.\n\nQuery should be specified on command line, or passed\non stdin.\n",
"EXAMPLES": "bq query 'select count(*) from\npublicdata:samples.shakespeare'\necho 'select count(*) from\npublicdata:samples.shakespeare' | bq query\n\nUsage:\nquery [<sql_query>]\n\nTo cancel a query job, run `bq cancel job_id`.\n"
}
},
"remove-iam-policy-binding": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"remove-iam-policy-binding"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Remove a binding from a BigQuery resource's policy in\nIAM.\n\nUsage:\nremove-iam-policy-binding --member=<member>\n--role=<role> <identifier>\n\nOne binding consists of a member and a role, which\nare specified with (required) flags.\n",
"EXAMPLES": "\nbq remove-iam-policy-binding \\\n--member='user:myaccount@gmail.com' \\\n--role='roles/bigquery.dataViewer' \\\ntable1\n\nbq remove-iam-policy-binding \\\n--member='serviceAccount:my.service.account@my-\ndomain.com' \\\n--role='roles/bigquery.dataEditor' \\\nproject1:dataset1.table1\n\nbq remove-iam-policy-binding \\\n--member='allAuthenticatedUsers' \\\n--role='roles/bigquery.dataViewer' \\\n--project_id=proj -t ds.table1\n"
}
},
"rm": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"rm"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Delete the resource described by the identifier.\n\nAlways requires an identifier, unlike the show and ls\ncommands. By default, also requires confirmation\nbefore deleting. Supports the -d -t flags to signify\nthat the identifier is a dataset or table.\n* With -f, don't ask for confirmation before\ndeleting.\n* With -r, remove all tables in the named dataset.\n",
"EXAMPLES": "bq rm ds.table\nbq rm -m ds.model\nbq rm --routine ds.routine\nbq rm -r -f old_dataset\nbq rm --transfer_config=projects/p/locations/l/transf\nerConfigs/c\nbq rm --connection --project_id=proj --location=us\ncon\nbq rm --capacity_commitment\nproj:US.capacity_commitment_id\nbq rm --reservation --project_id=proj --location=us\nreservation_name\nbq rm --reservation_assignment --project_id=proj\n--location=us\nassignment_name\nbq rm --reservation_group --project_id=proj\n--location=us\nreservation_group_name\n"
}
},
"set-iam-policy": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"set-iam-policy"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Set the IAM policy for a resource.\n\nSets the IAM policy for a dataset, table, routine,\nconnection, or reservation resource. After setting\nthe policy, the new policy is printed to stdout.\nPolicies are in JSON format.\n\nIf the 'etag' field is present in the policy, it must\nmatch the value in the current policy, which can be\nobtained with 'bq get-iam-policy'. Otherwise this\ncommand will fail. This feature allows users to\nprevent concurrent updates.\n\nUsage: set-iam-policy <identifier> <filename>\n",
"EXAMPLES": "bq set-iam-policy ds.table1 /tmp/policy.json\nbq set-iam-policy --project_id=proj -t ds.table1\n/tmp/policy.json\nbq set-iam-policy proj:ds.table1 /tmp/policy.json\nbq set-iam-policy --reservation proj:ds.reservation1\n/tmp/policy.json\n"
}
},
"shell": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"shell"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Start an interactive bq session.\n"
}
},
"show": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"show"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Show all information about an object.\n",
"EXAMPLES": "bq show -j <job_id>\nbq show dataset\nbq show [--schema] dataset.table\nbq show [--view] dataset.view\nbq show [--materialized_view]\ndataset.materialized_view\nbq show -m ds.model\nbq show --routine ds.routine\nbq show --transfer_config\nprojects/p/locations/l/transferConfigs/c\nbq show --transfer_run\nprojects/p/locations/l/transferConfigs/c/runs/r\nbq show --encryption_service_account\nbq show --connection --project_id=project\n--location=us connection\nbq show --capacity_commitment\nproject:US.capacity_commitment_id\nbq show --reservation --location=US\n--project_id=project reservation_name\nbq show --reservation_assignment --project_id=project\n--location=US\n--assignee_type=PROJECT --assignee_id=myproject\n--job_type=QUERY\nbq show --reservation_assignment --project_id=project\n--location=US\n--assignee_type=FOLDER --assignee_id=123\n--job_type=QUERY\nbq show --reservation_assignment --project_id=project\n--location=US\n--assignee_type=ORGANIZATION --assignee_id=456\n--job_type=QUERY\nbq show --reservation_group --location=US\n--project_id=project\nreservation_group_name\nbq show --migration_workflow\nprojects/p/locations/l/workflows/workflow_id\n"
}
},
"truncate": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"truncate"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Truncates table/dataset/project to a particular\ntimestamp.\n",
"EXAMPLES": "bq truncate project_id:dataset\nbq truncate --overwrite project_id:dataset\n--timestamp 123456789\nbq truncate --skip_fully_replicated_tables=false\nproject_id:dataset\n"
}
},
"undelete": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"undelete"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Undelete the dataset described by identifier.\n\nAlways requires an identifier, unlike the show and ls\ncommands. By default, also requires confirmation\nbefore undeleting. Supports:\n- timestamp[int]: This signifies the timestamp\nversion of the dataset that\nneeds to be restored, this should be in milliseconds\n",
"EXAMPLES": "bq undelete dataset\nbq undelete --timestamp 1714720875568 dataset\n"
}
},
"update": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"update"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Updates a dataset, table, view or transfer\nconfiguration with this name.\n\nSee 'bq help update' for more information.\n",
"EXAMPLES": "bq update --description \"Dataset description\"\nexisting_dataset\nbq update --description \"My table\"\nexisting_dataset.existing_table\nbq update --description \"My model\" -m\nexisting_dataset.existing_model\nbq update -t existing_dataset.existing_table\nname:integer,value:string\nbq update --destination_kms_key\nprojects/p/locations/l/keyRings/r/cryptoKeys/k\nexisting_dataset.existing_table\nbq update --view='select 1 as num'\nexisting_dataset.existing_view\n(--view_udf_resource=path/to/file.js)\nbq update --transfer_config --display_name=name\n-p='{\"param\":\"value\"}'\nprojects/p/locations/l/transferConfigs/c\nbq update --transfer_config --target_dataset=dataset\n--refresh_window_days=5 --update_credentials\nprojects/p/locations/l/transferConfigs/c\nbq update --reservation --location=US\n--project_id=my-project\n--bi_reservation_size=2G\nbq update --capacity_commitment --location=US\n--project_id=my-project\n--plan=MONTHLY --renewal_plan=FLEX commitment_id\nbq update --capacity_commitment --location=US\n--project_id=my-project\n--split --slots=500 commitment_id\nbq update --capacity_commitment --location=US\n--project_id=my-project\n--merge commitment_id1,commitment_id2\nbq update --reservation_assignment\n--destination_reservation_id=proj:US.new_reservation\nproj:US.old_reservation.assignment_id\nbq update --connection_credential='{\"username\":\"u\",\n\"password\":\"p\"}'\n--location=US --project_id=my-project\nexisting_connection\nbq update --row_access_policy\n--policy_id=existing_policy\n--target_table='existing_dataset.existing_table'\n--grantees='user:user1@google.com,group:group1@google\n.com'\n--filter_predicate='Region=\"US\"'\n"
}
},
"version": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"version"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Return the version of bq.\n"
}
},
"wait": {
"capsule": "",
"commands": {},
"flags": {},
"groups": {},
"is_group": false,
"is_hidden": false,
"path": [
"bq",
"wait"
],
"positionals": [],
"release": "GA",
"sections": {
"DESCRIPTION": "Wait some number of seconds for a job to finish.\n\nPoll job_id until either (1) the job is DONE or (2)\nthe specified number of seconds have elapsed. Waits\nforever if unspecified. If no job_id is specified,\nand there is only one running job, we poll that job.\n",
"EXAMPLES": "bq wait # Waits forever for the currently running\njob.\nbq wait job_id # Waits forever\nbq wait job_id 100 # Waits 100 seconds\nbq wait job_id 0 # Polls if a job is done, then\nreturns immediately.\n# These may exit with a non-zero status code to\nindicate \"failure\":\nbq wait --fail_on_error job_id # Succeeds if job\nsucceeds.\nbq wait --fail_on_error job_id 100 # Succeeds if job\nsucceeds in 100 sec.\n"
}
}
},
"flags": {
"--alpha": {
"attr": {},
"category": "",
"default": "''",
"description": "<none|query_max_slots|reservation_groups>: Naming an alpha feature with this flag will cause it be used.; repeat this option to specify a list of values",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--alpha",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--api": {
"attr": {},
"category": "",
"default": "'https://bigquery.googleapis.com'",
"description": "API endpoint to talk to.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--api",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--api_version": {
"attr": {},
"category": "",
"default": "'v2'",
"description": "API version to use.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--api_version",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--apilog": {
"attr": {},
"category": "",
"default": "",
"description": "Log all API requests and responses to the file or directory specified by this flag. Also accepts \"stdout\" and \"stderr\". Specifying the empty string will direct to stdout.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--apilog",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--bigquery_discovery_api_key": {
"attr": {},
"category": "",
"default": "",
"description": "API key to use for discovery doc requests.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--bigquery_discovery_api_key",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--bigqueryrc": {
"attr": {},
"category": "",
"default": "'/user/forge-00/.bigqueryrc'",
"description": "Path to configuration file. The configuration file specifies new defaults for any flags, and can be overridden by specifying the flag on the command line. If the --bigqueryrc flag is not specified, the BIGQUERYRC environment variable is used. If that is not specified, the path \"~/.bigqueryrc\" is used.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--bigqueryrc",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--ca_certificates_file": {
"attr": {},
"category": "",
"default": "''",
"description": "Location of CA certificates file.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--ca_certificates_file",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--dataset_id": {
"attr": {},
"category": "",
"default": "''",
"description": "Default dataset reference to use for requests (Ignored when not applicable.). Can be set as \"project:dataset\" or \"dataset\". If project is missing, the value of the project_id flag will be used.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--dataset_id",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--debug_mode": {
"attr": {},
"category": "",
"default": "'false'",
"description": "Show tracebacks on Python exceptions.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--debug_mode",
"nargs": "0",
"type": "bool",
"value": ""
},
"--disable_ssl_validation": {
"attr": {},
"category": "",
"default": "'false'",
"description": "Disables HTTPS certificates validation. This is off by default.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--disable_ssl_validation",
"nargs": "0",
"type": "bool",
"value": ""
},
"--discovery_file": {
"attr": {},
"category": "",
"default": "''",
"description": "Filename for JSON document to read for the base BigQuery API discovery, excluding Model, Routine, RowAccessPolicy, and IAMPolicy APIs.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--discovery_file",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--enable_gdrive": {
"attr": {},
"category": "",
"default": "'true'",
"description": "When set to true, requests new OAuth token with GDrive scope. When set to false, requests new OAuth token without GDrive scope. Unless authenticated with a service account, to use this flag, the use_google_auth flag must be set to false.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--enable_gdrive",
"nargs": "0",
"type": "bool",
"value": ""
},
"--enable_resumable_uploads": {
"attr": {},
"category": "",
"default": "",
"description": "Enables resumable uploads over HTTP (Only applies to load jobs that load data from local files.). Defaults to True.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--enable_resumable_uploads",
"nargs": "0",
"type": "bool",
"value": ""
},
"--fingerprint_job_id": {
"attr": {},
"category": "",
"default": "'false'",
"description": "Whether to use a job id that is derived from a fingerprint of the job configuration. This will prevent the same job from running multiple times accidentally.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--fingerprint_job_id",
"nargs": "0",
"type": "bool",
"value": ""
},
"--format": {
"attr": {},
"category": "",
"default": "",
"description": "<none|json|prettyjson|csv|sparse|pretty>: Format for command output. Options include: pretty: formatted table output sparse: simpler table output prettyjson: easy-to-read JSON format json: maximally compact JSON csv: csv format with header The first three are intended to be human-readable, and the latter three are for passing to another program. If no format is selected, one will be chosen based on the command run.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--format",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--headless": {
"attr": {},
"category": "",
"default": "'false'",
"description": "Whether this bq session is running without user interaction. This affects behavior that expects user interaction, like whether debug_mode will break into the debugger and lowers the frequency of informational printing.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--headless",
"nargs": "0",
"type": "bool",
"value": ""
},
"--httplib2_debuglevel": {
"attr": {},
"category": "",
"default": "",
"description": "Instruct httplib2 to print debugging messages by setting debuglevel to the given value. (an integer)",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--httplib2_debuglevel",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--job_id": {
"attr": {},
"category": "",
"default": "",
"description": "A unique job_id to use for the request. If not specified, this client will generate a job_id. Applies only to commands that launch jobs, such as cp, extract, load, and query.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--job_id",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--job_property": {
"attr": {},
"category": "",
"default": "",
"description": "Additional key-value pairs to include in the properties field of the job configuration; repeat this option to specify a list of values",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--job_property",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--jobs_query_use_request_id": {
"attr": {},
"category": "",
"default": "'false'",
"description": "If true, sends request_id in jobs.query request.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--jobs_query_use_request_id",
"nargs": "0",
"type": "bool",
"value": ""
},
"--jobs_query_use_results_from_response": {
"attr": {},
"category": "",
"default": "'true'",
"description": "If true, results from jobs.query response are used.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--jobs_query_use_results_from_response",
"nargs": "0",
"type": "bool",
"value": ""
},
"--location": {
"attr": {},
"category": "",
"default": "",
"description": "Default geographic location to use when creating datasets or determining where jobs should run (Ignored when not applicable.)",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--location",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--max_rows_per_request": {
"attr": {},
"category": "",
"default": "",
"description": "Specifies the max number of rows to return per read. (an integer)",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--max_rows_per_request",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--mtls": {
"attr": {},
"category": "",
"default": "'false'",
"description": "If set will use mtls client certificate on connections to BigQuery.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--mtls",
"nargs": "0",
"type": "bool",
"value": ""
},
"--project_id": {
"attr": {},
"category": "",
"default": "''",
"description": "Default project to use for requests.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--project_id",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--proxy_address": {
"attr": {},
"category": "",
"default": "''",
"description": "The name or IP address of the proxy host to use for connecting to GCP.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--proxy_address",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--proxy_password": {
"attr": {},
"category": "",
"default": "''",
"description": "The password to use when authenticating with proxy host.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--proxy_password",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--proxy_port": {
"attr": {},
"category": "",
"default": "''",
"description": "The port number to use to connect to the proxy host.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--proxy_port",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--proxy_username": {
"attr": {},
"category": "",
"default": "'false'",
"description": "The user name to use when authenticating with proxy host. -q,--[no]quiet: If True, ignore status updates while jobs are running.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--proxy_username",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--request_reason": {
"attr": {},
"category": "",
"default": "'true'",
"description": "A reason for making the request intended to be recorded in audit logging. -sync,--[no]synchronous_mode: If True, wait for command completion before returning, and use the job completion status for error codes. If False, simply create the job, and use the success of job creation as the error code.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--request_reason",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--trace": {
"attr": {},
"category": "",
"default": "",
"description": "A tracing token to include in api requests.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--trace",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--universe_domain": {
"attr": {},
"category": "",
"default": "",
"description": "The universe domain to use in TPC domains.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--universe_domain",
"nargs": "1",
"type": "string",
"value": "VALUE"
},
"--use_gcloud_config": {
"attr": {},
"category": "",
"default": "'true'",
"description": "If true, use gcloud config to override default flag values.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--use_gcloud_config",
"nargs": "0",
"type": "bool",
"value": ""
},
"--use_lep": {
"attr": {},
"category": "",
"default": "'false'",
"description": "Use a LEP endpoint based on the operation's location.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--use_lep",
"nargs": "0",
"type": "bool",
"value": ""
},
"--use_regional_endpoints": {
"attr": {},
"category": "",
"default": "'false'",
"description": "Use a regional endpoint based on the operation's location.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--use_regional_endpoints",
"nargs": "0",
"type": "bool",
"value": ""
},
"--use_rep": {
"attr": {},
"category": "",
"default": "'false'",
"description": "Use a REP endpoint based on the operation's location.",
"group": "",
"is_global": true,
"is_hidden": false,
"is_required": false,
"name": "--use_rep",
"nargs": "0",
"type": "bool",
"value": ""
}
},
"groups": {},
"is_group": true,
"is_hidden": false,
"path": [
"bq"
],
"positionals": [],
"release": "GA",
"sections": {}
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,3 @@
update: sudo apt-get install {package}
remove: sudo apt-get remove {package}
update-all: sudo apt-get update && sudo apt-get --only-upgrade install {package}

View File

@@ -0,0 +1,47 @@
app-engine-go: google-cloud-cli-app-engine-go
app-engine-grpc: google-cloud-cli-app-engine-grpc
app-engine-java: google-cloud-cli-app-engine-java
app-engine-php: unavailable
app-engine-python: google-cloud-cli-app-engine-python
app-engine-python-extras: google-cloud-cli-app-engine-python-extras
alpha: google-cloud-cli
anthoscli: google-cloud-cli-anthoscli
anthos-auth: google-cloud-cli-anthos-auth
beta: google-cloud-cli
bigtable: google-cloud-cli-bigtable-emulator
bq: google-cloud-cli
cbt: google-cloud-cli-cbt
cloud-build-local: google-cloud-cli-cloud-build-local
cloud-run-proxy: google-cloud-cli-cloud-run-proxy
cloud_sql_proxy: unavailable
cloud-sql-proxy: unavailable
run-compose: google-cloud-cli-run-compose
cloud-datastore-emulator: google-cloud-cli-datastore-emulator
cloud-firestore-emulator: google-cloud-cli-firestore-emulator
cloud-spanner-emulator: google-cloud-cli-spanner-emulator
core: google-cloud-cli
pkg: unavailable
docker-credential-gcr: google-cloud-cli-docker-credential-gcr
enterprise-certificate-proxy: google-cloud-cli-enterprise-certificate-proxy
gcloud: google-cloud-cli
gcloud-crc32c: google-cloud-cli
gke-gcloud-auth-plugin: google-cloud-cli-gke-gcloud-auth-plugin
gsutil: google-cloud-cli
istioctl: google-cloud-cli-istioctl
kpt: google-cloud-cli-kpt
kubectl: kubectl
spanner-migration-tool: google-cloud-cli-spanner-migration-tool
kubectl-oidc: google-cloud-cli-kubectl-oidc
local-extract: google-cloud-cli-local-extract
log-streaming: google-cloud-cli-log-streaming
package-go-module: google-cloud-cli-package-go-module
minikube: google-cloud-cli-minikube
nomos: google-cloud-cli-nomos
pubsub-emulator: google-cloud-cli-pubsub-emulator
skaffold: google-cloud-cli-skaffold
kustomize: unavailable
config-connector: google-cloud-cli-config-connector
terraform-tools: google-cloud-cli-terraform-tools
bundled-python3-unix: unavailable
managed-flink-client: google-cloud-cli-managed-flink-client
spanner-cli: google-cloud-cli-spanner-cli

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,217 @@
#!/bin/sh
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Greet the user before any interpreter probing or installation work begins.
echo Welcome to the Google Cloud CLI!
# <cloud-sdk-sh-preamble>
#
# CLOUDSDK_ROOT_DIR (a) installation root dir
# CLOUDSDK_PYTHON (u) python interpreter path
# CLOUDSDK_GSUTIL_PYTHON (u) python interpreter path for gsutil
# CLOUDSDK_PYTHON_ARGS (u) python interpreter arguments
# CLOUDSDK_PYTHON_SITEPACKAGES (u) use python site packages
# CLOUDSDK_BQ_PYTHON (u) python interpreter for bq
# CLOUDSDK_ENCODING (u) python io encoding for gcloud
#
# (a) always defined by the preamble
# (u) user definition overrides preamble
# Locate a command on PATH.  Tries `which` first; when `which` is absent or
# fails, falls back to the POSIX-mandated `command -v`.
_cloudsdk_which() {
  if ! which "$1" 2>/dev/null; then
    command -v "$1" 2>/dev/null
  fi
}
# Echo the first argument that resolves to an executable on PATH, without
# checking its version.  Falls back to the literal name "python" when none
# of the candidates resolve.
order_python_no_check() {
  selected_version=""
  for python_version in "$@"
  do
    if [ -z "$selected_version" ] && _cloudsdk_which "$python_version" > /dev/null; then
      selected_version=$python_version
    fi
  done
  # POSIX default-assignment: use "python" when nothing was found.
  : "${selected_version:=python}"
  echo $selected_version
}
# Echo the first argument that is an executable Python interpreter whose
# version gcloud supports; echoes nothing when no candidate qualifies.
#
# The accepted range (3.9 through 3.14) mirrors MIN_SUPPORTED_PY3_VERSION /
# MAX_SUPPORTED_PY3_VERSION in bin/gcloud.  The previous upper bound of 3.13
# rejected python3.14 even though setup_cloudsdk_python lists python3.14 as a
# candidate, and the previous lower bound of 3.8 could select an interpreter
# that gcloud itself then refuses to run under.
order_python() {
  selected_version=""
  for python_version in "$@"
  do
    if [ -z "$selected_version" ]; then
      if "$python_version" -c "import sys; sys.exit(0 if ((3,9) <= (sys.version_info.major, sys.version_info.minor) <= (3,14)) else 1)" > /dev/null 2>&1; then
        selected_version=$python_version
      fi
    fi
  done
  echo $selected_version
}
# Determines the real cloud sdk root dir given the script path.
# Would be easier with a portable "readlink -f".
_cloudsdk_root_dir() {
  # Make $1 absolute: already absolute, relative containing a slash, or a
  # bare command name that must be resolved through PATH first.
  case $1 in
  /*)   _cloudsdk_path=$1
        ;;
  */*)  _cloudsdk_path=$PWD/$1
        ;;
  *)    _cloudsdk_path=$(_cloudsdk_which $1)
        case $_cloudsdk_path in
        /*) ;;
        *)  _cloudsdk_path=$PWD/$_cloudsdk_path ;;
        esac
        ;;
  esac
  _cloudsdk_dir=0
  # Two passes: first resolve the path itself; if it is not a directory,
  # resolve its parent directory instead (_cloudsdk_dir=1 marks pass two).
  while :
  do
    # Follow every level of symlink indirection; a relative link target is
    # interpreted relative to the directory containing the link.
    while _cloudsdk_link=$(readlink "$_cloudsdk_path")
    do
      case $_cloudsdk_link in
      /*) _cloudsdk_path=$_cloudsdk_link ;;
      *)  _cloudsdk_path=$(dirname "$_cloudsdk_path")/$_cloudsdk_link ;;
      esac
    done
    case $_cloudsdk_dir in
    1)  break ;;
    esac
    if [ -d "${_cloudsdk_path}" ]; then
      break
    fi
    _cloudsdk_dir=1
    _cloudsdk_path=$(dirname "$_cloudsdk_path")
  done
  # Normalize a trailing "/" or "/." and, when the path ends in "bin",
  # emit its parent: the SDK root is the directory containing bin/.
  while :
  do  case $_cloudsdk_path in
      */)   _cloudsdk_path=$(dirname "$_cloudsdk_path/.")
            ;;
      */.)  _cloudsdk_path=$(dirname "$_cloudsdk_path")
            ;;
      */bin)  dirname "$_cloudsdk_path"
              break
              ;;
      *)    echo "$_cloudsdk_path"
            break
            ;;
      esac
  done
}
# Resolve the installation root from the path this script was invoked as
# (symlink-safe via _cloudsdk_root_dir).
CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0")
# Pick the Python interpreter the CLI will run under and store it in
# CLOUDSDK_PYTHON.  A user-supplied CLOUDSDK_PYTHON always wins; otherwise
# prefer the bundled interpreter, then an enabled virtualenv, then system
# interpreters.
setup_cloudsdk_python() {
  # if $CLOUDSDK_PYTHON is not set, look for bundled python else
  # prefer python3 over python
  if [ -z "$CLOUDSDK_PYTHON" ]; then
    # Is bundled python present and working?  The bundled build is only
    # shipped for x86_64, hence the ARCH check.
    ARCH=$(uname -m 2>/dev/null)
    if [ -x "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" ] && \
       [ "$ARCH" = "x86_64" ] && \
       "$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3" --version > /dev/null 2>&1;
    then
      CLOUDSDK_PYTHON="$CLOUDSDK_ROOT_DIR/platform/bundledpythonunix/bin/python3"
      # Bundled python ships its own site-packages; keep them enabled.
      CLOUDSDK_PYTHON_SITEPACKAGES=1
    else
      # CLOUDSDK_CONFIG overrides the default per-user config directory.
      GLOBAL_CONFIG="$HOME/.config/gcloud"
      if [ "$CLOUDSDK_CONFIG" ];
      then
        GLOBAL_CONFIG="$CLOUDSDK_CONFIG"
      fi
      # If there is an enabled virtualenv activate it
      if [ -f "$GLOBAL_CONFIG/virtenv/bin/activate" ];
      then
        if [ -f "$GLOBAL_CONFIG/virtenv/enabled" ];
        then
          . "$GLOBAL_CONFIG/virtenv/bin/activate"
        fi
      fi
      # Probe system interpreters: generic python3 first, then explicit
      # versions, then bare python.
      primary_python=python3.13
      CLOUDSDK_PYTHON=$(order_python python3 "$primary_python" python3.12 python3.14 python3.11 python3.10 python3.9 python)
      # Nothing passed the version check: fall back to any python on PATH
      # so gcloud can at least start and print a clear version error.
      if [ -z "$CLOUDSDK_PYTHON" ]; then
        CLOUDSDK_PYTHON=$(order_python_no_check python3 python)
      fi
    fi
  fi
}
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use
# CLOUDSDK_PYTHON to configure which python gcloud uses.
unset PYTHONHOME
# if CLOUDSDK_PYTHON_SITEPACKAGES and VIRTUAL_ENV are empty
case :$CLOUDSDK_PYTHON_SITEPACKAGES:$VIRTUAL_ENV: in
:::)  # add -S to CLOUDSDK_PYTHON_ARGS if not already there
      # (-S skips site-packages so system packages cannot shadow vendored ones)
      case " $CLOUDSDK_PYTHON_ARGS " in
      *" -S "*) ;;
      " ")  CLOUDSDK_PYTHON_ARGS="-S"
            ;;
      *)    CLOUDSDK_PYTHON_ARGS="$CLOUDSDK_PYTHON_ARGS -S"
            ;;
      esac
      unset CLOUDSDK_PYTHON_SITEPACKAGES
      ;;
*)    # remove -S from CLOUDSDK_PYTHON_ARGS if already there
      while :; do
        case " $CLOUDSDK_PYTHON_ARGS " in
        *" -S "*) CLOUDSDK_PYTHON_ARGS=${CLOUDSDK_PYTHON_ARGS%%-S*}' '${CLOUDSDK_PYTHON_ARGS#*-S} ;;
        *) break ;;
        esac
      done
      # if CLOUDSDK_PYTHON_SITEPACKAGES is empty
      [ -z "$CLOUDSDK_PYTHON_SITEPACKAGES" ] &&
        CLOUDSDK_PYTHON_SITEPACKAGES=1
      export CLOUDSDK_PYTHON_SITEPACKAGES
      ;;
esac
# Allow users to set the Python interpreter used to launch gsutil, falling
# back to the CLOUDSDK_PYTHON interpreter otherwise.
if [ -z "$CLOUDSDK_GSUTIL_PYTHON" ]; then
  CLOUDSDK_GSUTIL_PYTHON="$CLOUDSDK_PYTHON"
fi
# Same fallback for the bq tool's interpreter.
if [ -z "$CLOUDSDK_BQ_PYTHON" ]; then
  CLOUDSDK_BQ_PYTHON="$CLOUDSDK_PYTHON"
fi
# I/O encoding: honor an explicit PYTHONIOENCODING, default to UTF-8.
if [ -z "$CLOUDSDK_ENCODING" ]; then
  if [ -z "$PYTHONIOENCODING" ]; then
    CLOUDSDK_ENCODING=UTF-8
  else
    CLOUDSDK_ENCODING="$PYTHONIOENCODING"
  fi
fi
export CLOUDSDK_ROOT_DIR
export CLOUDSDK_PYTHON_ARGS
export CLOUDSDK_GSUTIL_PYTHON
export CLOUDSDK_BQ_PYTHON
export CLOUDSDK_ENCODING
export PYTHONIOENCODING="$CLOUDSDK_ENCODING"
# </cloud-sdk-sh-preamble>
# Bail out with guidance when no python interpreter could be found at all.
if [ -z "$CLOUDSDK_PYTHON" ]; then
  if [ -z "$( _cloudsdk_which python)" ]; then
    echo
    echo "To use the Google Cloud CLI, you must have Python installed and on your PATH."
    echo "As an alternative, you may also set the CLOUDSDK_PYTHON environment variable"
    echo "to the location of your Python executable."
    exit 1
  fi
fi
# Warns user if they are running as root.
if [ $(id -u) = 0 ]; then
  echo "WARNING: You appear to be running this script as root. This may cause "
  echo "the installation to be inaccessible to users other than the root user."
fi
# Hand off to the Python installer with the original arguments.
"$CLOUDSDK_PYTHON" $CLOUDSDK_PYTHON_ARGS "${CLOUDSDK_ROOT_DIR}/bin/bootstrapping/install.py" "$@"

View File

@@ -0,0 +1,189 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
#
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud command line tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import contextlib
import os
import sys
# Launcher bootstrap: make the SDK's own directory and its vendored
# third_party tree importable ahead of anything already on sys.path.
_GCLOUD_PY_DIR = os.path.dirname(__file__)
_THIRD_PARTY_DIR = os.path.join(_GCLOUD_PY_DIR, 'third_party')
# From Python 3.11 onwards, the script directory is not prepended to sys.path by
# default if PYTHONSAFEPATH env var is set.
# NOMUTANTS--Tested through the installed SDK in e2e.bundle.sanity_test.
if _GCLOUD_PY_DIR not in sys.path:
  sys.path.insert(0, _GCLOUD_PY_DIR)
# Vendored third-party dependencies (when present) take precedence over
# system-installed copies of the same packages.
if os.path.isdir(_THIRD_PARTY_DIR):
  sys.path.insert(0, _THIRD_PARTY_DIR)
def _fix_google_module():
  """Reloads the google module to prefer our vendored copy.

  When python is not invoked with the -S option, it can preload the google
  module via a .pth file setting its __path__. After this happens, our
  vendored google package may not be in the __path__. After our vendored
  dependency directory is put at the first place in sys.path, the google
  module should be reloaded so that our vendored copy is preferred.
  """
  if 'google' not in sys.modules:
    return
  # pylint: disable=g-import-not-at-top
  import importlib
  import google
  # The old builtin-reload()/NameError fallback for Python 2 was removed:
  # this launcher only runs on Python >= MIN_SUPPORTED_PY3_VERSION, where
  # importlib.reload is always available.
  importlib.reload(google)
def reorder_sys_path(sys_path):
  """If site packages are enabled reorder them.

  Make sure bundled_python site-packages appear first in the sys.path.

  Args:
    sys_path: list current sys path

  Returns:
    modified syspath if CLOUDSDK_PYTHON_SITEPACKAGES is on, prefer bundled
    python site packages over all other. Note the returned syspath has the
    same elements but a different order.
  """
  if 'CLOUDSDK_PYTHON_SITEPACKAGES' not in os.environ:
    return sys_path

  def _is_foreign_site_packages(entry):
    # A site-packages dir that does not belong to the bundled interpreter.
    return ('site-packages' in entry and
            'platform/bundledpythonunix' not in entry)

  # Stable partition: preserve relative order within each group.
  preferred = [p for p in sys_path if not _is_foreign_site_packages(p)]
  demoted = [p for p in sys_path if _is_foreign_site_packages(p)]
  return preferred + demoted
def _import_gcloud_main():
  """Returns reference to gcloud_main module."""
  # Deferred so the sys.path fixes in main() happen before this import.
  # pylint:disable=g-import-not-at-top
  from googlecloudsdk import gcloud_main
  return gcloud_main
# Inclusive (major, minor) bounds of the Python versions gcloud supports.
MIN_SUPPORTED_PY3_VERSION = (3, 9)
MAX_SUPPORTED_PY3_VERSION = (3, 14)


def python_version_string(python_version):
  """Renders a (major, minor) version tuple as 'major.minor'."""
  return '%s.%s' % (python_version[0], python_version[1])
@contextlib.contextmanager
def gcloud_exception_handler():
  """Handles exceptions from gcloud to provide a helpful message.

  Any exception escaping the wrapped body is turned into a user-facing
  error on stderr, then the process exits with status 1.  The message
  depends on whether the running interpreter's version is supported:
  unsupported versions get upgrade guidance, supported versions get a
  corruption/reinstall hint plus the traceback.
  """
  try:
    yield
  except Exception:  # pylint: disable=broad-except
    # We want to catch *everything* here to display a nice message to the user
    # pylint:disable=g-import-not-at-top
    python_version = sys.version_info[:2]
    if (python_version < MIN_SUPPORTED_PY3_VERSION or
        python_version > MAX_SUPPORTED_PY3_VERSION):
      valid_python_version = False
      # Too new vs. too old get slightly different wording.
      if python_version > MAX_SUPPORTED_PY3_VERSION:
        support_message = 'not currently supported by gcloud'
      else:
        support_message = 'no longer supported by gcloud'
      error_message = (
          'You are running gcloud with Python {python_version}, which is '
          '{support_message}.\nInstall a compatible version of Python '
          '{min_python_version}-{max_python_version} and set the '
          'CLOUDSDK_PYTHON environment variable to point to it.'.format(
              python_version=python_version_string(python_version),
              support_message=support_message,
              min_python_version=python_version_string(
                  MIN_SUPPORTED_PY3_VERSION),
              max_python_version=python_version_string(
                  MAX_SUPPORTED_PY3_VERSION))
      )
    else:
      # Version is fine, so the failure points at the installation itself.
      valid_python_version = True
      error_message = (
          'This usually indicates corruption in your gcloud installation or '
          'problems with your Python interpreter.\n\n'
          'Please verify that the following is the path to a working Python '
          '{min_python_version}-{max_python_version} executable:\n    '
          '{executable}\n\nIf it is not, please set the CLOUDSDK_PYTHON '
          'environment variable to point to a working Python '
          'executable.').format(
              executable=sys.executable,
              min_python_version=python_version_string(
                  MIN_SUPPORTED_PY3_VERSION),
              max_python_version=python_version_string(
                  MAX_SUPPORTED_PY3_VERSION))
    # We DON'T want to suggest `gcloud components reinstall` here (ex. as
    # opposed to the similar message in gcloud_main.py), as we know that no
    # commands will work.
    sys.stderr.write(
        (
            'ERROR: gcloud failed to load. {error_message}\n\n'
            'If you are still experiencing problems, please reinstall the '
            'Google Cloud CLI using the instructions here:\n    '
            'https://cloud.google.com/sdk/docs/install\n'
        ).format(error_message=error_message)
    )
    # Only show a traceback when it is likely to be meaningful (i.e. the
    # interpreter itself is not the problem).
    if valid_python_version:
      import traceback
      sys.stderr.write('\n\n{}\n'.format(
          '\n'.join(traceback.format_exc().splitlines())))
    sys.exit(1)
def main():
  """Launcher entry point: fix sys.path, serve completion, run gcloud."""
  with gcloud_exception_handler():
    sys.path = reorder_sys_path(sys.path)
    # pylint:disable=g-import-not-at-top
    from googlecloudsdk.core.util import encoding
    # Tab-completion requests (argcomplete sets _ARGCOMPLETE) are answered
    # from the static completion table, short-circuiting normal startup.
    if encoding.GetEncodedValue(os.environ, '_ARGCOMPLETE'):
      try:
        # pylint:disable=g-import-not-at-top
        from googlecloudsdk.command_lib.static_completion import lookup
        lookup.Complete()
        return
      except Exception:  # pylint:disable=broad-except, hide completion errors
        # Completion must never crash the user's shell; only re-raise when
        # tracing was explicitly requested.
        if encoding.GetEncodedValue(os.environ,
                                    '_ARGCOMPLETE_TRACE') == 'static':
          raise
  with gcloud_exception_handler():
    _fix_google_module()
    gcloud_main = _import_gcloud_main()
    sys.exit(gcloud_main.main())


if __name__ == '__main__':
  main()

View File

@@ -0,0 +1,42 @@
# -*- coding: utf-8 -*- #
#
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package marker file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import importlib.util
import sys
from googlecloudsdk.core.util import lazy_regex
# Lazy loader doesn't work in some environments, such as par files
try:
  module_name = 'googlecloudsdk.api_lib.app.yaml_parsing'
  try:
    # Probe the module cache; if the module was already imported there is
    # nothing to do.
    sys.modules[module_name]
  except KeyError:
    # Not yet imported: wrap its loader in a LazyLoader so the real import
    # work is deferred until first attribute access.
    # NOTE(review): the lazily-loaded module is never assigned back into
    # sys.modules here — confirm that is intentional.
    spec = importlib.util.find_spec(module_name)
    module = importlib.util.module_from_spec(spec)
    loader = importlib.util.LazyLoader(spec.loader)
    loader.exec_module(module)
except ImportError:
  # Environments without lazy-loading support (e.g. par files): fall back to
  # normal import behavior.
  pass
lazy_regex.initialize_lazy_compile()

View File

@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access approval requests API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
def Approve(name):
  """Approves the approval request with the given resource name.

  Args:
    name: str, full resource name of the approval request; the parent kind is
      inferred from the 'organizations/', 'folders/', or project prefix.

  Returns:
    The approved ApprovalRequest message.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  if 'organizations/' in name:
    return client.organizations_approvalRequests.Approve(
        msgs.AccessapprovalOrganizationsApprovalRequestsApproveRequest(
            name=name))
  elif 'folders/' in name:
    return client.folders_approvalRequests.Approve(
        msgs.AccessapprovalFoldersApprovalRequestsApproveRequest(name=name))
  else:
    return client.projects_approvalRequests.Approve(
        msgs.AccessapprovalProjectsApprovalRequestsApproveRequest(name=name))
def Dismiss(name):
  """Dismisses the approval request with the given resource name.

  Args:
    name: str, full resource name of the approval request; the parent kind is
      inferred from the 'organizations/', 'folders/', or project prefix.

  Returns:
    The dismissed ApprovalRequest message.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  if 'organizations/' in name:
    return client.organizations_approvalRequests.Dismiss(
        msgs.AccessapprovalOrganizationsApprovalRequestsDismissRequest(
            name=name))
  elif 'folders/' in name:
    return client.folders_approvalRequests.Dismiss(
        msgs.AccessapprovalFoldersApprovalRequestsDismissRequest(name=name))
  else:
    return client.projects_approvalRequests.Dismiss(
        msgs.AccessapprovalProjectsApprovalRequestsDismissRequest(name=name))
def Invalidate(name):
  """Invalidates the approval request with the given resource name.

  Args:
    name: str, full resource name of the approval request; the parent kind is
      inferred from the 'organizations/', 'folders/', or project prefix.

  Returns:
    The invalidated ApprovalRequest message.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  if 'organizations/' in name:
    return client.organizations_approvalRequests.Invalidate(
        msgs.AccessapprovalOrganizationsApprovalRequestsInvalidateRequest(
            name=name))
  elif 'folders/' in name:
    return client.folders_approvalRequests.Invalidate(
        msgs.AccessapprovalFoldersApprovalRequestsInvalidateRequest(name=name))
  else:
    return client.projects_approvalRequests.Invalidate(
        msgs.AccessapprovalProjectsApprovalRequestsInvalidateRequest(
            name=name))
def Get(name):
  """Fetches the approval request with the given resource name.

  Args:
    name: str, full resource name of the approval request; the parent kind is
      inferred from the 'organizations/', 'folders/', or project prefix.

  Returns:
    The ApprovalRequest message.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  if 'organizations/' in name:
    return client.organizations_approvalRequests.Get(
        msgs.AccessapprovalOrganizationsApprovalRequestsGetRequest(name=name))
  elif 'folders/' in name:
    return client.folders_approvalRequests.Get(
        msgs.AccessapprovalFoldersApprovalRequestsGetRequest(name=name))
  else:
    return client.projects_approvalRequests.Get(
        msgs.AccessapprovalProjectsApprovalRequestsGetRequest(name=name))
def List(parent, filter=None):  # pylint: disable=redefined-builtin
  """Lists approval requests under the given parent resource.

  Args:
    parent: str, resource name of the organization, folder, or project.
    filter: str, server-side filter expression; a falsy value selects the
      default 'PENDING' filter. (Name shadows the builtin but is part of the
      public keyword interface.)

  Returns:
    A generator of ApprovalRequest messages.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  if 'organizations/' in parent:
    svc = client.organizations_approvalRequests
    req = msgs.AccessapprovalOrganizationsApprovalRequestsListRequest(
        parent=parent)
  elif 'folders/' in parent:
    svc = client.folders_approvalRequests
    req = msgs.AccessapprovalFoldersApprovalRequestsListRequest(parent=parent)
  else:
    svc = client.projects_approvalRequests
    req = msgs.AccessapprovalProjectsApprovalRequestsListRequest(parent=parent)
  # An unset/empty filter falls back to showing only pending requests.
  req.filter = filter or 'PENDING'
  return list_pager.YieldFromList(
      svc, req, field='approvalRequests', batch_size_attribute='pageSize')

View File

@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access approval service account API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
def Get(name):
  """Fetches the access approval service account for a resource.

  Args:
    name: str, resource name; the parent kind is inferred from the
      'organizations/', 'folders/', or project prefix.

  Returns:
    The AccessApprovalServiceAccount message.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  if 'organizations/' in name:
    return client.organizations.GetServiceAccount(
        msgs.AccessapprovalOrganizationsGetServiceAccountRequest(name=name))
  elif 'folders/' in name:
    return client.folders.GetServiceAccount(
        msgs.AccessapprovalFoldersGetServiceAccountRequest(name=name))
  else:
    return client.projects.GetServiceAccount(
        msgs.AccessapprovalProjectsGetServiceAccountRequest(name=name))

View File

@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access approval settings API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
def Delete(name):
  """Deletes the access approval settings for a resource.

  Args:
    name: str, settings resource name; the parent kind is inferred from the
      'organizations/', 'folders/', or project prefix.

  Returns:
    The Empty response message.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  if 'organizations/' in name:
    return client.organizations.DeleteAccessApprovalSettings(
        msgs.AccessapprovalOrganizationsDeleteAccessApprovalSettingsRequest(
            name=name))
  elif 'folders/' in name:
    return client.folders.DeleteAccessApprovalSettings(
        msgs.AccessapprovalFoldersDeleteAccessApprovalSettingsRequest(
            name=name))
  else:
    return client.projects.DeleteAccessApprovalSettings(
        msgs.AccessapprovalProjectsDeleteAccessApprovalSettingsRequest(
            name=name))
def Get(name):
  """Fetches the access approval settings for a resource.

  Args:
    name: str, settings resource name; the parent kind is inferred from the
      'organizations/', 'folders/', or project prefix.

  Returns:
    The AccessApprovalSettings message.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  if 'organizations/' in name:
    return client.organizations.GetAccessApprovalSettings(
        msgs.AccessapprovalOrganizationsGetAccessApprovalSettingsRequest(
            name=name))
  elif 'folders/' in name:
    return client.folders.GetAccessApprovalSettings(
        msgs.AccessapprovalFoldersGetAccessApprovalSettingsRequest(name=name))
  else:
    return client.projects.GetAccessApprovalSettings(
        msgs.AccessapprovalProjectsGetAccessApprovalSettingsRequest(name=name))
def Update(
    name,
    notification_emails,
    enrolled_services,
    active_key_version,
    preferred_request_expiration_days,
    prefer_no_broad_approval_requests,
    notification_pubsub_topic,
    request_scope_max_width_preference,
    require_customer_visible_justification,
    approval_policy,
    update_mask,
):
  """Updates the access approval settings for a resource.

  Args:
    name: the settings resource name (e.g. projects/123/accessApprovalSettings)
    notification_emails: list of email addresses
    enrolled_services: list of services
    active_key_version: KMS signing key version resource name
    preferred_request_expiration_days: the default expiration time for approval
      requests
    prefer_no_broad_approval_requests: communicates the preference to Google
      personnel to request access with as targeted a resource scope as possible
    notification_pubsub_topic: A pubsub topic to which notifications relating
      to approval requests should be sent
    request_scope_max_width_preference: specifies broadest scope of access for
      access requests without a specific method
    require_customer_visible_justification: to configure if a customer visible
      justification (i.e. Vector Case) is required for a Googler to create an
      Access Ticket to send to the customer when attempting to access customer
      resources.
    approval_policy: the policy for approving requests
    update_mask: which fields to update

  Returns:
    The updated AccessApprovalSettings message.
  """
  client = apis.GetClientInstance('accessapproval', 'v1')
  msgs = apis.GetMessagesModule('accessapproval', 'v1')
  settings = msgs.AccessApprovalSettings(
      name=name,
      enrolledServices=[
          msgs.EnrolledService(cloudProduct=service)
          for service in enrolled_services
      ],
      notificationEmails=notification_emails,
      activeKeyVersion=active_key_version,
      preferredRequestExpirationDays=preferred_request_expiration_days,
      preferNoBroadApprovalRequests=prefer_no_broad_approval_requests,
      notificationPubsubTopic=notification_pubsub_topic,
      requestScopeMaxWidthPreference=request_scope_max_width_preference,
      requireCustomerVisibleJustification=require_customer_visible_justification,
      approvalPolicy=approval_policy,
  )
  # Route to the org/folder/project service based on the resource name prefix.
  if 'organizations/' in name:
    return client.organizations.UpdateAccessApprovalSettings(
        msgs.AccessapprovalOrganizationsUpdateAccessApprovalSettingsRequest(
            name=name, accessApprovalSettings=settings, updateMask=update_mask))
  elif 'folders/' in name:
    return client.folders.UpdateAccessApprovalSettings(
        msgs.AccessapprovalFoldersUpdateAccessApprovalSettingsRequest(
            name=name, accessApprovalSettings=settings, updateMask=update_mask))
  else:
    return client.projects.UpdateAccessApprovalSettings(
        msgs.AccessapprovalProjectsUpdateAccessApprovalSettingsRequest(
            name=name, accessApprovalSettings=settings, updateMask=update_mask))

View File

@@ -0,0 +1,206 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unified diff resource printer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import difflib
import io
import re
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.resource import resource_printer_base
from googlecloudsdk.core.resource import resource_projection_spec
from googlecloudsdk.core.resource import resource_projector
from googlecloudsdk.core.resource import resource_transform
from googlecloudsdk.core.resource import yaml_printer
class ACMDiffPrinter(resource_printer_base.ResourcePrinter):
  """A printer for an ndiff of the first two projection columns.

  A unified diff of the first two projection columns.

  Printer attributes:
    format: The format of the diffed resources. Each resource is converted
      to this format and the diff of the converted resources is displayed.
      The default is 'yaml'.
  """

  def __init__(self, *args, **kwargs):
    super(ACMDiffPrinter, self).__init__(
        *args, by_columns=True, non_empty_projection_required=True, **kwargs)
    # Format used to render each resource before diffing; 'yaml' by default.
    self._print_format = self.attributes.get('format', 'yaml')

  def _Diff(self, old, new):
    """Prints a modified ndiff of formatter output for old and new.

    Example output:
      IngressPolicies:
        ingressFrom:
          sources:
            accessLevel: accessPolicies/123456789/accessLevels/my_level
           -resource: projects/123456789012
           +resource: projects/234567890123
      EgressPolicies:
       +egressTo:
         +operations:
           +actions:
             +action: method_for_all
             +actionType: METHOD
           +serviceName: chemisttest.googleapis.com
         +resources:
           +projects/345678901234

    Args:
      old: The old original resource.
      new: The new changed resource.
    """
    # Fill a buffer with the object as rendered originally.
    buf_old = io.StringIO()
    printer = self.Printer(self._print_format, out=buf_old)
    printer.PrintSingleRecord(old)
    # Fill a buffer with the object as rendered after the change.
    buf_new = io.StringIO()
    printer = self.Printer(self._print_format, out=buf_new)
    printer.PrintSingleRecord(new)
    # Empty string stands in for "no resource" so ndiff sees an empty
    # sequence on that side.
    lines_old = ''
    lines_new = ''
    # Send these two buffers to the ndiff() function for printing.
    if old is not None:
      lines_old = self._FormatYamlPrinterLinesForDryRunDescribe(
          buf_old.getvalue().split('\n'))
    if new is not None:
      lines_new = self._FormatYamlPrinterLinesForDryRunDescribe(
          buf_new.getvalue().split('\n'))
    lines_diff = difflib.ndiff(lines_old, lines_new)
    empty_line_pattern = re.compile(r'^\s*$')
    empty_config_pattern = re.compile(r'^(\+|-)\s+\{\}$')
    for line in lines_diff:
      # We want to show the entire contents of resource, but without the
      # additional information added by ndiff, which always leads with '?'. We
      # also don't want to show empty lines produced from comparing unset
      # fields, as well as lines produced from comparing empty messages, which
      # will look like '+ {}' or '- {}'.
      if line and line[0] != '?' and not empty_line_pattern.match(
          line) and not empty_config_pattern.match(line):
        print(line)

  def _AddRecord(self, record, delimit=False):
    """Immediately prints the first two columns of record as a unified diff.

    Records with less than 2 columns are silently ignored.

    Args:
      record: A JSON-serializable object.
      delimit: Prints resource delimiters if True.
    """
    title = self.attributes.get('title')
    if title:
      self._out.Print(title)
      self._title = None
    if len(record) > 1:
      self._Diff(record[0], record[1])

  def _FormatYamlPrinterLinesForDryRunDescribe(self, lines):
    """Tweak yaml printer formatted resources for ACM's dry run describe output.

    Args:
      lines: yaml printer formatted strings

    Returns:
      lines with no '-' prefix for yaml array elements.
    """
    # NOTE(review): this replaces the FIRST '-' occurring anywhere in each
    # line, not only a leading YAML list dash; lines whose values contain a
    # '-' before any list marker are also altered — confirm this is intended.
    return [line.replace('-', ' ', 1) for line in lines]
class Error(exceptions.Error):
  """Exceptions for this module."""


class UnknownFormatError(Error):
  """Unknown format name exception."""


# Maps a printer name (as used in format expressions) to its implementation.
# 'default' and 'yaml' share the same YAML printer.
_FORMATTERS = {
    'default': yaml_printer.YamlPrinter,
    'diff': ACMDiffPrinter,
    'yaml': yaml_printer.YamlPrinter,
}
def Print(resources, print_format, out=None, defaults=None, single=False):
  """Prints the given resources with the printer named in print_format.

  Args:
    resources: A singleton or list of JSON-serializable Python objects.
    print_format: The _FORMATTER name with optional projection expression.
    out: Output stream, log.out if None.
    defaults: Optional resource_projection_spec.ProjectionSpec defaults.
    single: If True then resources is a single item and not a list. For
      example, use this to print a single object as JSON.
  """
  printer = Printer(print_format, out=out, defaults=defaults)
  if not printer:
    # A None printer means printing is disabled; consume no resources.
    return
  printer.Print(resources, single)
def Printer(print_format, out=None, defaults=None, console_attr=None):
  """Returns a resource printer given a format string.

  Args:
    print_format: The _FORMATTERS name with optional attributes and projection.
    out: Output stream, log.out if None.
    defaults: Optional resource_projection_spec.ProjectionSpec defaults.
    console_attr: The console attributes for the output stream. Ignored by
      some printers. If None then printers that require it will initialize it
      to match out.

  Raises:
    UnknownFormatError: The print_format is invalid.

  Returns:
    An initialized ResourcePrinter class or None if printing is disabled.
  """
  projector = resource_projector.Compile(
      expression=print_format,
      defaults=resource_projection_spec.ProjectionSpec(
          defaults=defaults, symbols=resource_transform.GetTransforms()))
  printer_name = projector.Projection().Name()
  if not printer_name:
    # An empty printer name disables printing entirely.
    return None
  if printer_name not in _FORMATTERS:
    raise UnknownFormatError("""\
Format for acm_printer must be one of {0}; received [{1}].
""".format(', '.join(SupportedFormats()), printer_name))
  printer_class = _FORMATTERS[printer_name]
  return printer_class(
      out=out,
      name=printer_name,
      printer=Printer,
      projector=projector,
      console_attr=console_attr)
def SupportedFormats():
  """Returns the supported format names in sorted order."""
  return sorted(_FORMATTERS.keys())

View File

@@ -0,0 +1,120 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API library for Authorized Orgs Desc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.accesscontextmanager import util
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.core import log
from googlecloudsdk.core import resources as core_resources
def _SetIfNotNone(field_name, field_value, obj, update_mask):
  """Sets obj.<field_name> to field_value and records it in update_mask.

  Args:
    field_name: str, the name of the attribute to set.
    field_value: the value to assign; if None, nothing is set or recorded.
    obj: the object on which the value is to be set.
    update_mask: list of str, mutated in place to record the touched field.

  Returns:
    True if the field was set and False otherwise.
  """
  if field_value is None:
    return False
  setattr(obj, field_name, field_value)
  update_mask.append(field_name)
  return True
class Client(object):
  """High-level API client for Authorized Orgs."""

  def __init__(self, client=None, messages=None, version='v1'):
    # Use injected client/messages (tests) or build the default ACM client.
    self.client = client or util.GetClient(version=version)
    self.messages = messages or self.client.MESSAGES_MODULE

  def Get(self, authorized_orgs_desc_ref):
    """Returns the AuthorizedOrgsDesc for the given resource reference."""
    return self.client.accessPolicies_authorizedOrgsDescs.Get(
        self.messages
        .AccesscontextmanagerAccessPoliciesAuthorizedOrgsDescsGetRequest(
            name=authorized_orgs_desc_ref.RelativeName()))

  def List(self, policy_ref, limit=None):
    """Yields AuthorizedOrgsDescs under the given access policy."""
    req = self.messages.AccesscontextmanagerAccessPoliciesAuthorizedOrgsDescsListRequest(
        parent=policy_ref.RelativeName())
    return list_pager.YieldFromList(
        self.client.accessPolicies_authorizedOrgsDescs,
        req,
        limit=limit,
        batch_size_attribute='pageSize',
        batch_size=None,
        field='authorizedOrgsDescs')

  def _ApplyPatch(self, authorized_orgs_desc_ref, authorized_orgs_desc,
                  update_mask):
    """Applies a PATCH to the provided Authorized Orgs Desc."""
    m = self.messages
    request_type = (
        m.AccesscontextmanagerAccessPoliciesAuthorizedOrgsDescsPatchRequest)
    request = request_type(
        authorizedOrgsDesc=authorized_orgs_desc,
        name=authorized_orgs_desc_ref.RelativeName(),
        updateMask=','.join(update_mask),
    )
    # PATCH is a long-running operation: issue it, then poll the returned
    # operation until it completes and return the final resource.
    operation = self.client.accessPolicies_authorizedOrgsDescs.Patch(request)
    poller = util.OperationPoller(
        self.client.accessPolicies_authorizedOrgsDescs, self.client.operations,
        authorized_orgs_desc_ref)
    operation_ref = core_resources.REGISTRY.Parse(
        operation.name, collection='accesscontextmanager.operations')
    return waiter.WaitFor(
        poller, operation_ref,
        'Waiting for PATCH operation [{}]'.format(operation_ref.Name()))

  def Patch(self, authorized_orgs_desc_ref, orgs=None):
    """Patch an authorized orgs desc.

    Args:
      authorized_orgs_desc_ref: AuthorizedOrgsDesc, reference to the
        authorizedOrgsDesc to patch
      orgs: list of str, the names of orgs ( 'organizations/...') or None if
        not updating.

    Returns:
      AuthorizedOrgsDesc, the updated Authorized Orgs Desc.
    """
    m = self.messages
    authorized_orgs_desc = m.AuthorizedOrgsDesc()
    update_mask = []
    _SetIfNotNone('orgs', orgs, authorized_orgs_desc, update_mask)
    # No update mask implies no fields were actually edited, so this is a
    # no-op; skip the API round trip and return the empty message.
    if not update_mask:
      log.warning(
          'The update specified results in an identical resource. Skipping request.'
      )
      return authorized_orgs_desc
    return self._ApplyPatch(authorized_orgs_desc_ref, authorized_orgs_desc,
                            update_mask)

View File

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API library for access context manager cloud-bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.accesscontextmanager import util
class Client(object):
  """Client for Access Context Manager Access cloud-bindings service."""

  def __init__(self, client=None, messages=None, version=None):
    # Use injected client/messages (tests) or build the default ACM client
    # and take its generated message module.
    self.client = client or util.GetClient(version=version)
    self.messages = messages or self.client.MESSAGES_MODULE

View File

@@ -0,0 +1,112 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API library for access context manager levels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.accesscontextmanager import util
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.core import resources
class Client(object):
  """High-level API client for access context manager access levels."""

  def __init__(self, client=None, messages=None, version=None):
    # Use injected client/messages (tests) or build the default ACM client.
    self.client = client or util.GetClient(version=version)
    self.messages = messages or self.client.MESSAGES_MODULE

  def List(self, policy_ref, limit=None):
    """Yields AccessLevels under the given access policy."""
    req = (
        self.messages.AccesscontextmanagerAccessPoliciesAccessLevelsListRequest(
            parent=policy_ref.RelativeName()
        )
    )
    return list_pager.YieldFromList(
        self.client.accessPolicies_accessLevels,
        req,
        limit=limit,
        batch_size_attribute='pageSize',
        batch_size=None,
        field='accessLevels',
    )

  def Patch(
      self,
      level_ref,
      description=None,
      title=None,
      basic_level_combine_function=None,
      basic_level_conditions=None,
      custom_level_expr=None,
  ):
    """Patch an access level.

    Args:
      level_ref: resources.Resource, reference to the level to patch
      description: str, description of the level or None if not updating
      title: str, title of the level or None if not updating
      basic_level_combine_function: ZoneTypeValueValuesEnum, combine function
        enum value of the level or None if not updating
      basic_level_conditions: list of Condition, the conditions for a basic
        level or None if not updating
      custom_level_expr: the expression of the Custom level, or none if not
        updating.

    Returns:
      AccessLevel, the updated access level
    """
    level = self.messages.AccessLevel()
    update_mask = []
    if description is not None:
      update_mask.append('description')
      level.description = description
    if title is not None:
      update_mask.append('title')
      level.title = title
    if basic_level_combine_function is not None:
      update_mask.append('basic.combiningFunction')
      # Lazily create the basic sub-message so either basic.* field can be
      # the first one set.
      level.basic = level.basic or self.messages.BasicLevel()
      level.basic.combiningFunction = basic_level_combine_function
    if basic_level_conditions is not None:
      update_mask.append('basic.conditions')
      level.basic = level.basic or self.messages.BasicLevel()
      level.basic.conditions = basic_level_conditions
    if custom_level_expr is not None:
      update_mask.append('custom')
      level.custom = level.custom or self.messages.CustomLevel()
      level.custom.expr = custom_level_expr
    update_mask.sort()  # For ease-of-testing
    m = self.messages
    request_type = m.AccesscontextmanagerAccessPoliciesAccessLevelsPatchRequest
    request = request_type(
        accessLevel=level,
        name=level_ref.RelativeName(),
        updateMask=','.join(update_mask),
    )
    # PATCH is a long-running operation: issue it, then poll until done.
    operation = self.client.accessPolicies_accessLevels.Patch(request)
    poller = util.OperationPoller(self.client.accessPolicies_accessLevels,
                                  self.client.operations, level_ref)
    operation_ref = resources.REGISTRY.Parse(
        operation.name, collection='accesscontextmanager.operations')
    return waiter.WaitFor(
        poller, operation_ref,
        'Waiting for PATCH operation [{}]'.format(operation_ref.Name()))

View File

@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API library for access context manager policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.accesscontextmanager import util
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.core import resources
class Client(object):
  """Client for Access Context Manager Access Policies service."""

  def __init__(self, client=None, messages=None, version=None):
    # Use injected client/messages (tests) or build the default ACM client.
    self.client = client or util.GetClient(version=version)
    self.messages = messages or self.client.MESSAGES_MODULE

  def List(self, organization_ref, limit=None):
    """Yields AccessPolicies belonging to the given organization."""
    req = self.messages.AccesscontextmanagerAccessPoliciesListRequest(
        parent=organization_ref.RelativeName())
    return list_pager.YieldFromList(
        self.client.accessPolicies, req,
        limit=limit,
        batch_size_attribute='pageSize',
        batch_size=None,
        field='accessPolicies')

  def Patch(self, policy_ref, title=None):
    """Patch an access policy.

    Args:
      policy_ref: resources.Resource, reference to the policy to patch
      title: str, title of the policy or None if not updating

    Returns:
      AccessPolicy, the updated access policy
    """
    policy = self.messages.AccessPolicy()
    update_mask = []
    if title is not None:
      update_mask.append('title')
      policy.title = title
    update_mask.sort()  # For ease-of-testing
    m = self.messages
    request_type = m.AccesscontextmanagerAccessPoliciesPatchRequest
    request = request_type(
        accessPolicy=policy,
        name=policy_ref.RelativeName(),
        updateMask=','.join(update_mask),
    )
    # PATCH is a long-running operation: issue it, then poll until done.
    operation = self.client.accessPolicies.Patch(request)
    # Fixed: a waiter.CloudOperationPoller was previously constructed here and
    # immediately overwritten by the ACM-specific poller below (dead
    # assignment); only the util.OperationPoller was ever used.
    poller = util.OperationPoller(
        self.client.accessPolicies, self.client.operations, policy_ref)
    operation_ref = resources.REGISTRY.Parse(
        operation.name, collection='accesscontextmanager.operations')
    return waiter.WaitFor(
        poller, operation_ref,
        'Waiting for PATCH operation [{}]'.format(operation_ref.Name()))

View File

@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API library for Supported Permissions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.accesscontextmanager import util
class Client(object):
  """High-level API client for Supported Permissions."""

  def __init__(self, client=None, messages=None, version=None):
    # Use injected client/messages (tests) or build the default ACM client.
    self.client = client or util.GetClient(version=version)
    self.messages = messages or self.client.MESSAGES_MODULE

  def List(self, page_size=100, limit=None):
    """Make API call to list VPC Service Controls supported permissions.

    Args:
      page_size: The page size to list.
      limit: The maximum number of permissions to display.

    Returns:
      The list of VPC Service Controls supported permissions.
    """
    request = self.messages.AccesscontextmanagerPermissionsListRequest()
    return list_pager.YieldFromList(
        self.client.permissions,
        request,
        field='supportedPermissions',
        batch_size=page_size,
        batch_size_attribute='pageSize',
        limit=limit,
    )

View File

@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API library for Supported Services."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.accesscontextmanager import util
class Client(object):
  """High-level API client for Supported Services."""

  def __init__(self, client=None, messages=None, version='v1'):
    """Initializes the client, constructing defaults when not injected."""
    if not client:
      client = util.GetClient(version=version)
    self.client = client
    if not messages:
      messages = self.client.MESSAGES_MODULE
    self.messages = messages

  def Get(self, supported_services_ref):
    """Fetches a single supported service by its resource reference."""
    get_request = self.messages.AccesscontextmanagerServicesGetRequest(
        name=supported_services_ref.RelativeName())
    return self.client.services.Get(get_request)

  def List(self, page_size=200, limit=None):
    """Lists the VPC Service Controls supported services.

    Args:
      page_size: The page size to list.
      limit: The maximum number of services to display.

    Returns:
      The list of VPC Service Controls supported services.
    """
    list_request = self.messages.AccesscontextmanagerServicesListRequest()
    return list_pager.YieldFromList(
        self.client.services,
        list_request,
        field='supportedServices',
        batch_size=page_size,
        batch_size_attribute='pageSize',
        limit=limit,
    )

View File

@@ -0,0 +1,51 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API utilities for access context manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import waiter
# Service name of the Access Context Manager API; shared by all helpers below.
_API_NAME = 'accesscontextmanager'


def _GetDefaultVersion():
  """Resolves the default accesscontextmanager API version via the registry."""
  return apis.ResolveVersion(_API_NAME)
def GetMessages(version=None):
  """Returns the accesscontextmanager messages module.

  Args:
    version: str or None; when None (or empty), the default API version is
      resolved via the registry.

  Returns:
    The generated messages module for the resolved version.
  """
  resolved_version = version
  if not resolved_version:
    resolved_version = _GetDefaultVersion()
  return apis.GetMessagesModule(_API_NAME, resolved_version)
def GetClient(version=None):
  """Returns an accesscontextmanager API client instance.

  Args:
    version: str or None; when None (or empty), the default API version is
      resolved via the registry.

  Returns:
    The API client instance for the resolved version.
  """
  resolved_version = version
  if not resolved_version:
    resolved_version = _GetDefaultVersion()
  return apis.GetClientInstance(_API_NAME, resolved_version)
class OperationPoller(waiter.CloudOperationPoller):
  """Poller whose final result is the target resource, re-fetched by name.

  The completed operation payload is ignored; instead the resource identified
  by `resource_ref` is fetched directly from the result service.
  """

  def __init__(self, result_service, operation_service, resource_ref):
    super(OperationPoller, self).__init__(result_service, operation_service)
    self.resource_ref = resource_ref

  def GetResult(self, operation):
    """Overrides to fetch the resource by name instead of reading the op."""
    del operation  # Unused: the resource is re-fetched by its relative name.
    get_request_cls = self.result_service.GetRequestType('Get')
    get_request = get_request_cls(name=self.resource_ref.RelativeName())
    return self.result_service.Get(get_request)

View File

@@ -0,0 +1,353 @@
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API library for VPC Service Controls Service Perimeters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.accesscontextmanager import util
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.core import log
from googlecloudsdk.core import resources as core_resources
import six
def _SetIfNotNone(field_name, field_value, obj, update_mask):
"""Sets specified field to the provided value and adds it to update mask.
Args:
field_name: The name of the field to set the value of.
field_value: The value to set the field to. If it is None, the field will
NOT be set.
obj: The object on which the value is to be set.
update_mask: The update mask to add this field to.
Returns:
True if the field was set and False otherwise.
"""
if field_value is not None:
setattr(obj, field_name, field_value)
update_mask.append(field_name)
return True
return False
def _CreateServicePerimeterConfig(messages,
mask_prefix,
resources,
restricted_services,
levels,
vpc_allowed_services,
enable_vpc_accessible_services,
vpc_yaml_flag_used,
vpc_accessible_services_config=None,
ingress_policies=None,
egress_policies=None):
"""Returns a ServicePerimeterConfig and its update mask."""
config = messages.ServicePerimeterConfig()
mask = []
_SetIfNotNone('resources', resources, config, mask)
_SetIfNotNone('restrictedServices', restricted_services, config, mask)
_SetIfNotNone('ingressPolicies', ingress_policies, config, mask)
_SetIfNotNone('egressPolicies', egress_policies, config, mask)
if levels is not None:
mask.append('accessLevels')
level_names = []
for l in levels:
# If the caller supplies the levels as strings already, use them directly.
if isinstance(l, six.string_types):
level_names.append(l)
else:
# Otherwise, the caller needs to supply resource objects for Access
# Levels, and we extract the level name from those.
level_names.append(l.RelativeName())
config.accessLevels = level_names
if vpc_yaml_flag_used:
mask.append('vpcAccessibleServices')
config.vpcAccessibleServices = vpc_accessible_services_config
elif (
enable_vpc_accessible_services is not None
or vpc_allowed_services is not None
):
service_filter = messages.VpcAccessibleServices()
service_filter_mask = []
_SetIfNotNone('allowedServices', vpc_allowed_services, service_filter,
service_filter_mask)
_SetIfNotNone('enableRestriction', enable_vpc_accessible_services,
service_filter, service_filter_mask)
config.vpcAccessibleServices = service_filter
mask.extend(['vpcAccessibleServices.' + m for m in service_filter_mask])
if not mask:
return None, []
return config, ['{}.{}'.format(mask_prefix, item) for item in mask]
class Client(object):
  """High-level API client for VPC Service Controls Service Perimeters."""

  def __init__(self, client=None, messages=None, version='v1'):
    # Fall back to the shared accesscontextmanager client/messages when the
    # caller does not inject its own (e.g. for testing).
    self.client = client or util.GetClient(version=version)
    self.messages = messages or self.client.MESSAGES_MODULE

  def Get(self, zone_ref):
    """Fetches a single Service Perimeter by resource reference."""
    return self.client.accessPolicies_servicePerimeters.Get(
        self.messages
        .AccesscontextmanagerAccessPoliciesServicePerimetersGetRequest(
            name=zone_ref.RelativeName()))

  def List(self, policy_ref, limit=None):
    """Yields the Service Perimeters under the given access policy."""
    req = self.messages.AccesscontextmanagerAccessPoliciesServicePerimetersListRequest(
        parent=policy_ref.RelativeName())
    return list_pager.YieldFromList(
        self.client.accessPolicies_servicePerimeters,
        req,
        limit=limit,
        batch_size_attribute='pageSize',
        batch_size=None,  # None lets the service pick its default page size.
        field='servicePerimeters')

  def Commit(self, policy_ref, etag):
    """Commits the Service Perimeters of the given policy.

    Args:
      policy_ref: resources.Resource, reference to the parent access policy.
      etag: str, etag guarding the commit against concurrent modifications.

    Returns:
      The result of waiting on the long-running COMMIT operation.
    """
    commit_req = self.messages.CommitServicePerimetersRequest(etag=etag)
    req = self.messages.AccesscontextmanagerAccessPoliciesServicePerimetersCommitRequest(
        parent=policy_ref.RelativeName(),
        commitServicePerimetersRequest=commit_req)
    operation = self.client.accessPolicies_servicePerimeters.Commit(req)
    # COMMIT yields no resource to return, so poll only for completion.
    poller = waiter.CloudOperationPollerNoResources(self.client.operations)
    operation_ref = core_resources.REGISTRY.Parse(
        operation.name, collection='accesscontextmanager.operations')
    return waiter.WaitFor(
        poller, operation_ref,
        'Waiting for COMMIT operation [{}]'.format(operation_ref.Name()))

  def _ApplyPatch(self, perimeter_ref, perimeter, update_mask):
    """Applies a PATCH to the provided Service Perimeter."""
    m = self.messages
    update_mask = sorted(update_mask)  # For ease-of-testing
    request_type = (
        m.AccesscontextmanagerAccessPoliciesServicePerimetersPatchRequest)
    request = request_type(
        servicePerimeter=perimeter,
        name=perimeter_ref.RelativeName(),
        updateMask=','.join(update_mask),
    )
    operation = self.client.accessPolicies_servicePerimeters.Patch(request)
    # The poller re-fetches the perimeter by name once the operation is done.
    poller = util.OperationPoller(self.client.accessPolicies_servicePerimeters,
                                  self.client.operations, perimeter_ref)
    operation_ref = core_resources.REGISTRY.Parse(
        operation.name, collection='accesscontextmanager.operations')
    return waiter.WaitFor(
        poller, operation_ref,
        'Waiting for PATCH operation [{}]'.format(operation_ref.Name()))

  def Patch(
      self,
      perimeter_ref,
      description=None,
      title=None,
      perimeter_type=None,
      resources=None,
      restricted_services=None,
      levels=None,
      vpc_allowed_services=None,
      enable_vpc_accessible_services=None,
      vpc_yaml_flag_used=False,
      vpc_accessible_services_config=None,
      ingress_policies=None,
      egress_policies=None,
      etag=None,
  ):
    """Patch a service perimeter.

    Args:
      perimeter_ref: resources.Resource, reference to the perimeter to patch
      description: str, description of the zone or None if not updating
      title: str, title of the zone or None if not updating
      perimeter_type: PerimeterTypeValueValuesEnum type enum value for the level
        or None if not updating
      resources: list of str, the names of resources (for now, just
        'projects/...') in the zone or None if not updating.
      restricted_services: list of str, the names of services
        ('example.googleapis.com') that *are* restricted by the access zone or
        None if not updating.
      levels: list of Resource, the access levels (in the same policy) that must
        be satisfied for calls into this zone or None if not updating.
      vpc_allowed_services: list of str, the names of services
        ('example.googleapis.com') that *are* allowed to be made within the
        access zone, or None if not updating.
      enable_vpc_accessible_services: bool, whether to restrict the set of APIs
        callable within the access zone, or None if not updating.
      vpc_yaml_flag_used: bool, whether the vpc yaml flag was used.
      vpc_accessible_services_config: VpcAccessibleServices, or None if not
        updating.
      ingress_policies: list of IngressPolicy, or None if not updating.
      egress_policies: list of EgressPolicy, or None if not updating.
      etag: str, the optional etag for the version of the Perimeter that
        this operation is to be performed on.

    Returns:
      ServicePerimeter, the updated Service Perimeter.
    """
    m = self.messages
    perimeter = m.ServicePerimeter()
    update_mask = []
    _SetIfNotNone('title', title, perimeter, update_mask)
    _SetIfNotNone('description', description, perimeter, update_mask)
    _SetIfNotNone('perimeterType', perimeter_type, perimeter, update_mask)
    _SetIfNotNone('etag', etag, perimeter, update_mask)
    # 'status' holds the enforced configuration (PatchDryRunConfig targets
    # 'spec' instead).
    config, config_mask_additions = _CreateServicePerimeterConfig(
        messages=m,
        mask_prefix='status',
        resources=resources,
        restricted_services=restricted_services,
        levels=levels,
        vpc_allowed_services=vpc_allowed_services,
        enable_vpc_accessible_services=enable_vpc_accessible_services,
        vpc_yaml_flag_used=vpc_yaml_flag_used,
        vpc_accessible_services_config=vpc_accessible_services_config,
        ingress_policies=ingress_policies,
        egress_policies=egress_policies)
    perimeter.status = config
    update_mask.extend(config_mask_additions)
    # No update mask implies no fields were actually edited, so this is a no-op.
    if not update_mask:
      log.warning(
          'The update specified results in an identical resource. Skipping request.'
      )
      return perimeter
    return self._ApplyPatch(perimeter_ref, perimeter, update_mask)

  def PatchDryRunConfig(
      self,
      perimeter_ref,
      description=None,
      title=None,
      perimeter_type=None,
      resources=None,
      restricted_services=None,
      levels=None,
      vpc_allowed_services=None,
      enable_vpc_accessible_services=None,
      vpc_yaml_flag_used=False,
      vpc_accessible_services_config=None,
      ingress_policies=None,
      egress_policies=None,
      etag=None,
  ):
    """Patch the dry-run config (spec) for a Service Perimeter.

    Args:
      perimeter_ref: resources.Resource, reference to the perimeter to patch
      description: str, description of the zone or None if not updating
      title: str, title of the zone or None if not updating
      perimeter_type: PerimeterTypeValueValuesEnum type enum value for the level
        or None if not updating
      resources: list of str, the names of resources (for now, just
        'projects/...') in the zone or None if not updating.
      restricted_services: list of str, the names of services
        ('example.googleapis.com') that *are* restricted by the access zone or
        None if not updating.
      levels: list of Resource, the access levels (in the same policy) that must
        be satisfied for calls into this zone or None if not updating.
      vpc_allowed_services: list of str, the names of services
        ('example.googleapis.com') that *are* allowed to be made within the
        access zone, or None if not updating.
      enable_vpc_accessible_services: bool, whether to restrict the set of APIs
        callable within the access zone, or None if not updating.
      vpc_yaml_flag_used: bool, whether the vpc yaml flag was used.
      vpc_accessible_services_config: VpcAccessibleServices, or None if not
        updating.
      ingress_policies: list of IngressPolicy, or None if not updating.
      egress_policies: list of EgressPolicy, or None if not updating.
      etag: str, the optional etag for the version of the Perimeter that
        this operation is to be performed on.

    Returns:
      ServicePerimeter, the updated Service Perimeter.
    """
    m = self.messages
    perimeter = m.ServicePerimeter()
    update_mask = []
    if _SetIfNotNone('title', title, perimeter, update_mask):
      perimeter.name = perimeter_ref.RelativeName()  # Necessary for upsert.
      update_mask.append('name')
    _SetIfNotNone('description', description, perimeter, update_mask)
    _SetIfNotNone('perimeterType', perimeter_type, perimeter, update_mask)
    _SetIfNotNone('etag', etag, perimeter, update_mask)
    config, config_mask_additions = _CreateServicePerimeterConfig(
        messages=m,
        mask_prefix='spec',
        resources=resources,
        restricted_services=restricted_services,
        levels=levels,
        vpc_allowed_services=vpc_allowed_services,
        enable_vpc_accessible_services=enable_vpc_accessible_services,
        vpc_yaml_flag_used=vpc_yaml_flag_used,
        vpc_accessible_services_config=vpc_accessible_services_config,
        ingress_policies=ingress_policies,
        egress_policies=egress_policies)
    perimeter.spec = config
    update_mask.extend(config_mask_additions)
    # Dry-run patches always force the explicit-spec flag on.
    perimeter.useExplicitDryRunSpec = True
    update_mask.append('useExplicitDryRunSpec')
    return self._ApplyPatch(perimeter_ref, perimeter, update_mask)

  def EnforceDryRunConfig(self, perimeter_ref):
    """Promotes a Service Perimeter's dry-run config to enforcement config.

    Args:
      perimeter_ref: resources.Resource, reference to the perimeter to patch

    Returns:
      ServicePerimeter, the updated Service Perimeter.
    """
    original_perimeter = self.Get(perimeter_ref)
    m = self.messages
    perimeter = m.ServicePerimeter()
    update_mask = ['status', 'spec', 'useExplicitDryRunSpec']
    # Promote: the current dry-run config (spec) becomes the enforced config
    # (status), and the dry-run config is cleared.
    perimeter.status = original_perimeter.spec
    perimeter.spec = None
    perimeter.useExplicitDryRunSpec = False
    return self._ApplyPatch(perimeter_ref, perimeter, update_mask)

  def UnsetSpec(self, perimeter_ref, use_explicit_dry_run_spec):
    """Unsets the spec for a Service Perimeter.

    Args:
      perimeter_ref: resources.Resource, reference to the perimeter to patch.
      use_explicit_dry_run_spec: The value to use for the perimeter field of the
        same name.

    Returns:
      ServicePerimeter, the updated Service Perimeter.
    """
    perimeter = self.messages.ServicePerimeter()
    perimeter.useExplicitDryRunSpec = use_explicit_dry_run_spec
    perimeter.spec = None
    update_mask = ['spec', 'useExplicitDryRunSpec']
    return self._ApplyPatch(perimeter_ref, perimeter, update_mask)

View File

@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API utilities for `gcloud active-directory` commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
# Maps gcloud release tracks to the managedidentities API version each track
# should call. NOTE(review): no GA entry here -- presumably GA surfaces resolve
# their version elsewhere; confirm against callers.
API_VERSION_FOR_TRACK = {
    base.ReleaseTrack.BETA: 'v1beta1',
    base.ReleaseTrack.ALPHA: 'v1alpha1'
}
def Client(api_version):
  """Creates a client for the managedidentities API.

  Args:
    api_version: str, the API version to use (e.g. 'v1beta1').

  Returns:
    The managedidentities API client instance.
  """
  api_name = 'managedidentities'
  return apis.GetClientInstance(api_name, api_version)
def Messages(api_version):
  """Returns the messages module for the managedidentities API.

  Args:
    api_version: str, the API version to use (e.g. 'v1beta1').

  Returns:
    The generated messages module for that version.
  """
  api_name = 'managedidentities'
  return apis.GetMessagesModule(api_name, api_version)

View File

@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for user-visible error exceptions to raise in the CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import exceptions
class ActiveDirectoryError(exceptions.Error):
  """Generic managedidentities error.

  Base user-visible exception for `gcloud active-directory` CLI surfaces;
  inherits from the shared core exceptions.Error.
  """

View File

@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for querying custom jobs in AI Platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.export import util as export_util
from googlecloudsdk.core.console import console_io
class CustomJobsClient(object):
  """Client used for interacting with the CustomJob endpoint."""

  def __init__(self, version=constants.GA_VERSION):
    client = apis.GetClientInstance(constants.AI_PLATFORM_API_NAME,
                                    constants.AI_PLATFORM_API_VERSION[version])
    self._messages = client.MESSAGES_MODULE
    self._version = version
    self._service = client.projects_locations_customJobs
    self._message_prefix = constants.AI_PLATFORM_MESSAGE_PREFIX[version]

  def GetMessage(self, message_name):
    """Returns the API message class by name, or None when it does not exist."""
    return getattr(
        self._messages,
        '{prefix}{name}'.format(prefix=self._message_prefix,
                                name=message_name), None)

  def CustomJobMessage(self):
    """Returns the CustomJob resource message."""
    return self.GetMessage('CustomJob')

  def Create(self,
             parent,
             job_spec,
             display_name=None,
             kms_key_name=None,
             labels=None):
    """Constructs a request and sends it to the endpoint to create a custom job instance.

    Args:
      parent: str, The project resource path of the custom job to create.
      job_spec: The CustomJobSpec message instance for the job creation request.
      display_name: str, The display name of the custom job to create.
      kms_key_name: A customer-managed encryption key to use for the custom job.
      labels: LabelValues, map-like user-defined metadata to organize the custom
        jobs.

    Returns:
      A CustomJob message instance created.
    """
    custom_job = self.CustomJobMessage()(
        displayName=display_name, jobSpec=job_spec)
    if kms_key_name is not None:
      custom_job.encryptionSpec = self.GetMessage('EncryptionSpec')(
          kmsKeyName=kms_key_name)
    if labels:
      custom_job.labels = labels
    # The request field carrying the job payload is version-specific.
    if self._version == constants.BETA_VERSION:
      return self._service.Create(
          self._messages.AiplatformProjectsLocationsCustomJobsCreateRequest(
              parent=parent, googleCloudAiplatformV1beta1CustomJob=custom_job))
    else:
      return self._service.Create(
          self._messages.AiplatformProjectsLocationsCustomJobsCreateRequest(
              parent=parent, googleCloudAiplatformV1CustomJob=custom_job))

  def List(self, limit=None, region=None):
    """Yields the custom jobs under the given region, up to limit."""
    return list_pager.YieldFromList(
        self._service,
        self._messages.AiplatformProjectsLocationsCustomJobsListRequest(
            parent=region),
        field='customJobs',
        batch_size_attribute='pageSize',
        limit=limit)

  def Get(self, name):
    """Fetches a single custom job by resource name."""
    request = self._messages.AiplatformProjectsLocationsCustomJobsGetRequest(
        name=name)
    return self._service.Get(request)

  def Cancel(self, name):
    """Requests cancellation of the custom job with the given resource name."""
    request = self._messages.AiplatformProjectsLocationsCustomJobsCancelRequest(
        name=name)
    return self._service.Cancel(request)

  def CheckJobComplete(self, name):
    """Returns a function to decide if log fetcher should continue polling.

    Args:
      name: String id of job.

    Returns:
      A one-argument function that decides if the log fetcher should continue.
    """
    request = self._messages.AiplatformProjectsLocationsCustomJobsGetRequest(
        name=name)

    def ShouldContinue(periods_without_logs):
      # Keep fetching while logs were produced recently.
      if periods_without_logs <= 1:
        return True
      # Re-fetch the job on every check. The previous implementation captured
      # a single response outside the closure, so endTime never changed and a
      # finished job was never detected -- polling continued indefinitely.
      return self._service.Get(request).endTime is None

    return ShouldContinue

  def ImportResourceMessage(self, yaml_file, message_name):
    """Import a messages class instance typed by name from a YAML file."""
    data = console_io.ReadFromFileOrStdin(yaml_file, binary=False)
    message_type = self.GetMessage(message_name)
    return export_util.Import(message_type=message_type, stream=data)

View File

@@ -0,0 +1,222 @@
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with AI Platform deployment resource pools API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import flags
class DeploymentResourcePoolsClient(object):
  """High-level client for the AI Platform deployment resource pools surface."""

  def __init__(self, client=None, messages=None, version=None):
    # Fall back to the shared AI Platform API client/messages when the caller
    # does not inject its own (e.g. for testing).
    self.client = client or apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[version]
    )
    self.messages = messages or self.client.MESSAGES_MODULE

  def CreateBeta(
      self,
      location_ref,
      deployment_resource_pool_id,
      autoscaling_metric_specs=None,
      accelerator_dict=None,
      min_replica_count=None,
      max_replica_count=None,
      machine_type=None,
      tpu_topology=None,
      multihost_gpu_node_count=None,
      reservation_affinity=None,
      spot=False,
      required_replica_count=None,
  ):
    """Creates a new deployment resource pool using v1beta1 API.

    Args:
      location_ref: Resource, the parsed location to create a deployment
        resource pool.
      deployment_resource_pool_id: str, The ID to use for the
        DeploymentResourcePool, which will become the final component of the
        DeploymentResourcePool's resource name.
      autoscaling_metric_specs: dict or None, the metric specification that
        defines the target resource utilization for calculating the desired
        replica count.
      accelerator_dict: dict or None, the accelerator attached to the deployment
        resource pool from args.
      min_replica_count: int or None, The minimum number of machine replicas
        this deployment resource pool will be always deployed on. This value
        must be greater than or equal to 1.
      max_replica_count: int or None, The maximum number of replicas this
        deployment resource pool may be deployed on when the traffic against it
        increases.
      machine_type: str or None, Immutable. The type of the machine.
      tpu_topology: str or None, the topology of the TPU to serve the model.
      multihost_gpu_node_count: int or None, the number of nodes per replica for
        multihost GPU deployments.
      reservation_affinity: dict or None, the reservation affinity of the
        deployed model which specifies which reservations the deployed model can
        use.
      spot: bool, whether or not deploy the model on spot resources.
      required_replica_count: int or None, The required number of replicas this
        deployment resource pool will be considered successfully deployed. This
        value must be greater than or equal to 1 and less than or equal to
        min_replica_count.

    Returns:
      A long-running operation for Create.
    """
    # Assemble the machine spec from the individually-optional flags.
    machine_spec = self.messages.GoogleCloudAiplatformV1beta1MachineSpec()
    if machine_type is not None:
      machine_spec.machineType = machine_type
    if tpu_topology is not None:
      machine_spec.tpuTopology = tpu_topology
    if multihost_gpu_node_count is not None:
      machine_spec.multihostGpuNodeCount = multihost_gpu_node_count
    accelerator = flags.ParseAcceleratorFlag(
        accelerator_dict, constants.BETA_VERSION
    )
    if accelerator is not None:
      machine_spec.acceleratorType = accelerator.acceleratorType
      machine_spec.acceleratorCount = accelerator.acceleratorCount
    if reservation_affinity is not None:
      machine_spec.reservationAffinity = flags.ParseReservationAffinityFlag(
          reservation_affinity, constants.BETA_VERSION
      )
    dedicated = self.messages.GoogleCloudAiplatformV1beta1DedicatedResources(
        machineSpec=machine_spec, spot=spot
    )
    # None (and 0) fall back to the documented minimum of 1 replica.
    dedicated.minReplicaCount = min_replica_count or 1
    if max_replica_count is not None:
      dedicated.maxReplicaCount = max_replica_count
    if required_replica_count is not None:
      dedicated.requiredReplicaCount = required_replica_count
    if autoscaling_metric_specs is not None:
      autoscaling_metric_specs_list = []
      # sorted() gives a deterministic ordering of the emitted metric specs.
      for name, target in sorted(autoscaling_metric_specs.items()):
        autoscaling_metric_specs_list.append(
            self.messages.GoogleCloudAiplatformV1beta1AutoscalingMetricSpec(
                metricName=constants.OP_AUTOSCALING_METRIC_NAME_MAPPER[name],
                target=target
            )
        )
      dedicated.autoscalingMetricSpecs = autoscaling_metric_specs_list
    pool = self.messages.GoogleCloudAiplatformV1beta1DeploymentResourcePool(
        dedicatedResources=dedicated
    )
    pool_request = self.messages.GoogleCloudAiplatformV1beta1CreateDeploymentResourcePoolRequest(
        deploymentResourcePool=pool,
        deploymentResourcePoolId=deployment_resource_pool_id
    )
    req = self.messages.AiplatformProjectsLocationsDeploymentResourcePoolsCreateRequest(
        parent=location_ref.RelativeName(),
        googleCloudAiplatformV1beta1CreateDeploymentResourcePoolRequest=pool_request
    )
    operation = self.client.projects_locations_deploymentResourcePools.Create(
        req
    )
    return operation

  def DeleteBeta(self, deployment_resource_pool_ref):
    """Deletes a deployment resource pool using v1beta1 API.

    Args:
      deployment_resource_pool_ref: str, The deployment resource pool to delete.

    Returns:
      A GoogleProtobufEmpty response message for delete.
    """
    req = self.messages.AiplatformProjectsLocationsDeploymentResourcePoolsDeleteRequest(
        name=deployment_resource_pool_ref.RelativeName()
    )
    operation = self.client.projects_locations_deploymentResourcePools.Delete(
        req
    )
    return operation

  def DescribeBeta(self, deployment_resource_pool_ref):
    """Describes a deployment resource pool using v1beta1 API.

    Args:
      deployment_resource_pool_ref: str, Deployment resource pool to describe.

    Returns:
      GoogleCloudAiplatformV1beta1DeploymentResourcePool response message.
    """
    req = self.messages.AiplatformProjectsLocationsDeploymentResourcePoolsGetRequest(
        name=deployment_resource_pool_ref.RelativeName()
    )
    response = self.client.projects_locations_deploymentResourcePools.Get(req)
    return response

  def ListBeta(self, location_ref):
    """Lists deployment resource pools using v1beta1 API.

    Args:
      location_ref: Resource, the parsed location to list deployment resource
        pools.

    Returns:
      Nested attribute containing list of deployment resource pools.
    """
    req = self.messages.AiplatformProjectsLocationsDeploymentResourcePoolsListRequest(
        parent=location_ref.RelativeName()
    )
    return list_pager.YieldFromList(
        self.client.projects_locations_deploymentResourcePools,
        req,
        field='deploymentResourcePools',
        batch_size_attribute='pageSize'
    )

  def QueryDeployedModelsBeta(self, deployment_resource_pool_ref):
    """Queries deployed models sharing a specified deployment resource pool using v1beta1 API.

    Args:
      deployment_resource_pool_ref: str, Deployment resource pool to query.

    Returns:
      GoogleCloudAiplatformV1beta1QueryDeployedModelsResponse message.
    """
    req = self.messages.AiplatformProjectsLocationsDeploymentResourcePoolsQueryDeployedModelsRequest(
        deploymentResourcePool=deployment_resource_pool_ref.RelativeName()
    )
    response = self.client.projects_locations_deploymentResourcePools.QueryDeployedModels(
        req
    )
    return response

View File

@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*- #
# Copyright 2024 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library for streaming prediction results from the Vertex AI PredictionService API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from googlecloudsdk.api_lib.util import apis
class PredictionStreamer(object):
  """Streams prediction responses from Vertex AI using gRPC."""

  def __init__(self, version):
    self.client = apis.GetGapicClientInstance('aiplatform', version)

  def StreamDirectPredict(
      self,
      endpoint,
      inputs,
      parameters,
  ):
    """Streams prediction results from the Cloud Vertex AI PredictionService API.

    Args:
      endpoint: The name of the endpoint to stream predictions from.
      inputs: The inputs to send to the endpoint.
      parameters: The parameters to send to the endpoint.

    Yields:
      Streamed prediction results.
    """
    types = self.client.types
    request = types.StreamDirectPredictRequest(endpoint=endpoint)
    # Each input is round-tripped through JSON into the proto Tensor type.
    for tensor_input in inputs:
      request.inputs.append(types.Tensor.from_json(json.dumps(tensor_input)))
    request.parameters = types.Tensor.from_json(json.dumps(parameters))
    # The RPC takes a request iterator; a single request is sent upstream.
    responses = self.client.prediction.stream_direct_predict(iter([request]))
    for response in responses:
      yield response

  def StreamDirectRawPredict(
      self,
      endpoint,
      method_name,
      input,
  ):
    """Streams prediction results from the Cloud Vertex AI PredictionService API.

    Args:
      endpoint: The name of the endpoint to stream predictions from.
      method_name: The name of the method to call.
      input: The input bytes to send to the endpoint.

    Yields:
      Streamed prediction results.
    """
    request = self.client.types.StreamDirectRawPredictRequest(
        endpoint=endpoint, method_name=method_name, input=input)
    responses = self.client.prediction.stream_direct_raw_predict(
        iter([request]))
    for response in responses:
      yield response

View File

@@ -0,0 +1,201 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for querying hptuning-jobs in AI platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import messages as messages_util
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.core import yaml
def GetAlgorithmEnum(version=constants.BETA_VERSION):
  """Returns the StudySpec Algorithm enum type for the given API version."""
  messages = apis.GetMessagesModule(constants.AI_PLATFORM_API_NAME,
                                    constants.AI_PLATFORM_API_VERSION[version])
  # GA and beta expose the same enum under version-prefixed message names.
  study_spec = (
      messages.GoogleCloudAiplatformV1StudySpec
      if version == constants.GA_VERSION
      else messages.GoogleCloudAiplatformV1beta1StudySpec)
  return study_spec.AlgorithmValueValuesEnum
class HpTuningJobsClient(object):
  """Client used for interacting with HyperparameterTuningJob endpoint."""

  def __init__(self, version):
    client = apis.GetClientInstance(constants.AI_PLATFORM_API_NAME,
                                    constants.AI_PLATFORM_API_VERSION[version])
    self._messages = client.MESSAGES_MODULE
    self._service = client.projects_locations_hyperparameterTuningJobs
    self.version = version
    # Version-specific message-name prefix, e.g. 'GoogleCloudAiplatformV1'.
    self._message_prefix = constants.AI_PLATFORM_MESSAGE_PREFIX[version]

  def _GetMessage(self, message_name):
    """Returns the API message class by name, or None if it does not exist."""
    return getattr(
        self._messages,
        '{prefix}{name}'.format(prefix=self._message_prefix,
                                name=message_name), None)

  def HyperparameterTuningJobMessage(self):
    """Returns the HyperparameterTuningJob resource message."""
    return self._GetMessage('HyperparameterTuningJob')

  def AlgorithmEnum(self):
    """Returns enum message representing Algorithm."""
    return self._GetMessage('StudySpec').AlgorithmValueValuesEnum

  def Create(self,
             config_path,
             display_name,
             parent=None,
             max_trial_count=None,
             parallel_trial_count=None,
             algorithm=None,
             kms_key_name=None,
             network=None,
             service_account=None,
             enable_web_access=False,
             enable_dashboard_access=False,
             labels=None):
    """Creates a hyperparameter tuning job with given parameters.

    Args:
      config_path: str, the file path of the hyperparameter tuning job
        configuration.
      display_name: str, the display name of the created hyperparameter tuning
        job.
      parent: str, parent of the created hyperparameter tuning job. e.g.
        /projects/xxx/locations/xxx/
      max_trial_count: int, the desired total number of Trials. The default
        value is 1.
      parallel_trial_count: int, the desired number of Trials to run in
        parallel. The default value is 1.
      algorithm: AlgorithmValueValuesEnum, the search algorithm specified for
        the Study.
      kms_key_name: str, A customer-managed encryption key to use for the
        hyperparameter tuning job.
      network: str, user network to which the job should be peered with
        (overrides yaml file)
      service_account: str, A service account (email address string) to use for
        the job.
      enable_web_access: bool, Whether to enable the interactive shell for the
        job.
      enable_dashboard_access: bool, Whether to enable the dashboard defined
        for the job.
      labels: LabelsValues, map-like user-defined metadata to organize the
        hp-tuning jobs.

    Returns:
      Created hyperparameter tuning job.
    """
    job_spec = self.HyperparameterTuningJobMessage()
    if config_path:
      data = yaml.load_path(config_path)
      if data:
        job_spec = messages_util.DictToMessageWithErrorCheck(
            data, self.HyperparameterTuningJobMessage())

    # Command-line flags override the YAML config; both counts default to 1.
    if not job_spec.maxTrialCount and not max_trial_count:
      job_spec.maxTrialCount = 1
    elif max_trial_count:
      job_spec.maxTrialCount = max_trial_count
    if not job_spec.parallelTrialCount and not parallel_trial_count:
      job_spec.parallelTrialCount = 1
    elif parallel_trial_count:
      job_spec.parallelTrialCount = parallel_trial_count

    # NOTE(review): these assume trialJobSpec is populated (by the YAML
    # config or message defaults) whenever the flags are set — confirm that
    # upstream flag validation guarantees this.
    if network:
      job_spec.trialJobSpec.network = network
    if service_account:
      job_spec.trialJobSpec.serviceAccount = service_account
    if enable_web_access:
      job_spec.trialJobSpec.enableWebAccess = enable_web_access
    if enable_dashboard_access:
      job_spec.trialJobSpec.enableDashboardAccess = enable_dashboard_access
    if display_name:
      job_spec.displayName = display_name
    if algorithm and job_spec.studySpec:
      job_spec.studySpec.algorithm = algorithm
    if kms_key_name is not None:
      job_spec.encryptionSpec = self._GetMessage('EncryptionSpec')(
          kmsKeyName=kms_key_name)
    if labels:
      job_spec.labels = labels

    if self.version == constants.GA_VERSION:
      request = self._messages.AiplatformProjectsLocationsHyperparameterTuningJobsCreateRequest(
          parent=parent,
          googleCloudAiplatformV1HyperparameterTuningJob=job_spec)
    else:
      request = self._messages.AiplatformProjectsLocationsHyperparameterTuningJobsCreateRequest(
          parent=parent,
          googleCloudAiplatformV1beta1HyperparameterTuningJob=job_spec)
    return self._service.Create(request)

  def Get(self, name=None):
    """Fetches a hyperparameter tuning job by resource name."""
    request = self._messages.AiplatformProjectsLocationsHyperparameterTuningJobsGetRequest(
        name=name)
    return self._service.Get(request)

  def Cancel(self, name=None):
    """Cancels a running hyperparameter tuning job by resource name."""
    request = self._messages.AiplatformProjectsLocationsHyperparameterTuningJobsCancelRequest(
        name=name)
    return self._service.Cancel(request)

  def List(self, limit=None, region=None):
    """Yields hyperparameter tuning jobs under the given region."""
    return list_pager.YieldFromList(
        self._service,
        self._messages
        .AiplatformProjectsLocationsHyperparameterTuningJobsListRequest(
            parent=region),
        field='hyperparameterTuningJobs',
        batch_size_attribute='pageSize',
        limit=limit)

  def CheckJobComplete(self, name):
    """Returns a function to decide if log fetcher should continue polling.

    Args:
      name: String id of job.

    Returns:
      A one-argument function decides if log fetcher should continue.
    """
    request = self._messages.AiplatformProjectsLocationsHyperparameterTuningJobsGetRequest(
        name=name)

    def ShouldContinue(periods_without_logs):
      if periods_without_logs <= 1:
        return True
      # Re-fetch the job on every poll. The previous code captured a single
      # response outside this closure, so a job that was still running when
      # polling began was never observed as finished and polling never
      # stopped.
      return self._service.Get(request).endTime is None

    return ShouldContinue

View File

@@ -0,0 +1,518 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with AI Platform index endpoints API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import errors
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
def _ParseIndex(index_id, location_id):
  """Parses an index ID into an index resource object."""
  params = {
      'locationsId': location_id,
      'projectsId': properties.VALUES.core.project.GetOrFail,
  }
  return resources.REGISTRY.Parse(
      index_id,
      params=params,
      collection='aiplatform.projects.locations.indexes')
class IndexEndpointsClient(object):
  """High-level client for the AI Platform index endpoints surface."""

  def __init__(self, client=None, messages=None, version=constants.GA_VERSION):
    # Fall back to a freshly constructed API client / messages module when the
    # caller does not inject one (the common case outside of tests).
    self.client = client or apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[version])
    self.messages = messages or self.client.MESSAGES_MODULE
    # All RPCs below go through the indexEndpoints service.
    self._service = self.client.projects_locations_indexEndpoints
def CreateBeta(self, location_ref, args):
  """Create a new index endpoint.

  Args:
    location_ref: Resource reference of the parent location.
    args: Parsed arguments from the command line.

  Returns:
    The operation returned by the Create call.
  """
  labels = labels_util.ParseCreateArgs(
      args,
      self.messages.GoogleCloudAiplatformV1beta1IndexEndpoint.LabelsValue)
  encryption_spec = None
  if args.encryption_kms_key_name is not None:
    encryption_spec = (
        self.messages.GoogleCloudAiplatformV1beta1EncryptionSpec(
            kmsKeyName=args.encryption_kms_key_name))
  private_service_connect_config = None
  if args.enable_private_service_connect:
    private_service_connect_config = (
        self.messages.GoogleCloudAiplatformV1beta1PrivateServiceConnectConfig(
            enablePrivateServiceConnect=args.enable_private_service_connect,
            projectAllowlist=(args.project_allowlist
                              if args.project_allowlist else [])))
  # Exactly one endpoint configuration is chosen:
  #   1. the user explicitly set --public-endpoint-enabled (True or False),
  #   2. a VPC network was supplied (private, peered endpoint),
  #   3. neither: default to a public endpoint.
  # NOTE(review): the guard of the first branch was missing in the original
  # (an `elif` with no opening `if`); reconstructed as an explicit-flag
  # check so an explicit --no-public-endpoint-enabled is honored.
  if args.public_endpoint_enabled is not None:
    req = self.messages.AiplatformProjectsLocationsIndexEndpointsCreateRequest(
        parent=location_ref.RelativeName(),
        googleCloudAiplatformV1beta1IndexEndpoint=self.messages.GoogleCloudAiplatformV1beta1IndexEndpoint(
            displayName=args.display_name,
            description=args.description,
            publicEndpointEnabled=args.public_endpoint_enabled,
            labels=labels,
            encryptionSpec=encryption_spec,
            privateServiceConnectConfig=private_service_connect_config,
        ),
    )
  elif args.network is not None:
    req = self.messages.AiplatformProjectsLocationsIndexEndpointsCreateRequest(
        parent=location_ref.RelativeName(),
        googleCloudAiplatformV1beta1IndexEndpoint=self.messages.GoogleCloudAiplatformV1beta1IndexEndpoint(
            displayName=args.display_name,
            description=args.description,
            network=args.network,
            labels=labels,
            encryptionSpec=encryption_spec,
        ),
    )
  else:
    req = self.messages.AiplatformProjectsLocationsIndexEndpointsCreateRequest(
        parent=location_ref.RelativeName(),
        googleCloudAiplatformV1beta1IndexEndpoint=self.messages.GoogleCloudAiplatformV1beta1IndexEndpoint(
            displayName=args.display_name,
            description=args.description,
            publicEndpointEnabled=True,
            labels=labels,
            encryptionSpec=encryption_spec,
            privateServiceConnectConfig=private_service_connect_config,
        ),
    )
  return self._service.Create(req)
def Create(self, location_ref, args):
  """Create a new v1 index endpoint.

  Args:
    location_ref: Resource reference of the parent location.
    args: Parsed arguments from the command line.

  Returns:
    The operation returned by the Create call.
  """
  labels = labels_util.ParseCreateArgs(
      args, self.messages.GoogleCloudAiplatformV1IndexEndpoint.LabelsValue)
  encryption_spec = None
  if args.encryption_kms_key_name is not None:
    encryption_spec = (
        self.messages.GoogleCloudAiplatformV1EncryptionSpec(
            kmsKeyName=args.encryption_kms_key_name))
  private_service_connect_config = None
  if args.enable_private_service_connect:
    private_service_connect_config = (
        self.messages.GoogleCloudAiplatformV1PrivateServiceConnectConfig(
            enablePrivateServiceConnect=args.enable_private_service_connect,
            projectAllowlist=(args.project_allowlist
                              if args.project_allowlist else []),
        )
    )
  # Exactly one endpoint configuration is chosen:
  #   1. the user explicitly set --public-endpoint-enabled (True or False),
  #   2. a VPC network was supplied (private, peered endpoint),
  #   3. neither: default to a public endpoint.
  # NOTE(review): the guard of the first branch was missing in the original
  # (an `elif` with no opening `if`); reconstructed as an explicit-flag
  # check so an explicit --no-public-endpoint-enabled is honored.
  if args.public_endpoint_enabled is not None:
    req = self.messages.AiplatformProjectsLocationsIndexEndpointsCreateRequest(
        parent=location_ref.RelativeName(),
        googleCloudAiplatformV1IndexEndpoint=self.messages.GoogleCloudAiplatformV1IndexEndpoint(
            displayName=args.display_name,
            description=args.description,
            publicEndpointEnabled=args.public_endpoint_enabled,
            labels=labels,
            encryptionSpec=encryption_spec,
            privateServiceConnectConfig=private_service_connect_config,
        ),
    )
  elif args.network is not None:
    req = self.messages.AiplatformProjectsLocationsIndexEndpointsCreateRequest(
        parent=location_ref.RelativeName(),
        googleCloudAiplatformV1IndexEndpoint=self.messages.GoogleCloudAiplatformV1IndexEndpoint(
            displayName=args.display_name,
            description=args.description,
            network=args.network,
            labels=labels,
            encryptionSpec=encryption_spec,
        ),
    )
  else:
    req = self.messages.AiplatformProjectsLocationsIndexEndpointsCreateRequest(
        parent=location_ref.RelativeName(),
        googleCloudAiplatformV1IndexEndpoint=self.messages.GoogleCloudAiplatformV1IndexEndpoint(
            displayName=args.display_name,
            description=args.description,
            publicEndpointEnabled=True,
            labels=labels,
            encryptionSpec=encryption_spec,
            privateServiceConnectConfig=private_service_connect_config,
        ),
    )
  return self._service.Create(req)
def PatchBeta(self, index_endpoint_ref, args):
  """Updates display name, description and/or labels of an index endpoint."""
  index_endpoint = self.messages.GoogleCloudAiplatformV1beta1IndexEndpoint()
  update_mask = []
  # Only fields the user actually supplied are patched.
  for flag_value, field, mask_entry in (
      (args.display_name, 'displayName', 'display_name'),
      (args.description, 'description', 'description')):
    if flag_value is not None:
      setattr(index_endpoint, field, flag_value)
      update_mask.append(mask_entry)

  def GetLabels():
    # Fetched lazily: only when the label diff needs the current labels.
    return self.Get(index_endpoint_ref).labels

  labels_update = labels_util.ProcessUpdateArgsLazy(
      args,
      self.messages.GoogleCloudAiplatformV1beta1IndexEndpoint.LabelsValue,
      GetLabels)
  if labels_update.needs_update:
    index_endpoint.labels = labels_update.labels
    update_mask.append('labels')
  if not update_mask:
    raise errors.NoFieldsSpecifiedError('No updates requested.')
  return self._service.Patch(
      self.messages.AiplatformProjectsLocationsIndexEndpointsPatchRequest(
          name=index_endpoint_ref.RelativeName(),
          googleCloudAiplatformV1beta1IndexEndpoint=index_endpoint,
          updateMask=','.join(update_mask)))
def Patch(self, index_endpoint_ref, args):
  """Updates display name, description and/or labels of a v1 index endpoint."""
  index_endpoint = self.messages.GoogleCloudAiplatformV1IndexEndpoint()
  update_mask = []
  # Only fields the user actually supplied are patched.
  for flag_value, field, mask_entry in (
      (args.display_name, 'displayName', 'display_name'),
      (args.description, 'description', 'description')):
    if flag_value is not None:
      setattr(index_endpoint, field, flag_value)
      update_mask.append(mask_entry)

  def GetLabels():
    # Fetched lazily: only when the label diff needs the current labels.
    return self.Get(index_endpoint_ref).labels

  labels_update = labels_util.ProcessUpdateArgsLazy(
      args, self.messages.GoogleCloudAiplatformV1IndexEndpoint.LabelsValue,
      GetLabels)
  if labels_update.needs_update:
    index_endpoint.labels = labels_update.labels
    update_mask.append('labels')
  if not update_mask:
    raise errors.NoFieldsSpecifiedError('No updates requested.')
  return self._service.Patch(
      self.messages.AiplatformProjectsLocationsIndexEndpointsPatchRequest(
          name=index_endpoint_ref.RelativeName(),
          googleCloudAiplatformV1IndexEndpoint=index_endpoint,
          updateMask=','.join(update_mask)))
def DeployIndexBeta(self, index_endpoint_ref, args):
  """Deploy an index to an index endpoint.

  Args:
    index_endpoint_ref: Resource reference of the target index endpoint.
    args: Parsed arguments from the command line.

  Returns:
    The long-running operation returned by the DeployIndex call.
  """
  index_ref = _ParseIndex(args.index, args.region)
  deployed_index = self.messages.GoogleCloudAiplatformV1beta1DeployedIndex(
      displayName=args.display_name,
      id=args.deployed_index_id,
      index=index_ref.RelativeName(),
  )
  if args.reserved_ip_ranges is not None:
    deployed_index.reservedIpRanges.extend(args.reserved_ip_ranges)
  if args.deployment_group is not None:
    deployed_index.deploymentGroup = args.deployment_group
  if args.deployment_tier:
    deployed_index.deploymentTier = self.messages.GoogleCloudAiplatformV1beta1DeployedIndex.DeploymentTierValueValuesEnum(
        args.deployment_tier.upper())
  if args.enable_access_logging is not None:
    deployed_index.enableAccessLogging = args.enable_access_logging
  # JWT auth config requires both audiences and allowed issuers.
  if args.audiences is not None and args.allowed_issuers is not None:
    auth_provider = self.messages.GoogleCloudAiplatformV1beta1DeployedIndexAuthConfigAuthProvider()
    auth_provider.audiences.extend(args.audiences)
    auth_provider.allowedIssuers.extend(args.allowed_issuers)
    deployed_index.deployedIndexAuthConfig = (
        self.messages.GoogleCloudAiplatformV1beta1DeployedIndexAuthConfig(
            authProvider=auth_provider))
  # Reuse the shared resource builders (the same logic
  # MutateDeployedIndexBeta uses) instead of duplicating them inline.
  if args.machine_type is not None:
    deployed_index.dedicatedResources = self._GetDedicatedResourcesBeta(args)
  else:
    deployed_index.automaticResources = self._GetAutomaticResourcesBeta(args)
  deploy_index_req = self.messages.GoogleCloudAiplatformV1beta1DeployIndexRequest(
      deployedIndex=deployed_index)
  request = self.messages.AiplatformProjectsLocationsIndexEndpointsDeployIndexRequest(
      indexEndpoint=index_endpoint_ref.RelativeName(),
      googleCloudAiplatformV1beta1DeployIndexRequest=deploy_index_req)
  return self._service.DeployIndex(request)
def DeployIndex(self, index_endpoint_ref, args):
  """Deploy a v1 index to an index endpoint.

  Args:
    index_endpoint_ref: Resource reference of the target index endpoint.
    args: Parsed arguments from the command line.

  Returns:
    The long-running operation returned by the DeployIndex call.
  """
  index_ref = _ParseIndex(args.index, args.region)
  deployed_index = self.messages.GoogleCloudAiplatformV1DeployedIndex(
      displayName=args.display_name,
      id=args.deployed_index_id,
      index=index_ref.RelativeName(),
      enableAccessLogging=args.enable_access_logging
  )
  if args.reserved_ip_ranges is not None:
    deployed_index.reservedIpRanges.extend(args.reserved_ip_ranges)
  if args.deployment_group is not None:
    deployed_index.deploymentGroup = args.deployment_group
  if args.deployment_tier:
    deployed_index.deploymentTier = self.messages.GoogleCloudAiplatformV1DeployedIndex.DeploymentTierValueValuesEnum(
        args.deployment_tier.upper())
  # JWT Authentication config: requires both audiences and allowed issuers.
  if args.audiences is not None and args.allowed_issuers is not None:
    auth_provider = self.messages.GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider()
    auth_provider.audiences.extend(args.audiences)
    auth_provider.allowedIssuers.extend(args.allowed_issuers)
    deployed_index.deployedIndexAuthConfig = (
        self.messages.GoogleCloudAiplatformV1DeployedIndexAuthConfig(
            authProvider=auth_provider))
  # PSC automation configs.
  if args.psc_automation_configs is not None:
    deployed_index.pscAutomationConfigs = []
    for psc_automation_config in args.psc_automation_configs:
      deployed_index.pscAutomationConfigs.append(
          self.messages.GoogleCloudAiplatformV1PSCAutomationConfig(
              projectId=psc_automation_config['project-id'],
              network=psc_automation_config['network'],
          )
      )
  # Reuse the shared resource builders (the same logic MutateDeployedIndex
  # uses) instead of duplicating them inline.
  if args.machine_type is not None:
    deployed_index.dedicatedResources = self._GetDedicatedResources(args)
  else:
    deployed_index.automaticResources = self._GetAutomaticResources(args)
  deploy_index_req = self.messages.GoogleCloudAiplatformV1DeployIndexRequest(
      deployedIndex=deployed_index)
  request = self.messages.AiplatformProjectsLocationsIndexEndpointsDeployIndexRequest(
      indexEndpoint=index_endpoint_ref.RelativeName(),
      googleCloudAiplatformV1DeployIndexRequest=deploy_index_req)
  return self._service.DeployIndex(request)
def UndeployIndexBeta(self, index_endpoint_ref, args):
  """Undeploys an index from an index endpoint (beta surface)."""
  undeploy_req = (
      self.messages.GoogleCloudAiplatformV1beta1UndeployIndexRequest(
          deployedIndexId=args.deployed_index_id))
  return self._service.UndeployIndex(
      self.messages.AiplatformProjectsLocationsIndexEndpointsUndeployIndexRequest(
          indexEndpoint=index_endpoint_ref.RelativeName(),
          googleCloudAiplatformV1beta1UndeployIndexRequest=undeploy_req))
def UndeployIndex(self, index_endpoint_ref, args):
  """Undeploys a v1 index from an index endpoint."""
  undeploy_req = self.messages.GoogleCloudAiplatformV1UndeployIndexRequest(
      deployedIndexId=args.deployed_index_id)
  return self._service.UndeployIndex(
      self.messages.AiplatformProjectsLocationsIndexEndpointsUndeployIndexRequest(
          indexEndpoint=index_endpoint_ref.RelativeName(),
          googleCloudAiplatformV1UndeployIndexRequest=undeploy_req))
def MutateDeployedIndexBeta(self, index_endpoint_ref, args):
  """Mutates an existing deployed index on an index endpoint (beta surface)."""
  deployed_index = self.messages.GoogleCloudAiplatformV1beta1DeployedIndex(
      id=args.deployed_index_id,
      enableAccessLogging=args.enable_access_logging,
  )
  # Dedicated resources take precedence when a machine type is given.
  if args.machine_type is None:
    deployed_index.automaticResources = self._GetAutomaticResourcesBeta(args)
  else:
    deployed_index.dedicatedResources = self._GetDedicatedResourcesBeta(args)
  if args.reserved_ip_ranges is not None:
    deployed_index.reservedIpRanges.extend(args.reserved_ip_ranges)
  if args.deployment_group is not None:
    deployed_index.deploymentGroup = args.deployment_group
  # JWT auth config requires both audiences and allowed issuers.
  if args.audiences is not None and args.allowed_issuers is not None:
    provider = self.messages.GoogleCloudAiplatformV1beta1DeployedIndexAuthConfigAuthProvider()
    provider.audiences.extend(args.audiences)
    provider.allowedIssuers.extend(args.allowed_issuers)
    deployed_index.deployedIndexAuthConfig = (
        self.messages.GoogleCloudAiplatformV1beta1DeployedIndexAuthConfig(
            authProvider=provider))
  return self._service.MutateDeployedIndex(
      self.messages.AiplatformProjectsLocationsIndexEndpointsMutateDeployedIndexRequest(
          indexEndpoint=index_endpoint_ref.RelativeName(),
          googleCloudAiplatformV1beta1DeployedIndex=deployed_index))
def MutateDeployedIndex(self, index_endpoint_ref, args):
  """Mutates an existing deployed v1 index on an index endpoint."""
  deployed_index = self.messages.GoogleCloudAiplatformV1DeployedIndex(
      id=args.deployed_index_id,
      enableAccessLogging=args.enable_access_logging,
  )
  # Dedicated resources take precedence when a machine type is given.
  if args.machine_type is None:
    deployed_index.automaticResources = self._GetAutomaticResources(args)
  else:
    deployed_index.dedicatedResources = self._GetDedicatedResources(args)
  if args.reserved_ip_ranges is not None:
    deployed_index.reservedIpRanges.extend(args.reserved_ip_ranges)
  if args.deployment_group is not None:
    deployed_index.deploymentGroup = args.deployment_group
  # JWT auth config requires both audiences and allowed issuers.
  if args.audiences is not None and args.allowed_issuers is not None:
    provider = self.messages.GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider()
    provider.audiences.extend(args.audiences)
    provider.allowedIssuers.extend(args.allowed_issuers)
    deployed_index.deployedIndexAuthConfig = (
        self.messages.GoogleCloudAiplatformV1DeployedIndexAuthConfig(
            authProvider=provider))
  return self._service.MutateDeployedIndex(
      self.messages.AiplatformProjectsLocationsIndexEndpointsMutateDeployedIndexRequest(
          indexEndpoint=index_endpoint_ref.RelativeName(),
          googleCloudAiplatformV1DeployedIndex=deployed_index))
def Get(self, index_endpoint_ref):
  """Fetches a single index endpoint by resource reference."""
  return self._service.Get(
      self.messages.AiplatformProjectsLocationsIndexEndpointsGetRequest(
          name=index_endpoint_ref.RelativeName()))
def List(self, limit=None, region_ref=None):
  """Yields index endpoints under the given region, up to `limit`."""
  list_req = (
      self.messages.AiplatformProjectsLocationsIndexEndpointsListRequest(
          parent=region_ref.RelativeName()))
  return list_pager.YieldFromList(
      self._service,
      list_req,
      field='indexEndpoints',
      batch_size_attribute='pageSize',
      limit=limit)
def Delete(self, index_endpoint_ref):
  """Deletes the given index endpoint."""
  return self._service.Delete(
      self.messages.AiplatformProjectsLocationsIndexEndpointsDeleteRequest(
          name=index_endpoint_ref.RelativeName()))
def _GetDedicatedResourcesBeta(self, args):
  """Builds beta DedicatedResources from machine-type/replica-count flags."""
  machine_spec = self.messages.GoogleCloudAiplatformV1beta1MachineSpec(
      machineType=args.machine_type)
  dedicated = self.messages.GoogleCloudAiplatformV1beta1DedicatedResources(
      machineSpec=machine_spec)
  if args.min_replica_count is not None:
    dedicated.minReplicaCount = args.min_replica_count
  if args.max_replica_count is not None:
    dedicated.maxReplicaCount = args.max_replica_count
  return dedicated
def _GetAutomaticResourcesBeta(self, args):
  """Builds beta AutomaticResources from the replica-count flags."""
  automatic = self.messages.GoogleCloudAiplatformV1beta1AutomaticResources()
  for flag_value, field in ((args.min_replica_count, 'minReplicaCount'),
                            (args.max_replica_count, 'maxReplicaCount')):
    if flag_value is not None:
      setattr(automatic, field, flag_value)
  return automatic
def _GetDedicatedResources(self, args):
  """Builds GA DedicatedResources from machine-type/replica-count flags."""
  machine_spec = self.messages.GoogleCloudAiplatformV1MachineSpec(
      machineType=args.machine_type)
  dedicated = self.messages.GoogleCloudAiplatformV1DedicatedResources(
      machineSpec=machine_spec)
  if args.min_replica_count is not None:
    dedicated.minReplicaCount = args.min_replica_count
  if args.max_replica_count is not None:
    dedicated.maxReplicaCount = args.max_replica_count
  return dedicated
def _GetAutomaticResources(self, args):
  """Builds GA AutomaticResources from the replica-count flags."""
  automatic = self.messages.GoogleCloudAiplatformV1AutomaticResources()
  for flag_value, field in ((args.min_replica_count, 'minReplicaCount'),
                            (args.max_replica_count, 'maxReplicaCount')):
    if flag_value is not None:
      setattr(automatic, field, flag_value)
  return automatic

View File

@@ -0,0 +1,313 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with AI Platform indexes API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import extra_types
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import messages as messages_util
from googlecloudsdk.calliope import exceptions as gcloud_exceptions
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import errors
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import yaml
class IndexesClient(object):
  """High-level client for the AI Platform indexes surface."""

  def __init__(self, client=None, messages=None, version=None):
    # NOTE(review): unlike IndexEndpointsClient, `version` defaults to None;
    # AI_PLATFORM_API_VERSION[None] would fail, so callers appear to be
    # expected to always pass a version — confirm before relying on the
    # default.
    self.client = client or apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[version])
    self.messages = messages or self.client.MESSAGES_MODULE
    # All RPCs below go through the indexes service.
    self._service = self.client.projects_locations_indexes
def _ReadIndexMetadata(self, metadata_file):
  """Parses the JSON metadata file into a JsonValue message (or None)."""
  if not metadata_file:
    raise gcloud_exceptions.BadArgumentException(
        '--metadata-file', 'Index metadata file must be specified.')
  # YAML is a superset of JSON, so the JSON file is parsed as YAML.
  data = yaml.load_path(metadata_file)
  if not data:
    return None
  return messages_util.DictToMessageWithErrorCheck(data, extra_types.JsonValue)
def Get(self, index_ref):
  """Fetches a single index by resource reference."""
  return self._service.Get(
      self.messages.AiplatformProjectsLocationsIndexesGetRequest(
          name=index_ref.RelativeName()))
def List(self, limit=None, region_ref=None):
  """Yields indexes under the given region, up to `limit`."""
  list_req = self.messages.AiplatformProjectsLocationsIndexesListRequest(
      parent=region_ref.RelativeName())
  return list_pager.YieldFromList(
      self._service,
      list_req,
      field='indexes',
      batch_size_attribute='pageSize',
      limit=limit)
def CreateBeta(self, location_ref, args):
  """Creates a new beta index under the given location."""
  labels = labels_util.ParseCreateArgs(
      args, self.messages.GoogleCloudAiplatformV1beta1Index.LabelsValue)
  index_update_method = None
  if args.index_update_method:
    update_enum = (self.messages.GoogleCloudAiplatformV1beta1Index
                   .IndexUpdateMethodValueValuesEnum)
    # Map the CLI flag values onto the API enum.
    method_by_flag = {
        'stream-update': update_enum.STREAM_UPDATE,
        'batch-update': update_enum.BATCH_UPDATE,
    }
    if args.index_update_method not in method_by_flag:
      raise gcloud_exceptions.BadArgumentException(
          '--index-update-method',
          'Invalid index update method: {}'.format(args.index_update_method),
      )
    index_update_method = method_by_flag[args.index_update_method]
  encryption_spec = None
  if args.encryption_kms_key_name is not None:
    encryption_spec = self.messages.GoogleCloudAiplatformV1beta1EncryptionSpec(
        kmsKeyName=args.encryption_kms_key_name)
  index = self.messages.GoogleCloudAiplatformV1beta1Index(
      displayName=args.display_name,
      description=args.description,
      metadata=self._ReadIndexMetadata(args.metadata_file),
      labels=labels,
      indexUpdateMethod=index_update_method,
      encryptionSpec=encryption_spec)
  return self._service.Create(
      self.messages.AiplatformProjectsLocationsIndexesCreateRequest(
          parent=location_ref.RelativeName(),
          googleCloudAiplatformV1beta1Index=index))
def Create(self, location_ref, args):
  """Creates a new v1 index under the given location."""
  labels = labels_util.ParseCreateArgs(
      args, self.messages.GoogleCloudAiplatformV1Index.LabelsValue)
  index_update_method = None
  if args.index_update_method:
    update_enum = (self.messages.GoogleCloudAiplatformV1Index
                   .IndexUpdateMethodValueValuesEnum)
    # Map the CLI flag values onto the API enum.
    method_by_flag = {
        'stream-update': update_enum.STREAM_UPDATE,
        'batch-update': update_enum.BATCH_UPDATE,
    }
    if args.index_update_method not in method_by_flag:
      raise gcloud_exceptions.BadArgumentException(
          '--index-update-method',
          'Invalid index update method: {}'.format(args.index_update_method),
      )
    index_update_method = method_by_flag[args.index_update_method]
  encryption_spec = None
  if args.encryption_kms_key_name is not None:
    encryption_spec = self.messages.GoogleCloudAiplatformV1EncryptionSpec(
        kmsKeyName=args.encryption_kms_key_name)
  index = self.messages.GoogleCloudAiplatformV1Index(
      displayName=args.display_name,
      description=args.description,
      metadata=self._ReadIndexMetadata(args.metadata_file),
      labels=labels,
      indexUpdateMethod=index_update_method,
      encryptionSpec=encryption_spec)
  return self._service.Create(
      self.messages.AiplatformProjectsLocationsIndexesCreateRequest(
          parent=location_ref.RelativeName(),
          googleCloudAiplatformV1Index=index))
def PatchBeta(self, index_ref, args):
  """Updates metadata, or display name/description/labels, of a beta index."""
  index = self.messages.GoogleCloudAiplatformV1beta1Index()
  update_mask = []
  if args.metadata_file is not None:
    # A metadata-file update is exclusive of the other fields.
    index.metadata = self._ReadIndexMetadata(args.metadata_file)
    update_mask.append('metadata')
  else:
    if args.display_name is not None:
      index.displayName = args.display_name
      update_mask.append('display_name')
    if args.description is not None:
      index.description = args.description
      update_mask.append('description')

    def GetLabels():
      # Fetched lazily, only when computing the label diff requires it.
      return self.Get(index_ref).labels

    labels_update = labels_util.ProcessUpdateArgsLazy(
        args, self.messages.GoogleCloudAiplatformV1beta1Index.LabelsValue,
        GetLabels)
    if labels_update.needs_update:
      index.labels = labels_update.labels
      update_mask.append('labels')
  if not update_mask:
    raise errors.NoFieldsSpecifiedError('No updates requested.')
  return self._service.Patch(
      self.messages.AiplatformProjectsLocationsIndexesPatchRequest(
          name=index_ref.RelativeName(),
          googleCloudAiplatformV1beta1Index=index,
          updateMask=','.join(update_mask)))
def Patch(self, index_ref, args):
"""Update an v1 index."""
index = self.messages.GoogleCloudAiplatformV1Index()
update_mask = []
if args.metadata_file is not None:
index.metadata = self._ReadIndexMetadata(args.metadata_file)
update_mask.append('metadata')
else:
if args.display_name is not None:
index.displayName = args.display_name
update_mask.append('display_name')
if args.description is not None:
index.description = args.description
update_mask.append('description')
def GetLabels():
return self.Get(index_ref).labels
labels_update = labels_util.ProcessUpdateArgsLazy(
args, self.messages.GoogleCloudAiplatformV1Index.LabelsValue,
GetLabels)
if labels_update.needs_update:
index.labels = labels_update.labels
update_mask.append('labels')
if not update_mask:
raise errors.NoFieldsSpecifiedError('No updates requested.')
request = self.messages.AiplatformProjectsLocationsIndexesPatchRequest(
name=index_ref.RelativeName(),
googleCloudAiplatformV1Index=index,
updateMask=','.join(update_mask))
return self._service.Patch(request)
def Delete(self, index_ref):
request = self.messages.AiplatformProjectsLocationsIndexesDeleteRequest(
name=index_ref.RelativeName())
return self._service.Delete(request)
def RemoveDatapointsBeta(self, index_ref, args):
"""Remove data points from a v1beta1 index."""
if args.datapoint_ids and args.datapoints_from_file:
raise errors.ArgumentError(
'datapoint_ids and datapoints_from_file can not be set'
' at the same time.'
)
if args.datapoint_ids:
req = self.messages.AiplatformProjectsLocationsIndexesRemoveDatapointsRequest(
index=index_ref.RelativeName(),
googleCloudAiplatformV1beta1RemoveDatapointsRequest=self.messages
.GoogleCloudAiplatformV1beta1RemoveDatapointsRequest(
datapointIds=args.datapoint_ids))
if args.datapoints_from_file:
data = yaml.load_path(args.datapoints_from_file)
req = self.messages.AiplatformProjectsLocationsIndexesRemoveDatapointsRequest(
index=index_ref.RelativeName(),
googleCloudAiplatformV1beta1RemoveDatapointsRequest=self.messages
.GoogleCloudAiplatformV1beta1RemoveDatapointsRequest(
datapointIds=data))
return self._service.RemoveDatapoints(req)
def RemoveDatapoints(self, index_ref, args):
"""Remove data points from a v1 index."""
if args.datapoint_ids and args.datapoints_from_file:
raise errors.ArgumentError(
'`--datapoint_ids` and `--datapoints_from_file` can not be set at the'
' same time.'
)
if args.datapoint_ids:
req = self.messages.AiplatformProjectsLocationsIndexesRemoveDatapointsRequest(
index=index_ref.RelativeName(),
googleCloudAiplatformV1RemoveDatapointsRequest=self.messages
.GoogleCloudAiplatformV1RemoveDatapointsRequest(
datapointIds=args.datapoint_ids))
if args.datapoints_from_file:
data = yaml.load_path(args.datapoints_from_file)
req = self.messages.AiplatformProjectsLocationsIndexesRemoveDatapointsRequest(
index=index_ref.RelativeName(),
googleCloudAiplatformV1RemoveDatapointsRequest=self.messages
.GoogleCloudAiplatformV1RemoveDatapointsRequest(
datapointIds=data))
return self._service.RemoveDatapoints(req)
def UpsertDatapointsBeta(self, index_ref, args):
"""Upsert data points from a v1beta1 index."""
datapoints = []
if args.datapoints_from_file:
data = yaml.load_path(args.datapoints_from_file)
for datapoint_json in data:
datapoint = messages_util.DictToMessageWithErrorCheck(
datapoint_json,
self.messages.GoogleCloudAiplatformV1beta1IndexDatapoint)
datapoints.append(datapoint)
update_mask = None
if args.update_mask:
update_mask = ','.join(args.update_mask)
req = self.messages.AiplatformProjectsLocationsIndexesUpsertDatapointsRequest(
index=index_ref.RelativeName(),
googleCloudAiplatformV1beta1UpsertDatapointsRequest=self.messages
.GoogleCloudAiplatformV1beta1UpsertDatapointsRequest(
datapoints=datapoints,
updateMask=update_mask))
return self._service.UpsertDatapoints(req)
def UpsertDatapoints(self, index_ref, args):
"""Upsert data points from a v1 index."""
datapoints = []
if args.datapoints_from_file:
data = yaml.load_path(args.datapoints_from_file)
for datapoint_json in data:
datapoint = messages_util.DictToMessageWithErrorCheck(
datapoint_json,
self.messages.GoogleCloudAiplatformV1IndexDatapoint)
datapoints.append(datapoint)
update_mask = None
if args.update_mask:
update_mask = ','.join(args.update_mask)
req = self.messages.AiplatformProjectsLocationsIndexesUpsertDatapointsRequest(
index=index_ref.RelativeName(),
googleCloudAiplatformV1UpsertDatapointsRequest=self.messages
.GoogleCloudAiplatformV1UpsertDatapointsRequest(
datapoints=datapoints,
updateMask=update_mask))
return self._service.UpsertDatapoints(req)

View File

@@ -0,0 +1,515 @@
# -*- coding: utf-8 -*- #
# Copyright 2024 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Vertex AI Model Garden APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import flags
# Server-side filter expressions used when listing publisher models.
# Hugging Face models are tagged with the "hf_wildcard" marker; native
# Model Garden models are not.
_HF_WILDCARD_FILTER = 'is_hf_wildcard(true)'
_NATIVE_MODEL_FILTER = 'is_hf_wildcard(false)'
# Restricts Hugging Face listings to models whose deployment configuration
# has been verified by Model Garden.
_VERIFIED_DEPLOYMENT_FILTER = (
    'labels.VERIFIED_DEPLOYMENT_CONFIG=VERIFIED_DEPLOYMENT_SUCCEED'
)
def IsHuggingFaceModel(model_name: str) -> bool:
  """Returns whether the name looks like a Hugging Face model ID.

  A Hugging Face model ID has the form ``<organization>/<model>``: exactly
  one ``/`` separating two non-empty parts, with no ``@`` revision suffix.
  """
  organization, slash, model_id = model_name.partition('/')
  if not (organization and slash and model_id):
    return False
  return '/' not in model_id and '@' not in model_id
def IsCustomWeightsModel(model: str) -> bool:
  """Returns whether the model is a custom weights model (a GCS URI)."""
  return model.startswith('gs://')
def DeployCustomWeightsModel(
    messages,
    projects_locations_service,
    model,
    machine_type,
    accelerator_type,
    accelerator_count,
    project,
    location,
):
  """Deploys a model from custom weights stored in Cloud Storage.

  Args:
    messages: The v1beta1 AI Platform messages module.
    projects_locations_service: The projects.locations API service.
    model: GCS URI (``gs://...``) of the custom model weights.
    machine_type: Machine type for the dedicated resources; when falsy no
      explicit deploy config is sent.
    accelerator_type: Accelerator type attached to each machine.
    accelerator_count: Number of accelerators per machine.
    project: Project to deploy into.
    location: Region to deploy into.

  Returns:
    The Deploy long-running operation.
  """
  deploy_request = messages.GoogleCloudAiplatformV1beta1DeployRequest(
      customModel=(
          messages.GoogleCloudAiplatformV1beta1DeployRequestCustomModel(
              gcsUri=model)))
  if machine_type:
    machine_spec = messages.GoogleCloudAiplatformV1beta1MachineSpec(
        machineType=machine_type,
        acceleratorType=accelerator_type,
        acceleratorCount=accelerator_count,
    )
    deploy_request.deployConfig = (
        messages.GoogleCloudAiplatformV1beta1DeployRequestDeployConfig(
            dedicatedResources=(
                messages.GoogleCloudAiplatformV1beta1DedicatedResources(
                    machineSpec=machine_spec,
                    minReplicaCount=1,
                )
            ),
        )
    )
  return projects_locations_service.Deploy(
      messages.AiplatformProjectsLocationsDeployRequest(
          destination=f'projects/{project}/locations/{location}',
          googleCloudAiplatformV1beta1DeployRequest=deploy_request,
      )
  )
class ModelGardenClient(object):
  """Client used for interacting with Model Garden APIs."""

  def __init__(self, version=constants.BETA_VERSION):
    """Initializes the Model Garden API client.

    Args:
      version: Release-track key into constants.AI_PLATFORM_API_VERSION;
        defaults to the beta surface.
    """
    api_client = apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[version],
    )
    self._messages = api_client.MESSAGES_MODULE
    self._publishers_models_service = api_client.publishers_models
    self._projects_locations_service = api_client.projects_locations
def GetPublisherModel(
self,
model_name,
is_hugging_face_model=False,
include_equivalent_model_garden_model_deployment_configs=True,
hugging_face_token=None,
):
"""Get a publisher model.
Args:
model_name: The name of the model to get. The format should be
publishers/{publisher}/models/{model}
is_hugging_face_model: Whether the model is a hugging face model.
include_equivalent_model_garden_model_deployment_configs: Whether to
include equivalent Model Garden model deployment configs for Hugging
Face models.
hugging_face_token: The Hugging Face access token to access the model
artifacts for gated models unverified by Model Garden.
Returns:
A publisher model.
"""
request = self._messages.AiplatformPublishersModelsGetRequest(
name=model_name,
isHuggingFaceModel=is_hugging_face_model,
includeEquivalentModelGardenModelDeploymentConfigs=include_equivalent_model_garden_model_deployment_configs,
huggingFaceToken=hugging_face_token,
)
return self._publishers_models_service.Get(request)
def Deploy(
self,
project,
location,
model,
accept_eula,
accelerator_type,
accelerator_count,
machine_type,
endpoint_display_name,
hugging_face_access_token,
spot,
reservation_affinity,
use_dedicated_endpoint,
enable_fast_tryout,
container_image_uri=None,
container_command=None,
container_args=None,
container_env_vars=None,
container_ports=None,
container_grpc_ports=None,
container_predict_route=None,
container_health_route=None,
container_deployment_timeout_seconds=None,
container_shared_memory_size_mb=None,
container_startup_probe_exec=None,
container_startup_probe_period_seconds=None,
container_startup_probe_timeout_seconds=None,
container_health_probe_exec=None,
container_health_probe_period_seconds=None,
container_health_probe_timeout_seconds=None,
):
"""Deploy an open weight model.
Args:
project: The project to deploy the model to.
location: The location to deploy the model to.
model: The name of the model to deploy or its gcs uri for custom weights.
accept_eula: Whether to accept the end-user license agreement.
accelerator_type: The type of accelerator to use.
accelerator_count: The number of accelerators to use.
machine_type: The type of machine to use.
endpoint_display_name: The display name of the endpoint.
hugging_face_access_token: The Hugging Face access token.
spot: Whether to deploy the model on Spot VMs.
reservation_affinity: The reservation affinity to use.
use_dedicated_endpoint: Whether to use a dedicated endpoint.
enable_fast_tryout: Whether to enable fast tryout.
container_image_uri: Immutable. URI of the Docker image to be used as the
custom container for serving predictions. This URI must identify an
image in Artifact Registry or Container Registry. Learn more about the
[container publishing requirements](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#publishing), including
permissions requirements for the Vertex AI Service Agent. The container
image is ingested upon ModelService.UploadModel, stored internally, and
this original path is afterwards not used. To learn about the
requirements for the Docker image itself, see [Custom container
requirements](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#). You can use the URI
to one of Vertex AI's [pre-built container images for
prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-
built-containers) in this field.
container_command: Specifies the command that runs when the container
starts. This overrides the container's [ENTRYPOINT](https://docs.docker.
com/engine/reference/builder/#entrypoint). Specify this field as an
array of executable and arguments, similar to a Docker `ENTRYPOINT`'s
"exec" form, not its "shell" form. If you do not specify this field,
then the container's `ENTRYPOINT` runs, in conjunction with the args
field or the container's
[`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if
either exists. If this field is not specified and the container does not
have an `ENTRYPOINT`, then refer to the Docker documentation about [how
`CMD` and `ENTRYPOINT`
interact](https://docs.docker.com/engine/reference/builder/#understand-
how-cmd-and-entrypoint-interact). If you specify this field, then you
can also specify the `args` field to provide additional arguments for
this command. However, if you specify this field, then the container's
`CMD` is ignored. See the [Kubernetes documentation about how the
`command` and `args` fields interact with a container's `ENTRYPOINT` and
`CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-
command-argument-container/#notes). In this field, you can reference
[environment variables set by Vertex
AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#aip-variables) and environment variables set in
the env field. You cannot reference environment variables set in the
Docker image. In order for environment variables to be expanded,
reference them by using the following syntax: $( VARIABLE_NAME) Note
that this differs from Bash variable expansion, which does not use
parentheses. If a variable cannot be resolved, the reference in the
input string is used unchanged. To avoid variable expansion, you can
escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field
corresponds to the `command` field of the Kubernetes Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core).
container_args: Specifies arguments for the command that runs when the
container starts. This overrides the container's
[`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
this field as an array of executable and arguments, similar to a Docker
`CMD`'s "default parameters" form. If you don't specify this field but
do specify the command field, then the command from the `command` field
runs without any additional arguments. See the [Kubernetes documentation
about how the `command` and `args` fields interact with a container's
`ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-
application/define-command-argument-container/#notes). If you don't
specify this field and don't specify the `command` field, then the
container's
[`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd)
and `CMD` determine what runs based on their default behavior. See the
Docker documentation about [how `CMD` and `ENTRYPOINT`
interact](https://docs.docker.com/engine/reference/builder/#understand-
how-cmd-and-entrypoint-interact). In this field, you can reference
[environment variables set by Vertex
AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#aip-variables) and environment variables set in
the env field. You cannot reference environment variables set in the
Docker image. In order for environment variables to be expanded,
reference them by using the following syntax: $( VARIABLE_NAME) Note
that this differs from Bash variable expansion, which does not use
parentheses. If a variable cannot be resolved, the reference in the
input string is used unchanged. To avoid variable expansion, you can
escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field
corresponds to the `args` field of the Kubernetes Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core)..
container_env_vars: List of environment variables to set in the container.
After the container starts running, code running in the container can
read these environment variables. Additionally, the command and args
fields can reference these variables. Later entries in this list can
also reference earlier entries. For example, the following example sets
the variable `VAR_2` to have the value `foo bar`: ```json [ { "name":
"VAR_1", "value": "foo" }, { "name": "VAR_2", "value": "$(VAR_1) bar" }
] ``` If you switch the order of the variables in the example, then the
expansion does not occur. This field corresponds to the `env` field of
the Kubernetes Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core).
container_ports: List of ports to expose from the container. Vertex AI
sends any http prediction requests that it receives to the first port on
this list. Vertex AI also sends [liveness and health
checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#liveness) to this port. If you do not specify
this field, it defaults to following value: ```json [ { "containerPort":
8080 } ] ``` Vertex AI does not use ports other than the first one
listed. This field corresponds to the `ports` field of the Kubernetes
Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core).
container_grpc_ports: List of ports to expose from the container. Vertex
AI sends any grpc prediction requests that it receives to the first port
on this list. Vertex AI also sends [liveness and health
checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#liveness) to this port. If you do not specify
this field, gRPC requests to the container will be disabled. Vertex AI
does not use ports other than the first one listed. This field
corresponds to the `ports` field of the Kubernetes Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core).
container_predict_route: HTTP path on the container to send prediction
requests to. Vertex AI forwards requests sent using
projects.locations.endpoints.predict to this path on the container's IP
address and port. Vertex AI then returns the container's response in the
API response. For example, if you set this field to `/foo`, then when
Vertex AI receives a prediction request, it forwards the request body in
a POST request to the `/foo` path on the port of your container
specified by the first value of this `ModelContainerSpec`'s ports field.
If you don't specify this field, it defaults to the following value when
you deploy this Model to an Endpoint:
/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The
placeholders in this value are replaced as follows: * ENDPOINT: The last
segment (following `endpoints/`)of the Endpoint.name][] field of the
Endpoint where this Model has been deployed. (Vertex AI makes this value
available to your container code as the [`AIP_ENDPOINT_ID` environment
variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#aip-variables).) * DEPLOYED_MODEL:
DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value
available to your container code as the [`AIP_DEPLOYED_MODEL_ID`
environment variable](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#aip-variables).)
container_health_route: HTTP path on the container to send health checks
to. Vertex AI intermittently sends GET requests to this path on the
container's IP address and port to check that the container is healthy.
Read more about [health checks](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#health). For example,
if you set this field to `/bar`, then Vertex AI intermittently sends a
GET request to the `/bar` path on the port of your container specified
by the first value of this `ModelContainerSpec`'s ports field. If you
don't specify this field, it defaults to the following value when you
deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/
DEPLOYED_MODEL:predict The placeholders in this value are replaced as
follows * ENDPOINT: The last segment (following `endpoints/`)of the
Endpoint.name][] field of the Endpoint where this Model has been
deployed. (Vertex AI makes this value available to your container code
as the [`AIP_ENDPOINT_ID` environment
variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#aip-variables).) * DEPLOYED_MODEL:
DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value
available to your container code as the [`AIP_DEPLOYED_MODEL_ID`
environment variable](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#aip-variables).)
container_deployment_timeout_seconds (int): Deployment timeout in seconds.
container_shared_memory_size_mb (int): The amount of the VM memory to
reserve as the shared memory for the model in megabytes.
container_startup_probe_exec (Sequence[str]): Exec specifies the action to
take. Used by startup probe. An example of this argument would be
["cat", "/tmp/healthy"]
container_startup_probe_period_seconds (int): How often (in seconds) to
perform the startup probe. Default to 10 seconds. Minimum value is 1.
container_startup_probe_timeout_seconds (int): Number of seconds after
which the startup probe times out. Defaults to 1 second. Minimum value
is 1.
container_health_probe_exec (Sequence[str]): Exec specifies the action to
take. Used by health probe. An example of this argument would be ["cat",
"/tmp/healthy"]
container_health_probe_period_seconds (int): How often (in seconds) to
perform the health probe. Default to 10 seconds. Minimum value is 1.
container_health_probe_timeout_seconds (int): Number of seconds after
which the health probe times out. Defaults to 1 second. Minimum value is
1.
Returns:
The deploy long-running operation.
"""
container_spec = None
if container_image_uri:
container_spec = (
self._messages.GoogleCloudAiplatformV1beta1ModelContainerSpec(
healthRoute=container_health_route,
imageUri=container_image_uri,
predictRoute=container_predict_route,
)
)
if container_command:
container_spec.command = container_command
if container_args:
container_spec.args = container_args
if container_env_vars:
container_spec.env = [
self._messages.GoogleCloudAiplatformV1beta1EnvVar(
name=k, value=container_env_vars[k]
)
for k in container_env_vars
]
if container_ports:
container_spec.ports = [
self._messages.GoogleCloudAiplatformV1beta1Port(containerPort=port)
for port in container_ports
]
if container_grpc_ports:
container_spec.grpcPorts = [
self._messages.GoogleCloudAiplatformV1beta1Port(containerPort=port)
for port in container_grpc_ports
]
if container_deployment_timeout_seconds:
container_spec.deploymentTimeout = (
str(container_deployment_timeout_seconds) + 's'
)
if container_shared_memory_size_mb:
container_spec.sharedMemorySizeMb = container_shared_memory_size_mb
if (
container_startup_probe_exec
or container_startup_probe_period_seconds
or container_startup_probe_timeout_seconds
):
startup_probe_exec = None
if container_startup_probe_exec:
startup_probe_exec = (
self._messages.GoogleCloudAiplatformV1beta1ProbeExecAction(
command=container_startup_probe_exec
)
)
container_spec.startupProbe = (
self._messages.GoogleCloudAiplatformV1beta1Probe(
exec_=startup_probe_exec,
periodSeconds=container_startup_probe_period_seconds,
timeoutSeconds=container_startup_probe_timeout_seconds,
)
)
if (
container_health_probe_exec
or container_health_probe_period_seconds
or container_health_probe_timeout_seconds
):
health_probe_exec = None
if container_health_probe_exec:
health_probe_exec = (
self._messages.GoogleCloudAiplatformV1beta1ProbeExecAction(
command=container_health_probe_exec
)
)
container_spec.healthProbe = (
self._messages.GoogleCloudAiplatformV1beta1Probe(
exec_=health_probe_exec,
periodSeconds=container_health_probe_period_seconds,
timeoutSeconds=container_health_probe_timeout_seconds,
)
)
if IsCustomWeightsModel(model):
return DeployCustomWeightsModel(
self._messages,
self._projects_locations_service,
model,
machine_type,
accelerator_type,
accelerator_count,
project,
location,
)
elif IsHuggingFaceModel(model):
deploy_request = self._messages.GoogleCloudAiplatformV1beta1DeployRequest(
huggingFaceModelId=model
)
else:
deploy_request = self._messages.GoogleCloudAiplatformV1beta1DeployRequest(
publisherModelName=model
)
deploy_request.modelConfig = (
self._messages.GoogleCloudAiplatformV1beta1DeployRequestModelConfig(
huggingFaceAccessToken=hugging_face_access_token,
acceptEula=accept_eula,
containerSpec=container_spec,
)
)
deploy_request.endpointConfig = (
self._messages.GoogleCloudAiplatformV1beta1DeployRequestEndpointConfig(
endpointDisplayName=endpoint_display_name,
dedicatedEndpointEnabled=use_dedicated_endpoint,
)
)
deploy_request.deployConfig = self._messages.GoogleCloudAiplatformV1beta1DeployRequestDeployConfig(
dedicatedResources=self._messages.GoogleCloudAiplatformV1beta1DedicatedResources(
machineSpec=self._messages.GoogleCloudAiplatformV1beta1MachineSpec(
machineType=machine_type,
acceleratorType=accelerator_type,
acceleratorCount=accelerator_count,
reservationAffinity=flags.ParseReservationAffinityFlag(
reservation_affinity, constants.BETA_VERSION
),
),
minReplicaCount=1,
spot=spot,
),
fastTryoutEnabled=enable_fast_tryout,
)
request = self._messages.AiplatformProjectsLocationsDeployRequest(
destination=f'projects/{project}/locations/{location}',
googleCloudAiplatformV1beta1DeployRequest=deploy_request,
)
return self._projects_locations_service.Deploy(request)
def ListPublisherModels(
self,
limit=None,
batch_size=100,
list_hf_models=False,
model_filter=None,
):
"""List publisher models in Model Garden.
Args:
limit: The maximum number of items to list. None if all available records
should be yielded.
batch_size: The number of items to list per page.
list_hf_models: Whether to only list Hugging Face models.
model_filter: The filter on model name to apply on server-side.
Returns:
The list of publisher models in Model Garden..
"""
filter_str = _NATIVE_MODEL_FILTER
if list_hf_models:
filter_str = ' AND '.join(
[_HF_WILDCARD_FILTER, _VERIFIED_DEPLOYMENT_FILTER]
)
if model_filter:
filter_str = (
f'{filter_str} AND (model_user_id=~"(?i).*{model_filter}.*" OR'
f' display_name=~"(?i).*{model_filter}.*")'
)
return list_pager.YieldFromList(
self._publishers_models_service,
self._messages.AiplatformPublishersModelsListRequest(
parent='publishers/*',
listAllVersions=True,
filter=filter_str,
),
field='publisherModels',
batch_size_attribute='pageSize',
batch_size=batch_size,
limit=limit,
)

View File

@@ -0,0 +1,528 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with AI Platform model monitoring jobs API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from apitools.base.py import encoding
from apitools.base.py import extra_types
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.ai import util as api_util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import messages as messages_util
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import errors
from googlecloudsdk.command_lib.ai import model_monitoring_jobs_util
from googlecloudsdk.command_lib.ai import validation as common_validation
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core import yaml
import six
def _ParseResourceId(resource_id, region_ref, collection):
  """Parses a resource ID into a resource object of the given collection.

  Args:
    resource_id: The resource ID or name to parse.
    region_ref: Resource reference of the region; supplies ``locationsId``.
    collection: Full collection name, e.g.
      'aiplatform.projects.locations.endpoints'.

  Returns:
    The parsed resource reference.
  """
  region = region_ref.AsDict()['locationsId']
  return resources.REGISTRY.Parse(
      resource_id,
      params={
          'locationsId': region,
          'projectsId': properties.VALUES.core.project.GetOrFail
      },
      collection=collection)


def _ParseEndpoint(endpoint_id, region_ref):
  """Parses an endpoint ID into an endpoint resource object."""
  # Delegates to the shared helper; previously this logic was duplicated.
  return _ParseResourceId(endpoint_id, region_ref,
                          'aiplatform.projects.locations.endpoints')


def _ParseDataset(dataset_id, region_ref):
  """Parses a dataset ID into a dataset resource object."""
  return _ParseResourceId(dataset_id, region_ref,
                          'aiplatform.projects.locations.datasets')
class ModelMonitoringJobsClient(object):
"""High-level client for the AI Platform model deployment monitoring jobs surface."""
  def __init__(self, client=None, messages=None, version=None):
    """Initializes the model monitoring jobs client.

    Args:
      client: Optional API client; when None a new client is created for the
        given version.
      messages: Optional messages module; defaults to the client's module.
      version: Release-track key into constants.AI_PLATFORM_API_VERSION.
        NOTE(review): the None default would raise KeyError in the version
        lookup below when no client is passed — callers appear to always
        supply a version; confirm.
    """
    self.client = client or apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[version])
    self.messages = messages or self.client.MESSAGES_MODULE
    # Service for projects.locations.modelDeploymentMonitoringJobs.
    self._service = self.client.projects_locations_modelDeploymentMonitoringJobs
    self._version = version
def _ConstructDriftThresholds(self, feature_thresholds,
feature_attribution_thresholds):
"""Construct drift thresholds from user input.
Args:
feature_thresholds: Dict or None, key: feature_name, value: thresholds.
feature_attribution_thresholds: Dict or None, key:feature_name, value:
attribution score thresholds.
Returns:
PredictionDriftDetectionConfig
"""
prediction_drift_detection = api_util.GetMessage(
'ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig',
self._version)()
additional_properties = []
attribution_additional_properties = []
if feature_thresholds:
for key, value in feature_thresholds.items():
threshold = 0.3 if not value else float(value)
additional_properties.append(prediction_drift_detection
.DriftThresholdsValue().AdditionalProperty(
key=key,
value=api_util.GetMessage(
'ThresholdConfig',
self._version)(value=threshold)))
prediction_drift_detection.driftThresholds = prediction_drift_detection.DriftThresholdsValue(
additionalProperties=additional_properties)
if feature_attribution_thresholds:
for key, value in feature_attribution_thresholds.items():
threshold = 0.3 if not value else float(value)
attribution_additional_properties.append(
prediction_drift_detection.AttributionScoreDriftThresholdsValue(
).AdditionalProperty(
key=key,
value=api_util.GetMessage('ThresholdConfig',
self._version)(value=threshold)))
prediction_drift_detection.attributionScoreDriftThresholds = prediction_drift_detection.AttributionScoreDriftThresholdsValue(
additionalProperties=attribution_additional_properties)
return prediction_drift_detection
def _ConstructSkewThresholds(self, feature_thresholds,
feature_attribution_thresholds):
"""Construct skew thresholds from user input.
Args:
feature_thresholds: Dict or None, key: feature_name, value: thresholds.
feature_attribution_thresholds: Dict or None, key:feature_name, value:
attribution score thresholds.
Returns:
TrainingPredictionSkewDetectionConfig
"""
training_prediction_skew_detection = api_util.GetMessage(
'ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig',
self._version)()
additional_properties = []
attribution_additional_properties = []
if feature_thresholds:
for key, value in feature_thresholds.items():
threshold = 0.3 if not value else float(value)
additional_properties.append(training_prediction_skew_detection
.SkewThresholdsValue().AdditionalProperty(
key=key,
value=api_util.GetMessage(
'ThresholdConfig',
self._version)(value=threshold)))
training_prediction_skew_detection.skewThresholds = training_prediction_skew_detection.SkewThresholdsValue(
additionalProperties=additional_properties)
if feature_attribution_thresholds:
for key, value in feature_attribution_thresholds.items():
threshold = 0.3 if not value else float(value)
attribution_additional_properties.append(
training_prediction_skew_detection
.AttributionScoreSkewThresholdsValue().AdditionalProperty(
key=key,
value=api_util.GetMessage('ThresholdConfig',
self._version)(value=threshold)))
training_prediction_skew_detection.attributionScoreSkewThresholds = training_prediction_skew_detection.AttributionScoreSkewThresholdsValue(
additionalProperties=attribution_additional_properties)
return training_prediction_skew_detection
def _ConstructObjectiveConfigForUpdate(self, existing_monitoring_job,
                                       feature_thresholds,
                                       feature_attribution_thresholds):
  """Builds objective configs with updated skew/drift thresholds.

  The new thresholds are applied in place to every deployed model already
  covered by the existing job; each model keeps whichever detection configs
  (skew and/or drift) it already had.

  Args:
    existing_monitoring_job: Existing monitoring job message.
    feature_thresholds: Dict or None, key: feature_name, value: thresholds.
    feature_attribution_thresholds: Dict or None, key: feature_name, value:
      attribution score thresholds.

  Returns:
    A list of model monitoring objective configs.
  """
  drift = self._ConstructDriftThresholds(feature_thresholds,
                                         feature_attribution_thresholds)
  skew = self._ConstructSkewThresholds(feature_thresholds,
                                       feature_attribution_thresholds)
  updated_configs = []
  for config in existing_monitoring_job.modelDeploymentMonitoringObjectiveConfigs:
    objective = config.objectiveConfig
    skew_detection = objective.trainingPredictionSkewDetectionConfig
    if skew_detection:
      if skew.skewThresholds:
        skew_detection.skewThresholds = skew.skewThresholds
      if skew.attributionScoreSkewThresholds:
        skew_detection.attributionScoreSkewThresholds = (
            skew.attributionScoreSkewThresholds)
    drift_detection = objective.predictionDriftDetectionConfig
    if drift_detection:
      if drift.driftThresholds:
        drift_detection.driftThresholds = drift.driftThresholds
      if drift.attributionScoreDriftThresholds:
        drift_detection.attributionScoreDriftThresholds = (
            drift.attributionScoreDriftThresholds)
    if skew.attributionScoreSkewThresholds or drift.attributionScoreDriftThresholds:
      # Attribution-based thresholds only work when feature attributions are
      # produced, so enable the explanation config alongside them.
      objective.explanationConfig = api_util.GetMessage(
          'ModelMonitoringObjectiveConfigExplanationConfig', self._version)(
              enableFeatureAttributes=True)
    updated_configs.append(config)
  return updated_configs
def _ConstructObjectiveConfigForCreate(self, location_ref, endpoint_name,
                                       feature_thresholds,
                                       feature_attribution_thresholds,
                                       dataset, bigquery_uri, data_format,
                                       gcs_uris, target_field,
                                       training_sampling_rate):
  """Construct monitoring objective config.

  Apply the feature thresholds for skew or drift detection to all the deployed
  models under the endpoint. Skew detection is configured when any training
  dataset source (Vertex dataset, BigQuery table, or GCS files/format) is
  supplied; otherwise drift detection is configured.

  Args:
    location_ref: Location reference.
    endpoint_name: Endpoint resource name.
    feature_thresholds: Dict or None, key: feature_name, value: thresholds.
    feature_attribution_thresholds: Dict or None, key: feature_name, value:
      attribution score thresholds.
    dataset: Vertex AI Dataset Id.
    bigquery_uri: The BigQuery table of the unmanaged Dataset used to train
      this Model.
    data_format: Google Cloud Storage format, supported format: csv,
      tf-record.
    gcs_uris: The Google Cloud Storage uri of the unmanaged Dataset used to
      train this Model.
    target_field: The target field name the model is to predict.
    training_sampling_rate: Training Dataset sampling rate.

  Returns:
    A list of model monitoring objective config, one per deployed model on
    the endpoint.

  Raises:
    errors.ArgumentError: If a training dataset source is given without a
      target field, or if only one of gcs_uris / data_format is given.
  """
  objective_config_template = api_util.GetMessage(
      'ModelDeploymentMonitoringObjectiveConfig', self._version)()
  if feature_thresholds or feature_attribution_thresholds:
    if dataset or bigquery_uri or gcs_uris or data_format:
      # A training dataset source implies training-prediction skew
      # detection, which requires the target field.
      training_dataset = api_util.GetMessage(
          'ModelMonitoringObjectiveConfigTrainingDataset', self._version)()
      if target_field is None:
        raise errors.ArgumentError(
            "Target field must be provided if you'd like to do training-prediction skew detection."
        )
      training_dataset.targetField = target_field
      training_dataset.loggingSamplingStrategy = api_util.GetMessage(
          'SamplingStrategy', self._version)(
              randomSampleConfig=api_util.GetMessage(
                  'SamplingStrategyRandomSampleConfig', self._version)(
                      sampleRate=training_sampling_rate))
      # Exactly one dataset source is used, in priority order:
      # Vertex dataset > BigQuery table > GCS files.
      if dataset:
        training_dataset.dataset = _ParseDataset(dataset,
                                                 location_ref).RelativeName()
      elif bigquery_uri:
        training_dataset.bigquerySource = api_util.GetMessage(
            'BigQuerySource', self._version)(
                inputUri=bigquery_uri)
      elif gcs_uris or data_format:
        # A GCS source needs both the uris and their data format.
        if gcs_uris is None:
          raise errors.ArgumentError(
              'Data format is defined but no Google Cloud Storage uris are provided. Please use --gcs-uris to provide training datasets.'
          )
        if data_format is None:
          raise errors.ArgumentError(
              'No Data format is defined for Google Cloud Storage training dataset. Please use --data-format to define the Data format.'
          )
        training_dataset.dataFormat = data_format
        training_dataset.gcsSource = api_util.GetMessage(
            'GcsSource', self._version)(
                uris=gcs_uris)
      training_prediction_skew_detection = self._ConstructSkewThresholds(
          feature_thresholds, feature_attribution_thresholds)
      objective_config_template.objectiveConfig = api_util.GetMessage(
          'ModelMonitoringObjectiveConfig', self._version
      )(trainingDataset=training_dataset,
        trainingPredictionSkewDetectionConfig=training_prediction_skew_detection
       )
    else:
      # No training data source: fall back to prediction drift detection.
      prediction_drift_detection = self._ConstructDriftThresholds(
          feature_thresholds, feature_attribution_thresholds)
      objective_config_template.objectiveConfig = api_util.GetMessage(
          'ModelMonitoringObjectiveConfig', self._version)(
              predictionDriftDetectionConfig=prediction_drift_detection)
    if feature_attribution_thresholds:
      # Attribution thresholds require feature attributions, which are only
      # produced when the explanation config is enabled.
      objective_config_template.objectiveConfig.explanationConfig = api_util.GetMessage(
          'ModelMonitoringObjectiveConfigExplanationConfig', self._version)(
              enableFeatureAttributes=True)
  # Fan the template out to every model currently deployed on the endpoint.
  get_endpoint_req = self.messages.AiplatformProjectsLocationsEndpointsGetRequest(
      name=endpoint_name)
  endpoint = self.client.projects_locations_endpoints.Get(get_endpoint_req)
  objective_configs = []
  for deployed_model in endpoint.deployedModels:
    objective_config = copy.deepcopy(objective_config_template)
    objective_config.deployedModelId = deployed_model.id
    objective_configs.append(objective_config)
  return objective_configs
def _ParseCreateLabels(self, args):
  """Builds the LabelsValue message for a create request from CLI args."""
  labels_cls = api_util.GetMessage(
      'ModelDeploymentMonitoringJob', self._version)().LabelsValue
  return labels_util.ParseCreateArgs(args, labels_cls)
def _ParseUpdateLabels(self, model_monitoring_job_ref, args):
  """Builds the labels diff for an update request from CLI args."""
  labels_cls = api_util.GetMessage(
      'ModelDeploymentMonitoringJob', self._version)().LabelsValue
  # The existing-labels getter is passed lazily so the Get call only
  # happens when a labels update is actually requested.
  return labels_util.ProcessUpdateArgsLazy(
      args, labels_cls,
      lambda: self.Get(model_monitoring_job_ref).labels)
def Create(self, location_ref, args):
  """Creates a model deployment monitoring job.

  The job spec is either read wholesale from --monitoring-config-from-file
  or assembled from the individual CLI flags.

  Args:
    location_ref: Resource reference for the location to create the job in.
    args: Parsed argparse namespace holding the create flags.

  Returns:
    The response from the service's Create call.
  """
  endpoint_ref = _ParseEndpoint(args.endpoint, location_ref)
  job_spec = api_util.GetMessage('ModelDeploymentMonitoringJob',
                                 self._version)()
  kms_key_name = common_validation.GetAndValidateKmsKey(args)
  if kms_key_name is not None:
    job_spec.encryptionSpec = api_util.GetMessage('EncryptionSpec',
                                                  self._version)(
                                                      kmsKeyName=kms_key_name)
  if args.monitoring_config_from_file:
    # A non-empty config file replaces the whole spec built so far
    # (including the encryption spec set above).
    data = yaml.load_path(args.monitoring_config_from_file)
    if data:
      job_spec = messages_util.DictToMessageWithErrorCheck(
          data,
          api_util.GetMessage('ModelDeploymentMonitoringJob', self._version))
  else:
    job_spec.modelDeploymentMonitoringObjectiveConfigs = self._ConstructObjectiveConfigForCreate(
        location_ref, endpoint_ref.RelativeName(), args.feature_thresholds,
        args.feature_attribution_thresholds, args.dataset, args.bigquery_uri,
        args.data_format, args.gcs_uris, args.target_field,
        args.training_sampling_rate)
  job_spec.endpoint = endpoint_ref.RelativeName()
  job_spec.displayName = args.display_name
  job_spec.labels = self._ParseCreateLabels(args)
  # Anomaly Cloud Logging is off unless the flag is explicitly set.
  enable_anomaly_cloud_logging = False if args.anomaly_cloud_logging is None else args.anomaly_cloud_logging
  job_spec.modelMonitoringAlertConfig = api_util.GetMessage(
      'ModelMonitoringAlertConfig', self._version)(
          enableLogging=enable_anomaly_cloud_logging,
          emailAlertConfig=api_util.GetMessage(
              'ModelMonitoringAlertConfigEmailAlertConfig',
              self._version)(userEmails=args.emails),
          notificationChannels=args.notification_channels)
  job_spec.loggingSamplingStrategy = api_util.GetMessage(
      'SamplingStrategy', self._version)(
          randomSampleConfig=api_util.GetMessage(
              'SamplingStrategyRandomSampleConfig', self._version)(
                  sampleRate=args.prediction_sampling_rate))
  # The API takes a duration string in seconds; the flag is in hours.
  job_spec.modelDeploymentMonitoringScheduleConfig = api_util.GetMessage(
      'ModelDeploymentMonitoringScheduleConfig', self._version)(
          monitorInterval='{}s'.format(
              six.text_type(3600 * int(args.monitoring_frequency))))
  if args.predict_instance_schema:
    job_spec.predictInstanceSchemaUri = args.predict_instance_schema
  if args.analysis_instance_schema:
    job_spec.analysisInstanceSchemaUri = args.analysis_instance_schema
  if args.log_ttl:
    # --log-ttl is in days; the API expects a duration string in seconds.
    job_spec.logTtl = '{}s'.format(six.text_type(86400 * int(args.log_ttl)))
  if args.sample_predict_request:
    instance_json = model_monitoring_jobs_util.ReadInstanceFromArgs(
        args.sample_predict_request)
    job_spec.samplePredictInstance = encoding.PyValueToMessage(
        extra_types.JsonValue, instance_json)
  # The request's job field name differs between the Beta and GA surfaces.
  if self._version == constants.BETA_VERSION:
    return self._service.Create(
        self.messages.
        AiplatformProjectsLocationsModelDeploymentMonitoringJobsCreateRequest(
            parent=location_ref.RelativeName(),
            googleCloudAiplatformV1beta1ModelDeploymentMonitoringJob=job_spec
        ))
  else:
    return self._service.Create(
        self.messages.
        AiplatformProjectsLocationsModelDeploymentMonitoringJobsCreateRequest(
            parent=location_ref.RelativeName(),
            googleCloudAiplatformV1ModelDeploymentMonitoringJob=job_spec))
def Patch(self, model_monitoring_job_ref, args):
  """Update a model deployment monitoring job.

  Builds a sparse job message plus a field mask containing only the fields
  the user asked to change, then issues a Patch request.

  Args:
    model_monitoring_job_ref: Resource reference of the job to update.
    args: Parsed argparse namespace holding the update flags.

  Returns:
    The response from the service's Patch call.

  Raises:
    errors.NoFieldsSpecifiedError: If no updatable flag was set.
  """
  model_monitoring_job_to_update = api_util.GetMessage(
      'ModelDeploymentMonitoringJob', self._version)()
  update_mask = []
  job_spec = api_util.GetMessage('ModelDeploymentMonitoringJob',
                                 self._version)()
  if args.monitoring_config_from_file:
    data = yaml.load_path(args.monitoring_config_from_file)
    if data:
      job_spec = messages_util.DictToMessageWithErrorCheck(
          data,
          api_util.GetMessage('ModelDeploymentMonitoringJob', self._version))
    model_monitoring_job_to_update.modelDeploymentMonitoringObjectiveConfigs = job_spec.modelDeploymentMonitoringObjectiveConfigs
    update_mask.append('model_deployment_monitoring_objective_configs')
  if args.feature_thresholds or args.feature_attribution_thresholds:
    # New thresholds are merged into the job's existing objective configs,
    # so fetch the current job first.
    get_monitoring_job_req = self.messages.AiplatformProjectsLocationsModelDeploymentMonitoringJobsGetRequest(
        name=model_monitoring_job_ref.RelativeName())
    model_monitoring_job = self._service.Get(get_monitoring_job_req)
    model_monitoring_job_to_update.modelDeploymentMonitoringObjectiveConfigs = self._ConstructObjectiveConfigForUpdate(
        model_monitoring_job, args.feature_thresholds,
        args.feature_attribution_thresholds)
    update_mask.append('model_deployment_monitoring_objective_configs')
  if args.display_name:
    model_monitoring_job_to_update.displayName = args.display_name
    update_mask.append('display_name')
  if args.emails:
    model_monitoring_job_to_update.modelMonitoringAlertConfig = (
        api_util.GetMessage('ModelMonitoringAlertConfig', self._version)(
            emailAlertConfig=api_util.GetMessage(
                'ModelMonitoringAlertConfigEmailAlertConfig', self._version
            )(userEmails=args.emails)
        )
    )
    update_mask.append('model_monitoring_alert_config.email_alert_config')
  if args.anomaly_cloud_logging is not None:
    # Reuse the alert config created for --emails (if any) so the two
    # flags don't clobber each other's settings.
    if args.emails:
      model_monitoring_job_to_update.modelMonitoringAlertConfig.enableLogging = (
          args.anomaly_cloud_logging
      )
    else:
      model_monitoring_job_to_update.modelMonitoringAlertConfig = (
          api_util.GetMessage('ModelMonitoringAlertConfig', self._version)(
              enableLogging=args.anomaly_cloud_logging
          )
      )
    update_mask.append('model_monitoring_alert_config.enable_logging')
  if args.notification_channels:
    # Same pattern: attach to an alert config created by an earlier flag
    # if one exists, otherwise create a fresh one.
    if args.emails or args.anomaly_cloud_logging is not None:
      model_monitoring_job_to_update.modelMonitoringAlertConfig.notificationChannels = (
          args.notification_channels
      )
    else:
      model_monitoring_job_to_update.modelMonitoringAlertConfig = (
          api_util.GetMessage('ModelMonitoringAlertConfig', self._version)(
              notificationChannels=args.notification_channels
          )
      )
    update_mask.append('model_monitoring_alert_config.notification_channels')
  # Prediction request sampling rate.
  if args.prediction_sampling_rate:
    model_monitoring_job_to_update.loggingSamplingStrategy = api_util.GetMessage(
        'SamplingStrategy', self._version)(
            randomSampleConfig=api_util.GetMessage(
                'SamplingStrategyRandomSampleConfig', self._version)(
                    sampleRate=args.prediction_sampling_rate))
    update_mask.append('logging_sampling_strategy')
  # Monitoring schedule: the flag is in hours, the API wants seconds.
  if args.monitoring_frequency:
    model_monitoring_job_to_update.modelDeploymentMonitoringScheduleConfig = api_util.GetMessage(
        'ModelDeploymentMonitoringScheduleConfig', self._version)(
            monitorInterval='{}s'.format(
                six.text_type(3600 * int(args.monitoring_frequency))))
    update_mask.append('model_deployment_monitoring_schedule_config')
  if args.analysis_instance_schema:
    model_monitoring_job_to_update.analysisInstanceSchemaUri = args.analysis_instance_schema
    update_mask.append('analysis_instance_schema_uri')
  if args.log_ttl:
    # --log-ttl is in days; the API expects a duration string in seconds.
    model_monitoring_job_to_update.logTtl = '{}s'.format(
        six.text_type(86400 * int(args.log_ttl)))
    update_mask.append('log_ttl')
  labels_update = self._ParseUpdateLabels(model_monitoring_job_ref, args)
  if labels_update.needs_update:
    model_monitoring_job_to_update.labels = labels_update.labels
    update_mask.append('labels')
  if not update_mask:
    raise errors.NoFieldsSpecifiedError('No updates requested.')
  # The request's job field name differs between the Beta and GA surfaces.
  if self._version == constants.BETA_VERSION:
    req = self.messages.AiplatformProjectsLocationsModelDeploymentMonitoringJobsPatchRequest(
        name=model_monitoring_job_ref.RelativeName(),
        googleCloudAiplatformV1beta1ModelDeploymentMonitoringJob=model_monitoring_job_to_update,
        updateMask=','.join(update_mask))
  else:
    req = self.messages.AiplatformProjectsLocationsModelDeploymentMonitoringJobsPatchRequest(
        name=model_monitoring_job_ref.RelativeName(),
        googleCloudAiplatformV1ModelDeploymentMonitoringJob=model_monitoring_job_to_update,
        updateMask=','.join(update_mask))
  return self._service.Patch(req)
def Get(self, model_monitoring_job_ref):
  """Fetches a model deployment monitoring job by resource reference."""
  req = self.messages.AiplatformProjectsLocationsModelDeploymentMonitoringJobsGetRequest(
      name=model_monitoring_job_ref.RelativeName())
  return self._service.Get(req)
def List(self, limit=None, region_ref=None):
  """Yields monitoring jobs in a region, up to an optional limit."""
  # NOTE(review): region_ref defaults to None but is dereferenced
  # unconditionally below — callers are expected to always pass it.
  request = (
      self.messages
      .AiplatformProjectsLocationsModelDeploymentMonitoringJobsListRequest(
          parent=region_ref.RelativeName()))
  return list_pager.YieldFromList(
      self._service,
      request,
      field='modelDeploymentMonitoringJobs',
      batch_size_attribute='pageSize',
      limit=limit)
def Delete(self, model_monitoring_job_ref):
  """Deletes the given model deployment monitoring job."""
  req = self.messages.AiplatformProjectsLocationsModelDeploymentMonitoringJobsDeleteRequest(
      name=model_monitoring_job_ref.RelativeName())
  return self._service.Delete(req)
def Pause(self, model_monitoring_job_ref):
  """Pauses the given model deployment monitoring job."""
  req = self.messages.AiplatformProjectsLocationsModelDeploymentMonitoringJobsPauseRequest(
      name=model_monitoring_job_ref.RelativeName())
  return self._service.Pause(req)
def Resume(self, model_monitoring_job_ref):
  """Resumes the given (paused) model deployment monitoring job."""
  req = self.messages.AiplatformProjectsLocationsModelDeploymentMonitoringJobsResumeRequest(
      name=model_monitoring_job_ref.RelativeName())
  return self._service.Resume(req)

View File

@@ -0,0 +1,895 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for AI Platform models API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import constants
class ModelsClient(object):
"""High-level client for the AI Platform models surface.
Attributes:
client: An instance of the given client, or the API client aiplatform of
Beta version.
messages: The messages module for the given client, or the API client
aiplatform of Beta version.
"""
def __init__(self, client=None, messages=None):
  """Initializes the client, defaulting to the Beta aiplatform API."""
  if client is None:
    client = apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[constants.BETA_VERSION])
  self.client = client
  self.messages = messages or self.client.MESSAGES_MODULE
  self._service = self.client.projects_locations_models
def UploadV1Beta1(
    self,
    region_ref=None,
    display_name=None,
    description=None,
    version_description=None,
    artifact_uri=None,
    container_image_uri=None,
    container_command=None,
    container_args=None,
    container_env_vars=None,
    container_ports=None,
    container_grpc_ports=None,
    container_predict_route=None,
    container_health_route=None,
    container_deployment_timeout_seconds=None,
    container_shared_memory_size_mb=None,
    container_startup_probe_exec=None,
    container_startup_probe_period_seconds=None,
    container_startup_probe_timeout_seconds=None,
    container_health_probe_exec=None,
    container_health_probe_period_seconds=None,
    container_health_probe_timeout_seconds=None,
    explanation_spec=None,
    parent_model=None,
    model_id=None,
    version_aliases=None,
    labels=None,
    base_model_source=None,
):
  """Constructs and sends a v1beta1 UploadModel request; returns the LRO.

  Args:
    region_ref: Resource reference for the region to upload into.
    display_name: Display name of the Model (up to 128 UTF-8 characters).
    description: Description of the Model.
    version_description: Description of the Model version.
    artifact_uri: Path to the directory holding the Model artifact and its
      supporting files; not present for AutoML Models.
    container_image_uri: URI of the serving container image in Artifact
      Registry or Container Registry; the image is ingested at upload time.
    container_command: Entrypoint override for the container, given in
      Docker "exec" form (array of executable and arguments); corresponds
      to the Kubernetes Container `command` field.
    container_args: Arguments for the container entrypoint; corresponds to
      the Kubernetes Container `args` field.
    container_env_vars: Dict of environment variables to set in the
      container; later entries may reference earlier ones.
    container_ports: HTTP ports to expose from the container; prediction
      requests and health checks go to the first port listed.
    container_grpc_ports: gRPC ports to expose from the container; gRPC
      requests are disabled when unset.
    container_predict_route: HTTP path on the container that receives
      prediction requests.
    container_health_route: HTTP path on the container that receives
      health checks.
    container_deployment_timeout_seconds (int): Deployment timeout in
      seconds.
    container_shared_memory_size_mb (int): Amount of VM memory to reserve
      as shared memory for the model, in megabytes.
    container_startup_probe_exec (Sequence[str]): Command for the startup
      probe's exec action, e.g. ["cat", "/tmp/healthy"].
    container_startup_probe_period_seconds (int): Startup probe period in
      seconds; default 10, minimum 1.
    container_startup_probe_timeout_seconds (int): Startup probe timeout
      in seconds; default 1, minimum 1.
    container_health_probe_exec (Sequence[str]): Command for the health
      probe's exec action, e.g. ["cat", "/tmp/healthy"].
    container_health_probe_period_seconds (int): Health probe period in
      seconds; default 10, minimum 1.
    container_health_probe_timeout_seconds (int): Health probe timeout in
      seconds; default 1, minimum 1.
    explanation_spec: Default explanation specification for the Model;
      overridable per deployment or batch prediction job.
    parent_model: Resource name of the model to upload a new version into;
      only set when uploading a new version.
    model_id: ID for the uploaded Model; becomes the final component of
      the model resource name (up to 63 chars of `[a-z0-9_-]`, not
      starting with a number or hyphen).
    version_aliases: User-provided aliases so the model version can be
      referenced by alias instead of the auto-generated version id.
    labels: Dict of user-defined metadata labels to organize the Models.
    base_model_source: A GoogleCloudAiplatformV1beta1ModelBaseModelSource
      indicating the model's source (Model Garden or Generative AI Studio
      models).

  Returns:
    Response from calling upload model with given request arguments.
  """
  msgs = self.messages
  spec = msgs.GoogleCloudAiplatformV1beta1ModelContainerSpec(
      healthRoute=container_health_route,
      imageUri=container_image_uri,
      predictRoute=container_predict_route,
  )
  if container_command:
    spec.command = container_command
  if container_args:
    spec.args = container_args
  if container_env_vars:
    spec.env = [
        msgs.GoogleCloudAiplatformV1beta1EnvVar(
            name=name, value=container_env_vars[name])
        for name in container_env_vars
    ]
  if container_ports:
    spec.ports = [
        msgs.GoogleCloudAiplatformV1beta1Port(containerPort=p)
        for p in container_ports
    ]
  if container_grpc_ports:
    spec.grpcPorts = [
        msgs.GoogleCloudAiplatformV1beta1Port(containerPort=p)
        for p in container_grpc_ports
    ]
  if container_deployment_timeout_seconds:
    # The API expects a duration string such as '300s'.
    spec.deploymentTimeout = '{}s'.format(container_deployment_timeout_seconds)
  if container_shared_memory_size_mb:
    spec.sharedMemorySizeMb = container_shared_memory_size_mb
  startup_probe_requested = (
      container_startup_probe_exec
      or container_startup_probe_period_seconds
      or container_startup_probe_timeout_seconds)
  if startup_probe_requested:
    startup_action = None
    if container_startup_probe_exec:
      startup_action = msgs.GoogleCloudAiplatformV1beta1ProbeExecAction(
          command=container_startup_probe_exec)
    spec.startupProbe = msgs.GoogleCloudAiplatformV1beta1Probe(
        exec_=startup_action,
        periodSeconds=container_startup_probe_period_seconds,
        timeoutSeconds=container_startup_probe_timeout_seconds,
    )
  health_probe_requested = (
      container_health_probe_exec
      or container_health_probe_period_seconds
      or container_health_probe_timeout_seconds)
  if health_probe_requested:
    health_action = None
    if container_health_probe_exec:
      health_action = msgs.GoogleCloudAiplatformV1beta1ProbeExecAction(
          command=container_health_probe_exec)
    spec.healthProbe = msgs.GoogleCloudAiplatformV1beta1Probe(
        exec_=health_action,
        periodSeconds=container_health_probe_period_seconds,
        timeoutSeconds=container_health_probe_timeout_seconds,
    )
  model = msgs.GoogleCloudAiplatformV1beta1Model(
      artifactUri=artifact_uri,
      containerSpec=spec,
      description=description,
      versionDescription=version_description,
      displayName=display_name,
      explanationSpec=explanation_spec,
      baseModelSource=base_model_source,
  )
  if version_aliases:
    model.versionAliases = version_aliases
  if labels:
    label_props = [
        model.LabelsValue().AdditionalProperty(key=k, value=v)
        for k, v in sorted(labels.items())
    ]
    model.labels = model.LabelsValue(additionalProperties=label_props)
  upload_request = msgs.GoogleCloudAiplatformV1beta1UploadModelRequest(
      model=model, parentModel=parent_model, modelId=model_id)
  return self._service.Upload(
      msgs.AiplatformProjectsLocationsModelsUploadRequest(
          parent=region_ref.RelativeName(),
          googleCloudAiplatformV1beta1UploadModelRequest=upload_request))
def UploadV1(self,
region_ref=None,
display_name=None,
description=None,
version_description=None,
artifact_uri=None,
container_image_uri=None,
container_command=None,
container_args=None,
container_env_vars=None,
container_ports=None,
container_grpc_ports=None,
container_predict_route=None,
container_health_route=None,
container_deployment_timeout_seconds=None,
container_shared_memory_size_mb=None,
container_startup_probe_exec=None,
container_startup_probe_period_seconds=None,
container_startup_probe_timeout_seconds=None,
container_health_probe_exec=None,
container_health_probe_period_seconds=None,
container_health_probe_timeout_seconds=None,
explanation_spec=None,
parent_model=None,
model_id=None,
version_aliases=None,
labels=None):
"""Constructs, sends an UploadModel request and returns the LRO to be done.
Args:
region_ref: The resource reference for a given region. None if the region
reference is not provided.
display_name: The display name of the Model. The name can be up to 128
characters long and can be consist of any UTF-8 characters.
description: The description of the Model.
version_description: The description of the Model version.
artifact_uri: The path to the directory containing the Model artifact and
any of its supporting files. Not present for AutoML Models.
container_image_uri: Immutable. URI of the Docker image to be used as the
custom container for serving predictions. This URI must identify an
image in Artifact Registry or Container Registry. Learn more about the
[container publishing requirements](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#publishing), including
permissions requirements for the Vertex AI Service Agent. The container
image is ingested upon ModelService.UploadModel, stored internally, and
this original path is afterwards not used. To learn about the
requirements for the Docker image itself, see [Custom container
requirements](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#). You can use the URI
to one of Vertex AI's [pre-built container images for
prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-
built-containers) in this field.
container_command: Specifies the command that runs when the container
starts. This overrides the container's [ENTRYPOINT](https://docs.docker.
com/engine/reference/builder/#entrypoint). Specify this field as an
array of executable and arguments, similar to a Docker `ENTRYPOINT`'s
"exec" form, not its "shell" form. If you do not specify this field,
then the container's `ENTRYPOINT` runs, in conjunction with the args
field or the container's
[`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if
either exists. If this field is not specified and the container does not
have an `ENTRYPOINT`, then refer to the Docker documentation about [how
`CMD` and `ENTRYPOINT`
interact](https://docs.docker.com/engine/reference/builder/#understand-
how-cmd-and-entrypoint-interact). If you specify this field, then you
can also specify the `args` field to provide additional arguments for
this command. However, if you specify this field, then the container's
`CMD` is ignored. See the [Kubernetes documentation about how the
`command` and `args` fields interact with a container's `ENTRYPOINT` and
`CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-
command-argument-container/#notes). In this field, you can reference
[environment variables set by Vertex
AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#aip-variables) and environment variables set in
the env field. You cannot reference environment variables set in the
Docker image. In order for environment variables to be expanded,
reference them by using the following syntax: $( VARIABLE_NAME) Note
that this differs from Bash variable expansion, which does not use
parentheses. If a variable cannot be resolved, the reference in the
input string is used unchanged. To avoid variable expansion, you can
escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field
corresponds to the `command` field of the Kubernetes Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core).
container_args: Specifies arguments for the command that runs when the
container starts. This overrides the container's
[`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
this field as an array of executable and arguments, similar to a Docker
`CMD`'s "default parameters" form. If you don't specify this field but
do specify the command field, then the command from the `command` field
runs without any additional arguments. See the [Kubernetes documentation
about how the `command` and `args` fields interact with a container's
`ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-
application/define-command-argument-container/#notes). If you don't
specify this field and don't specify the `command` field, then the
container's
[`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd)
and `CMD` determine what runs based on their default behavior. See the
Docker documentation about [how `CMD` and `ENTRYPOINT`
interact](https://docs.docker.com/engine/reference/builder/#understand-
how-cmd-and-entrypoint-interact). In this field, you can reference
[environment variables set by Vertex
AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#aip-variables) and environment variables set in
the env field. You cannot reference environment variables set in the
Docker image. In order for environment variables to be expanded,
reference them by using the following syntax: $( VARIABLE_NAME) Note
that this differs from Bash variable expansion, which does not use
parentheses. If a variable cannot be resolved, the reference in the
input string is used unchanged. To avoid variable expansion, you can
escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field
corresponds to the `args` field of the Kubernetes Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core)..
container_env_vars: List of environment variables to set in the container.
After the container starts running, code running in the container can
read these environment variables. Additionally, the command and args
fields can reference these variables. Later entries in this list can
also reference earlier entries. For example, the following example sets
the variable `VAR_2` to have the value `foo bar`: ```json [ { "name":
"VAR_1", "value": "foo" }, { "name": "VAR_2", "value": "$(VAR_1) bar" }
] ``` If you switch the order of the variables in the example, then the
expansion does not occur. This field corresponds to the `env` field of
the Kubernetes Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core).
container_ports: List of ports to expose from the container. Vertex AI
sends any http prediction requests that it receives to the first port on
this list. Vertex AI also sends [liveness and health
checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#liveness) to this port. If you do not specify
this field, it defaults to following value: ```json [ { "containerPort":
8080 } ] ``` Vertex AI does not use ports other than the first one
listed. This field corresponds to the `ports` field of the Kubernetes
Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core).
container_grpc_ports: List of ports to expose from the container. Vertex
AI sends any grpc prediction requests that it receives to the first port
on this list. Vertex AI also sends [liveness and health
checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#liveness) to this port. If you do not specify
this field, gRPC requests to the container will be disabled. Vertex AI
does not use ports other than the first one listed. This field
corresponds to the `ports` field of the Kubernetes Containers [v1 core
API](https://kubernetes.io/docs/reference/generated/kubernetes-
api/v1.23/#container-v1-core).
container_predict_route: HTTP path on the container to send prediction
requests to. Vertex AI forwards requests sent using
projects.locations.endpoints.predict to this path on the container's IP
address and port. Vertex AI then returns the container's response in the
API response. For example, if you set this field to `/foo`, then when
Vertex AI receives a prediction request, it forwards the request body in
a POST request to the `/foo` path on the port of your container
specified by the first value of this `ModelContainerSpec`'s ports field.
If you don't specify this field, it defaults to the following value when
you deploy this Model to an Endpoint:
/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The
placeholders in this value are replaced as follows: * ENDPOINT: The last
segment (following `endpoints/`)of the Endpoint.name][] field of the
Endpoint where this Model has been deployed. (Vertex AI makes this value
available to your container code as the [`AIP_ENDPOINT_ID` environment
variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#aip-variables).) * DEPLOYED_MODEL:
DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value
available to your container code as the [`AIP_DEPLOYED_MODEL_ID`
environment variable](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#aip-variables).)
container_health_route: HTTP path on the container to send health checks
to. Vertex AI intermittently sends GET requests to this path on the
container's IP address and port to check that the container is healthy.
Read more about [health checks](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#health). For example,
if you set this field to `/bar`, then Vertex AI intermittently sends a
GET request to the `/bar` path on the port of your container specified
by the first value of this `ModelContainerSpec`'s ports field. If you
don't specify this field, it defaults to the following value when you
deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/
DEPLOYED_MODEL:predict The placeholders in this value are replaced as
follows * ENDPOINT: The last segment (following `endpoints/`)of the
Endpoint.name][] field of the Endpoint where this Model has been
deployed. (Vertex AI makes this value available to your container code
as the [`AIP_ENDPOINT_ID` environment
variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-
container-requirements#aip-variables).) * DEPLOYED_MODEL:
DeployedModel.id of the `DeployedModel`. (Vertex AI makes this value
available to your container code as the [`AIP_DEPLOYED_MODEL_ID`
environment variable](https://cloud.google.com/vertex-
ai/docs/predictions/custom-container-requirements#aip-variables).)
container_deployment_timeout_seconds (int): Deployment timeout in seconds.
container_shared_memory_size_mb (int): The amount of the VM memory to
reserve as the shared memory for the model in megabytes.
container_startup_probe_exec (Sequence[str]): Exec specifies the action to
take. Used by startup probe. An example of this argument would be
["cat", "/tmp/healthy"]
container_startup_probe_period_seconds (int): How often (in seconds) to
perform the startup probe. Default to 10 seconds. Minimum value is 1.
container_startup_probe_timeout_seconds (int): Number of seconds after
which the startup probe times out. Defaults to 1 second. Minimum value
is 1.
container_health_probe_exec (Sequence[str]): Exec specifies the action to
take. Used by health probe. An example of this argument would be ["cat",
"/tmp/healthy"]
container_health_probe_period_seconds (int): How often (in seconds) to
perform the health probe. Default to 10 seconds. Minimum value is 1.
container_health_probe_timeout_seconds (int): Number of seconds after
which the health probe times out. Defaults to 1 second. Minimum value is
1.
explanation_spec: The default explanation specification for this Model.
The Model can be used for requesting explanation after being deployed if
it is populated. The Model can be used for batch explanation if it is
populated. All fields of the explanation_spec can be overridden by
explanation_spec of DeployModelRequest.deployed_model, or
explanation_spec of BatchPredictionJob. If the default explanation
specification is not set for this Model, this Model can still be used
for requesting explanation by setting explanation_spec of
DeployModelRequest.deployed_model and for batch explanation by setting
explanation_spec of BatchPredictionJob.
parent_model: The resource name of the model into which to upload the
version. Only specify this field when uploading a new version.
model_id: The ID to use for the uploaded Model, which will become the
final component of the model resource name. This value may be up to 63
characters, and valid characters are `[a-z0-9_-]`. The first character
cannot be a number or hyphen..
version_aliases: User provided version aliases so that a model version can
be referenced via alias (i.e. projects/{project}/locations/{location}/mo
dels/{model_id}@{version_alias} instead of auto-generated version id
(i.e.
projects/{project}/locations/{location}/models/{model_id}@{version_id}).
The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A
default version alias will be created for the first version of the
model, and there must be exactly one default version alias for a model.
labels: The labels with user-defined metadata to organize your Models.
Label keys and values can be no longer than 64 characters (Unicode
codepoints), can only contain lowercase letters, numeric characters,
underscores and dashes. International characters are allowed. See
https://goo.gl/xmQnxf for more information and examples of labels.
Returns:
Response from calling upload model with given request arguments.
"""
container_spec = self.messages.GoogleCloudAiplatformV1ModelContainerSpec(
healthRoute=container_health_route,
imageUri=container_image_uri,
predictRoute=container_predict_route)
if container_command:
container_spec.command = container_command
if container_args:
container_spec.args = container_args
if container_env_vars:
container_spec.env = [
self.messages.GoogleCloudAiplatformV1EnvVar(
name=k, value=container_env_vars[k]) for k in container_env_vars
]
if container_ports:
container_spec.ports = [
self.messages.GoogleCloudAiplatformV1Port(containerPort=port)
for port in container_ports
]
if container_grpc_ports:
container_spec.grpcPorts = [
self.messages.GoogleCloudAiplatformV1Port(containerPort=port)
for port in container_grpc_ports
]
if container_deployment_timeout_seconds:
container_spec.deploymentTimeout = (
str(container_deployment_timeout_seconds) + 's'
)
if container_shared_memory_size_mb:
container_spec.sharedMemorySizeMb = container_shared_memory_size_mb
if (
container_startup_probe_exec
or container_startup_probe_period_seconds
or container_startup_probe_timeout_seconds
):
startup_probe_exec = None
if container_startup_probe_exec:
startup_probe_exec = (
self.messages.GoogleCloudAiplatformV1ProbeExecAction(
command=container_startup_probe_exec
)
)
container_spec.startupProbe = (
self.messages.GoogleCloudAiplatformV1Probe(
exec_=startup_probe_exec,
periodSeconds=container_startup_probe_period_seconds,
timeoutSeconds=container_startup_probe_timeout_seconds,
)
)
if (
container_health_probe_exec
or container_health_probe_period_seconds
or container_health_probe_timeout_seconds
):
health_probe_exec = None
if container_health_probe_exec:
health_probe_exec = (
self.messages.GoogleCloudAiplatformV1ProbeExecAction(
command=container_health_probe_exec
)
)
container_spec.healthProbe = (
self.messages.GoogleCloudAiplatformV1Probe(
exec_=health_probe_exec,
periodSeconds=container_health_probe_period_seconds,
timeoutSeconds=container_health_probe_timeout_seconds,
)
)
model = self.messages.GoogleCloudAiplatformV1Model(
artifactUri=artifact_uri,
containerSpec=container_spec,
description=description,
versionDescription=version_description,
displayName=display_name,
explanationSpec=explanation_spec)
if version_aliases:
model.versionAliases = version_aliases
if labels:
additional_properties = []
for key, value in sorted(labels.items()):
additional_properties.append(model.LabelsValue().AdditionalProperty(
key=key, value=value))
model.labels = model.LabelsValue(
additionalProperties=additional_properties)
return self._service.Upload(
self.messages.AiplatformProjectsLocationsModelsUploadRequest(
parent=region_ref.RelativeName(),
googleCloudAiplatformV1UploadModelRequest=self.messages
.GoogleCloudAiplatformV1UploadModelRequest(
model=model,
parentModel=parent_model,
modelId=model_id)))
def Get(self, model_ref):
"""Gets (describe) the given model.
Args:
model_ref: The resource reference for a given model. None if model
resource reference is not provided.
Returns:
Response from calling get model with request containing given model.
"""
request = self.messages.AiplatformProjectsLocationsModelsGetRequest(
name=model_ref.RelativeName())
return self._service.Get(request)
def Delete(self, model_ref):
"""Deletes the given model.
Args:
model_ref: The resource reference for a given model. None if model
resource reference is not provided.
Returns:
Response from calling delete model with request containing given model.
"""
request = self.messages.AiplatformProjectsLocationsModelsDeleteRequest(
name=model_ref.RelativeName())
return self._service.Delete(request)
def DeleteVersion(self, model_version_ref):
"""Deletes the given model version.
Args:
model_version_ref: The resource reference for a given model version.
Returns:
Response from calling delete version with request containing given model
version.
"""
request = (
self.messages.AiplatformProjectsLocationsModelsDeleteVersionRequest(
name=model_version_ref.RelativeName()
)
)
return self._service.DeleteVersion(request)
def List(self, limit=None, region_ref=None):
"""List all models in the given region.
Args:
limit: int, The maximum number of records to yield. None if all available
records should be yielded.
region_ref: The resource reference for a given region. None if the region
reference is not provided.
Returns:
Response from calling list models with request containing given models
and limit.
"""
return list_pager.YieldFromList(
self._service,
self.messages.AiplatformProjectsLocationsModelsListRequest(
parent=region_ref.RelativeName()),
field='models',
batch_size_attribute='pageSize',
limit=limit)
def ListVersion(self, model_ref=None, limit=None):
"""List all model versions of the given model.
Args:
model_ref: The resource reference for a given model. None if model
resource reference is not provided.
limit: int, The maximum number of records to yield. None if all available
records should be yielded.
Returns:
Response from calling list model versions with request containing given
model and limit.
"""
return list_pager.YieldFromList(
self._service,
self.messages.AiplatformProjectsLocationsModelsListVersionsRequest(
name=model_ref.RelativeName()),
method='ListVersions',
field='models',
batch_size_attribute='pageSize',
limit=limit)
def CopyV1Beta1(self,
destination_region_ref=None,
source_model=None,
kms_key_name=None,
destination_model_id=None,
destination_parent_model=None):
"""Copies the given source model into specified location.
The source model is copied into specified location (including cross-region)
either as a new model or a new model version under given parent model.
Args:
destination_region_ref: the resource reference to the location into which
to copy the Model.
source_model: The resource name of the Model to copy.
kms_key_name: The KMS key name for specifying encryption spec.
destination_model_id: The destination model resource name to copy the
model into.
destination_parent_model: The destination parent model to copy the model
as a model version into.
Returns:
Response from calling copy model.
"""
encryption_spec = None
if kms_key_name:
encryption_spec = (
self.messages.GoogleCloudAiplatformV1beta1EncryptionSpec(
kmsKeyName=kms_key_name
)
)
request = self.messages.AiplatformProjectsLocationsModelsCopyRequest(
parent=destination_region_ref.RelativeName(),
googleCloudAiplatformV1beta1CopyModelRequest=self.messages
.GoogleCloudAiplatformV1beta1CopyModelRequest(
sourceModel=source_model,
encryptionSpec=encryption_spec,
parentModel=destination_parent_model,
modelId=destination_model_id))
return self._service.Copy(request)
def CopyV1(self,
destination_region_ref=None,
source_model=None,
kms_key_name=None,
destination_model_id=None,
destination_parent_model=None):
"""Copies the given source model into specified location.
The source model is copied into specified location (including cross-region)
either as a new model or a new model version under given parent model.
Args:
destination_region_ref: the resource reference to the location into which
to copy the Model.
source_model: The resource name of the Model to copy.
kms_key_name: The name of the KMS key to use for model encryption.
destination_model_id: Optional. Thew custom ID to be used as the resource
name of the new model. This value may be up to 63 characters, and valid
characters are `[a-z0-9_-]`. The first character cannot be a number or
hyphen.
destination_parent_model: The destination parent model to copy the model
as a model version into.
Returns:
Response from calling copy model.
"""
encryption_spec = None
if kms_key_name:
encryption_spec = (
self.messages.GoogleCloudAiplatformV1EncryptionSpec(
kmsKeyName=kms_key_name
)
)
request = self.messages.AiplatformProjectsLocationsModelsCopyRequest(
parent=destination_region_ref.RelativeName(),
googleCloudAiplatformV1CopyModelRequest=self.messages
.GoogleCloudAiplatformV1CopyModelRequest(
sourceModel=source_model,
encryptionSpec=encryption_spec,
parentModel=destination_parent_model,
modelId=destination_model_id))
return self._service.Copy(request)

View File

@@ -0,0 +1,94 @@
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with long-running operations (simple uri)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.command_lib.ai import constants
def GetClientInstance(api_version=None, no_http=False):
  """Returns an apitools client for the AI Platform API.

  Args:
    api_version: str, the AI Platform API version to target, or None.
    no_http: bool, True to create the client without an http transport.

  Returns:
    The apitools client instance.
  """
  api_name = constants.AI_PLATFORM_API_NAME
  return apis.GetClientInstance(api_name, api_version, no_http=no_http)
class AiPlatformOperationPoller(waiter.CloudOperationPoller):
  """Poller for AI Platform operations API.

  This is necessary because the core operations library doesn't directly
  support simple_uri, so polling is routed through the high-level
  OperationsClient instead of the raw operations service.
  """

  def __init__(self, client):
    self.client = client
    operations_service = self.client.client.projects_locations_operations
    super(AiPlatformOperationPoller, self).__init__(
        operations_service, operations_service)

  def Poll(self, operation_ref):
    """Fetches the latest state of the operation via the wrapped client."""
    return self.client.Get(operation_ref)

  def GetResult(self, operation):
    """Returns the completed operation itself as the result."""
    return operation
class OperationsClient(object):
  """High-level client for the AI Platform operations surface."""

  def __init__(self, client=None, messages=None,
               version=constants.BETA_VERSION):
    if not client:
      client = GetClientInstance(constants.AI_PLATFORM_API_VERSION[version])
    self.client = client
    self.messages = messages or self.client.MESSAGES_MODULE

  def Get(self, operation_ref):
    """Retrieves an operation by its resource reference."""
    get_request = (
        self.messages.AiplatformProjectsLocationsOperationsGetRequest(
            name=operation_ref.RelativeName()))
    return self.client.projects_locations_operations.Get(get_request)

  def WaitForOperation(self, operation, operation_ref, message=None,
                       max_wait_ms=1800000):
    """Waits until the operation is complete or times out.

    Args:
      operation: The operation resource to wait on
      operation_ref: The operation reference to the operation resource. It's
        the result by calling resources.REGISTRY.Parse
      message: str, the message to print while waiting.
      max_wait_ms: int, number of ms to wait before raising WaitException.

    Returns:
      The operation resource when it has completed

    Raises:
      OperationTimeoutError: when the operation polling times out
      OperationError: when the operation completed with an error
    """
    poller = AiPlatformOperationPoller(self)
    # Already-finished operations need no waiting (and no progress tracker).
    if poller.IsDone(operation):
      return operation
    wait_message = message
    if wait_message is None:
      wait_message = 'Waiting for operation [{}]'.format(operation_ref.Name())
    return waiter.WaitFor(
        poller, operation_ref, wait_message, max_wait_ms=max_wait_ms)

View File

@@ -0,0 +1,164 @@
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for querying Vertex AI Persistent Resources."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.export import util as export_util
from googlecloudsdk.core.console import console_io
class PersistentResourcesClient(object):
  """Client used for interacting with the PersistentResource endpoint."""

  def __init__(self, version=constants.GA_VERSION):
    api_client = apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[version])
    self._messages = api_client.MESSAGES_MODULE
    self._version = version
    self._service = api_client.projects_locations_persistentResources
    self._message_prefix = constants.AI_PLATFORM_MESSAGE_PREFIX[version]

  def GetMessage(self, message_name):
    """Returns the versioned API message class by name, or None if absent."""
    return getattr(self._messages, self._message_prefix + message_name, None)

  def PersistentResourceMessage(self):
    """Returns the versioned PersistentResource message class."""
    return self.GetMessage('PersistentResource')

  def Create(self,
             parent,
             resource_pools,
             persistent_resource_id,
             display_name=None,
             kms_key_name=None,
             labels=None,
             network=None,
             enable_custom_service_account=False,
             service_account=None):
    """Creates a persistent resource under the given parent.

    Args:
      parent: str, The project resource path of the persistent resource to
        create.
      resource_pools: The PersistentResource message instance for the
        creation request.
      persistent_resource_id: The PersistentResource id for the creation
        request.
      display_name: str, The display name of the persistent resource to
        create.
      kms_key_name: A customer-managed encryption key to use for the
        persistent resource.
      labels: LabelValues, map-like user-defined metadata to organize the
        resource.
      network: Network to peer with the PersistentResource
      enable_custom_service_account: Whether or not to enable this
        Persistent Resource to use a custom service account.
      service_account: A service account (email address string) to use for
        creating the Persistent Resource.

    Returns:
      A PersistentResource message instance created.
    """
    resource = self.PersistentResourceMessage()(
        displayName=display_name, resourcePools=resource_pools)
    if kms_key_name is not None:
      resource.encryptionSpec = self.GetMessage('EncryptionSpec')(
          kmsKeyName=kms_key_name)
    if labels:
      resource.labels = labels
    if network:
      resource.network = network
    if enable_custom_service_account:
      service_account_spec = self.GetMessage('ServiceAccountSpec')(
          enableCustomServiceAccount=True, serviceAccount=service_account)
      resource.resourceRuntimeSpec = self.GetMessage('ResourceRuntimeSpec')(
          serviceAccountSpec=service_account_spec)
    # The resource field name on the request is version-specific.
    request_kwargs = {
        'parent': parent,
        'persistentResourceId': persistent_resource_id,
    }
    if self._version == constants.GA_VERSION:
      request_kwargs['googleCloudAiplatformV1PersistentResource'] = resource
    else:
      request_kwargs['googleCloudAiplatformV1beta1PersistentResource'] = (
          resource)
    return self._service.Create(
        self._messages
        .AiplatformProjectsLocationsPersistentResourcesCreateRequest(
            **request_kwargs))

  def List(self, limit=None, region=None):
    """Lists persistent resources from the given region, up to limit.

    Args:
      limit: How many items to return in the list
      region: Which region to list resources from

    Returns:
      A Persistent Resource list response message.
    """
    list_request = (
        self._messages
        .AiplatformProjectsLocationsPersistentResourcesListRequest(
            parent=region))
    return list_pager.YieldFromList(
        self._service,
        list_request,
        field='persistentResources',
        batch_size_attribute='pageSize',
        limit=limit)

  def Get(self, name):
    """Fetches a persistent resource by full resource name."""
    return self._service.Get(
        self._messages
        .AiplatformProjectsLocationsPersistentResourcesGetRequest(name=name))

  def Delete(self, name):
    """Deletes a persistent resource by full resource name."""
    return self._service.Delete(
        self._messages
        .AiplatformProjectsLocationsPersistentResourcesDeleteRequest(
            name=name))

  def Reboot(self, name):
    """Reboots a persistent resource by full resource name."""
    return self._service.Reboot(
        self._messages
        .AiplatformProjectsLocationsPersistentResourcesRebootRequest(
            name=name))

  def ImportResourceMessage(self, yaml_file, message_name):
    """Imports a messages class instance typed by name from a YAML file."""
    message_type = self.GetMessage(message_name)
    data = console_io.ReadFromFileOrStdin(yaml_file, binary=False)
    return export_util.Import(message_type=message_type, stream=data)

View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file is used to get the client instance and messages module for GKE recommender."""
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
# Maps calliope release tracks to gkerecommender API versions.
VERSION_MAP = {
    base.ReleaseTrack.ALPHA: 'v1alpha1',
    base.ReleaseTrack.GA: 'v1',
}
# Format string used to render apitools HttpErrors for users.
HTTP_ERROR_FORMAT = (
    'ResponseError: code={status_code}, message={status_message}'
)
# The messages module can also be accessed from client.MESSAGES_MODULE
def GetMessagesModule(release_track=base.ReleaseTrack.GA):
  """Returns the gkerecommender messages module for the release track."""
  return apis.GetMessagesModule(
      'gkerecommender', VERSION_MAP.get(release_track))
def GetClientInstance(release_track=base.ReleaseTrack.GA):
  """Returns the gkerecommender apitools client for the release track."""
  return apis.GetClientInstance(
      'gkerecommender', VERSION_MAP.get(release_track))

View File

@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*- #
# Copyright 2025 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for querying serverless ray jobs in AI Platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.export import util as export_util
from googlecloudsdk.core.console import console_io
class ServerlessRayJobsClient(object):
  """Client used for interacting with the Serverless Ray Jobs endpoint."""

  def __init__(self, version=constants.GA_VERSION):
    client = apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[version])
    self._messages = client.MESSAGES_MODULE
    self._version = version
    self._service = client.projects_locations_serverlessRayJobs
    self._message_prefix = constants.AI_PLATFORM_MESSAGE_PREFIX[version]

  def GetMessage(self, message_name):
    """Returns the API message class by name, or None if not found."""
    full_name = '{prefix}{name}'.format(
        prefix=self._message_prefix, name=message_name)
    return getattr(self._messages, full_name, None)

  def ServerlessRayJobMessage(self):
    """Returns the ServerlessRayJob resource message class."""
    return self.GetMessage('ServerlessRayJob')

  def Create(self, parent, job_spec, display_name=None, labels=None):
    """Sends a request to the endpoint to create a serverless ray job.

    Args:
      parent: str, The project resource path of the serverless ray job to
        create.
      job_spec: The ServerlessRayJobSpec message instance for the job creation
        request.
      display_name: str, The display name of the serverless ray job to create.
      labels: LabelValues, map-like user-defined metadata to organize the
        serverless ray job.

    Returns:
      A ServerlessRayJob message instance created.
    """
    job = self.ServerlessRayJobMessage()(
        displayName=display_name, jobSpec=job_spec)
    if labels:
      job.labels = labels
    # TODO(b/390679825): Add V1 version support when Serverless Ray Jobs API is
    # GA ready.
    create_request = (
        self._messages.AiplatformProjectsLocationsServerlessRayJobsCreateRequest(
            parent=parent,
            googleCloudAiplatformV1beta1ServerlessRayJob=job))
    return self._service.Create(create_request)

  def List(self, limit=None, region=None):
    """Yields serverless ray jobs in the given region, up to limit."""
    list_request = (
        self._messages.AiplatformProjectsLocationsServerlessRayJobsListRequest(
            parent=region))
    return list_pager.YieldFromList(
        self._service,
        list_request,
        field='serverlessRayJobs',
        batch_size_attribute='pageSize',
        limit=limit)

  def Get(self, name):
    """Fetches a single serverless ray job by full resource name."""
    get_request = (
        self._messages.AiplatformProjectsLocationsServerlessRayJobsGetRequest(
            name=name))
    return self._service.Get(get_request)

  def Cancel(self, name):
    """Cancels the serverless ray job with the given full resource name."""
    cancel_request = (
        self._messages
        .AiplatformProjectsLocationsServerlessRayJobsCancelRequest(name=name))
    return self._service.Cancel(cancel_request)

  def ImportResourceMessage(self, yaml_file, message_name):
    """Import a messages class instance typed by name from a YAML file."""
    message_type = self.GetMessage(message_name)
    stream = console_io.ReadFromFileOrStdin(yaml_file, binary=False)
    return export_util.Import(message_type=message_type, stream=stream)

View File

@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for AI Platform Tensorboard experiments API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import common_args
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import errors
from googlecloudsdk.command_lib.util.args import labels_util
class TensorboardExperimentsClient(object):
  """High-level client for the AI Platform Tensorboard experiment surface."""

  def __init__(self,
               client=None,
               messages=None,
               version=constants.BETA_VERSION):
    self.client = client or apis.GetClientInstance(
        constants.AI_PLATFORM_API_NAME,
        constants.AI_PLATFORM_API_VERSION[version])
    self.messages = messages or self.client.MESSAGES_MODULE
    self._service = self.client.projects_locations_tensorboards_experiments
    self._version = version

  def Create(self, tensorboard_ref, args):
    """Creates a Tensorboard experiment; delegates to the beta surface."""
    return self.CreateBeta(tensorboard_ref, args)

  def CreateBeta(self, tensorboard_ref, args):
    """Create a new Tensorboard experiment."""
    exp_message = (
        self.messages.GoogleCloudAiplatformV1beta1TensorboardExperiment)
    labels = labels_util.ParseCreateArgs(args, exp_message.LabelsValue)
    experiment = exp_message(
        displayName=args.display_name,
        description=args.description,
        labels=labels)
    request = (
        self.messages
        .AiplatformProjectsLocationsTensorboardsExperimentsCreateRequest(
            parent=tensorboard_ref.RelativeName(),
            googleCloudAiplatformV1beta1TensorboardExperiment=experiment,
            tensorboardExperimentId=args.tensorboard_experiment_id))
    return self._service.Create(request)

  def List(self, tensorboard_ref, limit=1000, page_size=50, sort_by=None):
    """Yields Tensorboard experiments under the given Tensorboard."""
    request = (
        self.messages
        .AiplatformProjectsLocationsTensorboardsExperimentsListRequest(
            parent=tensorboard_ref.RelativeName(),
            orderBy=common_args.ParseSortByArg(sort_by)))
    return list_pager.YieldFromList(
        self._service,
        request,
        field='tensorboardExperiments',
        batch_size_attribute='pageSize',
        batch_size=page_size,
        limit=limit)

  def Get(self, tensorboard_exp_ref):
    """Fetches a single Tensorboard experiment."""
    request = (
        self.messages
        .AiplatformProjectsLocationsTensorboardsExperimentsGetRequest(
            name=tensorboard_exp_ref.RelativeName()))
    return self._service.Get(request)

  def Delete(self, tensorboard_exp_ref):
    """Deletes a Tensorboard experiment."""
    request = (
        self.messages
        .AiplatformProjectsLocationsTensorboardsExperimentsDeleteRequest(
            name=tensorboard_exp_ref.RelativeName()))
    return self._service.Delete(request)

  def Patch(self, tensorboard_exp_ref, args):
    """Updates a Tensorboard experiment; delegates to the beta surface."""
    return self.PatchBeta(tensorboard_exp_ref, args)

  def PatchBeta(self, tensorboard_exp_ref, args):
    """Update a Tensorboard experiment.

    Raises:
      errors.NoFieldsSpecifiedError: if args request no changes at all.
    """
    exp_message = (
        self.messages.GoogleCloudAiplatformV1beta1TensorboardExperiment)
    tensorboard_exp = exp_message()
    update_mask = []

    # Existing labels are fetched lazily, only if the update actually needs
    # them (e.g. for --update-labels / --remove-labels merging).
    labels_update = labels_util.ProcessUpdateArgsLazy(
        args, exp_message.LabelsValue,
        lambda: self.Get(tensorboard_exp_ref).labels)
    if labels_update.needs_update:
      tensorboard_exp.labels = labels_update.labels
      update_mask.append('labels')
    if args.display_name is not None:
      tensorboard_exp.displayName = args.display_name
      update_mask.append('display_name')
    if args.description is not None:
      tensorboard_exp.description = args.description
      update_mask.append('description')
    if not update_mask:
      raise errors.NoFieldsSpecifiedError('No updates requested.')

    request = (
        self.messages
        .AiplatformProjectsLocationsTensorboardsExperimentsPatchRequest(
            name=tensorboard_exp_ref.RelativeName(),
            googleCloudAiplatformV1beta1TensorboardExperiment=tensorboard_exp,
            updateMask=','.join(update_mask)))
    return self._service.Patch(request)

Some files were not shown because too many files have changed in this diff Show More