feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,29 @@
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml vision."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA,
                    base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class Vision(base.Group):
  """Use Google Cloud Vision to analyze images.

  Calliope command group anchoring the `gcloud ml vision` surface; the
  actual detect-* subcommands are attached to this group elsewhere.
  Registered in the GA, beta, and alpha release tracks.
  """

View File

@@ -0,0 +1,59 @@
help_text:
language_hints: |
Language hints can be provided to Google Cloud Vision API. In most cases,
an empty value yields the best results since it enables automatic language
detection. For languages based on the Latin alphabet, setting
`language_hints` is not needed. Text detection returns an error if one or
more of the specified languages is not one of the supported languages.
(See https://cloud.google.com/vision/docs/languages.) To provide language
hints run:
$ {command} --language-hints ja,ko
request:
collection: vision.images
method: annotate
api_version: v1
response:
error:
field: responses.error
code: code
message: message
args:
image_field:
api_field: requests.image
arg_name: image_path
is_positional: true
help_text: |
Path to the image to be analyzed. This can be either a local path
or a URL. If you provide a local file, the contents will be sent
directly to Google Cloud Vision. If you provide a URL, it must be in
Google Cloud Storage format (gs://bucket/object) or an HTTP URL
(http://... or https://...)
processor: googlecloudsdk.command_lib.ml.vision.util:GetImageFromPath
language_hints_field:
api_field: requests.imageContext.languageHints
arg_name: language-hints
help_text: List of languages to use for text detection.
max_results_field:
api_field: requests.features.maxResults
arg_name: max-results
help_text: Maximum number of results to be provided.
model_field:
api_field: requests.features.model
arg_name: model-version
default: builtin/stable
release_tracks: [ALPHA, BETA]
help_text: |
Model version to use for the feature.
type: str
choices:
- arg_value: builtin/stable
enum_value: builtin/stable
- arg_value: builtin/latest
enum_value: builtin/latest

View File

@@ -0,0 +1,72 @@
- release_tracks: [GA]
help_text:
brief: Detect dense text in an image.
description: |
Detect dense text in an image, such as books and research reports.
Google Cloud Vision uses OCR (Optical Character Recognition) to analyze text.
This is a premium feature for dense text such as books, research
reports, and PDFs. To detect small amounts of text such as on signs, use
`detect-text` instead. For more information on this feature, see the Google
Cloud Vision documentation at https://cloud.google.com/vision/docs/.
{language_hints}
language_hints: !COMMON help_text.language_hints
examples: |
To detect dense text in image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: DOCUMENT_TEXT_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.language_hints_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect dense text in an image.
description: |
Detect dense text in an image, such as books and research reports.
Google Cloud Vision uses OCR (Optical Character Recognition) to analyze text.
This is a premium feature for dense text such as books, research
reports, and PDFs. To detect small amounts of text such as on signs, use
`detect-text` instead. For more information on this feature, see the Google
Cloud Vision documentation at https://cloud.google.com/vision/docs/.
{language_hints}
language_hints: !COMMON help_text.language_hints
examples: |
To detect dense text in image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: DOCUMENT_TEXT_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.language_hints_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,54 @@
- release_tracks: [GA]
help_text:
brief: Detect faces within an image.
description: |
Detect faces within an image.
examples: |
To detect faces in image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: FACE_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect faces within an image.
description: |
Detect faces within an image.
examples: |
To detect faces in image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: FACE_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,52 @@
- release_tracks: [GA]
help_text:
brief: Detect general attributes of an image.
description: |
Detect general attributes of an image, such as dominant color.
examples: |
To detect general attributes of image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: IMAGE_PROPERTIES
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect general attributes of an image.
description: |
Detect general attributes of an image, such as dominant color.
examples: |
To detect general attributes of image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: IMAGE_PROPERTIES
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,56 @@
- release_tracks: [GA]
help_text:
brief: Detect broad sets of categories within an image.
description: |
Label Detection detects categories in an image, ranging from modes of
transportation to animals.
examples: |
To detect categories in an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: LABEL_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect broad sets of categories within an image.
description: |
Label Detection detects categories in an image, ranging from modes of
transportation to animals.
examples: |
To detect categories in an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: LABEL_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,56 @@
- release_tracks: [GA]
help_text:
brief: Detect popular natural and man-made structures within an image.
description: |
Google Cloud Vision will recognize landmarks in an image, such as "Palace
of Fine Arts."
examples: |
To recognize landmarks in an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: LANDMARK_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect popular natural and man-made structures within an image.
description: |
Google Cloud Vision will recognize landmarks in an image, such as "Palace
of Fine Arts."
examples: |
To recognize landmarks in an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: LANDMARK_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,54 @@
- release_tracks: [GA]
help_text:
brief: Detect popular product logos within an image.
description: |
Detect popular product logos within an image.
examples: |
To detect product logos in an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: LOGO_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect popular product logos within an image.
description: |
Detect popular product logos within an image.
examples: |
To detect product logos in an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: LOGO_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,29 @@
- release_tracks: [ALPHA, BETA, GA]
help_text:
brief: Detect and extract multiple objects in an image with object localization.
description: |
Detect and extract multiple objects in an image with object localization.
Object localization identifies multiple objects in an image
and provides a LocalizedObjectAnnotation for each object in the image.
examples: |
To detect objects for image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: OBJECT_LOCALIZATION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,60 @@
- release_tracks: [BETA, ALPHA]
help_text:
brief: Detect products within an image.
description: |
Detect products within an image.
examples: |
To detect a product in image 'gs://my-bucket/my-image.jpg' by searching in product set
'my-product-set', in category 'toys', run:
$ {command} gs://my-bucket/my-image.jpg --product-set='my-product-set' --product-set-location=us-east1 --category='toys'
request:
_COMMON_: request
static_fields:
requests.features.type: PRODUCT_SEARCH
modify_request_hooks:
- googlecloudsdk.command_lib.ml.vision.product_search.utils:AddProductSetToDetectProductRequest
- googlecloudsdk.command_lib.ml.vision.product_search.utils:AddBoundingPolygonToDetectProductRequest
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
- api_field: requests.imageContext.productSearchParams.productCategories
arg_name: category
help_text: Product category to search in.
required: true
choices:
- arg_value: apparel
enum_value: apparel
- arg_value: homegoods
enum_value: homegoods
- arg_value: toys
enum_value: toys
- api_field: requests.imageContext.productSearchParams.filter
arg_name: filter
help_text: |-
Filter expression to restrict search results based on product labels. ANDs of ORs of
key-value expressions are supported, where expressions within an OR must
have the same key. Expressions separated by AND must have different keys.
An '=' should be used to connect the key and value. For example,
'(color = red OR color = blue) AND brand = Google' is acceptable, but not
'(color = red OR brand = Google)' or 'color: red'.
- arg_name: bounding-polygon
help_text: |-
Bounding polygon around the areas of interest in the image. If it is not specified, system
discretion will be applied. A bounding polygon can be specified by a list of vertices or
normalized vertices. A vertex (x, y) represents a 2D point in the image. x, y are integers
and are in the same scale as the original image. The normalized vertex coordinates are
relative to the original image and range from 0 to 1. For example,
--bounding-polygon=0.,0.,0.,0.3,0.3,0.,0.3,0.3 specifies a polygon with 4 normalized
vertices - (0., 0.), (0., 0.3), (0.3, 0.), (0.3, 0.3). Notice that the decimal point is
needed for normalized vertex coordinates.
additional_arguments_hook: googlecloudsdk.command_lib.ml.vision.product_search.utils:ProductSetArgsForDetectProduct
output:
format: json

View File

@@ -0,0 +1,56 @@
- release_tracks: [GA]
help_text:
brief: Detect explicit content in an image.
description: |
Safe Search Detection detects adult content, violent content, medical
content and spoof content in an image.
examples: |
To detect adult content, violent content, medical
content and spoof content in an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: SAFE_SEARCH_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect explicit content in an image.
description: |
Safe Search Detection detects adult content, violent content, medical
content and spoof content in an image.
examples: |
To detect adult content, violent content, medical
content and spoof content in an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: SAFE_SEARCH_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,68 @@
- release_tracks: [GA]
help_text:
brief: Detect and extract text within an image.
description: |
Detect and extract text within an image.
Google Cloud Vision uses OCR (Optical Character Recognition) to detect
text within an image, with support for a broad array of languages and
automatic label detection.
{language_hints}
language_hints: !COMMON help_text.language_hints
examples: |
To detect and extract text within an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: TEXT_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.language_hints_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect and extract text within an image.
description: |
Detect and extract text within an image.
Google Cloud Vision uses OCR (Optical Character Recognition) to detect
text within an image, with support for a broad array of languages and
automatic label detection.
{language_hints}
language_hints: !COMMON help_text.language_hints
examples: |
To detect and extract text within an image 'gs://my_bucket/input_file':
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: TEXT_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.language_hints_field
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,56 @@
- release_tracks: [ALPHA, BETA, GA]
help_text:
brief: Detect and transcribe text from PDF files stored in Google Cloud Storage.
description: |
Detect and transcribe text from PDF files stored in Google Cloud Storage.
The Vision API accepts PDF files up to 2000 pages.
Larger files will return an error.
examples: |
To detect text for input PDF file 'gs://my_bucket/input_file' and store output in 'gs://my_bucket/out_put_prefix':
$ {command} gs://my_bucket/input_file
gs://my_bucket/out_put_prefix
request:
collection: vision.files
method: asyncBatchAnnotate
api_version: v1
static_fields:
requests.features.type: DOCUMENT_TEXT_DETECTION
requests.inputConfig.mimeType: 'application/pdf'
response:
error:
field: error
code: code
message: message
arguments:
params:
- api_field: requests.inputConfig.gcsSource
arg_name: input_file
is_positional: true
help_text: |
Google Cloud Storage location to read the input from. It must be in
Google Cloud Storage format (gs://bucket/object)
processor: googlecloudsdk.command_lib.ml.vision.util:GetGcsSourceFromPath
- api_field: requests.outputConfig.gcsDestination
arg_name: output_path
is_positional: true
help_text: |
Google Cloud Storage location to store the output file. It must be in
Google Cloud Storage format (gs://bucket/object)
processor: googlecloudsdk.command_lib.ml.vision.util:GetGcsDestinationFromPath
- api_field: requests.outputConfig.batchSize
arg_name: batch-size
help_text: |
Maximum number of response protos to put into each output JSON file on
Google Cloud Storage.
The valid range is [1, 100]. If not specified, the default value is 20.
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,55 @@
- release_tracks: [ALPHA, BETA, GA]
help_text:
brief: Detect and transcribe text from TIFF files stored in Google Cloud Storage.
description: |
Detect and transcribe text from TIFF files stored in Google Cloud Storage.
The Vision API accepts TIFF files up to 2000 pages.
Larger files will return an error.
examples: |
To detect text for input TIFF file `gs://my_bucket/input_file` and store output in `gs://my_bucket/out_put_prefix`:
$ {command} gs://my_bucket/input_file
gs://my_bucket/out_put_prefix
request:
collection: vision.files
method: asyncBatchAnnotate
api_version: v1
static_fields:
requests.features.type: DOCUMENT_TEXT_DETECTION
requests.inputConfig.mimeType: 'image/tiff'
response:
error:
field: error
code: code
message: message
arguments:
params:
- api_field: requests.inputConfig.gcsSource
arg_name: input_file
is_positional: true
help_text: |
Google Cloud Storage location to read the input from. It must be in
Google Cloud Storage format (gs://bucket/object)
processor: googlecloudsdk.command_lib.ml.vision.util:GetGcsSourceFromPath
- api_field: requests.outputConfig.gcsDestination
arg_name: output_path
is_positional: true
help_text: |
Google Cloud Storage location to store the output file. It must be in
Google Cloud Storage format (gs://bucket/object)
processor: googlecloudsdk.command_lib.ml.vision.util:GetGcsDestinationFromPath
- api_field: requests.outputConfig.batchSize
arg_name: batch-size
help_text: |
Maximum number of response protos to put into each output JSON file on
Google Cloud Storage.
The valid range is [1, 100]. If not specified, the default value is 20.
- _COMMON_: args.model_field
output:
format: json

View File

@@ -0,0 +1,59 @@
- release_tracks: [GA]
help_text:
brief: Detect entities in an image from similar images on the web.
description: |
Detect entities in an image from similar images on the web.
examples: |
To detect entities in an image `gs://my_bucket/input_file`:
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: WEB_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Detect entities in an image from similar images on the web.
description: |
Detect entities in an image from similar images on the web.
examples: |
To detect entities in an image `gs://my_bucket/input_file`:
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: WEB_DETECTION
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.max_results_field
- _COMMON_: args.model_field
- api_field: requests.imageContext.webDetectionParams.includeGeoResults
arg_name: include-geo-results
action: store_true
help_text: Whether to include results derived from the geo information in the
image.
output:
format: json

View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for operations of ml vision."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class Operations(base.Group):
  """Manage Google Cloud Vision operations.

  Calliope command group for long-running-operation subcommands under
  `gcloud ml vision operations`. Registered only in the beta and alpha
  release tracks.
  """

View File

@@ -0,0 +1,25 @@
- release_tracks: [BETA, ALPHA]
help_text:
brief: Describe a long-running operation.
description: Describe a long-running operation.
examples: |-
To describe the long-running operation with name 'projects/my-project/locations/us-east1/operations/123', run:
$ {command} projects/my-project/locations/us-east1/operations/123
request:
collection: vision.operations
api_version: v1
disable_resource_check: true
arguments:
params:
- arg_name: operation
api_field: name
is_positional: true
required: true
help_text: |
Full name of the operation to describe.
output:
format: json

View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml vision product-search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class ProductSearch(base.Group):
  """Use Google Cloud Vision to analyze products in images.

  Calliope command group anchoring `gcloud ml vision product-search`;
  product, product-set, and reference-image subgroups hang off this
  group. Registered only in the beta and alpha release tracks.
  """

View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml vision product-search images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class Images(base.Group):
  """Use the Google Cloud Vision API to manipulate reference images.

  Calliope command group for the reference-image subcommands (create,
  delete, describe, list) of product search. Registered only in the
  beta and alpha release tracks.
  """

View File

@@ -0,0 +1,44 @@
- release_tracks: [ALPHA, BETA]
help_text:
brief: Create a reference image.
description: Create a reference image.
examples: |
To create a reference image 'my-image' in product 'my-product' in the
location 'us-east1' from image of 'gs://my-bucket/image.jpg' with two
bounding polygons, run:
$ {command} my-image --location=my-location --product=my-product --storage-location=gs://my-bucket/image.jpg --flags-file=my-flags.yaml
The content of 'my-flags.yaml' is as follows:
```
- --bounding-polygon:
vertices:
- {x: 0, y: 0}
- {x: 0, y: 10}
- {x: 10, y: 0}
- {x: 10, y: 10}
- --bounding-polygon:
normalized-vertices:
- {x: 0.8, y: 0.8}
- {x: 0.8, y: 1}
- {x: 1, y: 0.8}
- {x: 1, y: 1}
```
request:
collection: vision.projects.locations.products.referenceImages
api_version: v1
modify_request_hooks:
- googlecloudsdk.command_lib.ml.vision.product_search.utils:AddBoundingPolygonsToReferenceImageCreationRequest
arguments:
resource:
help_text: Reference image to create.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:reference_image
params:
- api_field: referenceImage.uri
arg_name: storage-location
required: true
help_text: |-
The Google Cloud Storage URI of the reference image. The URI must start with 'gs://'.
additional_arguments_hook: googlecloudsdk.command_lib.ml.vision.product_search.utils:AddBoundingPolygonsArg

View File

@@ -0,0 +1,17 @@
- release_tracks: [ALPHA, BETA]
help_text:
brief: Delete a reference image.
description: Delete a reference image.
examples: |
To delete the image 'my-image' from 'my-product' in location of 'us-east1', run:
$ {command} my-image --location=us-east1 --product=my-product
request:
collection: vision.projects.locations.products.referenceImages
api_version: v1
arguments:
resource:
help_text: Reference image to delete.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:reference_image

View File

@@ -0,0 +1,17 @@
- release_tracks: [ALPHA, BETA]
help_text:
brief: Describe a reference image.
description: Describe a reference image.
examples: |
To describe the image 'my-image' of 'my-product' in location of 'us-east1', run:
$ {command} my-image --location=us-east1 --product=my-product
request:
collection: vision.projects.locations.products.referenceImages
api_version: v1
arguments:
resource:
help_text: Reference image to describe.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:reference_image

View File

@@ -0,0 +1,30 @@
- release_tracks: [ALPHA, BETA]
help_text:
brief: List all reference images for a product.
description: List all reference images for a product.
examples: |
To list all reference images in product 'my-product' and 'my-product' is in 'us-east1', run:
$ {command} --location=us-east1 --product=my-product
request:
collection: vision.projects.locations.products.referenceImages
api_version: v1
response:
id_field: name
arguments:
resource:
help_text: The product of reference images to list.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product
is_positional: false
output:
format: |
table(
name.scope("referenceImages"):label=NAME,
name.scope("products").segment(0):label=PRODUCT,
uri:label=IMAGE_LOCATION
)

View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml product-search product-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class ProductSet(base.Group):
  """Use Google Cloud Vision to manipulate product sets.

  Calliope command group for the product-set subcommands of product
  search (add-product, create, delete, describe, import, list,
  list-products, remove-product). Registered only in the beta and
  alpha release tracks.
  """

View File

@@ -0,0 +1,28 @@
- release_tracks: [ALPHA, BETA]
help_text:
brief: Add a product to the specified product set.
description: Add a product to the specified product set. One product can be added to at most 100 product sets.
examples: |
To add product 'my-product' to product set 'my-product-set' in location 'us-east1', run:
$ {command} my-product-set --location=us-east1 --product=my-product
request:
collection: vision.projects.locations.productSets
method: addProduct
modify_request_hooks:
- googlecloudsdk.command_lib.ml.vision.product_search.utils:FixProductInAddProductToProductSetRequest
arguments:
resource:
help_text: Product set to add product to.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product_set
params:
- api_field: addProductToProductSetRequest.product
arg_name: product
required: true
help_text: |-
The id of the product to add to the product set. The product must be in the same project
and location as the specified product set.

View File

@@ -0,0 +1,26 @@
- release_tracks: [ALPHA, BETA]
help_text:
brief: Create a new product set.
description: Create a product set.
examples: |-
To create a product set 'my-product-set' in location 'us-east1' with a display name
'my favorite', run:
$ {command} my-product-set --location=us-east1 --display-name='my favorite'
request:
collection: vision.projects.locations.productSets
arguments:
resource:
help_text: Product set to create.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product_set
params:
- api_field: productSet.displayName
arg_name: display-name
required: true
help_text: |-
The user provided name for this product set. It must not be empty and can be up to 4096
characters long.

View File

@@ -0,0 +1,17 @@
- release_tracks: [ALPHA, BETA]
help_text:
brief: Delete a product set.
description: Delete a product set.
examples: |-
To delete 'my-product-set' from location 'us-east1', run:
$ {command} my-product-set --location=us-east1
request:
collection: vision.projects.locations.productSets
arguments:
resource:
help_text: Product set to delete.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product_set

View File

@@ -0,0 +1,17 @@
- release_tracks: [ALPHA, BETA]
help_text:
brief: Describe a product set.
description: Describe a product set.
examples: |-
To describe 'my-product-set' in location 'us-east1', run:
$ {command} my-product-set --location=us-east1
request:
collection: vision.projects.locations.productSets
arguments:
resource:
help_text: Product set to describe.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product_set

View File

@@ -0,0 +1,38 @@
- release_tracks: [ALPHA, BETA]
command_type: GENERIC
help_text:
brief: Import a list of reference images to specified product sets.
description: Import a list of reference images to specified product sets based on a list of image information.
examples: |
To import reference images from 'gs://my-bucket/my-object.csv' into location 'us-east1', run:
$ {command} --location=us-east1 gs://my-bucket/my-object.csv
request:
collection: vision.projects.locations.productSets
disable_resource_check: true
method: import
arguments:
resource:
help_text: Location to import to.
spec: !REF googlecloudsdk.command_lib.ml.vision.resources:location
is_positional: false
override_resource_collection: true
is_parent_resource: true
params:
- api_field: importProductSetsRequest.inputConfig.gcsSource.csvFileUri
arg_name: source
is_positional: true
required: true
help_text: |-
Google Cloud Storage URI of the input csv file which must start with `gs://`.
The format of the input csv file should be one image per line.
In each line, there are 8 columns - `image-uri`, `image-id`, `product-set-id`, `product-id`,
`product-category`, `product-display-name`, `labels`, and `bounding-poly`.
The `image-uri`, `product-set-id`, `product-id`, and `product-category` columns are required.
See https://cloud.google.com/vision/product-search/docs/csv-format for more information on the
csv format and the meaning of each column.
async:
collection: vision.projects.locations.operations
extract_resource_result: false

View File

@@ -0,0 +1,29 @@
# Declarative calliope spec for `gcloud [alpha|beta] ml vision product-sets list`.
- release_tracks: [ALPHA, BETA]
  help_text:
    brief: List product sets in a location.
    description: List product sets in a location.
    examples: |-
      To list all product sets in location 'us-east1', run:
        $ {command} --location=us-east1
  request:
    collection: vision.projects.locations.productSets
  arguments:
    resource:
      help_text: Location to list all product sets.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:location
      # The parent location is taken via --location rather than a positional.
      is_positional: false
  response:
    id_field: name
  # Table projection: NAME is the name segment after "productSets/",
  # LOCATION the segment after "locations/".
  output:
    format: |
      table(
        name.scope("productSets"):label=NAME,
        name.scope("locations").segment(0):label=LOCATION,
        displayName
      )

View File

@@ -0,0 +1,24 @@
# Declarative calliope spec for listing the products contained in one product set
# (`gcloud [alpha|beta] ml vision product-sets list-products`).
- release_tracks: [ALPHA, BETA]
  command_type: LIST
  help_text:
    brief: List products in a product set.
    description: List products in a product set.
    examples: |
      To list products in product set 'my-product-set' in location 'us-east1', run:
        $ {command} --product-set=my-product-set --location=us-east1
  request:
    collection: vision.projects.locations.productSets.products
    method: list
    modify_request_hooks:
    # Per its name, this hook fixes up the request's resource name for the
    # nested productSets.products list call — see the utils module for details.
    - googlecloudsdk.command_lib.ml.vision.product_search.utils:FixNameInListProductsInProductSetRequest
  arguments:
    resource:
      help_text: Product set in which to list products.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product_set_product
  output:
    format: json

View File

@@ -0,0 +1,28 @@
# Declarative calliope spec for `gcloud [alpha|beta] ml vision product-sets remove-product`.
- release_tracks: [ALPHA, BETA]
  help_text:
    brief: Remove a product from a product set.
    description: Remove a product from a product set.
    examples: |
      To remove product 'my-product' from product set 'my-product-set' in location 'us-east1', run:
        $ {command} my-product-set --location=us-east1 --product=my-product
  request:
    collection: vision.projects.locations.productSets
    method: removeProduct
    modify_request_hooks:
    # Per its name, this hook rewrites the --product value into the full
    # resource name expected by the removeProduct request body.
    - googlecloudsdk.command_lib.ml.vision.product_search.utils:FixProductInRemoveProductFromProductSetRequest
  arguments:
    resource:
      help_text: Product set to remove product from.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product_set
    params:
    - api_field: removeProductFromProductSetRequest.product
      arg_name: product
      required: true
      help_text: |-
        The id of the product to remove from the product set. The product must be in the same
        project and location as the specified product set.

View File

@@ -0,0 +1,25 @@
# Declarative calliope spec for `gcloud [alpha|beta] ml vision product-sets update`.
- release_tracks: [ALPHA, BETA]
  help_text:
    brief: Update a product set.
    description: Update a product set.
    # Fixed typos in the user-facing example: "locaiton" -> "location" and
    # "my-produt-set" -> "my-product-set" (now matches the prose above it).
    examples: |
      To change the display name to 'my favorite' of product set 'my-product-set'
      in location 'us-east1', run:
        $ {command} my-product-set --location=us-east1 --display-name='my favorite'
  request:
    collection: vision.projects.locations.productSets
  arguments:
    resource:
      help_text: Product set to update.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product_set
    params:
    - api_field: productSet.displayName
      arg_name: display-name
      help_text: |-
        The user provided name for this product set. It must not be empty and must be at most 4096
        characters long.

View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml vision product-search products."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
# Exposed only in the beta and alpha release tracks (no GA track is listed).
@base.ReleaseTracks(base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class Product(base.Group):
  """Use Google Cloud Vision to manipulate products."""

View File

@@ -0,0 +1,44 @@
# Declarative calliope spec for `gcloud [alpha|beta] ml vision products create`.
- release_tracks: [ALPHA, BETA]
  help_text:
    # Capitalized the brief for consistency with its own description and with
    # the product-sets command specs in this surface.
    brief: Create a product.
    description: Create a product.
    examples: |
      To create a product 'my-product' in 'us-east1' with category of 'toys' and display name of
      'favorite product', run:
        $ {command} my-product --location=us-east1 --category=toys --display-name='favorite product'
  request:
    collection: vision.projects.locations.products
    modify_request_hooks:
    # Per its name, this hook converts the label arguments into the request's
    # product label representation before the create call.
    - googlecloudsdk.command_lib.ml.vision.product_search.utils:PrepareProductLabelsForProductCreationRequest
  arguments:
    resource:
      help_text: The product to create.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product
    params:
    - api_field: product.displayName
      arg_name: display-name
      required: true
      help_text: |-
        The display name of this product. It must not be empty and can be up to 4096 characters
        long.
    - api_field: product.description
      arg_name: description
      help_text: |-
        An optional description of this product. It can be up to 4096 characters long.
    - api_field: product.productCategory
      arg_name: category
      required: true
      choices:
      - arg_value: homegoods
        enum_value: homegoods
      - arg_value: apparel
        enum_value: apparel
      - arg_value: toys
        enum_value: toys
      help_text: |-
        The category for the product. After being set, it cannot be changed.
    # Label flags are generated in code rather than declared here.
    additional_arguments_hook: googlecloudsdk.command_lib.ml.vision.product_search.utils:ProductLabelsArgumentsForCreate

View File

@@ -0,0 +1,17 @@
# Declarative calliope spec for `gcloud [alpha|beta] ml vision products delete`.
- release_tracks: [ALPHA, BETA]
  help_text:
    # Capitalized brief and description for consistency with the rest of this surface.
    brief: Delete a product.
    description: Delete a product.
    examples: |
      To delete product 'my-product' in location 'us-east1', run:
        $ {command} my-product --location=us-east1
  request:
    collection: vision.projects.locations.products
  arguments:
    resource:
      help_text: The product to delete.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product

View File

@@ -0,0 +1,43 @@
# Declarative calliope spec for the ALPHA-only products purge command.
- release_tracks: [ALPHA]
  help_text:
    # Imperative voice for consistency with the other commands in this surface.
    brief: Delete all Products in a ProductSet or all Products that are in no ProductSet.
    description: Delete all Products in a ProductSet or all Products that are in no ProductSet.
    examples: |
      To delete all products in no product sets in location 'us-east1' and project 'test-project', run:
        $ {command} --orphan-products us-east1 --project=test-project
  request:
    collection: vision.projects.locations.products
    method: purge
    modify_request_hooks:
    # Per its name, this hook prompts the user before the bulk delete proceeds.
    - googlecloudsdk.command_lib.ml.vision.product_search.utils:PromptDeleteAll
  arguments:
    resource:
      # Fixed help text: this command purges products, not a product set.
      help_text: The location in which to delete products.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:non_primary_location
      is_parent_resource: true
    params:
    - api_field: purgeProductsRequest.force
      arg_name: force
      help_text: |
        If specified, user will not be prompted and all the products in specified set will
        be deleted. If not specified, user will be prompted to continue the delete.
    - group:
      mutex: true
      required: true
      params:
      - api_field: purgeProductsRequest.productSetPurgeConfig.productSetId
        arg_name: product-set
        help_text: |
          Delete all Products that are in a ProductSet. Even if the Product is in multiple
          ProductSets, the Product is still deleted.
      - api_field: purgeProductsRequest.deleteOrphanProducts
        action: store_true
        arg_name: orphan-products
        help_text: |
          Delete all Products that are in no ProductSet.

View File

@@ -0,0 +1,17 @@
# Declarative calliope spec for `gcloud [alpha|beta] ml vision products describe`.
- release_tracks: [ALPHA, BETA]
  help_text:
    # Capitalized brief and description for consistency with the rest of this surface.
    brief: Describe a product.
    description: Describe a product.
    examples: |
      To describe product 'my-product' in location 'us-east1', run:
        $ {command} my-product --location=us-east1
  request:
    collection: vision.projects.locations.products
  arguments:
    resource:
      help_text: The product to describe.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product

View File

@@ -0,0 +1,29 @@
# Declarative calliope spec for `gcloud [alpha|beta] ml vision products list`.
- release_tracks: [ALPHA, BETA]
  help_text:
    # Capitalized brief and description for consistency with the rest of this surface.
    brief: List all products in a location.
    description: List all products in a location.
    examples: |
      To list products in location 'us-east1', run:
        $ {command} --location=us-east1
  request:
    collection: vision.projects.locations.products
  response:
    id_field: name
  arguments:
    resource:
      help_text: The location to list all products.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:location
      # The parent location is taken via --location rather than a positional.
      is_positional: false
  # Table projection: NAME is the name segment after "products/",
  # LOCATION the segment after "locations/".
  output:
    format: |
      table(
        name.scope("products"):label=NAME,
        name.scope("locations").segment(0):label=LOCATION,
        displayName
      )

View File

@@ -0,0 +1,40 @@
# Declarative calliope spec for `gcloud [alpha|beta] ml vision products update`.
- release_tracks: [ALPHA, BETA]
  help_text:
    # Capitalized brief and description for consistency with the rest of this surface.
    brief: Update a product.
    description: Update a product.
    # Fixed typo in the first example: the prose said 'new-display--name'
    # (double hyphen) while the command below it uses 'new-display-name'.
    examples: |
      To change the display name to 'new-display-name', and description to 'new-description' of
      product 'my-product' in location 'us-east1', run:
        $ {command} my-product --location=us-east1 --display-name='new-display-name' --description='new-description'
      To add labels 'color=blue' to and remove 'weight=10lbs' from product 'my-product' in location 'us-east1',
      run:
        $ {command} my-product --location=us-east1 --add-product-labels='color=blue' --remove-product-labels='weight=10lbs'
      To clear all labels from product 'my-product' in location 'us-east1', run:
        $ {command} my-product --location=us-east1 --clear-product-labels
  request:
    collection: vision.projects.locations.products
    modify_request_hooks:
    # Per its name, this hook builds the label diff and the update mask for
    # the patch request from the label flags.
    - googlecloudsdk.command_lib.ml.vision.product_search.utils:UpdateLabelsAndUpdateMaskForProductUpdateRequest
  arguments:
    resource:
      help_text: The product to update.
      spec: !REF googlecloudsdk.command_lib.ml.vision.resources:product
    params:
    - api_field: product.displayName
      arg_name: display-name
      help_text: |-
        The display name of this product. It must not be empty and can be up to 4096 characters
        long.
    - api_field: product.description
      arg_name: description
      help_text: |-
        An optional description of this product. It can be up to 4096 characters long.
    # Label flags (--add/--remove/--clear-product-labels) are generated in code.
    additional_arguments_hook: googlecloudsdk.command_lib.ml.vision.product_search.utils:ProductLabelsArgumentsForUpdate

View File

@@ -0,0 +1,68 @@
- release_tracks: [GA]
help_text:
brief: Suggest a bounding box in an image.
description: |
Returns the coordinates of a bounding box that surrounds the dominant
object or face in an image.
examples: |
To get the coordinates of a bounding box that surrounds the dominant
object or face in an image `gs://my_bucket/input_file`:
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: CROP_HINTS
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- api_field: requests.imageContext.cropHintsParams.aspectRatios
arg_name: aspect-ratios
type: googlecloudsdk.command_lib.ml.vision.flags:AspectRatioType
help_text: A list of aspect ratio hints for the suggested bounding box. Aspect
ratios may be specified either as a decimal number (ex. 1.333) or as a ratio
of width to height (ex 4:3).
output:
format: json
- release_tracks: [ALPHA, BETA]
help_text:
brief: Suggest a bounding box in an image.
description: |
Returns the coordinates of a bounding box that surrounds the dominant
object or face in an image.
examples: |
To get the coordinates of a bounding box that surrounds the dominant
object or face in an image `gs://my_bucket/input_file`:
$ {command} gs://my_bucket/input_file
request:
_COMMON_: request
static_fields:
requests.features.type: CROP_HINTS
response:
_COMMON_: response
arguments:
params:
- _COMMON_: args.image_field
- _COMMON_: args.model_field
- api_field: requests.imageContext.cropHintsParams.aspectRatios
arg_name: aspect-ratios
type: googlecloudsdk.command_lib.ml.vision.flags:AspectRatioType
help_text: A list of aspect ratio hints for the suggested bounding box. Aspect
ratios may be specified either as a decimal number (ex. 1.333) or as a ratio
of width to height (ex 4:3).
output:
format: json