feat: Add new gcloud commands, API clients, and third-party libraries across various services.

This commit is contained in:
2026-01-01 20:26:35 +01:00
parent 5e23cbece0
commit a19e592eb7
25221 changed files with 8324611 additions and 0 deletions

View File

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml video."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA,
                    base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class VideoIntelligence(base.Group):
  """Cloud ML Video-Intelligence command groups."""

View File

@@ -0,0 +1,64 @@
# Definitions shared by the `ml video` command specs; per-command YAML files
# pull these in via _COMMON_ references (`_COMMON_: request`, `_COMMON_: async`
# and the `- _COMMON_args` entry under arguments.params).
request:
  collection: videointelligence.videos
  method: annotate
  api_version: v1

# videos.annotate returns a long-running operation; commands poll it on the
# operations collection and print the operation's `response` attribute
# directly instead of fetching a separate resource.
async:
  collection: videointelligence.projects.locations.operations
  extract_resource_result: false
  result_attribute: response

args:
- arg_name: input_path
  is_positional: true
  help_text: |
    Path to the video to be analyzed. Must be a local path or a Google
    Cloud Storage URI.
- api_field: outputUri
  arg_name: output-uri
  help_text: |
    Location to which the results should be written. Must be a Google
    Cloud Storage URI.
  processor: googlecloudsdk.command_lib.ml.video.util:ValidateOutputUri
- api_field: videoContext.segments
  arg_name: segments
  help_text: |
    Segments from the video which you want to analyze (by default, the
    entire video will be treated as one segment). Must be in the format
    START1:END1[,START2:END2,...] (inclusive). START and END of segments must
    be a properly formatted duration string of the form `HhMmSs` where:
    ```
    * H is the number of hours from beginning of video
    * M is the number of minutes from the beginning of video
    * S is the number of seconds from the beginning of the video
    ```
    H, M and S can be specified as ints or floats for fractional units
    (to microsecond resolution). Unit chars (e.g. `h`, `m` or `s`) are
    required. Microseconds can be specified using fractional seconds
    e.g. 0.000569s == 569 microseconds.
    Examples:
    0s:23.554048s,24s:29.528064s
    0:1m40s,3m50s:5m10.232265s
  type: str
  processor: googlecloudsdk.command_lib.ml.video.util:ValidateAndParseSegments
- api_field: locationId
  arg_name: region
  choices:
  - arg_value: us-east1
    enum_value: us-east1
  - arg_value: us-west1
    enum_value: us-west1
  - arg_value: europe-west1
    enum_value: europe-west1
  - arg_value: asia-east1
    enum_value: asia-east1
  help_text: |
    Optional Cloud region where annotation should take place. If no region
    is specified, a region will be determined based on video file location.

View File

@@ -0,0 +1,33 @@
# Declarative calliope spec for the explicit-content detection command.
# NOTE(review): release_tracks is empty — confirm this is intentional and not
# content lost in transit (sibling specs list explicit tracks).
- release_tracks: []
  help_text:
    brief: Detect explicit content in videos.
    description: |
      Detect adult content within a video. Adult content is content generally
      appropriate for 18 years of age and older, including but not limited to
      nudity, sexual activities, and pornography (including cartoons or anime).
      The response includes a bucketized likelihood value, from VERY_UNLIKELY to
      VERY_LIKELY. When Explicit Content Detection evaluates a video, it does so on
      a per-frame basis and considers visual content only (not audio).
    examples: |
      To detect explicit content in a video file named 'gs://my_bucket/input_file.mp4', run the following command:
      $ {command} gs://my_bucket/input_file.mp4
  request:
    _COMMON_: request
    static_fields:
      features: [EXPLICIT_CONTENT_DETECTION]
    modify_request_hooks:
    - googlecloudsdk.command_lib.ml.video.util:UpdateRequestWithInput
  async:
    _COMMON_: async
  arguments:
    params:
    - _COMMON_args
  output:
    format: json

View File

@@ -0,0 +1,43 @@
# Declarative calliope spec for the label detection command.
# NOTE(review): release_tracks is empty — confirm this is intentional.
- release_tracks: []
  help_text:
    brief: Detect general labels for videos.
    description: |
      Detect general categories in videos, such as modes of transportation or
      animals. Use the --detection-mode flag to control whether labels are
      detected for shots, frames, or both.
    examples: |
      To detect labels in video file 'gs://my_bucket/input_file.mp4':
      $ {command} gs://my_bucket/input_file.mp4
  request:
    _COMMON_: request
    static_fields:
      features: [LABEL_DETECTION]
    modify_request_hooks:
    - googlecloudsdk.command_lib.ml.video.util:UpdateRequestWithInput
  async:
    _COMMON_: async
  arguments:
    params:
    - _COMMON_args
    - api_field: videoContext.labelDetectionConfig.labelDetectionMode
      arg_name: detection-mode
      help_text: The mode of label detection requested.
      choices:
      - arg_value: shot
        enum_value: SHOT_MODE
        help_text: Detect labels at the per-shot level.
      - arg_value: frame
        enum_value: FRAME_MODE
        help_text: Detect labels at the per-frame level.
      - arg_value: shot-and-frame
        enum_value: SHOT_AND_FRAME_MODE
        help_text: Detect labels at both the per-shot and per-frame level.
      default: shot
  output:
    format: json

View File

@@ -0,0 +1,26 @@
# Declarative calliope spec for the object tracking command (ALPHA/BETA only).
- release_tracks: [ALPHA, BETA]
  help_text:
    brief: Detect and track objects in a video.
    description: |
      Detect and track objects in a video.
    examples: |
      To detect and track objects in video file 'gs://my_bucket/input_file.mp4':
      $ {command} gs://my_bucket/input_file.mp4
  request:
    _COMMON_: request
    static_fields:
      features: [OBJECT_TRACKING]
    modify_request_hooks:
    - googlecloudsdk.command_lib.ml.video.util:UpdateRequestWithInput
  async:
    _COMMON_: async
  arguments:
    params:
    - _COMMON_args
  output:
    format: json

View File

@@ -0,0 +1,27 @@
# Declarative calliope spec for the shot-change detection command.
# NOTE(review): release_tracks is empty — confirm this is intentional.
- release_tracks: []
  help_text:
    brief: Detect shot changes in videos.
    description: |
      Detect when the shot changes in a video.
    examples: |
      To detect shot changes in a video file named 'gs://my_bucket/input_file.mp4', run the following command:
      $ {command} gs://my_bucket/input_file.mp4
  request:
    _COMMON_: request
    static_fields:
      features: [SHOT_CHANGE_DETECTION]
    modify_request_hooks:
    - googlecloudsdk.command_lib.ml.video.util:UpdateRequestWithInput
  async:
    _COMMON_: async
  arguments:
    params:
    - _COMMON_args
  output:
    format: json

View File

@@ -0,0 +1,33 @@
# Declarative calliope spec for the text detection command (ALPHA/BETA only).
- release_tracks: [ALPHA, BETA]
  help_text:
    brief: Detect text in videos.
    description: |
      Detect text in videos.
    examples: |
      To detect text in video file 'gs://my_bucket/input_file.mp4':
      $ {command} gs://my_bucket/input_file.mp4
  request:
    _COMMON_: request
    static_fields:
      features: [TEXT_DETECTION]
    modify_request_hooks:
    - googlecloudsdk.command_lib.ml.video.util:UpdateRequestWithInput
  async:
    _COMMON_: async
  arguments:
    params:
    - _COMMON_args
    - api_field: videoContext.textDetectionConfig.languageHints
      arg_name: language-hints
      default: []
      # Block scalar for consistency with the other help_text entries in this
      # surface (the original used a plain multi-line scalar).
      help_text: |
        Language hints can be specified if the languages to be detected are known
        beforehand. It can increase the accuracy of the detection. Language hints must be language
        codes in BCP-47 format. Automatic language detection is performed if no language hints are
        provided.
  output:
    format: json

View File

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
# NOTE(review): unlike the VideoIntelligence group, no @base.ReleaseTracks
# decorator is applied here — presumably the group is exposed in all release
# tracks by default; confirm against calliope's loader behavior.
class Operations(base.Group):
  """Command group for working with Cloud Video Intelligence operations."""

View File

@@ -0,0 +1,22 @@
# Declarative calliope spec for describing a video-analysis operation.
# NOTE(review): release_tracks is empty — confirm this is intentional.
- release_tracks: []
  help_text:
    brief: Get description of a long-running video analysis operation.
    description: |
      Get information about a long-running video analysis operation.
    examples: |-
      To get information about a long-running operation with name 'projects/my-project/locations/us-east1/operations/123', run the following command:
      $ {command} projects/my-project/locations/us-east1/operations/123
  request:
    collection: videointelligence.projects.locations.operations
    api_version: v1
  arguments:
    resource:
      help_text: The ID of the operation to describe.
      spec: !REF googlecloudsdk.command_lib.ml.video.resources:operation
  output:
    format: json

View File

@@ -0,0 +1,28 @@
# Declarative calliope spec for polling a video-analysis operation to
# completion. NOTE(review): release_tracks is empty — confirm intentional.
- release_tracks: []
  help_text:
    brief: Poll long-running video analysis operation until it completes.
    description: |
      Poll a long-running video analysis operation until it completes. When
      the operation is complete, this command will display the results of the
      analysis.
    examples: |
      To poll a long-running video analysis operation named 'projects/my-project/locations/us-east1/operations/123' until it completes, run the following:
      $ {command} projects/my-project/locations/us-east1/operations/123
  request:
    collection: videointelligence.projects.locations.operations
    api_version: v1
  arguments:
    resource:
      help_text: ID for the operation to poll until complete.
      spec: !REF googlecloudsdk.command_lib.ml.video.resources:operation
  # On completion, print the operation's `response` field (the analysis
  # results) rather than the raw operation resource.
  async:
    collection: videointelligence.projects.locations.operations
    result_attribute: response
  output:
    format: json

View File

@@ -0,0 +1,86 @@
# Declarative calliope spec for the speech transcription command
# (ALPHA/BETA only).
- release_tracks: [ALPHA, BETA]
  help_text:
    brief: Transcribe speech in a video.
    description: |
      Transcribe speech within a video.
    examples: |
      To transcribe speech in video file 'gs://my_bucket/input_file.mp4':
      $ {command} gs://my_bucket/input_file.mp4
  request:
    _COMMON_: request
    static_fields:
      features: [SPEECH_TRANSCRIPTION]
    modify_request_hooks:
    - googlecloudsdk.command_lib.ml.video.util:UpdateRequestWithInput
  async:
    _COMMON_: async
  arguments:
    params:
    - _COMMON_args
    - api_field: videoContext.speechTranscriptionConfig.enableAutomaticPunctuation
      arg_name: enable-automatic-punctuation
      help_text: |
        If enabled, adds punctuation to recognition result hypotheses. This feature is only
        available in select languages. Setting this for requests in other languages has no effect at
        all. The default disabled case does not add punctuation to result hypotheses.
    - api_field: videoContext.speechTranscriptionConfig.enableSpeakerDiarization
      arg_name: enable-speaker-diarization
      help_text: |
        If enabled, perform speaker detection for each recognized word in the top alternative of the
        recognition result using a speaker_tag provided in the WordInfo results section. When this
        is enabled, we send all the words from the beginning of the audio for the top alternative in
        every consecutive responses. This is done in order to improve our speaker tags as our models
        learn to identify the speakers in the conversation over time.
    - api_field: videoContext.speechTranscriptionConfig.diarizationSpeakerCount
      arg_name: diarization-speaker-count
      help_text: |
        Optional estimated number of speakers in the conversation. If not specified, defaults to
        '2'. Ignored unless --enable-speaker-diarization is enabled.
    - api_field: videoContext.speechTranscriptionConfig.maxAlternatives
      arg_name: max-alternatives
      help_text: |
        Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of
        `SpeechRecognitionAlternative` messages within each `SpeechTranscription`. The server may
        return fewer than `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
        return a maximum of one. If omitted, will return a maximum of one.
    - api_field: videoContext.speechTranscriptionConfig.languageCode
      arg_name: language-code
      required: true
      help_text: |
        The language of the supplied audio as a
        [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
        language tag. Example: "en-US". See
        [Language Support](https://cloud.google.com/speech/docs/languages) for a list of the
        currently supported language codes.
    - api_field: videoContext.speechTranscriptionConfig.filterProfanity
      arg_name: filter-profanity
      help_text: |
        If enabled, the server will attempt to filter out profanities, replacing all but the
        initial character in each filtered word with asterisks, e.g. ```f***```. If disabled or
        omitted, profanities will not be filtered out.
    - api_field: videoContext.speechTranscriptionConfig.enableWordConfidence
      arg_name: enable-word-confidence
      help_text: |
        If enabled, the top result includes a list of words and the confidence for those words. If
        disabled, no word-level confidence information is returned. The default is disabled.
    - api_field: videoContext.speechTranscriptionConfig.audioTracks
      arg_name: audio-tracks
      processor: googlecloudsdk.command_lib.ml.video.util:AudioTrackProcessor
      help_text: |
        For file formats, such as MXF or MKV, supporting multiple audio tracks, specify up to two
        comma-separated tracks. Default track is 0.
    - api_field: videoContext.speechTranscriptionConfig.speechContexts.phrases
      arg_name: hints
      default: []
      help_text: |
        List of strings containing word and phrase "hints" so that the speech recognition is
        more likely to recognize them. This can be used to improve the accuracy for specific words
        and phrases, for example, if specific commands are typically spoken by the user. This can
        also be used to add additional words to the vocabulary of the recognizer. See
        https://cloud.google.com/speech/limits#content.
  # NOTE(review): sibling detect-* specs use `format: json`; confirm that
  # `yaml` output here is intentional.
  output:
    format: yaml