# Shared help text, request configuration, and argument definitions for the
# gcloud ml language commands (analyze-entities, analyze-entity-sentiment,
# analyze-sentiment, analyze-syntax, classify-text).
help_text:
  language_help: |
    Currently English, Spanish, Japanese, Chinese (Simplified and Traditional),
    French, German, Italian, Korean, and Portuguese are supported.
  language_help_ga: |
    Currently English, Spanish, and Japanese are supported.
  # Per-command example blocks, keyed by command abbreviation:
  # AE = analyze-entities, AES = analyze-entity-sentiment,
  # AS = analyze-sentiment, ASX = analyze-syntax, CT = classify-text.
  language_examples:
    AE: |
      To analyze entities in raw content 'puppies':

        $ {command} --content='puppies'

      To analyze entities in file 'myfile.txt':

        $ {command} --content-file='myfile.txt'

      To analyze entities in a remote file 'gs://bucket_name/mycontent.html' for Japanese language using
      UTF-8 encoding:

        $ {command} --content-file='gs://bucket_name/mycontent.html' --content-type=HTML --encoding-type=utf8 --language=ja-JP
    AES: |
      To analyze entity sentiment in raw content 'puppies':

        $ {command} --content='puppies'

      To analyze entity sentiment in file 'myfile.txt':

        $ {command} --content-file='myfile.txt'

      To analyze entity sentiment in a remote file 'gs://bucket_name/mycontent.html' for Japanese language using
      UTF-8 encoding:

        $ {command} --content-file='gs://bucket_name/mycontent.html' --content-type=HTML --encoding-type=utf8 --language=ja-JP
    AS: |
      To analyze sentiment in raw content 'I love puppies.':

        $ {command} --content='I love puppies.'

      To analyze sentiment in file 'myfile.txt':

        $ {command} --content-file='myfile.txt'

      To analyze sentiment in a remote file 'gs://bucket_name/mycontent.html' for Japanese language using
      UTF-8 encoding:

        $ {command} --content-file='gs://bucket_name/mycontent.html' --content-type=HTML --encoding-type=utf8 --language=ja-JP
    ASX: |
      To analyze syntax in raw content 'They drink.':

        $ {command} --content='They drink.'

      To analyze syntax in file 'myfile.txt':

        $ {command} --content-file='myfile.txt'

      To analyze syntax in a remote file 'gs://bucket_name/mycontent.html' for Japanese language using
      UTF-8 encoding:

        $ {command} --content-file='gs://bucket_name/mycontent.html' --content-type=HTML --encoding-type=utf8 --language=ja-JP
    CT: |
      To classify text in raw content 'Long Political Text.':

        $ {command} --content='Long Political Text.'

      To classify text in file 'myfile.txt':

        $ {command} --content-file='myfile.txt'

      To classify text in a remote file 'gs://bucket_name/mycontent.html' for Japanese language:

        $ {command} --content-file='gs://bucket_name/mycontent.html' --content-type=HTML --language=ja-JP

request:
  collection: language.documents
  api_version: v1
  # Builds the Document message from --content / --content-file before the
  # request is sent (handles local files and gs:// paths).
  modify_request_hooks:
    - googlecloudsdk.command_lib.ml.language.util:UpdateRequestWithInput

args:
  content:
    api_field: document.type
    arg_name: content-type
    default: plain-text
    choices:
      - arg_value: plain-text
        enum_value: PLAIN_TEXT
      - arg_value: html
        enum_value: HTML
    help_text: Specify the format of the input text.

  language:
    api_field: document.language
    arg_name: language
    help_text: |
      Specify the language of the input text. If omitted, the server will
      attempt to auto-detect. Both ISO (such as `en` or `es`) and BCP-47 (such
      as `en-US` or `ja-JP`) language codes are accepted.

  encoding:
    api_field: encodingType
    arg_name: encoding-type
    default: utf8
    help_text: |
      The encoding type used by the API to calculate offsets. If set to `none`,
      encoding-dependent offsets will be set at -1. This is an optional flag
      only used for the entity mentions in results, and does not affect how the
      input is read or analyzed.

  # Exactly one of --content / --content-file must be provided.
  content_group:
    group:
      mutex: true
      required: true
      params:
        - arg_name: content
          help_text: |
            Specify input text on the command line. Useful for experiments, or
            for extremely short text.
        - arg_name: content-file
          help_text: |
            Specify a local file or Google Cloud Storage (format
            `gs://bucket/object`) file path containing the text to be analyzed.
            More useful for longer text or data output from another system.