// Code generated by smithy-go-codegen DO NOT EDIT.

package comprehend

import (
	"context"
	"fmt"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/comprehend/types"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Performs toxicity analysis on the list of text strings that you provide as
// input. The API response contains a results list that matches the size of the
// input list. For more information about toxicity detection, see Toxicity
// detection (https://docs.aws.amazon.com/comprehend/latest/dg/toxicity-detection.html)
// in the Amazon Comprehend Developer Guide.
func (c *Client) DetectToxicContent(ctx context.Context, params *DetectToxicContentInput, optFns ...func(*Options)) (*DetectToxicContentOutput, error) {
	if params == nil {
		params = &DetectToxicContentInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "DetectToxicContent", params, optFns, c.addOperationDetectToxicContentMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*DetectToxicContentOutput)
	out.ResultMetadata = metadata
	return out, nil
}
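
// A minimal usage sketch, not part of the generated file: it assumes an
// already-configured Client (for example from comprehend.NewFromConfig with a
// config loaded via github.com/aws/aws-sdk-go-v2/config) and uses aws.String
// from the github.com/aws/aws-sdk-go-v2/aws package; the sample text and
// variable names are illustrative only.
//
//	client := comprehend.NewFromConfig(cfg)
//	out, err := client.DetectToxicContent(ctx, &comprehend.DetectToxicContentInput{
//		LanguageCode: types.LanguageCodeEn,
//		TextSegments: []types.TextSegment{
//			{Text: aws.String("example text to analyze")},
//		},
//	})
//	if err != nil {
//		// handle the error
//	}
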
type DetectToxicContentInput struct {

	// The language of the input text. Currently, English is the only supported
	// language.
	//
	// This member is required.
	LanguageCode types.LanguageCode

	// A list of up to 10 text strings. Each string has a maximum size of 1 KB, and
	// the maximum size of the list is 10 KB.
	//
	// This member is required.
	TextSegments []types.TextSegment

	noSmithyDocumentSerde
}
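
// A hypothetical client-side pre-check, not part of the SDK: it enforces the
// documented limits (at most 10 segments, 1 KB per segment) before calling the
// API so violations fail fast instead of round-tripping to the service. The
// helper name and the 1024-byte reading of "1 KB" are assumptions.
//
//	func checkToxicContentLimits(in *DetectToxicContentInput) error {
//		if len(in.TextSegments) > 10 {
//			return fmt.Errorf("got %d text segments, limit is 10", len(in.TextSegments))
//		}
//		for i, seg := range in.TextSegments {
//			if seg.Text != nil && len(*seg.Text) > 1024 {
//				return fmt.Errorf("text segment %d exceeds 1 KB", i)
//			}
//		}
//		return nil
//	}
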
type DetectToxicContentOutput struct {

	// Results of the content moderation analysis. Each entry in the results list
	// contains a list of toxic content types identified in the text, along with a
	// confidence score for each content type. Each entry also includes an overall
	// toxicity score for its text segment.
	ResultList []types.ToxicLabels

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	noSmithyDocumentSerde
}
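
// A hedged sketch of consuming the response, assuming the ToxicLabels shape
// from the types package (a Labels slice of per-category scores plus an
// overall Toxicity score) and aws.ToFloat32 for pointer dereferencing; the
// 0.5 threshold is an arbitrary illustration, not a service recommendation.
//
//	for i, labels := range out.ResultList {
//		if labels.Toxicity != nil && *labels.Toxicity > 0.5 {
//			fmt.Printf("segment %d flagged, toxicity %.2f\n", i, *labels.Toxicity)
//		}
//		for _, label := range labels.Labels {
//			fmt.Printf("  %s: %.2f\n", label.Name, aws.ToFloat32(label.Score))
//		}
//	}
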
func (c *Client) addOperationDetectToxicContentMiddlewares(stack *middleware.Stack, options Options) (err error) {
	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
		return err
	}
	err = stack.Serialize.Add(&awsAwsjson11_serializeOpDetectToxicContent{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDetectToxicContent{}, middleware.After)
	if err != nil {
		return err
	}
	if err := addProtocolFinalizerMiddlewares(stack, options, "DetectToxicContent"); err != nil {
		return fmt.Errorf("add protocol finalizers: %v", err)
	}
	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
		return err
	}
	if err = addOpDetectToxicContentValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDetectToxicContent(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	return nil
}
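
// The stack above is fixed at code-generation time; the standard smithy-go
// extension point for callers is Options.APIOptions, reached through the
// optFns argument. A minimal sketch, with myMiddleware standing in for any
// middleware.InitializeMiddleware the caller supplies:
//
//	out, err := client.DetectToxicContent(ctx, params, func(o *comprehend.Options) {
//		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
//			return stack.Initialize.Add(myMiddleware, middleware.Before)
//		})
//	})
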
func newServiceMetadataMiddleware_opDetectToxicContent(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		OperationName: "DetectToxicContent",
	}
}