File: api_op_GetMLModel.go

// Code generated by smithy-go-codegen DO NOT EDIT.

package machinelearning

import (
	"context"
	"fmt"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/machinelearning/types"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
	"time"
)

// Returns an MLModel that includes detailed metadata, data source information,
// and the current status of the MLModel . GetMLModel provides results in normal
// or verbose format.
func (c *Client) GetMLModel(ctx context.Context, params *GetMLModelInput, optFns ...func(*Options)) (*GetMLModelOutput, error) {
	if params == nil {
		params = &GetMLModelInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "GetMLModel", params, optFns, c.addOperationGetMLModelMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*GetMLModelOutput)
	out.ResultMetadata = metadata
	return out, nil
}
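
// exampleGetMLModel is an illustrative sketch, not generated code: it shows a
// minimal call to GetMLModel against an already-constructed *Client. The model
// ID is supplied by the caller; Verbose is left false, so the Recipe is not
// returned.
func exampleGetMLModel(ctx context.Context, c *Client, modelID string) (*GetMLModelOutput, error) {
	out, err := c.GetMLModel(ctx, &GetMLModelInput{
		MLModelId: &modelID, // required
	})
	if err != nil {
		return nil, fmt.Errorf("get MLModel %q: %w", modelID, err)
	}
	return out, nil
}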

type GetMLModelInput struct {

	// The ID assigned to the MLModel at creation.
	//
	// This member is required.
	MLModelId *string

	// Specifies whether the GetMLModel operation should return Recipe . If true,
	// Recipe is returned. If false, Recipe is not returned.
	Verbose bool

	noSmithyDocumentSerde
}

// Represents the output of a GetMLModel operation, and provides detailed
// information about an MLModel .
type GetMLModelOutput struct {

	// The approximate CPU time in milliseconds that Amazon Machine Learning spent
	// processing the MLModel , normalized and scaled on computation resources.
	// ComputeTime is only available if the MLModel is in the COMPLETED state.
	ComputeTime *int64

	// The time that the MLModel was created. The time is expressed in epoch time.
	CreatedAt *time.Time

	// The AWS user account from which the MLModel was created. The account type can
	// be either an AWS root account or an AWS Identity and Access Management (IAM)
	// user account.
	CreatedByIamUser *string

	// The current endpoint of the MLModel .
	EndpointInfo *types.RealtimeEndpointInfo

	// The epoch time when Amazon Machine Learning marked the MLModel as COMPLETED or
	// FAILED . FinishedAt is only available when the MLModel is in the COMPLETED or
	// FAILED state.
	FinishedAt *time.Time

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string

	// The time of the most recent edit to the MLModel . The time is expressed in epoch
	// time.
	LastUpdatedAt *time.Time

	// A link to the file that contains logs of the CreateMLModel operation.
	LogUri *string

	// The MLModel ID, which is the same as the MLModelId in the request.
	MLModelId *string

	// Identifies the MLModel category. The following are the available types:
	//   - REGRESSION -- Produces a numeric result. For example, "What price should a
	//   house be listed at?"
	//   - BINARY -- Produces one of two possible results. For example, "Is this an
	//   e-commerce website?"
	//   - MULTICLASS -- Produces one of several possible results. For example, "Is
	//   this a HIGH, LOW or MEDIUM risk trade?"
	MLModelType types.MLModelType

	// A description of the most recent details about accessing the MLModel .
	Message *string

	// A user-supplied name or description of the MLModel .
	Name *string

	// The recipe to use when training the MLModel . The Recipe provides detailed
	// information about the observation data to use during training, and manipulations
	// to perform on the observation data during training. Note: This parameter is
	// provided as part of the verbose format.
	Recipe *string

	// The schema used by all of the data files referenced by the DataSource . Note:
	// This parameter is provided as part of the verbose format.
	Schema *string

	// The scoring threshold is used in binary classification MLModels. It marks
	// the boundary between a positive prediction and a negative prediction. Output
	// values greater than or equal to the threshold receive a positive result from the
	// MLModel, such as true . Output values less than the threshold receive a negative
	// response from the MLModel, such as false .
	ScoreThreshold *float32

	// The time of the most recent edit to the ScoreThreshold . The time is expressed
	// in epoch time.
	ScoreThresholdLastUpdatedAt *time.Time

	// The size of the MLModel, in bytes. Long integer type that is a 64-bit
	// signed number.
	SizeInBytes *int64

	// The epoch time when Amazon Machine Learning marked the MLModel as INPROGRESS .
	// StartedAt isn't available if the MLModel is in the PENDING state.
	StartedAt *time.Time

	// The current status of the MLModel . This element can have one of the following
	// values:
	//   - PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//   describe an MLModel .
	//   - INPROGRESS - The request is processing.
	//   - FAILED - The request did not run to completion. The ML model isn't usable.
	//   - COMPLETED - The request completed successfully.
	//   - DELETED - The MLModel is marked as deleted. It isn't usable.
	Status types.EntityStatus

	// The ID of the training DataSource .
	TrainingDataSourceId *string

	// A list of the training parameters in the MLModel . The list is implemented as a
	// map of key-value pairs. The following is the current set of training parameters:
	//
	//   - sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending
	//   on the input data, the size of the model might affect its performance. The value
	//   is an integer that ranges from 100000 to 2147483648 . The default value is
	//   33554432 .
	//   - sgd.maxPasses - The number of times that the training process traverses the
	//   observations to build the MLModel . The value is an integer that ranges from 1
	//   to 10000 . The default value is 10 .
	//   - sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling
	//   data improves a model's ability to find the optimal solution for a variety of
	//   data types. The valid values are auto and none . The default value is none .
	//   We strongly recommend that you shuffle your data.
	//   - sgd.l1RegularizationAmount - The coefficient regularization L1 norm. It
	//   controls overfitting the data by penalizing large coefficients. This tends to
	//   drive coefficients to zero, resulting in a sparse feature set. If you use this
	//   parameter, start by specifying a small value, such as 1.0E-08 . The value is a
	//   double that ranges from 0 to MAX_DOUBLE . The default is to not use L1
	//   normalization. This parameter can't be used when L2 is specified. Use this
	//   parameter sparingly.
	//   - sgd.l2RegularizationAmount - The coefficient regularization L2 norm. It
	//   controls overfitting the data by penalizing large coefficients. This tends to
	//   drive coefficients to small, nonzero values. If you use this parameter, start by
	//   specifying a small value, such as 1.0E-08 . The value is a double that ranges
	//   from 0 to MAX_DOUBLE . The default is to not use L2 normalization. This
	//   parameter can't be used when L1 is specified. Use this parameter sparingly.
	TrainingParameters map[string]string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	noSmithyDocumentSerde
}
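
// exampleInspectMLModel is an illustrative sketch, not generated code: it
// shows how the pointer and enum fields of GetMLModelOutput can be read safely
// once a result has been obtained. It assumes the generated
// types.EntityStatusCompleted constant; the printed format is arbitrary.
func exampleInspectMLModel(out *GetMLModelOutput) {
	if out.Status != types.EntityStatusCompleted {
		fmt.Printf("MLModel not ready, status: %s\n", out.Status)
		return
	}
	if out.ScoreThreshold != nil && out.ScoreThresholdLastUpdatedAt != nil {
		fmt.Printf("score threshold %.3f, last updated %s\n",
			*out.ScoreThreshold, out.ScoreThresholdLastUpdatedAt.Format(time.RFC3339))
	}
	for name, value := range out.TrainingParameters {
		fmt.Printf("training parameter %s = %s\n", name, value)
	}
}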

func (c *Client) addOperationGetMLModelMiddlewares(stack *middleware.Stack, options Options) (err error) {
	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
		return err
	}
	err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetMLModel{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetMLModel{}, middleware.After)
	if err != nil {
		return err
	}
	if err := addProtocolFinalizerMiddlewares(stack, options, "GetMLModel"); err != nil {
		return fmt.Errorf("add protocol finalizers: %v", err)
	}

	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
		return err
	}
	if err = addOpGetMLModelValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetMLModel(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	return nil
}
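
// exampleAddTraceHeader is an illustrative sketch, not generated code: it
// shows how a caller could splice one extra middleware into the stack that
// addOperationGetMLModelMiddlewares assembles above, using the per-call
// options parameter and the client's APIOptions. The middleware ID and the
// header name are hypothetical.
func exampleAddTraceHeader(ctx context.Context, c *Client, modelID string) (*GetMLModelOutput, error) {
	withTraceHeader := func(stack *middleware.Stack) error {
		// Add a Build-step middleware that stamps an extra HTTP header on the
		// serialized request before it is signed and sent.
		return stack.Build.Add(middleware.BuildMiddlewareFunc("ExampleTraceHeader",
			func(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
				middleware.BuildOutput, middleware.Metadata, error,
			) {
				if req, ok := in.Request.(*smithyhttp.Request); ok {
					req.Header.Set("X-Example-Trace", "GetMLModel") // hypothetical header
				}
				return next.HandleBuild(ctx, in)
			}), middleware.After)
	}
	return c.GetMLModel(ctx, &GetMLModelInput{MLModelId: &modelID}, func(o *Options) {
		o.APIOptions = append(o.APIOptions, withTraceHeader)
	})
}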

func newServiceMetadataMiddleware_opGetMLModel(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		OperationName: "GetMLModel",
	}
}