File: aiplatform_v1beta1.projects.locations.models.evaluations.html

<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method  {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="aiplatform_v1beta1.html">Vertex AI API</a> . <a href="aiplatform_v1beta1.projects.html">projects</a> . <a href="aiplatform_v1beta1.projects.locations.html">locations</a> . <a href="aiplatform_v1beta1.projects.locations.models.html">models</a> . <a href="aiplatform_v1beta1.projects.locations.models.evaluations.html">evaluations</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="aiplatform_v1beta1.projects.locations.models.evaluations.operations.html">operations()</a></code>
</p>
<p class="firstline">Returns the operations Resource.</p>

<p class="toc_element">
  <code><a href="aiplatform_v1beta1.projects.locations.models.evaluations.slices.html">slices()</a></code>
</p>
<p class="firstline">Returns the slices Resource.</p>

<p class="toc_element">
  <code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
<p class="toc_element">
  <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
<p class="firstline">Gets a ModelEvaluation.</p>
<p class="toc_element">
  <code><a href="#import_">import_(parent, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Imports an externally generated ModelEvaluation.</p>
<p class="toc_element">
  <code><a href="#list">list(parent, filter=None, pageSize=None, pageToken=None, readMask=None, x__xgafv=None)</a></code></p>
<p class="firstline">Lists ModelEvaluations in a Model.</p>
<p class="toc_element">
  <code><a href="#list_next">list_next()</a></code></p>
<p class="firstline">Retrieves the next page of results.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="close">close()</code>
  <pre>Close httplib2 connections.</pre>
</div>
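<p>A minimal, hedged sketch of where <code>close()</code> fits in the client lifecycle. It assumes the service object is built with <code>googleapiclient.discovery.build</code> using Application Default Credentials; the setup is illustrative, not part of the generated reference above.</p>
<pre>
from googleapiclient import discovery

# Assumes Application Default Credentials are configured in the environment.
service = discovery.build(&quot;aiplatform&quot;, &quot;v1beta1&quot;)

try:
    evaluations = service.projects().locations().models().evaluations()
    # ... issue get()/list() requests via `evaluations` here ...
finally:
    # Release the underlying httplib2 connections.
    service.close()
</pre>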

<div class="method">
    <code class="details" id="get">get(name, x__xgafv=None)</code>
  <pre>Gets a ModelEvaluation.

Args:
  name: string, Required. The name of the ModelEvaluation resource. Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}` (required)
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # A collection of metrics calculated by comparing Model&#x27;s predictions on all of the test data against annotations from the test data.
  &quot;biasConfigs&quot;: { # Configuration for bias detection. # Specify the configuration for bias detection.
    &quot;biasSlices&quot;: { # Specification for how the data should be sliced. # Specification for how the data should be sliced for bias. It contains a list of slices, with a limit of two slices. The first slice of data will be slice_a. The second slice in the list (slice_b) will be compared against the first slice. If only a single slice is provided, then slice_a will be compared against &quot;not slice_a&quot;. Below are examples with the feature &quot;education&quot;, which has the values &quot;low&quot;, &quot;medium&quot;, and &quot;high&quot; in the dataset: Example 1: bias_slices = [{&#x27;education&#x27;: &#x27;low&#x27;}] A single slice provided. In this case, slice_a is the collection of data where &#x27;education&#x27; equals &#x27;low&#x27;, and slice_b is the collection of data where &#x27;education&#x27; equals &#x27;medium&#x27; or &#x27;high&#x27;. Example 2: bias_slices = [{&#x27;education&#x27;: &#x27;low&#x27;}, {&#x27;education&#x27;: &#x27;high&#x27;}] Two slices provided. In this case, slice_a is the collection of data where &#x27;education&#x27; equals &#x27;low&#x27;, and slice_b is the collection of data where &#x27;education&#x27; equals &#x27;high&#x27;.
      &quot;configs&quot;: { # Mapping configuration for this SliceSpec. The key is the name of the feature. By default, the key will be prefixed by &quot;instance&quot; as a dictionary prefix for Vertex Batch Predictions output format.
        &quot;a_key&quot;: { # Specification message containing the config for this SliceSpec. When `kind` is selected as `value` and/or `range`, only a single slice will be computed. When `all_values` is present, a separate slice will be computed for each possible label/value for the corresponding key in `config`. Examples, with feature zip_code with values 12345, 23334, 88888 and feature country with values &quot;US&quot;, &quot;Canada&quot;, &quot;Mexico&quot; in the dataset: Example 1: { &quot;zip_code&quot;: { &quot;value&quot;: { &quot;float_value&quot;: 12345.0 } } } A single slice for any data with zip_code 12345 in the dataset. Example 2: { &quot;zip_code&quot;: { &quot;range&quot;: { &quot;low&quot;: 12345, &quot;high&quot;: 20000 } } } A single slice containing data where the zip_code is between 12345 and 20000. For this example, data with the zip_code of 12345 will be in this slice. Example 3: { &quot;zip_code&quot;: { &quot;range&quot;: { &quot;low&quot;: 10000, &quot;high&quot;: 20000 } }, &quot;country&quot;: { &quot;value&quot;: { &quot;string_value&quot;: &quot;US&quot; } } } A single slice containing data where the zip_code is between 10000 and 20000 and the country is &quot;US&quot;. For this example, data with the zip_code of 12345 and country &quot;US&quot; will be in this slice. Example 4: { &quot;country&quot;: {&quot;all_values&quot;: { &quot;value&quot;: true } } } Three slices are computed, one for each unique country in the dataset. Example 5: { &quot;country&quot;: { &quot;all_values&quot;: { &quot;value&quot;: true } }, &quot;zip_code&quot;: { &quot;value&quot;: { &quot;float_value&quot;: 12345.0 } } } Three slices are computed, one for each unique country in the dataset where the zip_code is also 12345. For this example, data with zip_code 12345 and country &quot;US&quot; will be in one slice, zip_code 12345 and country &quot;Canada&quot; in another slice, and zip_code 12345 and country &quot;Mexico&quot; in another slice, totaling 3 slices.
          &quot;allValues&quot;: True or False, # If all_values is set to true, then all possible labels of the keyed feature will have another slice computed. Example: `{&quot;all_values&quot;:{&quot;value&quot;:true}}`
          &quot;range&quot;: { # A range of values for slice(s). `low` is inclusive, `high` is exclusive. # A range of values for a numerical feature. Example: `{&quot;range&quot;:{&quot;low&quot;:10000.0,&quot;high&quot;:50000.0}}` will capture 12345 and 23334 in the slice.
            &quot;high&quot;: 3.14, # Exclusive high value for the range.
            &quot;low&quot;: 3.14, # Inclusive low value for the range.
          },
          &quot;value&quot;: { # Single value that supports strings and floats. # A unique specific value for a given feature. Example: `{ &quot;value&quot;: { &quot;string_value&quot;: &quot;12345&quot; } }`
            &quot;floatValue&quot;: 3.14, # Float type.
            &quot;stringValue&quot;: &quot;A String&quot;, # String type.
          },
        },
      },
    },
    &quot;labels&quot;: [ # Positive labels selection on the target field.
      &quot;A String&quot;,
    ],
  },
  &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp when this ModelEvaluation was created.
  &quot;displayName&quot;: &quot;A String&quot;, # The display name of the ModelEvaluation.
  &quot;explanationSpecs&quot;: [ # Describes the values of ExplanationSpec that are used for explaining the predicted values on the evaluated data.
    {
      &quot;explanationSpec&quot;: { # Specification of Model explanation. # Explanation spec details.
        &quot;metadata&quot;: { # Metadata describing the Model&#x27;s input and output for explanation. # Optional. Metadata describing the Model&#x27;s input and output for explanation.
          &quot;featureAttributionsSchemaUri&quot;: &quot;A String&quot;, # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access.
          &quot;inputs&quot;: { # Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance.
            &quot;a_key&quot;: { # Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow.
              &quot;denseShapeTensorName&quot;: &quot;A String&quot;, # Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
              &quot;encodedBaselines&quot;: [ # A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor.
                &quot;&quot;,
              ],
              &quot;encodedTensorName&quot;: &quot;A String&quot;, # Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table.
              &quot;encoding&quot;: &quot;A String&quot;, # Defines how the feature is encoded into the input tensor. Defaults to IDENTITY.
              &quot;featureValueDomain&quot;: { # Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean, and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which input feature (with mean = 0 and stddev = 1) was obtained. # The domain details of the input feature value. Like min/max, original mean or standard deviation if normalized.
                &quot;maxValue&quot;: 3.14, # The maximum permissible value for this feature.
                &quot;minValue&quot;: 3.14, # The minimum permissible value for this feature.
                &quot;originalMean&quot;: 3.14, # If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization.
                &quot;originalStddev&quot;: 3.14, # If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization.
              },
              &quot;groupName&quot;: &quot;A String&quot;, # Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name.
              &quot;indexFeatureMapping&quot;: [ # A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR.
                &quot;A String&quot;,
              ],
              &quot;indicesTensorName&quot;: &quot;A String&quot;, # Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
              &quot;inputBaselines&quot;: [ # Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature&#x27;s input in the instance[]. The schema of any single instance may be specified via Endpoint&#x27;s DeployedModels&#x27; Model&#x27;s PredictSchemata&#x27;s instance_schema_uri.
                &quot;&quot;,
              ],
              &quot;inputTensorName&quot;: &quot;A String&quot;, # Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow.
              &quot;modality&quot;: &quot;A String&quot;, # Modality of the feature. Valid values are: numeric, image. Defaults to numeric.
              &quot;visualization&quot;: { # Visualization configurations for image explanation. # Visualization configurations for image explanation.
                &quot;clipPercentLowerbound&quot;: 3.14, # Excludes attributions below the specified percentile from the highlighted areas. Defaults to 62.
                &quot;clipPercentUpperbound&quot;: 3.14, # Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9.
                &quot;colorMap&quot;: &quot;A String&quot;, # The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue.
                &quot;overlayType&quot;: &quot;A String&quot;, # How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE.
                &quot;polarity&quot;: &quot;A String&quot;, # Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE.
                &quot;type&quot;: &quot;A String&quot;, # Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES.
              },
            },
          },
          &quot;latentSpaceSource&quot;: &quot;A String&quot;, # Name of the source to generate embeddings for example based explanations.
          &quot;outputs&quot;: { # Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed.
            &quot;a_key&quot;: { # Metadata of the prediction output to be explained.
              &quot;displayNameMappingKey&quot;: &quot;A String&quot;, # Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. The display names in the prediction must have the same shape as the outputs, so that they can be located by Attribution.output_index for a specific output.
              &quot;indexDisplayNameMapping&quot;: &quot;&quot;, # Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It&#x27;s not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sorts the outputs by their values. The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. The Attribution.output_display_name is populated by locating in the mapping with Attribution.output_index.
              &quot;outputTensorName&quot;: &quot;A String&quot;, # Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow.
            },
          },
        },
        &quot;parameters&quot;: { # Parameters to configure explaining for Model&#x27;s predictions. # Required. Parameters that configure explaining of the Model&#x27;s predictions.
          &quot;examples&quot;: { # Example-based explainability that returns the nearest neighbors from the provided dataset. # Example-based explanations that return the nearest neighbors from the provided dataset.
            &quot;exampleGcsSource&quot;: { # The Cloud Storage input instances. # The Cloud Storage input instances.
              &quot;dataFormat&quot;: &quot;A String&quot;, # The format in which instances are given. If not specified, the JSONL format is assumed. Currently only the JSONL format is supported.
              &quot;gcsSource&quot;: { # The Google Cloud Storage location for the input content. # The Cloud Storage location for the input instances.
                &quot;uris&quot;: [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.
                  &quot;A String&quot;,
                ],
              },
            },
            &quot;gcsSource&quot;: { # The Google Cloud Storage location for the input content. # The Cloud Storage locations that contain the instances to be indexed for approximate nearest neighbor search.
              &quot;uris&quot;: [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.
                &quot;A String&quot;,
              ],
            },
            &quot;nearestNeighborSearchConfig&quot;: &quot;&quot;, # The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config).
            &quot;neighborCount&quot;: 42, # The number of neighbors to return when querying for examples.
            &quot;presets&quot;: { # Preset configuration for example-based explanations # Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality.
              &quot;modality&quot;: &quot;A String&quot;, # The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type.
              &quot;query&quot;: &quot;A String&quot;, # Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`.
            },
          },
          &quot;integratedGradientsAttribution&quot;: { # An attribution method that computes the Aumann-Shapley value taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 # An attribution method that computes Aumann-Shapley values taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365
            &quot;blurBaselineConfig&quot;: { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for IG with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
              &quot;maxBlurSigma&quot;: 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
            },
            &quot;smoothGradConfig&quot;: { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
              &quot;featureNoiseSigma&quot;: { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
                &quot;noiseSigma&quot;: [ # Noise sigma per feature. No noise is added to features that are not set.
                  { # Noise sigma for a single feature.
                    &quot;name&quot;: &quot;A String&quot;, # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
                    &quot;sigma&quot;: 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
                  },
                ],
              },
              &quot;noiseSigma&quot;: 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
              &quot;noisySampleCount&quot;: 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. The valid range is [1, 50]. Defaults to 3.
            },
            &quot;stepCount&quot;: 42, # Required. The number of steps for approximating the path integral. A good starting value is 50; gradually increase it until the sum-to-diff property is within the desired error range. The valid range is [1, 100], inclusive.
          },
          &quot;outputIndices&quot;: [ # If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape as the output it&#x27;s explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e.g., multi-class Models that predict multiple classes).
            &quot;&quot;,
          ],
          &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for more details: https://arxiv.org/abs/1306.4265.
            &quot;pathCount&quot;: 42, # Required. The number of feature permutations to consider when approximating the Shapley values. The valid range is [1, 50], inclusive.
          },
          &quot;topK&quot;: 42, # If populated, returns attributions for the top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g., multi-class Models). When set to -1, returns explanations for all outputs.
          &quot;xraiAttribution&quot;: { # An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. # An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead.
            &quot;blurBaselineConfig&quot;: { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
              &quot;maxBlurSigma&quot;: 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
            },
            &quot;smoothGradConfig&quot;: { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
              &quot;featureNoiseSigma&quot;: { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
                &quot;noiseSigma&quot;: [ # Noise sigma per feature. No noise is added to features that are not set.
                  { # Noise sigma for a single feature.
                    &quot;name&quot;: &quot;A String&quot;, # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
                    &quot;sigma&quot;: 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
                  },
                ],
              },
              &quot;noiseSigma&quot;: 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
              &quot;noisySampleCount&quot;: 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. The valid range is [1, 50]. Defaults to 3.
            },
            &quot;stepCount&quot;: 42, # Required. The number of steps for approximating the path integral. A good starting value is 50; gradually increase it until the sum-to-diff property is met within the desired error range. The valid range is [1, 100], inclusive.
          },
        },
      },
      &quot;explanationType&quot;: &quot;A String&quot;, # Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai`
    },
  ],
  &quot;metadata&quot;: &quot;&quot;, # The metadata of the ModelEvaluation. For the ModelEvaluation uploaded from Managed Pipeline, metadata contains a structured value with keys of &quot;pipeline_job_id&quot;, &quot;evaluation_dataset_type&quot;, &quot;evaluation_dataset_path&quot;, &quot;row_based_metrics_path&quot;.
  &quot;metrics&quot;: &quot;&quot;, # Evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri.
  &quot;metricsSchemaUri&quot;: &quot;A String&quot;, # Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  &quot;modelExplanation&quot;: { # Aggregated explanation metrics for a Model over a set of instances. # Aggregated explanation metrics for the Model&#x27;s prediction output over the data this ModelEvaluation uses. This field is populated only if the Model is evaluated with explanations, and only for AutoML tabular Models.
    &quot;meanAttributions&quot;: [ # Output only. Aggregated attributions explaining the Model&#x27;s prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attribution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated.
      { # Attribution that explains a particular prediction output.
        &quot;approximationError&quot;: 3.14, # Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information.
        &quot;baselineOutputValue&quot;: 3.14, # Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model&#x27;s predicted output has multiple dimensions (rank &gt; 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged.
        &quot;featureAttributions&quot;: &quot;&quot;, # Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature&#x27;s input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated).
        &quot;instanceOutputValue&quot;: 3.14, # Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index.
        &quot;outputDisplayName&quot;: &quot;A String&quot;, # Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is populated only if the Model predicts display names as a separate field along with the explained output. The predicted display name must have the same shape as the explained output, and can be located using output_index.
        &quot;outputIndex&quot;: [ # Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0.
          42,
        ],
        &quot;outputName&quot;: &quot;A String&quot;, # Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs.
      },
    ],
  },
  &quot;name&quot;: &quot;A String&quot;, # Output only. The resource name of the ModelEvaluation.
  &quot;sliceDimensions&quot;: [ # All possible dimensions of ModelEvaluationSlices. The dimensions can be used as the filter of the ModelService.ListModelEvaluationSlices request, in the form of `slice.dimension = `.
    &quot;A String&quot;,
  ],
}</pre>
</div>
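<p>A short usage sketch for <code>get()</code>, assuming a <code>service</code> object built as in the <code>close()</code> sketch above. The project, location, model, and evaluation IDs below are placeholders; substitute real values.</p>
<pre>
# Placeholder resource name; replace each ID segment with your own values.
name = (
    &quot;projects/my-project/locations/us-central1&quot;
    &quot;/models/1234567890/evaluations/9876543210&quot;
)

evaluation = (
    service.projects()
    .locations()
    .models()
    .evaluations()
    .get(name=name)
    .execute()
)

# The response is a dict shaped like the ModelEvaluation object documented above.
print(evaluation.get(&quot;displayName&quot;))
print(evaluation.get(&quot;metricsSchemaUri&quot;))
</pre>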

<div class="method">
    <code class="details" id="import_">import_(parent, body=None, x__xgafv=None)</code>
  <pre>Imports an externally generated ModelEvaluation.

Args:
  parent: string, Required. The name of the parent model resource. Format: `projects/{project}/locations/{location}/models/{model}` (required)
  body: object, The request body.
    The object takes the form of:

{ # Request message for ModelService.ImportModelEvaluation
  &quot;modelEvaluation&quot;: { # A collection of metrics calculated by comparing Model&#x27;s predictions on all of the test data against annotations from the test data. # Required. Model evaluation resource to be imported.
    &quot;biasConfigs&quot;: { # Configuration for bias detection. # Specify the configuration for bias detection.
      &quot;biasSlices&quot;: { # Specification for how the data should be sliced. # Specification for how the data should be sliced for bias. It contains a list of slices, with a limit of two slices. The first slice of data will be slice_a. The second slice in the list (slice_b) will be compared against the first slice. If only a single slice is provided, then slice_a will be compared against &quot;not slice_a&quot;. Below are examples with the feature &quot;education&quot;, which has the values &quot;low&quot;, &quot;medium&quot;, and &quot;high&quot; in the dataset: Example 1: bias_slices = [{&#x27;education&#x27;: &#x27;low&#x27;}] A single slice provided. In this case, slice_a is the collection of data where &#x27;education&#x27; equals &#x27;low&#x27;, and slice_b is the collection of data where &#x27;education&#x27; equals &#x27;medium&#x27; or &#x27;high&#x27;. Example 2: bias_slices = [{&#x27;education&#x27;: &#x27;low&#x27;}, {&#x27;education&#x27;: &#x27;high&#x27;}] Two slices provided. In this case, slice_a is the collection of data where &#x27;education&#x27; equals &#x27;low&#x27;, and slice_b is the collection of data where &#x27;education&#x27; equals &#x27;high&#x27;.
        &quot;configs&quot;: { # Mapping configuration for this SliceSpec. The key is the name of the feature. By default, the key will be prefixed by &quot;instance&quot; as a dictionary prefix for Vertex Batch Predictions output format.
          &quot;a_key&quot;: { # Specification message containing the config for this SliceSpec. When `kind` is selected as `value` and/or `range`, only a single slice will be computed. When `all_values` is present, a separate slice will be computed for each possible label/value for the corresponding key in `config`. Examples, with feature zip_code with values 12345, 23334, 88888 and feature country with values &quot;US&quot;, &quot;Canada&quot;, &quot;Mexico&quot; in the dataset: Example 1: { &quot;zip_code&quot;: { &quot;value&quot;: { &quot;float_value&quot;: 12345.0 } } } A single slice for any data with zip_code 12345 in the dataset. Example 2: { &quot;zip_code&quot;: { &quot;range&quot;: { &quot;low&quot;: 12345, &quot;high&quot;: 20000 } } } A single slice containing data where the zip_code is between 12345 and 20000. For this example, data with the zip_code of 12345 will be in this slice. Example 3: { &quot;zip_code&quot;: { &quot;range&quot;: { &quot;low&quot;: 10000, &quot;high&quot;: 20000 } }, &quot;country&quot;: { &quot;value&quot;: { &quot;string_value&quot;: &quot;US&quot; } } } A single slice containing data where the zip_code is between 10000 and 20000 and the country is &quot;US&quot;. For this example, data with the zip_code of 12345 and country &quot;US&quot; will be in this slice. Example 4: { &quot;country&quot;: {&quot;all_values&quot;: { &quot;value&quot;: true } } } Three slices are computed, one for each unique country in the dataset. Example 5: { &quot;country&quot;: { &quot;all_values&quot;: { &quot;value&quot;: true } }, &quot;zip_code&quot;: { &quot;value&quot;: { &quot;float_value&quot;: 12345.0 } } } Three slices are computed, one for each unique country in the dataset where the zip_code is also 12345. For this example, data with zip_code 12345 and country &quot;US&quot; will be in one slice, zip_code 12345 and country &quot;Canada&quot; in another slice, and zip_code 12345 and country &quot;Mexico&quot; in another slice, totaling 3 slices.
            &quot;allValues&quot;: True or False, # If all_values is set to true, then all possible labels of the keyed feature will have another slice computed. Example: `{&quot;all_values&quot;:{&quot;value&quot;:true}}`
            &quot;range&quot;: { # A range of values for slice(s). `low` is inclusive, `high` is exclusive. # A range of values for a numerical feature. Example: `{&quot;range&quot;:{&quot;low&quot;:10000.0,&quot;high&quot;:50000.0}}` will capture 12345 and 23334 in the slice.
              &quot;high&quot;: 3.14, # Exclusive high value for the range.
              &quot;low&quot;: 3.14, # Inclusive low value for the range.
            },
            &quot;value&quot;: { # Single value that supports strings and floats. # A unique specific value for a given feature. Example: `{ &quot;value&quot;: { &quot;string_value&quot;: &quot;12345&quot; } }`
              &quot;floatValue&quot;: 3.14, # Float type.
              &quot;stringValue&quot;: &quot;A String&quot;, # String type.
            },
          },
        },
      },
      &quot;labels&quot;: [ # Positive labels selection on the target field.
        &quot;A String&quot;,
      ],
    },
    &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp when this ModelEvaluation was created.
    &quot;displayName&quot;: &quot;A String&quot;, # The display name of the ModelEvaluation.
    &quot;explanationSpecs&quot;: [ # Describes the values of ExplanationSpec that are used for explaining the predicted values on the evaluated data.
      {
        &quot;explanationSpec&quot;: { # Specification of Model explanation. # Explanation spec details.
          &quot;metadata&quot;: { # Metadata describing the Model&#x27;s input and output for explanation. # Optional. Metadata describing the Model&#x27;s input and output for explanation.
            &quot;featureAttributionsSchemaUri&quot;: &quot;A String&quot;, # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access.
            &quot;inputs&quot;: { # Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in instance.
              &quot;a_key&quot;: { # Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow.
                &quot;denseShapeTensorName&quot;: &quot;A String&quot;, # Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
                &quot;encodedBaselines&quot;: [ # A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor.
                  &quot;&quot;,
                ],
                &quot;encodedTensorName&quot;: &quot;A String&quot;, # Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table.
                &quot;encoding&quot;: &quot;A String&quot;, # Defines how the feature is encoded into the input tensor. Defaults to IDENTITY.
                &quot;featureValueDomain&quot;: { # Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean, and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which input feature (with mean = 0 and stddev = 1) was obtained. # The domain details of the input feature value. Like min/max, original mean or standard deviation if normalized.
                  &quot;maxValue&quot;: 3.14, # The maximum permissible value for this feature.
                  &quot;minValue&quot;: 3.14, # The minimum permissible value for this feature.
                  &quot;originalMean&quot;: 3.14, # If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization.
                  &quot;originalStddev&quot;: 3.14, # If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization.
                },
                &quot;groupName&quot;: &quot;A String&quot;, # Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name.
                &quot;indexFeatureMapping&quot;: [ # A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR.
                  &quot;A String&quot;,
                ],
                &quot;indicesTensorName&quot;: &quot;A String&quot;, # Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
                &quot;inputBaselines&quot;: [ # Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature&#x27;s input in the instance[]. The schema of any single instance may be specified via Endpoint&#x27;s DeployedModels&#x27; Model&#x27;s PredictSchemata&#x27;s instance_schema_uri.
                  &quot;&quot;,
                ],
                &quot;inputTensorName&quot;: &quot;A String&quot;, # Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow.
                &quot;modality&quot;: &quot;A String&quot;, # Modality of the feature. Valid values are: numeric, image. Defaults to numeric.
                &quot;visualization&quot;: { # Visualization configurations for image explanation. # Visualization configurations for image explanation.
                  &quot;clipPercentLowerbound&quot;: 3.14, # Excludes attributions below the specified percentile from the highlighted areas. Defaults to 62.
                  &quot;clipPercentUpperbound&quot;: 3.14, # Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9.
                  &quot;colorMap&quot;: &quot;A String&quot;, # The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue.
                  &quot;overlayType&quot;: &quot;A String&quot;, # How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE.
                  &quot;polarity&quot;: &quot;A String&quot;, # Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE.
                  &quot;type&quot;: &quot;A String&quot;, # Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES.
                },
              },
            },
            &quot;latentSpaceSource&quot;: &quot;A String&quot;, # Name of the source to generate embeddings for example based explanations.
            &quot;outputs&quot;: { # Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed.
              &quot;a_key&quot;: { # Metadata of the prediction output to be explained.
                &quot;displayNameMappingKey&quot;: &quot;A String&quot;, # Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. The display names in the prediction must have the same shape as the outputs, so that they can be located by Attribution.output_index for a specific output.
                &quot;indexDisplayNameMapping&quot;: &quot;&quot;, # Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It&#x27;s not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sorts the outputs by their values. The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. The Attribution.output_display_name is populated by locating in the mapping with Attribution.output_index.
                &quot;outputTensorName&quot;: &quot;A String&quot;, # Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow.
              },
            },
          },
          &quot;parameters&quot;: { # Parameters to configure explaining for Model&#x27;s predictions. # Required. Parameters that configure explaining of the Model&#x27;s predictions.
            &quot;examples&quot;: { # Example-based explainability that returns the nearest neighbors from the provided dataset. # Example-based explanations that return the nearest neighbors from the provided dataset.
              &quot;exampleGcsSource&quot;: { # The Cloud Storage input instances. # The Cloud Storage input instances.
                &quot;dataFormat&quot;: &quot;A String&quot;, # The format in which instances are given. If not specified, JSONL format is assumed. Currently only JSONL format is supported.
                &quot;gcsSource&quot;: { # The Google Cloud Storage location for the input content. # The Cloud Storage location for the input instances.
                  &quot;uris&quot;: [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.
                    &quot;A String&quot;,
                  ],
                },
              },
              &quot;gcsSource&quot;: { # The Google Cloud Storage location for the input content. # The Cloud Storage locations that contain the instances to be indexed for approximate nearest neighbor search.
                &quot;uris&quot;: [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.
                  &quot;A String&quot;,
                ],
              },
              &quot;nearestNeighborSearchConfig&quot;: &quot;&quot;, # The full configuration for the generated index; the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config).
              &quot;neighborCount&quot;: 42, # The number of neighbors to return when querying for examples.
              &quot;presets&quot;: { # Preset configuration for example-based explanations # Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality.
                &quot;modality&quot;: &quot;A String&quot;, # The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type.
                &quot;query&quot;: &quot;A String&quot;, # Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`.
              },
            },
            &quot;integratedGradientsAttribution&quot;: { # An attribution method that computes the Aumann-Shapley value taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 # An attribution method that computes Aumann-Shapley values taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365
              &quot;blurBaselineConfig&quot;: { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for IG with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
                &quot;maxBlurSigma&quot;: 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
              },
              &quot;smoothGradConfig&quot;: { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
                &quot;featureNoiseSigma&quot;: { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
                  &quot;noiseSigma&quot;: [ # Noise sigma per feature. No noise is added to features that are not set.
                    { # Noise sigma for a single feature.
                      &quot;name&quot;: &quot;A String&quot;, # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
                      &quot;sigma&quot;: 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
                    },
                  ],
                },
                &quot;noiseSigma&quot;: 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scaled to the range [0, 1] or [-1, 1], or z-scored so that features have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results, the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
                &quot;noisySampleCount&quot;: 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. The valid range is [1, 50]. Defaults to 3.
              },
              &quot;stepCount&quot;: 42, # Required. The number of steps for approximating the path integral. A good value to start with is 50; gradually increase it until the sum-to-diff property is within the desired error range. The valid range is [1, 100], inclusive.
            },
            &quot;outputIndices&quot;: [ # If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape as the output it&#x27;s explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e.g. multi-class Models that predict multiple classes).
              &quot;&quot;,
            ],
            &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for more details: https://arxiv.org/abs/1306.4265.
              &quot;pathCount&quot;: 42, # Required. The number of feature permutations to consider when approximating the Shapley values. The valid range is [1, 50], inclusive.
            },
            &quot;topK&quot;: 42, # If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g. multi-class Models). When set to -1, returns explanations for all outputs.
            &quot;xraiAttribution&quot;: { # An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. # An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead.
              &quot;blurBaselineConfig&quot;: { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
                &quot;maxBlurSigma&quot;: 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
              },
              &quot;smoothGradConfig&quot;: { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
                &quot;featureNoiseSigma&quot;: { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
                  &quot;noiseSigma&quot;: [ # Noise sigma per feature. No noise is added to features that are not set.
                    { # Noise sigma for a single feature.
                      &quot;name&quot;: &quot;A String&quot;, # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
                      &quot;sigma&quot;: 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
                    },
                  ],
                },
                &quot;noiseSigma&quot;: 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scaled to the range [0, 1] or [-1, 1], or z-scored so that features have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results, the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
                &quot;noisySampleCount&quot;: 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. The valid range is [1, 50]. Defaults to 3.
              },
              &quot;stepCount&quot;: 42, # Required. The number of steps for approximating the path integral. A good value to start with is 50; gradually increase it until the sum-to-diff property is within the desired error range. The valid range is [1, 100], inclusive.
            },
          },
        },
        &quot;explanationType&quot;: &quot;A String&quot;, # Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai`
      },
    ],
    &quot;metadata&quot;: &quot;&quot;, # The metadata of the ModelEvaluation. For a ModelEvaluation uploaded from a Managed Pipeline, the metadata contains a structured value with the keys &quot;pipeline_job_id&quot;, &quot;evaluation_dataset_type&quot;, &quot;evaluation_dataset_path&quot;, and &quot;row_based_metrics_path&quot;.
    &quot;metrics&quot;: &quot;&quot;, # Evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri.
    &quot;metricsSchemaUri&quot;: &quot;A String&quot;, # Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
    &quot;modelExplanation&quot;: { # Aggregated explanation metrics for a Model over a set of instances. # Aggregated explanation metrics for the Model&#x27;s prediction output over the data this ModelEvaluation uses. This field is populated only if the Model is evaluated with explanations, and only for AutoML tabular Models.
      &quot;meanAttributions&quot;: [ # Output only. Aggregated attributions explaining the Model&#x27;s prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attribution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated.
        { # Attribution that explains a particular prediction output.
          &quot;approximationError&quot;: 3.14, # Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information.
          &quot;baselineOutputValue&quot;: 3.14, # Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model&#x27;s predicted output has multiple dimensions (rank &gt; 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged.
          &quot;featureAttributions&quot;: &quot;&quot;, # Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature&#x27;s input format: * If the feature is a scalar value, the attribution value is a floating-point number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated).
          &quot;instanceOutputValue&quot;: 3.14, # Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index.
          &quot;outputDisplayName&quot;: &quot;A String&quot;, # Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is populated if and only if the Model predicts display names as a separate field along with the explained output. The predicted display name must have the same shape as the explained output, and can be located using output_index.
          &quot;outputIndex&quot;: [ # Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0.
            42,
          ],
          &quot;outputName&quot;: &quot;A String&quot;, # Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs.
        },
      ],
    },
    &quot;name&quot;: &quot;A String&quot;, # Output only. The resource name of the ModelEvaluation.
    &quot;sliceDimensions&quot;: [ # All possible dimensions of ModelEvaluationSlices. The dimensions can be used as the filter of the ModelService.ListModelEvaluationSlices request, in the form of `slice.dimension = `.
      &quot;A String&quot;,
    ],
  },
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # A collection of metrics calculated by comparing the Model&#x27;s predictions on all of the test data against annotations from the test data.
  &quot;biasConfigs&quot;: { # Configuration for bias detection. # Specify the configuration for bias detection.
    &quot;biasSlices&quot;: { # Specification for how the data should be sliced. # Specification for how the data should be sliced for bias. It contains a list of slices, with a limit of two slices. The first slice of data will be slice_a. The second slice in the list (slice_b) will be compared against the first slice. If only a single slice is provided, then slice_a will be compared against &quot;not slice_a&quot;. Below are examples with feature &quot;education&quot; with values &quot;low&quot;, &quot;medium&quot;, and &quot;high&quot; in the dataset: Example 1: bias_slices = [{&#x27;education&#x27;: &#x27;low&#x27;}] A single slice provided. In this case, slice_a is the collection of data where &#x27;education&#x27; equals &#x27;low&#x27;, and slice_b is the collection of data where &#x27;education&#x27; equals &#x27;medium&#x27; or &#x27;high&#x27;. Example 2: bias_slices = [{&#x27;education&#x27;: &#x27;low&#x27;}, {&#x27;education&#x27;: &#x27;high&#x27;}] Two slices provided. In this case, slice_a is the collection of data where &#x27;education&#x27; equals &#x27;low&#x27;, and slice_b is the collection of data where &#x27;education&#x27; equals &#x27;high&#x27;.
      &quot;configs&quot;: { # Mapping configuration for this SliceSpec. The key is the name of the feature. By default, the key will be prefixed by &quot;instance&quot; as a dictionary prefix for Vertex Batch Predictions output format.
        &quot;a_key&quot;: { # Specification message containing the config for this SliceSpec. When `kind` is selected as `value` and/or `range`, only a single slice will be computed. When `all_values` is present, a separate slice will be computed for each possible label/value for the corresponding key in `config`. Examples, with feature zip_code with values 12345, 23334, 88888 and feature country with values &quot;US&quot;, &quot;Canada&quot;, &quot;Mexico&quot; in the dataset: Example 1: { &quot;zip_code&quot;: { &quot;value&quot;: { &quot;float_value&quot;: 12345.0 } } } A single slice for any data with zip_code 12345 in the dataset. Example 2: { &quot;zip_code&quot;: { &quot;range&quot;: { &quot;low&quot;: 12345, &quot;high&quot;: 20000 } } } A single slice containing data where the zip_code is between 12345 and 20000. For this example, data with a zip_code of 12345 will be in this slice. Example 3: { &quot;zip_code&quot;: { &quot;range&quot;: { &quot;low&quot;: 10000, &quot;high&quot;: 20000 } }, &quot;country&quot;: { &quot;value&quot;: { &quot;string_value&quot;: &quot;US&quot; } } } A single slice containing data where the zip_code is between 10000 and 20000 and the country is &quot;US&quot;. For this example, data with a zip_code of 12345 and country &quot;US&quot; will be in this slice. Example 4: { &quot;country&quot;: {&quot;all_values&quot;: { &quot;value&quot;: true } } } Three slices are computed, one for each unique country in the dataset. Example 5: { &quot;country&quot;: { &quot;all_values&quot;: { &quot;value&quot;: true } }, &quot;zip_code&quot;: { &quot;value&quot;: { &quot;float_value&quot;: 12345.0 } } } Three slices are computed, one for each unique country in the dataset where the zip_code is also 12345. For this example, data with zip_code 12345 and country &quot;US&quot; will be in one slice, zip_code 12345 and country &quot;Canada&quot; in another slice, and zip_code 12345 and country &quot;Mexico&quot; in another slice, totaling 3 slices.
          &quot;allValues&quot;: True or False, # If all_values is set to true, then all possible labels of the keyed feature will have another slice computed. Example: `{&quot;all_values&quot;:{&quot;value&quot;:true}}`
          &quot;range&quot;: { # A range of values for slice(s). `low` is inclusive, `high` is exclusive. # A range of values for a numerical feature. Example: `{&quot;range&quot;:{&quot;low&quot;:10000.0,&quot;high&quot;:50000.0}}` will capture 12345 and 23334 in the slice.
            &quot;high&quot;: 3.14, # Exclusive high value for the range.
            &quot;low&quot;: 3.14, # Inclusive low value for the range.
          },
          &quot;value&quot;: { # Single value that supports strings and floats. # A unique specific value for a given feature. Example: `{ &quot;value&quot;: { &quot;string_value&quot;: &quot;12345&quot; } }`
            &quot;floatValue&quot;: 3.14, # Float type.
            &quot;stringValue&quot;: &quot;A String&quot;, # String type.
          },
        },
      },
    },
    &quot;labels&quot;: [ # Positive label selection on the target field.
      &quot;A String&quot;,
    ],
  },
  &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp when this ModelEvaluation was created.
  &quot;displayName&quot;: &quot;A String&quot;, # The display name of the ModelEvaluation.
  &quot;explanationSpecs&quot;: [ # Describes the values of ExplanationSpec that are used for explaining the predicted values on the evaluated data.
    {
      &quot;explanationSpec&quot;: { # Specification of Model explanation. # Explanation spec details.
        &quot;metadata&quot;: { # Metadata describing the Model&#x27;s input and output for explanation. # Optional. Metadata describing the Model&#x27;s input and output for explanation.
          &quot;featureAttributionsSchemaUri&quot;: &quot;A String&quot;, # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access.
          &quot;inputs&quot;: { # Required. Map from feature names to feature input metadata. Keys are the name of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match the key in the instance.
            &quot;a_key&quot;: { # Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow.
              &quot;denseShapeTensorName&quot;: &quot;A String&quot;, # Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
              &quot;encodedBaselines&quot;: [ # A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor.
                &quot;&quot;,
              ],
              &quot;encodedTensorName&quot;: &quot;A String&quot;, # Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table.
              &quot;encoding&quot;: &quot;A String&quot;, # Defines how the feature is encoded into the input tensor. Defaults to IDENTITY.
              &quot;featureValueDomain&quot;: { # Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which the input feature (with mean = 0 and stddev = 1) was obtained. # The domain details of the input feature value, such as min/max, or the original mean and standard deviation if the feature was normalized.
                &quot;maxValue&quot;: 3.14, # The maximum permissible value for this feature.
                &quot;minValue&quot;: 3.14, # The minimum permissible value for this feature.
                &quot;originalMean&quot;: 3.14, # If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization.
                &quot;originalStddev&quot;: 3.14, # If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization.
              },
              &quot;groupName&quot;: &quot;A String&quot;, # Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name.
              &quot;indexFeatureMapping&quot;: [ # A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR.
                &quot;A String&quot;,
              ],
              &quot;indicesTensorName&quot;: &quot;A String&quot;, # Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
              &quot;inputBaselines&quot;: [ # Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature&#x27;s input in the instance[]. The schema of any single instance may be specified via Endpoint&#x27;s DeployedModels&#x27; Model&#x27;s PredictSchemata&#x27;s instance_schema_uri.
                &quot;&quot;,
              ],
              &quot;inputTensorName&quot;: &quot;A String&quot;, # Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow.
              &quot;modality&quot;: &quot;A String&quot;, # Modality of the feature. Valid values are: numeric, image. Defaults to numeric.
              &quot;visualization&quot;: { # Visualization configurations for image explanation. # Visualization configurations for image explanation.
                &quot;clipPercentLowerbound&quot;: 3.14, # Excludes attributions below the specified percentile from the highlighted areas. Defaults to 62.
                &quot;clipPercentUpperbound&quot;: 3.14, # Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9.
                &quot;colorMap&quot;: &quot;A String&quot;, # The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue.
                &quot;overlayType&quot;: &quot;A String&quot;, # How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE.
                &quot;polarity&quot;: &quot;A String&quot;, # Whether to highlight only pixels with positive contributions, only those with negative contributions, or both. Defaults to POSITIVE.
                &quot;type&quot;: &quot;A String&quot;, # Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES.
              },
            },
          },
          &quot;latentSpaceSource&quot;: &quot;A String&quot;, # Name of the source to generate embeddings for example-based explanations.
          &quot;outputs&quot;: { # Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user-defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed.
            &quot;a_key&quot;: { # Metadata of the prediction output to be explained.
              &quot;displayNameMappingKey&quot;: &quot;A String&quot;, # Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. The display names in the prediction must have the same shape as the outputs, so that they can be located by Attribution.output_index for a specific output.
              &quot;indexDisplayNameMapping&quot;: &quot;&quot;, # Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It&#x27;s not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sorts the outputs by their values. The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. The Attribution.output_display_name is populated by looking up Attribution.output_index in the mapping.
              &quot;outputTensorName&quot;: &quot;A String&quot;, # Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow.
            },
          },
        },
        &quot;parameters&quot;: { # Parameters to configure explaining for Model&#x27;s predictions. # Required. Parameters that configure explaining of the Model&#x27;s predictions.
          &quot;examples&quot;: { # Example-based explainability that returns the nearest neighbors from the provided dataset. # Example-based explanations that return the nearest neighbors from the provided dataset.
            &quot;exampleGcsSource&quot;: { # The Cloud Storage input instances. # The Cloud Storage input instances.
              &quot;dataFormat&quot;: &quot;A String&quot;, # The format in which instances are given. If not specified, JSONL format is assumed. Currently only JSONL format is supported.
              &quot;gcsSource&quot;: { # The Google Cloud Storage location for the input content. # The Cloud Storage location for the input instances.
                &quot;uris&quot;: [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.
                  &quot;A String&quot;,
                ],
              },
            },
            &quot;gcsSource&quot;: { # The Google Cloud Storage location for the input content. # The Cloud Storage locations that contain the instances to be indexed for approximate nearest neighbor search.
              &quot;uris&quot;: [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.
                &quot;A String&quot;,
              ],
            },
            &quot;nearestNeighborSearchConfig&quot;: &quot;&quot;, # The full configuration for the generated index; the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config).
            &quot;neighborCount&quot;: 42, # The number of neighbors to return when querying for examples.
            &quot;presets&quot;: { # Preset configuration for example-based explanations # Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality.
              &quot;modality&quot;: &quot;A String&quot;, # The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type.
              &quot;query&quot;: &quot;A String&quot;, # Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`.
            },
          },
          &quot;integratedGradientsAttribution&quot;: { # An attribution method that computes the Aumann-Shapley value taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 # An attribution method that computes Aumann-Shapley values taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365
            &quot;blurBaselineConfig&quot;: { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for IG with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
              &quot;maxBlurSigma&quot;: 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
            },
            &quot;smoothGradConfig&quot;: { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
              &quot;featureNoiseSigma&quot;: { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
                &quot;noiseSigma&quot;: [ # Noise sigma per feature. No noise is added to features that are not set.
                  { # Noise sigma for a single feature.
                    &quot;name&quot;: &quot;A String&quot;, # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
                    &quot;sigma&quot;: 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
                  },
                ],
              },
              &quot;noiseSigma&quot;: 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scaled to the range [0, 1] or [-1, 1], or z-scored so that features have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results, the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
              &quot;noisySampleCount&quot;: 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. The valid range is [1, 50]. Defaults to 3.
            },
            &quot;stepCount&quot;: 42, # Required. The number of steps for approximating the path integral. A good value to start with is 50; gradually increase it until the sum-to-diff property is within the desired error range. The valid range is [1, 100], inclusive.
          },
          &quot;outputIndices&quot;: [ # If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape as the output it&#x27;s explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e.g. multi-class Models that predict multiple classes).
            &quot;&quot;,
          ],
          &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for more details: https://arxiv.org/abs/1306.4265.
            &quot;pathCount&quot;: 42, # Required. The number of feature permutations to consider when approximating the Shapley values. The valid range is [1, 50], inclusive.
          },
          &quot;topK&quot;: 42, # If populated, returns attributions for top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g. multi-class Models). When set to -1, returns explanations for all outputs.
          &quot;xraiAttribution&quot;: { # An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. # An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead.
            &quot;blurBaselineConfig&quot;: { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
              &quot;maxBlurSigma&quot;: 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
            },
            &quot;smoothGradConfig&quot;: { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
              &quot;featureNoiseSigma&quot;: { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
                &quot;noiseSigma&quot;: [ # Noise sigma per feature. No noise is added to features that are not set.
                  { # Noise sigma for a single feature.
                    &quot;name&quot;: &quot;A String&quot;, # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
                    &quot;sigma&quot;: 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
                  },
                ],
              },
              &quot;noiseSigma&quot;: 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scaled to the range [0, 1] or [-1, 1], or z-scored so that features have 0-mean and 1-variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results, the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
              &quot;noisySampleCount&quot;: 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. The valid range is [1, 50]. Defaults to 3.
            },
            &quot;stepCount&quot;: 42, # Required. The number of steps for approximating the path integral. A good value to start with is 50; gradually increase it until the sum-to-diff property is within the desired error range. The valid range is [1, 100], inclusive.
          },
        },
      },
      &quot;explanationType&quot;: &quot;A String&quot;, # Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai`
    },
  ],
  &quot;metadata&quot;: &quot;&quot;, # The metadata of the ModelEvaluation. For a ModelEvaluation uploaded from a Managed Pipeline, the metadata contains a structured value with the keys &quot;pipeline_job_id&quot;, &quot;evaluation_dataset_type&quot;, &quot;evaluation_dataset_path&quot;, and &quot;row_based_metrics_path&quot;.
  &quot;metrics&quot;: &quot;&quot;, # Evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri.
  &quot;metricsSchemaUri&quot;: &quot;A String&quot;, # Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  &quot;modelExplanation&quot;: { # Aggregated explanation metrics for a Model over a set of instances. # Aggregated explanation metrics for the Model&#x27;s prediction output over the data this ModelEvaluation uses. This field is populated only if the Model is evaluated with explanations, and only for AutoML tabular Models.
    &quot;meanAttributions&quot;: [ # Output only. Aggregated attributions explaining the Model&#x27;s prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attribution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated.
      { # Attribution that explains a particular prediction output.
        &quot;approximationError&quot;: 3.14, # Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information.
        &quot;baselineOutputValue&quot;: 3.14, # Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model&#x27;s predicted output has multiple dimensions (rank &gt; 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged.
        &quot;featureAttributions&quot;: &quot;&quot;, # Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature&#x27;s input format: * If the feature is a scalar value, the attribution value is a floating-point number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated).
        &quot;instanceOutputValue&quot;: 3.14, # Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index.
        &quot;outputDisplayName&quot;: &quot;A String&quot;, # Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is populated if and only if the Model predicts display names as a separate field along with the explained output. The predicted display name must have the same shape as the explained output, and can be located using output_index.
        &quot;outputIndex&quot;: [ # Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0.
          42,
        ],
        &quot;outputName&quot;: &quot;A String&quot;, # Output only. Name of the explain output. Specified as the key in ExplanationMetadata.outputs.
      },
    ],
  },
  &quot;name&quot;: &quot;A String&quot;, # Output only. The resource name of the ModelEvaluation.
  &quot;sliceDimensions&quot;: [ # All possible dimensions of ModelEvaluationSlices. The dimensions can be used as the filter of the ModelService.ListModelEvaluationSlices request, in the form of `slice.dimension = `.
    &quot;A String&quot;,
  ],
}</pre>
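<p>Editorial note: below is a minimal usage sketch for the call documented above, assuming it is the <code>import_</code> method of this resource (the Python client appends an underscore because <code>import</code> is a reserved word). The project, location, model ID, and Cloud Storage URI are placeholders, and the body shows only a few of the fields from the schema.</p>
<pre>
from googleapiclient import discovery

# Build the Vertex AI client; assumes application-default credentials are configured.
service = discovery.build(&quot;aiplatform&quot;, &quot;v1beta1&quot;)

# Placeholder resource name of the Model that owns the evaluation.
parent = &quot;projects/my-project/locations/us-central1/models/1234567890&quot;

# A minimal ModelEvaluation wrapped in the request body shape shown above.
body = {
    &quot;modelEvaluation&quot;: {
        &quot;displayName&quot;: &quot;my-imported-evaluation&quot;,
        &quot;metricsSchemaUri&quot;: &quot;gs://my-bucket/metrics_schema.yaml&quot;,  # hypothetical schema file
        &quot;metrics&quot;: {},  # must conform to the schema file above
    },
}

evaluation = (
    service.projects()
    .locations()
    .models()
    .evaluations()
    .import_(parent=parent, body=body)
    .execute()
)
print(evaluation[&quot;name&quot;])  # Output only: resource name of the new ModelEvaluation.
</pre>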
</div>

<div class="method">
    <code class="details" id="list">list(parent, filter=None, pageSize=None, pageToken=None, readMask=None, x__xgafv=None)</code>
  <pre>Lists ModelEvaluations in a Model.

Args:
  parent: string, Required. The resource name of the Model to list the ModelEvaluations from. Format: `projects/{project}/locations/{location}/models/{model}` (required)
  filter: string, The standard list filter.
  pageSize: integer, The standard list page size.
  pageToken: string, The standard list page token. Typically obtained via ListModelEvaluationsResponse.next_page_token of the previous ModelService.ListModelEvaluations call.
  readMask: string, Mask specifying which fields to read.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format
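
  Example (editorial sketch, not generated from the discovery document): paging
  through all ModelEvaluations with the pageToken protocol described above. It
  assumes `service` was built with googleapiclient.discovery.build(&quot;aiplatform&quot;, &quot;v1beta1&quot;)
  and that `parent` names an existing Model; list_next is the standard pagination
  helper the client library generates alongside every list method.

    request = service.projects().locations().models().evaluations().list(parent=parent)
    while request is not None:
        response = request.execute()
        for evaluation in response.get(&quot;modelEvaluations&quot;, []):
            print(evaluation[&quot;name&quot;])
        # list_next returns None once next_page_token is exhausted.
        request = service.projects().locations().models().evaluations().list_next(
            previous_request=request, previous_response=response)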

Returns:
  An object of the form:

    { # Response message for ModelService.ListModelEvaluations.
  &quot;modelEvaluations&quot;: [ # List of ModelEvaluations in the requested page.
    { # A collection of metrics calculated by comparing the Model&#x27;s predictions on all of the test data against annotations from the test data.
      &quot;biasConfigs&quot;: { # Configuration for bias detection. # Specify the configuration for bias detection.
        &quot;biasSlices&quot;: { # Specification for how the data should be sliced. # Specification for how the data should be sliced for bias. It contains a list of slices, with a limit of two slices. The first slice of data will be slice_a. The second slice in the list (slice_b) will be compared against the first slice. If only a single slice is provided, then slice_a will be compared against &quot;not slice_a&quot;. Below are examples with feature &quot;education&quot; with values &quot;low&quot;, &quot;medium&quot;, and &quot;high&quot; in the dataset: Example 1: bias_slices = [{&#x27;education&#x27;: &#x27;low&#x27;}] A single slice provided. In this case, slice_a is the collection of data where &#x27;education&#x27; equals &#x27;low&#x27;, and slice_b is the collection of data where &#x27;education&#x27; equals &#x27;medium&#x27; or &#x27;high&#x27;. Example 2: bias_slices = [{&#x27;education&#x27;: &#x27;low&#x27;}, {&#x27;education&#x27;: &#x27;high&#x27;}] Two slices provided. In this case, slice_a is the collection of data where &#x27;education&#x27; equals &#x27;low&#x27;, and slice_b is the collection of data where &#x27;education&#x27; equals &#x27;high&#x27;.
          &quot;configs&quot;: { # Mapping configuration for this SliceSpec. The key is the name of the feature. By default, the key will be prefixed by &quot;instance&quot; as a dictionary prefix for Vertex Batch Predictions output format.
            &quot;a_key&quot;: { # Specification message containing the config for this SliceSpec. When `kind` is selected as `value` and/or `range`, only a single slice will be computed. When `all_values` is present, a separate slice will be computed for each possible label/value for the corresponding key in `config`. Examples, with a feature zip_code with values 12345, 23334, 88888 and a feature country with values &quot;US&quot;, &quot;Canada&quot;, &quot;Mexico&quot; in the dataset: Example 1: { &quot;zip_code&quot;: { &quot;value&quot;: { &quot;float_value&quot;: 12345.0 } } } A single slice for any data with zip_code 12345 in the dataset. Example 2: { &quot;zip_code&quot;: { &quot;range&quot;: { &quot;low&quot;: 12345, &quot;high&quot;: 20000 } } } A single slice containing data where the zip_code is between 12345 and 20000. For this example, data with the zip_code of 12345 will be in this slice. Example 3: { &quot;zip_code&quot;: { &quot;range&quot;: { &quot;low&quot;: 10000, &quot;high&quot;: 20000 } }, &quot;country&quot;: { &quot;value&quot;: { &quot;string_value&quot;: &quot;US&quot; } } } A single slice containing data where the zip_code is between 10000 and 20000 and the country is &quot;US&quot;. For this example, data with the zip_code of 12345 and country &quot;US&quot; will be in this slice. Example 4: { &quot;country&quot;: {&quot;all_values&quot;: { &quot;value&quot;: true } } } Three slices are computed, one for each unique country in the dataset. Example 5: { &quot;country&quot;: { &quot;all_values&quot;: { &quot;value&quot;: true } }, &quot;zip_code&quot;: { &quot;value&quot;: { &quot;float_value&quot;: 12345.0 } } } Three slices are computed, one for each unique country in the dataset where the zip_code is also 12345. For this example, data with zip_code 12345 and country &quot;US&quot; will be in one slice, zip_code 12345 and country &quot;Canada&quot; in another slice, and zip_code 12345 and country &quot;Mexico&quot; in another slice, totaling 3 slices.
              &quot;allValues&quot;: True or False, # If all_values is set to true, then all possible labels of the keyed feature will have another slice computed. Example: `{&quot;all_values&quot;:{&quot;value&quot;:true}}`
              &quot;range&quot;: { # A range of values for slice(s). `low` is inclusive, `high` is exclusive. # A range of values for a numerical feature. Example: `{&quot;range&quot;:{&quot;low&quot;:10000.0,&quot;high&quot;:50000.0}}` will capture 12345 and 23334 in the slice.
                &quot;high&quot;: 3.14, # Exclusive high value for the range.
                &quot;low&quot;: 3.14, # Inclusive low value for the range.
              },
              &quot;value&quot;: { # Single value that supports strings and floats. # A unique specific value for a given feature. Example: `{ &quot;value&quot;: { &quot;string_value&quot;: &quot;12345&quot; } }`
                &quot;floatValue&quot;: 3.14, # Float type.
                &quot;stringValue&quot;: &quot;A String&quot;, # String type.
              },
            },
          },
        },
        &quot;labels&quot;: [ # Selection of positive labels on the target field.
          &quot;A String&quot;,
        ],
      },
      &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp when this ModelEvaluation was created.
      &quot;displayName&quot;: &quot;A String&quot;, # The display name of the ModelEvaluation.
      &quot;explanationSpecs&quot;: [ # Describes the values of ExplanationSpec that are used for explaining the predicted values on the evaluated data.
        {
          &quot;explanationSpec&quot;: { # Specification of Model explanation. # Explanation spec details.
            &quot;metadata&quot;: { # Metadata describing the Model&#x27;s input and output for explanation. # Optional. Metadata describing the Model&#x27;s input and output for explanation.
              &quot;featureAttributionsSchemaUri&quot;: &quot;A String&quot;, # Points to a YAML file stored on Google Cloud Storage describing the format of the feature attributions. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML tabular Models always have this field populated by Vertex AI. Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access.
              &quot;inputs&quot;: { # Required. Map from feature names to feature input metadata. Keys are the names of the features. Values are the specification of the feature. An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in ExplanationMetadata.inputs. The baseline of the empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow images, the key can be any friendly name of the feature. Once specified, featureAttributions are keyed by this key (if not grouped with another feature). For custom images, the key must match the key in instance.
                &quot;a_key&quot;: { # Metadata of the input of a feature. Fields other than InputMetadata.input_baselines are applicable only for Models that are using Vertex AI-provided images for Tensorflow.
                  &quot;denseShapeTensorName&quot;: &quot;A String&quot;, # Specifies the shape of the values of the input if the input is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
                  &quot;encodedBaselines&quot;: [ # A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is provided, Vertex AI broadcasts to the same shape as the encoded tensor.
                    &quot;&quot;,
                  ],
                  &quot;encodedTensorName&quot;: &quot;A String&quot;, # Encoded tensor is a transformation of the input tensor. Must be provided if choosing Integrated Gradients attribution or XRAI attribution and the input tensor is not differentiable. An encoded tensor is generated if the input tensor is encoded by a lookup table.
                  &quot;encoding&quot;: &quot;A String&quot;, # Defines how the feature is encoded into the input tensor. Defaults to IDENTITY.
                  &quot;featureValueDomain&quot;: { # Domain details of the input feature value. Provides numeric information about the feature, such as its range (min, max). If the feature has been pre-processed, for example with z-scoring, then it provides information about how to recover the original feature. For example, if the input feature is an image and it has been pre-processed to obtain 0-mean and stddev = 1 values, then original_mean, and original_stddev refer to the mean and stddev of the original feature (e.g. image tensor) from which input feature (with mean = 0 and stddev = 1) was obtained. # The domain details of the input feature value. Like min/max, original mean or standard deviation if normalized.
                    &quot;maxValue&quot;: 3.14, # The maximum permissible value for this feature.
                    &quot;minValue&quot;: 3.14, # The minimum permissible value for this feature.
                    &quot;originalMean&quot;: 3.14, # If this input feature has been normalized to a mean value of 0, the original_mean specifies the mean value of the domain prior to normalization.
                    &quot;originalStddev&quot;: 3.14, # If this input feature has been normalized to a standard deviation of 1.0, the original_stddev specifies the standard deviation of the domain prior to normalization.
                  },
                  &quot;groupName&quot;: &quot;A String&quot;, # Name of the group that the input belongs to. Features with the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one single attribution generated in Attribution.feature_attributions, keyed by the group name.
                  &quot;indexFeatureMapping&quot;: [ # A list of feature names for each index in the input tensor. Required when the input InputMetadata.encoding is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR.
                    &quot;A String&quot;,
                  ],
                  &quot;indicesTensorName&quot;: &quot;A String&quot;, # Specifies the index of the values of the input tensor. Required when the input tensor is a sparse representation. Refer to Tensorflow documentation for more details: https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
                  &quot;inputBaselines&quot;: [ # Baseline inputs for this feature. If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in Attribution.feature_attributions. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input tensor. If a scalar is provided, we broadcast to the same shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature&#x27;s input in the instance[]. The schema of any single instance may be specified via Endpoint&#x27;s DeployedModels&#x27; Model&#x27;s PredictSchemata&#x27;s instance_schema_uri.
                    &quot;&quot;,
                  ],
                  &quot;inputTensorName&quot;: &quot;A String&quot;, # Name of the input tensor for this feature. Required and is only applicable to Vertex AI-provided images for Tensorflow.
                  &quot;modality&quot;: &quot;A String&quot;, # Modality of the feature. Valid values are: numeric, image. Defaults to numeric.
                  &quot;visualization&quot;: { # Visualization configurations for image explanation. # Visualization configurations for image explanation.
                    &quot;clipPercentLowerbound&quot;: 3.14, # Excludes attributions below the specified percentile from the highlighted areas. Defaults to 62.
                    &quot;clipPercentUpperbound&quot;: 3.14, # Excludes attributions above the specified percentile from the highlighted areas. Using the clip_percent_upperbound and clip_percent_lowerbound together can be useful for filtering out noise and making it easier to see areas of strong attribution. Defaults to 99.9.
                    &quot;colorMap&quot;: &quot;A String&quot;, # The color scheme used for the highlighted areas. Defaults to PINK_GREEN for Integrated Gradients attribution, which shows positive attributions in green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which highlights the most influential regions in yellow and the least influential in blue.
                    &quot;overlayType&quot;: &quot;A String&quot;, # How the original image is displayed in the visualization. Adjusting the overlay can help increase visual clarity if the original image makes it difficult to view the visualization. Defaults to NONE.
                    &quot;polarity&quot;: &quot;A String&quot;, # Whether to only highlight pixels with positive contributions, negative or both. Defaults to POSITIVE.
                    &quot;type&quot;: &quot;A String&quot;, # Type of the image visualization. Only applicable to Integrated Gradients attribution. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES.
                  },
                },
              },
              &quot;latentSpaceSource&quot;: &quot;A String&quot;, # Name of the source to generate embeddings for example-based explanations.
              &quot;outputs&quot;: { # Required. Map from output names to output metadata. For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the output field in the prediction to be explained. Currently only one key is allowed.
                &quot;a_key&quot;: { # Metadata of the prediction output to be explained.
                  &quot;displayNameMappingKey&quot;: &quot;A String&quot;, # Specify a field name in the prediction to look for the display name. Use this if the prediction contains the display names for the outputs. The display names in the prediction must have the same shape of the outputs, so that it can be located by Attribution.output_index for a specific output.
                  &quot;indexDisplayNameMapping&quot;: &quot;&quot;, # Static mapping between the index and display name. Use this if the outputs are a deterministic n-dimensional array, e.g. a list of scores of all the classes in a pre-defined order for a multi-classification Model. It&#x27;s not feasible if the outputs are non-deterministic, e.g. the Model produces top-k classes or sorts the outputs by their values. The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. Attribution.output_display_name is populated by looking up Attribution.output_index in the mapping.
                  &quot;outputTensorName&quot;: &quot;A String&quot;, # Name of the output tensor. Required and is only applicable to Vertex AI provided images for Tensorflow.
                },
              },
            },
            &quot;parameters&quot;: { # Parameters to configure explaining for Model&#x27;s predictions. # Required. Parameters that configure explaining of the Model&#x27;s predictions.
              &quot;examples&quot;: { # Example-based explainability that returns the nearest neighbors from the provided dataset. # Example-based explanations that return the nearest neighbors from the provided dataset.
                &quot;exampleGcsSource&quot;: { # The Cloud Storage input instances. # The Cloud Storage input instances.
                  &quot;dataFormat&quot;: &quot;A String&quot;, # The format in which instances are given; if not specified, JSONL format is assumed. Currently only JSONL format is supported.
                  &quot;gcsSource&quot;: { # The Google Cloud Storage location for the input content. # The Cloud Storage location for the input instances.
                    &quot;uris&quot;: [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.
                      &quot;A String&quot;,
                    ],
                  },
                },
                &quot;gcsSource&quot;: { # The Google Cloud Storage location for the input content. # The Cloud Storage locations that contain the instances to be indexed for approximate nearest neighbor search.
                  &quot;uris&quot;: [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.
                    &quot;A String&quot;,
                  ],
                },
                &quot;nearestNeighborSearchConfig&quot;: &quot;&quot;, # The full configuration for the generated index, the semantics are the same as metadata and should match [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config).
                &quot;neighborCount&quot;: 42, # The number of neighbors to return when querying for examples.
                &quot;presets&quot;: { # Preset configuration for example-based explanations # Simplified preset configuration, which automatically sets configuration values based on the desired query speed-precision trade-off and modality.
                  &quot;modality&quot;: &quot;A String&quot;, # The modality of the uploaded model, which automatically configures the distance measurement and feature normalization for the underlying example index and queries. If your model does not precisely fit one of these types, it is okay to choose the closest type.
                  &quot;query&quot;: &quot;A String&quot;, # Preset option controlling parameters for speed-precision trade-off when querying for examples. If omitted, defaults to `PRECISE`.
                },
              },
              &quot;integratedGradientsAttribution&quot;: { # An attribution method that computes the Aumann-Shapley value taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 # An attribution method that computes Aumann-Shapley values taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365
                &quot;blurBaselineConfig&quot;: { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for IG with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
                  &quot;maxBlurSigma&quot;: 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
                },
                &quot;smoothGradConfig&quot;: { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
                  &quot;featureNoiseSigma&quot;: { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
                    &quot;noiseSigma&quot;: [ # Noise sigma per feature. No noise is added to features that are not set.
                      { # Noise sigma for a single feature.
                        &quot;name&quot;: &quot;A String&quot;, # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
                        &quot;sigma&quot;: 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
                      },
                    ],
                  },
                  &quot;noiseSigma&quot;: 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scaled to the range [0, 1] or [-1, 1], or z-scored so that features have 0 mean and unit variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results, the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
                  &quot;noisySampleCount&quot;: 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3.
                },
                &quot;stepCount&quot;: 42, # Required. The number of steps for approximating the path integral. A good starting value is 50; gradually increase it until the sum-to-diff property is within the desired error range. The valid range is [1, 100], inclusive.
              },
              &quot;outputIndices&quot;: [ # If populated, only returns attributions that have output_index contained in output_indices. It must be an ndarray of integers, with the same shape as the output it&#x27;s explaining. If not populated, returns attributions for top_k indices of outputs. If neither top_k nor output_indices is populated, returns the argmax index of the outputs. Only applicable to Models that predict multiple outputs (e.g., multi-class Models that predict multiple classes).
                &quot;&quot;,
              ],
              &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. # An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. Refer to this paper for more details: https://arxiv.org/abs/1306.4265.
                &quot;pathCount&quot;: 42, # Required. The number of feature permutations to consider when approximating the Shapley values. The valid range is [1, 50], inclusive.
              },
              &quot;topK&quot;: 42, # If populated, returns attributions for the top K indices of outputs (defaults to 1). Only applies to Models that predict more than one output (e.g., multi-class Models). When set to -1, returns explanations for all outputs.
              &quot;xraiAttribution&quot;: { # An explanation method that redistributes Integrated Gradients attributions to segmented regions, taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Supported only by image Models. # An attribution method that redistributes Integrated Gradients attribution to segmented regions, taking advantage of the model&#x27;s fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural images, like a picture of a house or an animal. If the images are taken in artificial environments, like a lab or manufacturing line, or from diagnostic equipment, like x-rays or quality-control cameras, use Integrated Gradients instead.
                &quot;blurBaselineConfig&quot;: { # Config for blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383 # Config for XRAI with blur baseline. When enabled, a linear path from the maximally blurred image to the input image is created. Using a blurred baseline instead of zero (black image) is motivated by the BlurIG approach explained here: https://arxiv.org/abs/2004.03383
                  &quot;maxBlurSigma&quot;: 3.14, # The standard deviation of the blur kernel for the blurred baseline. The same blurring parameter is used for both the height and the width dimension. If not set, the method defaults to the zero (i.e. black for images) baseline.
                },
                &quot;smoothGradConfig&quot;: { # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf # Config for SmoothGrad approximation of gradients. When enabled, the gradients are approximated by averaging the gradients from noisy samples in the vicinity of the inputs. Adding noise can help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf
                  &quot;featureNoiseSigma&quot;: { # Noise sigma by features. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. # This is similar to noise_sigma, but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, noise_sigma will be used for all features.
                    &quot;noiseSigma&quot;: [ # Noise sigma per feature. No noise is added to features that are not set.
                      { # Noise sigma for a single feature.
                        &quot;name&quot;: &quot;A String&quot;, # The name of the input feature for which noise sigma is provided. The features are defined in explanation metadata inputs.
                        &quot;sigma&quot;: 3.14, # This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to noise_sigma but represents the noise added to the current feature. Defaults to 0.1.
                      },
                    ],
                  },
                  &quot;noiseSigma&quot;: 3.14, # This is a single float value and will be used to add noise to all the features. Use this field when all features are normalized to have the same distribution: scaled to the range [0, 1] or [-1, 1], or z-scored so that features have 0 mean and unit variance. Learn more about [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). For best results, the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to section 3.2 of the SmoothGrad paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set feature_noise_sigma instead for each feature.
                  &quot;noisySampleCount&quot;: 42, # The number of gradient samples to use for approximation. The higher this number, the more accurate the gradient is, but the runtime complexity increases by this factor as well. Valid range of its value is [1, 50]. Defaults to 3.
                },
                &quot;stepCount&quot;: 42, # Required. The number of steps for approximating the path integral. A good starting value is 50; gradually increase it until the sum-to-diff property is met within the desired error range. The valid range is [1, 100], inclusive.
              },
            },
          },
          &quot;explanationType&quot;: &quot;A String&quot;, # Explanation type. For AutoML Image Classification models, possible values are: * `image-integrated-gradients` * `image-xrai`
        },
      ],
      &quot;metadata&quot;: &quot;&quot;, # The metadata of the ModelEvaluation. For the ModelEvaluation uploaded from Managed Pipeline, metadata contains a structured value with keys of &quot;pipeline_job_id&quot;, &quot;evaluation_dataset_type&quot;, &quot;evaluation_dataset_path&quot;, &quot;row_based_metrics_path&quot;.
      &quot;metrics&quot;: &quot;&quot;, # Evaluation metrics of the Model. The schema of the metrics is stored in metrics_schema_uri.
      &quot;metricsSchemaUri&quot;: &quot;A String&quot;, # Points to a YAML file stored on Google Cloud Storage describing the metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
      &quot;modelExplanation&quot;: { # Aggregated explanation metrics for a Model over a set of instances. # Aggregated explanation metrics for the Model&#x27;s prediction output over the data this ModelEvaluation uses. This field is populated only if the Model is evaluated with explanations, and only for AutoML tabular Models.
        &quot;meanAttributions&quot;: [ # Output only. Aggregated attributions explaining the Model&#x27;s prediction outputs over the set of instances. The attributions are grouped by outputs. For Models that predict only one output, such as regression Models that predict only one score, there is only one attribution that explains the predicted output. For Models that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. Attribution.output_index can be used to identify which output this attribution is explaining. The baselineOutputValue, instanceOutputValue and featureAttributions fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. Attribution.approximation_error is not populated.
          { # Attribution that explains a particular prediction output.
            &quot;approximationError&quot;: 3.14, # Output only. Error of feature_attributions caused by approximation used in the explanation method. Lower value means more precise attributions. * For Sampled Shapley attribution, increasing path_count might reduce the error. * For Integrated Gradients attribution, increasing step_count might reduce the error. * For XRAI attribution, increasing step_count might reduce the error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for more information.
            &quot;baselineOutputValue&quot;: 3.14, # Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in ExplanationMetadata.inputs. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model&#x27;s predicted output has multiple dimensions (rank &gt; 1), this is the value in the output located by output_index. If there are multiple baselines, their output values are averaged.
            &quot;featureAttributions&quot;: &quot;&quot;, # Output only. Attributions of each explained feature. Features are extracted from the prediction instances according to explanation metadata for inputs. The value is a struct, whose keys are the name of the feature. The values are how much the feature in the instance contributed to the predicted result. The format of the value is determined by the feature&#x27;s input format: * If the feature is a scalar value, the attribution value is a floating number. * If the feature is an array of scalar values, the attribution value is an array. * If the feature is a struct, the attribution value is a struct. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The ExplanationMetadata.feature_attributions_schema_uri field, pointed to by the ExplanationSpec field of the Endpoint.deployed_models object, points to the schema file that describes the features and their attribution values (if it is populated).
            &quot;instanceOutputValue&quot;: 3.14, # Output only. Model predicted output on the corresponding explanation instance. The field name of the output is determined by the key in ExplanationMetadata.outputs. If the Model predicted output has multiple dimensions, this is the value in the output located by output_index.
            &quot;outputDisplayName&quot;: &quot;A String&quot;, # Output only. The display name of the output identified by output_index. For example, the predicted class name by a multi-classification Model. This field is populated if and only if the Model predicts display names as a separate field along with the explained output. The predicted display name must have the same shape as the explained output, and can be located using output_index.
            &quot;outputIndex&quot;: [ # Output only. The index that locates the explained prediction output. If the prediction output is a scalar value, output_index is not populated. If the prediction output has multiple dimensions, the length of the output_index list is the same as the number of dimensions of the output. The i-th element in output_index is the element index of the i-th dimension of the output vector. Indices start from 0.
              42,
            ],
            &quot;outputName&quot;: &quot;A String&quot;, # Output only. Name of the explained output. Specified as the key in ExplanationMetadata.outputs.
          },
        ],
      },
      &quot;name&quot;: &quot;A String&quot;, # Output only. The resource name of the ModelEvaluation.
      &quot;sliceDimensions&quot;: [ # All possible dimensions of ModelEvaluationSlices. The dimensions can be used as the filter of the ModelService.ListModelEvaluationSlices request, in the form of `slice.dimension = &lt;dimension&gt;`.
        &quot;A String&quot;,
      ],
    },
  ],
  &quot;nextPageToken&quot;: &quot;A String&quot;, # A token to retrieve the next page of results. Pass to ListModelEvaluationsRequest.page_token to obtain that page.
}</pre>
</div>
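
<p>A minimal usage sketch for this method. The parent model name is a placeholder, and the service object is assumed to come from <code>googleapiclient.discovery.build</code>:</p>
<pre>
from googleapiclient import discovery

service = discovery.build(&#x27;aiplatform&#x27;, &#x27;v1beta1&#x27;)

# Hypothetical model resource name; substitute your own IDs.
parent = &#x27;projects/my-project/locations/us-central1/models/123&#x27;

response = (service.projects().locations().models().evaluations()
            .list(parent=parent, pageSize=10)
            .execute())

for evaluation in response.get(&#x27;modelEvaluations&#x27;, []):
    print(evaluation.get(&#x27;name&#x27;), evaluation.get(&#x27;metricsSchemaUri&#x27;))
</pre>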

<div class="method">
    <code class="details" id="list_next">list_next()</code>
  <pre>Retrieves the next page of results.

Args:
  previous_request: The request for the previous page. (required)
  previous_response: The response from the request for the previous page. (required)

Returns:
  A request object that you can call &#x27;execute()&#x27; on to request the next
  page. Returns None if there are no more items in the collection.
</pre>
</div>
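
<p><code>list</code> and <code>list_next</code> compose into the standard pagination loop used throughout this client library. A sketch, reusing the placeholder <code>parent</code> and service object from the example above:</p>
<pre>
evaluations = service.projects().locations().models().evaluations()

request = evaluations.list(parent=parent, pageSize=10)
while request is not None:
    response = request.execute()
    for evaluation in response.get(&#x27;modelEvaluations&#x27;, []):
        print(evaluation.get(&#x27;name&#x27;))
    # list_next returns None once the last page has been consumed.
    request = evaluations.list_next(previous_request=request,
                                    previous_response=response)
</pre>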

</body></html>