File: discoveryengine_v1beta.projects.locations.evaluations.html

<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method  {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="discoveryengine_v1beta.html">Discovery Engine API</a> . <a href="discoveryengine_v1beta.projects.html">projects</a> . <a href="discoveryengine_v1beta.projects.locations.html">locations</a> . <a href="discoveryengine_v1beta.projects.locations.evaluations.html">evaluations</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="discoveryengine_v1beta.projects.locations.evaluations.operations.html">operations()</a></code>
</p>
<p class="firstline">Returns the operations Resource.</p>

<p class="toc_element">
  <code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
<p class="toc_element">
  <code><a href="#create">create(parent, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Creates a Evaluation. Upon creation, the evaluation will be automatically triggered and begin execution.</p>
<p class="toc_element">
  <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
<p class="firstline">Gets a Evaluation.</p>
<p class="toc_element">
  <code><a href="#list">list(parent, pageSize=None, pageToken=None, x__xgafv=None)</a></code></p>
<p class="firstline">Gets a list of Evaluations.</p>
<p class="toc_element">
  <code><a href="#listResults">listResults(evaluation, pageSize=None, pageToken=None, x__xgafv=None)</a></code></p>
<p class="firstline">Gets a list of results for a given a Evaluation.</p>
<p class="toc_element">
  <code><a href="#listResults_next">listResults_next()</a></code></p>
<p class="firstline">Retrieves the next page of results.</p>
<p class="toc_element">
  <code><a href="#list_next">list_next()</a></code></p>
<p class="firstline">Retrieves the next page of results.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="close">close()</code>
  <pre>Close httplib2 connections.</pre>
</div>

<div class="method">
    <code class="details" id="create">create(parent, body=None, x__xgafv=None)</code>
  <pre>Creates an Evaluation. Upon creation, the evaluation will be automatically triggered and begin execution.

Args:
  parent: string, Required. The parent resource name, such as `projects/{project}/locations/{location}`. (required)
  body: object, The request body.
    The object takes the form of:

{ # An evaluation is a single execution (or run) of an evaluation process. It encapsulates the state of the evaluation and the resulting data.
  &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp the Evaluation was created at.
  &quot;endTime&quot;: &quot;A String&quot;, # Output only. Timestamp the Evaluation was completed at.
  &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. The error that occurred during evaluation. Only populated when the evaluation&#x27;s state is FAILED.
    &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
    &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
      {
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
    ],
    &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
  },
  &quot;errorSamples&quot;: [ # Output only. A sample of errors encountered while processing the request.
    { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
      &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
        {
          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
        },
      ],
      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
    },
  ],
  &quot;evaluationSpec&quot;: { # Describes the specification of the evaluation. # Required. The specification of the evaluation.
    &quot;querySetSpec&quot;: { # Describes the specification of the query set. # Optional. The specification of the query set.
      &quot;sampleQuerySet&quot;: &quot;A String&quot;, # Optional. The full resource name of the SampleQuerySet used for the evaluation, in the format of `projects/{project}/locations/{location}/sampleQuerySets/{sampleQuerySet}`.
    },
    &quot;searchRequest&quot;: { # Request message for SearchService.Search method. # Required. The search request that is used to perform the evaluation. Only the following fields within SearchRequest are supported; if any other fields are provided, an UNSUPPORTED error will be returned: * SearchRequest.serving_config * SearchRequest.branch * SearchRequest.canonical_filter * SearchRequest.query_expansion_spec * SearchRequest.spell_correction_spec * SearchRequest.content_search_spec * SearchRequest.user_pseudo_id
      &quot;boostSpec&quot;: { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
        &quot;conditionBoostSpecs&quot;: [ # Condition boost specifications. If a document matches multiple conditions in the specifications, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
          { # Boost applies to documents which match a condition.
            &quot;boost&quot;: 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
            &quot;boostControlSpec&quot;: { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
              &quot;attributeType&quot;: &quot;A String&quot;, # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
              &quot;controlPoints&quot;: [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
                { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
                  &quot;attributeValue&quot;: &quot;A String&quot;, # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
                  &quot;boostAmount&quot;: 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
                },
              ],
              &quot;fieldName&quot;: &quot;A String&quot;, # The name of the field whose value will be used to determine the boost amount.
              &quot;interpolationType&quot;: &quot;A String&quot;, # The interpolation type to be applied to connect the control points listed below.
            },
            &quot;condition&quot;: &quot;A String&quot;, # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID &quot;doc_1&quot; or &quot;doc_2&quot;, and color &quot;Red&quot; or &quot;Blue&quot;: `(document_id: ANY(&quot;doc_1&quot;, &quot;doc_2&quot;)) AND (color: ANY(&quot;Red&quot;, &quot;Blue&quot;))`
          },
        ],
      },
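      # Illustrative note only, not part of the request template: a hypothetical
      # boost_control_spec for freshness (the field name, enum values, and duration below
      # are assumptions) might look like
      #   {&quot;fieldName&quot;: &quot;publish_date&quot;, &quot;attributeType&quot;: &quot;FRESHNESS&quot;,
      #    &quot;interpolationType&quot;: &quot;LINEAR&quot;,
      #    &quot;controlPoints&quot;: [{&quot;attributeValue&quot;: &quot;7D&quot;, &quot;boostAmount&quot;: 0.8}]}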
      &quot;branch&quot;: &quot;A String&quot;, # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
      &quot;canonicalFilter&quot;: &quot;A String&quot;, # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
      &quot;contentSearchSpec&quot;: { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
        &quot;chunkSpec&quot;: { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
          &quot;numNextChunks&quot;: 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
          &quot;numPreviousChunks&quot;: 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
        },
        &quot;extractiveContentSpec&quot;: { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
          &quot;maxExtractiveAnswerCount&quot;: 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
          &quot;maxExtractiveSegmentCount&quot;: 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
          &quot;numNextSegments&quot;: 42, # Return at most `num_next_segments` segments after each selected segment.
          &quot;numPreviousSegments&quot;: 42, # Specifies whether to also include the segments adjacent to each selected segment. Return at most `num_previous_segments` segments before each selected segment.
          &quot;returnExtractiveSegmentScore&quot;: True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
        },
        &quot;searchResultMode&quot;: &quot;A String&quot;, # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
        &quot;snippetSpec&quot;: { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
          &quot;maxSnippetCount&quot;: 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count &gt; 0.
          &quot;referenceOnly&quot;: True or False, # [DEPRECATED] This field is deprecated and will have no effect on the snippet.
          &quot;returnSnippet&quot;: True or False, # If `true`, then return snippet. If no snippet can be generated, we return &quot;No snippet is available for this page.&quot; A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
        },
        &quot;summarySpec&quot;: { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
          &quot;ignoreAdversarialQuery&quot;: True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
          &quot;ignoreJailBreakingQuery&quot;: True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. &quot;Reply in the tone of a competing company&#x27;s CEO&quot;. If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
          &quot;ignoreLowRelevantContent&quot;: True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
          &quot;ignoreNonSummarySeekingQuery&quot;: True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
          &quot;includeCitations&quot;: True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud&#x27;s fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
          &quot;languageCode&quot;: &quot;A String&quot;, # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
          &quot;modelPromptSpec&quot;: { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
            &quot;preamble&quot;: &quot;A String&quot;, # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
          },
          &quot;modelSpec&quot;: { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
            &quot;version&quot;: &quot;A String&quot;, # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
          },
          &quot;multimodalSpec&quot;: { # Multimodal specification: Will return an image from specified source. If multiple sources are specified, the pick is a quality based decision. # Optional. Multimodal specification.
            &quot;imageSource&quot;: &quot;A String&quot;, # Optional. Source of image returned in the answer.
          },
          &quot;summaryResultCount&quot;: 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
          &quot;useSemanticChunks&quot;: True or False, # If true, answer will be generated from most relevant chunks from top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
        },
      },
      &quot;dataStoreSpecs&quot;: [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. For engines with a single data store, the specs directly under SearchRequest should be used.
        { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
          &quot;boostSpec&quot;: { # Boost specification to boost certain documents. # Optional. Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
            &quot;conditionBoostSpecs&quot;: [ # Condition boost specifications. If a document matches multiple conditions in the specifications, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
              { # Boost applies to documents which match a condition.
                &quot;boost&quot;: 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an upstream battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below are set. If both are set then the global boost is ignored and the more fine-grained boost_control_spec is applied.
                &quot;boostControlSpec&quot;: { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
                  &quot;attributeType&quot;: &quot;A String&quot;, # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
                  &quot;controlPoints&quot;: [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
                    { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
                      &quot;attributeValue&quot;: &quot;A String&quot;, # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM]`.
                      &quot;boostAmount&quot;: 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
                    },
                  ],
                  &quot;fieldName&quot;: &quot;A String&quot;, # The name of the field whose value will be used to determine the boost amount.
                  &quot;interpolationType&quot;: &quot;A String&quot;, # The interpolation type to be applied to connect the control points listed below.
                },
                &quot;condition&quot;: &quot;A String&quot;, # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID &quot;doc_1&quot; or &quot;doc_2&quot;, and color &quot;Red&quot; or &quot;Blue&quot;: `(document_id: ANY(&quot;doc_1&quot;, &quot;doc_2&quot;)) AND (color: ANY(&quot;Red&quot;, &quot;Blue&quot;))`
              },
            ],
          },
          &quot;customSearchOperators&quot;: &quot;A String&quot;, # Optional. Custom search operators which if specified will be used to filter results from workspace data stores. For more information on custom search operators, see [SearchOperators](https://support.google.com/cloudsearch/answer/6172299).
          &quot;dataStore&quot;: &quot;A String&quot;, # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. The path must include the project number, project id is not supported for this field.
          &quot;filter&quot;: &quot;A String&quot;, # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
        },
      ],
      &quot;displaySpec&quot;: { # Specifies features for display, like match highlighting. # Optional. Config for display feature, like match highlighting on search results.
        &quot;matchHighlightingCondition&quot;: &quot;A String&quot;, # The condition under which match highlighting should occur.
      },
      &quot;embeddingSpec&quot;: { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
        &quot;embeddingVectors&quot;: [ # The embedding vector used for retrieval. Limit to 1.
          { # Embedding vector.
            &quot;fieldPath&quot;: &quot;A String&quot;, # Embedding field path in schema.
            &quot;vector&quot;: [ # Query embedding vector.
              3.14,
            ],
          },
        ],
      },
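      # Illustrative note only, not part of the request template: a hypothetical
      # embedding entry (the field path and values are assumptions) might look like
      #   {&quot;fieldPath&quot;: &quot;doc_embedding&quot;, &quot;vector&quot;: [0.12, -0.03, 0.48]}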
      &quot;facetSpecs&quot;: [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
        { # A facet specification to perform faceted search.
          &quot;enableDynamicPosition&quot;: True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together with them. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enabled and all dynamic facets. For example, you may always want the rating facet returned in the response, but it&#x27;s not necessary to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of the rating facet in the response is determined automatically. As another example, assume you have the following facets in the request: * &quot;rating&quot;, enable_dynamic_position = true * &quot;price&quot;, enable_dynamic_position = false * &quot;brands&quot;, enable_dynamic_position = false and that dynamic facets are enabled, generating a facet `gender`. Then the final order of the facets in the response can be (&quot;price&quot;, &quot;brands&quot;, &quot;rating&quot;, &quot;gender&quot;) or (&quot;price&quot;, &quot;brands&quot;, &quot;gender&quot;, &quot;rating&quot;), depending on how the API orders the &quot;gender&quot; and &quot;rating&quot; facets. However, notice that &quot;price&quot; and &quot;brands&quot; are always ranked in the first and second positions because their enable_dynamic_position is false.
          &quot;excludedFilterKeys&quot;: [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet &quot;Red&quot; and 200 documents with the color facet &quot;Blue&quot;. A query containing the filter &quot;color:ANY(&quot;Red&quot;)&quot; and having &quot;color&quot; as FacetKey.key would by default return only &quot;Red&quot; documents in the search results, and also return &quot;Red&quot; with count 100 as the only color facet. Although there are also blue documents available, &quot;Blue&quot; would not be shown as an available facet value. If &quot;color&quot; is listed in &quot;excludedFilterKeys&quot;, then the query returns the facet values &quot;Red&quot; with count 100 and &quot;Blue&quot; with count 200, because the &quot;color&quot; key is now excluded from the filter. Because this field doesn&#x27;t affect search results, the search results are still correctly filtered to return only &quot;Red&quot; documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
            &quot;A String&quot;,
          ],
          &quot;facetKey&quot;: { # Specifies how a facet is computed. # Required. The facet key specification.
            &quot;caseInsensitive&quot;: True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
            &quot;contains&quot;: [ # Only get facet values that contain the given strings. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If set &quot;contains&quot; to &quot;2022&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot; and &quot;Sci-Fi &gt; 2022&quot;. Only supported on textual fields. Maximum is 10.
              &quot;A String&quot;,
            ],
            &quot;intervals&quot;: [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
              { # A floating point interval.
                &quot;exclusiveMaximum&quot;: 3.14, # Exclusive upper bound.
                &quot;exclusiveMinimum&quot;: 3.14, # Exclusive lower bound.
                &quot;maximum&quot;: 3.14, # Inclusive upper bound.
                &quot;minimum&quot;: 3.14, # Inclusive lower bound.
              },
            ],
            &quot;key&quot;: &quot;A String&quot;, # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
            &quot;orderBy&quot;: &quot;A String&quot;, # The order in which documents are returned. Allowed values are: * &quot;count desc&quot;, which means order by SearchResponse.Facet.values.count descending. * &quot;value desc&quot;, which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
            &quot;prefixes&quot;: [ # Only get facet values that start with the given string prefix. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If set &quot;prefixes&quot; to &quot;Action&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot; and &quot;Action &gt; 2021&quot;. Only supported on textual fields. Maximum is 10.
              &quot;A String&quot;,
            ],
            &quot;restrictedValues&quot;: [ # Only get facet values for the given restricted values. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If set &quot;restricted_values&quot; to &quot;Action &gt; 2022&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot;. Only supported on textual fields. Maximum is 10.
              &quot;A String&quot;,
            ],
          },
          &quot;limit&quot;: 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is &quot;healthcare_aggregation_key&quot;, the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
        },
      ],
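      # Illustrative note only, not part of the request template: a simple textual facet
      # spec (using the hypothetical &quot;category&quot; key from the examples above) might look like
      #   {&quot;facetKey&quot;: {&quot;key&quot;: &quot;category&quot;, &quot;prefixes&quot;: [&quot;Action&quot;]}, &quot;limit&quot;: 50}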
      &quot;filter&quot;: &quot;A String&quot;, # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field &#x27;name&#x27; in their schema. In this case the filter would look like this: filter --&gt; name:&#x27;ANY(&quot;king kong&quot;)&#x27; For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
      &quot;imageQuery&quot;: { # Specifies the image query input. # Raw image query.
        &quot;imageBytes&quot;: &quot;A String&quot;, # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
      },
      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn&#x27;t specified, the query language code is automatically detected, which may not be accurate.
      &quot;naturalLanguageQueryUnderstandingSpec&quot;: { # Specification to enable natural language understanding capabilities for search requests. # Config for natural language query understanding capabilities, such as extracting structured field filters from the query. Refer to [this documentation](https://cloud.google.com/generative-ai-app-builder/docs/natural-language-queries) for more information. If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
        &quot;extractedFilterBehavior&quot;: &quot;A String&quot;, # Optional. Controls behavior of how extracted filters are applied to the search. The default behavior depends on the request. For single datastore structured search, the default is `HARD_FILTER`. For multi-datastore search, the default behavior is `SOFT_BOOST`. Location-based filters are always applied as hard filters, and the `SOFT_BOOST` setting will not affect them. This field is only used if SearchRequest.natural_language_query_understanding_spec.filter_extraction_condition is set to FilterExtractionCondition.ENABLED.
        &quot;filterExtractionCondition&quot;: &quot;A String&quot;, # The condition under which filter extraction should occur. Server behavior defaults to `DISABLED`.
        &quot;geoSearchQueryDetectionFieldNames&quot;: [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
          &quot;A String&quot;,
        ],
      },
      &quot;offset&quot;: 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
      &quot;oneBoxPageSize&quot;: 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
      &quot;orderBy&quot;: &quot;A String&quot;, # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
      &quot;pageSize&quot;: 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
      &quot;pageToken&quot;: &quot;A String&quot;, # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
      &quot;params&quot;: { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: &quot;au&quot;` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
        &quot;a_key&quot;: &quot;&quot;,
      },
      &quot;personalizationSpec&quot;: { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
        &quot;mode&quot;: &quot;A String&quot;, # The personalization mode of the search request. Defaults to Mode.AUTO.
      },
      &quot;query&quot;: &quot;A String&quot;, # Raw search query.
      &quot;queryExpansionSpec&quot;: { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
        &quot;condition&quot;: &quot;A String&quot;, # The condition under which query expansion should occur. Default to Condition.DISABLED.
        &quot;pinUnexpandedResults&quot;: True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
      },
      &quot;rankingExpression&quot;: &quot;A String&quot;, # Optional. The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The syntax and supported features depend on the `ranking_expression_backend` value. If `ranking_expression_backend` is not provided, it defaults to `RANK_BY_EMBEDDING`. If ranking_expression_backend is not provided or set to `RANK_BY_EMBEDDING`, it should be a single function or multiple functions that are joined by &quot;+&quot;. * ranking_expression = function, { &quot; + &quot;, function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: pre-defined keywords, used for measure relevance between query and document. * `embedding_field_path`: the document embedding field used with query embedding vector. * `dotProduct`: embedding function between `embedding_field_path` and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. If ranking_expression_backend is set to `RANK_BY_FORMULA`, the following expression types (and combinations of those chained using + or * operators) are supported: * `double` * `signal` * `log(signal)` * `exp(signal)` * `rr(signal, double &gt; 0)` -- reciprocal rank transformation with second argument being a denominator constant. * `is_nan(signal)` -- returns 0 if signal is NaN, 1 otherwise. * `fill_nan(signal1, signal2 | double)` -- if signal1 is NaN, returns signal2 | double, else returns signal1. Here are a few examples of ranking formulas that use the supported ranking expression types: - `0.2 * semantic_similarity_score + 0.8 * log(keyword_similarity_score)` -- mostly rank by the logarithm of `keyword_similarity_score` with slight `semantic_smilarity_score` adjustment. - `0.2 * exp(fill_nan(semantic_similarity_score, 0)) + 0.3 * is_nan(keyword_similarity_score)` -- rank by the exponent of `semantic_similarity_score` filling the value with 0 if it&#x27;s NaN, also add constant 0.3 adjustment to the final score if `semantic_similarity_score` is NaN. - `0.2 * rr(semantic_similarity_score, 16) + 0.8 * rr(keyword_similarity_score, 16)` -- mostly rank by the reciprocal rank of `keyword_similarity_score` with slight adjustment of reciprocal rank of `semantic_smilarity_score`. The following signals are supported: * `semantic_similarity_score`: semantic similarity adjustment that is calculated using the embeddings generated by a proprietary Google model. This score determines how semantically similar a search query is to a document. * `keyword_similarity_score`: keyword match adjustment uses the Best Match 25 (BM25) ranking function. This score is calculated using a probabilistic model to estimate the probability that a document is relevant to a given query. * `relevance_score`: semantic relevance adjustment that uses a proprietary Google model to determine the meaning and intent behind a user&#x27;s query in context with the content in the documents. * `pctr_rank`: predicted conversion rate adjustment as a rank use predicted Click-through rate (pCTR) to gauge the relevance and attractiveness of a search result from a user&#x27;s perspective. A higher pCTR suggests that the result is more likely to satisfy the user&#x27;s query and intent, making it a valuable signal for ranking. 
* `freshness_rank`: freshness adjustment as a rank * `document_age`: The time in hours elapsed since the document was last updated, a floating-point number (e.g., 0.25 means 15 minutes). * `topicality_rank`: topicality adjustment as a rank. Uses proprietary Google model to determine the keyword-based overlap between the query and the document. * `base_rank`: the default rank of the result
      &quot;rankingExpressionBackend&quot;: &quot;A String&quot;, # Optional. The backend to use for the ranking expression evaluation.
      &quot;regionCode&quot;: &quot;A String&quot;, # The Unicode country/region code (CLDR) of a location, such as &quot;US&quot; and &quot;419&quot;. For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
      &quot;relevanceScoreSpec&quot;: { # The specification for returning the document relevance score. # Optional. The specification for returning the relevance score.
        &quot;returnRelevanceScore&quot;: True or False, # Optional. Whether to return the relevance score for search results. The higher the score, the more relevant the document is to the query.
      },
      &quot;relevanceThreshold&quot;: &quot;A String&quot;, # The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information. This feature is not supported for healthcare search.
      &quot;safeSearch&quot;: True or False, # Whether to turn on safe search. This is only supported for website search.
      &quot;searchAsYouTypeSpec&quot;: { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
        &quot;condition&quot;: &quot;A String&quot;, # The condition under which search as you type should occur. Default to Condition.DISABLED.
      },
      &quot;servingConfig&quot;: &quot;A String&quot;, # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.
      &quot;session&quot;: &quot;A String&quot;, # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query understanding. I.e., if the first query is &quot;How did Alphabet do in 2022?&quot; and the current query is &quot;How about 2023?&quot;, the current query will be interpreted as &quot;How did Alphabet do in 2023?&quot;. Example #2 (coordination between /search API calls and /answer API calls): Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
      &quot;sessionSpec&quot;: { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
        &quot;queryId&quot;: &quot;A String&quot;, # If set, the search result gets stored to the &quot;turn&quot; specified by this query ID. Example: Let&#x27;s say the session looks like this: session { name: &quot;.../sessions/xxx&quot; turns { query { text: &quot;What is foo?&quot; query_id: &quot;.../questions/yyy&quot; } answer: &quot;Foo is ...&quot; } turns { query { text: &quot;How about bar then?&quot; query_id: &quot;.../questions/zzz&quot; } } } The user can call /search API with a request like this: session: &quot;.../sessions/xxx&quot; session_spec { query_id: &quot;.../questions/zzz&quot; } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID &amp; query ID.
        &quot;searchResultPersistenceCount&quot;: 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is similar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
      },
      &quot;spellCorrectionSpec&quot;: { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
        &quot;mode&quot;: &quot;A String&quot;, # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
      },
      &quot;userInfo&quot;: { # Information of an end user. # Information about the end user. Highly recommended for analytics and personalization. UserInfo.user_agent is used to deduce `device_type` for analytics.
        &quot;timeZone&quot;: &quot;A String&quot;, # Optional. IANA time zone, e.g. Europe/Budapest.
        &quot;userAgent&quot;: &quot;A String&quot;, # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
        &quot;userId&quot;: &quot;A String&quot;, # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don&#x27;t set for anonymous users. Always use a hashed value for this ID. Don&#x27;t set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
      },
      &quot;userLabels&quot;: { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
        &quot;a_key&quot;: &quot;A String&quot;,
      },
      &quot;userPseudoId&quot;: &quot;A String&quot;, # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
    },
  },
  &quot;name&quot;: &quot;A String&quot;, # Identifier. The full resource name of the Evaluation, in the format of `projects/{project}/locations/{location}/evaluations/{evaluation}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
  &quot;qualityMetrics&quot;: { # Describes the metrics produced by the evaluation. # Output only. The metrics produced by the evaluation, averaged across all SampleQuerys in the SampleQuerySet. Only populated when the evaluation&#x27;s state is SUCCEEDED.
    &quot;docNdcg&quot;: { # Stores the metric values at specific top-k levels. # Normalized discounted cumulative gain (NDCG) per document, at various top-k cutoff levels. NDCG measures the ranking quality, giving higher relevance to top results. Example (top-3): Suppose SampleQuery with three retrieved documents (D1, D2, D3) and binary relevance judgements (1 for relevant, 0 for not relevant): Retrieved: [D3 (0), D1 (1), D2 (1)] Ideal: [D1 (1), D2 (1), D3 (0)] Calculate NDCG@3 for each SampleQuery: * DCG@3: 0/log2(1+1) + 1/log2(2+1) + 1/log2(3+1) = 1.13 * Ideal DCG@3: 1/log2(1+1) + 1/log2(2+1) + 0/log2(3+1) = 1.63 * NDCG@3: 1.13/1.63 = 0.693
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
    &quot;docPrecision&quot;: { # Stores the metric values at specific top-k levels. # Precision per document, at various top-k cutoff levels. Precision is the fraction of retrieved documents that are relevant. Example (top-5): * For a single SampleQuery, If 4 out of 5 retrieved documents in the top-5 are relevant, precision@5 = 4/5 = 0.8
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
    &quot;docRecall&quot;: { # Stores the metric values at specific top-k levels. # Recall per document, at various top-k cutoff levels. Recall is the fraction of relevant documents retrieved out of all relevant documents. Example (top-5): * For a single SampleQuery, If 3 out of 5 relevant documents are retrieved in the top-5, recall@5 = 3/5 = 0.6
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
    &quot;pageNdcg&quot;: { # Stores the metric values at specific top-k levels. # Normalized discounted cumulative gain (NDCG) per page, at various top-k cutoff levels. NDCG measures the ranking quality, giving higher relevance to top results. Example (top-3): Suppose SampleQuery with three retrieved pages (P1, P2, P3) and binary relevance judgements (1 for relevant, 0 for not relevant): Retrieved: [P3 (0), P1 (1), P2 (1)] Ideal: [P1 (1), P2 (1), P3 (0)] Calculate NDCG@3 for SampleQuery: * DCG@3: 0/log2(1+1) + 1/log2(2+1) + 1/log2(3+1) = 1.13 * Ideal DCG@3: 1/log2(1+1) + 1/log2(2+1) + 0/log2(3+1) = 1.63 * NDCG@3: 1.13/1.63 = 0.693
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
    &quot;pageRecall&quot;: { # Stores the metric values at specific top-k levels. # Recall per page, at various top-k cutoff levels. Recall is the fraction of relevant pages retrieved out of all relevant pages. Example (top-5): * For a single SampleQuery, if 3 out of 5 relevant pages are retrieved in the top-5, recall@5 = 3/5 = 0.6
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
  },
  &quot;state&quot;: &quot;A String&quot;, # Output only. The state of the evaluation.
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # This resource represents a long-running operation that is the result of a network API call.
  &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
  &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
    &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
    &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
      {
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
    ],
    &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
  },
  &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
    &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
  },
  &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
  &quot;response&quot;: { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
    &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
  },
}</pre>
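<p>The method above returns a long-running operation rather than a finished Evaluation: <code>done</code> becomes true once the run completes, and then either <code>error</code> or <code>response</code> is populated. The snippet below is a minimal sketch of starting an evaluation and polling that operation with this client library; it assumes application-default credentials, the <code>create(parent, body)</code> method documented on this page, and the standard <code>projects.locations.operations.get</code> surface. Every resource name in it is a placeholder.</p>
<pre>
# Hedged sketch: start an evaluation and wait for the long-running operation.
# All resource names below are placeholders.
import time

from googleapiclient.discovery import build

service = build(&quot;discoveryengine&quot;, &quot;v1beta&quot;)

parent = &quot;projects/my-project/locations/global&quot;  # placeholder
body = {
    &quot;evaluationSpec&quot;: {
        &quot;querySetSpec&quot;: {
            &quot;sampleQuerySet&quot;: parent + &quot;/sampleQuerySets/my-query-set&quot;,  # placeholder
        },
        &quot;searchRequest&quot;: {
            # Placeholder serving config of the engine or data store to evaluate.
            &quot;servingConfig&quot;: parent + &quot;/collections/default_collection/engines/my-engine/servingConfigs/default_serving_config&quot;,
        },
    },
}

operation = service.projects().locations().evaluations().create(
    parent=parent, body=body).execute()

# Poll until `done` is true; then either `error` or `response` is set.
while not operation.get(&quot;done&quot;):
    time.sleep(10)
    operation = service.projects().locations().operations().get(
        name=operation[&quot;name&quot;]).execute()

if &quot;error&quot; in operation:
    raise RuntimeError(operation[&quot;error&quot;].get(&quot;message&quot;))
evaluation_name = operation[&quot;response&quot;][&quot;name&quot;]  # name of the created Evaluation
</pre>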
</div>
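<p>Once the operation has finished, the Evaluation can be fetched with the <code>get</code> method documented below and its averaged metrics read from <code>qualityMetrics</code>, which is only populated when the state is SUCCEEDED. The snippet is a hedged sketch; the evaluation resource name is a placeholder.</p>
<pre>
# Hedged sketch: fetch a finished Evaluation and print its averaged metrics.
from googleapiclient.discovery import build

service = build(&quot;discoveryengine&quot;, &quot;v1beta&quot;)

# Placeholder; use the Evaluation name returned by the create operation above.
name = &quot;projects/my-project/locations/global/evaluations/my-evaluation&quot;

evaluation = service.projects().locations().evaluations().get(name=name).execute()
print(&quot;state:&quot;, evaluation.get(&quot;state&quot;))

# qualityMetrics is only populated when the state is SUCCEEDED.
metrics = evaluation.get(&quot;qualityMetrics&quot;, {})
for metric in (&quot;docRecall&quot;, &quot;docPrecision&quot;, &quot;docNdcg&quot;, &quot;pageRecall&quot;, &quot;pageNdcg&quot;):
    values = metrics.get(metric, {})
    print(metric, {k: values.get(k) for k in (&quot;top1&quot;, &quot;top3&quot;, &quot;top5&quot;, &quot;top10&quot;)})
</pre>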

<div class="method">
    <code class="details" id="get">get(name, x__xgafv=None)</code>
  <pre>Gets an Evaluation.

Args:
  name: string, Required. Full resource name of Evaluation, such as `projects/{project}/locations/{location}/evaluations/{evaluation}`. If the caller does not have permission to access the Evaluation, regardless of whether or not it exists, a PERMISSION_DENIED error is returned. If the requested Evaluation does not exist, a NOT_FOUND error is returned. (required)
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # An evaluation is a single execution (or run) of an evaluation process. It encapsulates the state of the evaluation and the resulting data.
  &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp the Evaluation was created at.
  &quot;endTime&quot;: &quot;A String&quot;, # Output only. Timestamp the Evaluation was completed at.
  &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. The error that occurred during evaluation. Only populated when the evaluation&#x27;s state is FAILED.
    &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
    &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
      {
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
    ],
    &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
  },
  &quot;errorSamples&quot;: [ # Output only. A sample of errors encountered while processing the request.
    { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
      &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
        {
          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
        },
      ],
      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
    },
  ],
  &quot;evaluationSpec&quot;: { # Describes the specification of the evaluation. # Required. The specification of the evaluation.
    &quot;querySetSpec&quot;: { # Describes the specification of the query set. # Optional. The specification of the query set.
      &quot;sampleQuerySet&quot;: &quot;A String&quot;, # Optional. The full resource name of the SampleQuerySet used for the evaluation, in the format of `projects/{project}/locations/{location}/sampleQuerySets/{sampleQuerySet}`.
    },
    &quot;searchRequest&quot;: { # Request message for SearchService.Search method. # Required. The search request that is used to perform the evaluation. Only the following fields within SearchRequest are supported; if any other fields are provided, an UNSUPPORTED error will be returned: * SearchRequest.serving_config * SearchRequest.branch * SearchRequest.canonical_filter * SearchRequest.query_expansion_spec * SearchRequest.spell_correction_spec * SearchRequest.content_search_spec * SearchRequest.user_pseudo_id
      &quot;boostSpec&quot;: { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
        &quot;conditionBoostSpecs&quot;: [ # Condition boost specifications. If a document matches multiple conditions in the specifications, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
          { # Boost applies to documents which match a condition.
            &quot;boost&quot;: 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an uphill battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below should be set. If both are set, then the global boost is ignored and the more fine-grained boost_control_spec is applied.
            &quot;boostControlSpec&quot;: { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
              &quot;attributeType&quot;: &quot;A String&quot;, # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the numerical case it is straightforward, i.e. attribute_value = numerical_field_value. In the case of freshness, however, attribute_value = (time.now() - datetime_field_value).
              &quot;controlPoints&quot;: [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
                { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
                  &quot;attributeValue&quot;: &quot;A String&quot;, # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM`.
                  &quot;boostAmount&quot;: 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
                },
              ],
              &quot;fieldName&quot;: &quot;A String&quot;, # The name of the field whose value will be used to determine the boost amount.
              &quot;interpolationType&quot;: &quot;A String&quot;, # The interpolation type to be applied to connect the control points listed below.
            },
            &quot;condition&quot;: &quot;A String&quot;, # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID &quot;doc_1&quot; or &quot;doc_2&quot;, and color &quot;Red&quot; or &quot;Blue&quot;: `(document_id: ANY(&quot;doc_1&quot;, &quot;doc_2&quot;)) AND (color: ANY(&quot;Red&quot;, &quot;Blue&quot;))`
          },
        ],
      },
      &quot;branch&quot;: &quot;A String&quot;, # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
      &quot;canonicalFilter&quot;: &quot;A String&quot;, # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
      &quot;contentSearchSpec&quot;: { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
        &quot;chunkSpec&quot;: { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
          &quot;numNextChunks&quot;: 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
          &quot;numPreviousChunks&quot;: 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
        },
        &quot;extractiveContentSpec&quot;: { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
          &quot;maxExtractiveAnswerCount&quot;: 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
          &quot;maxExtractiveSegmentCount&quot;: 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
          &quot;numNextSegments&quot;: 42, # Return at most `num_next_segments` segments after each selected segment.
          &quot;numPreviousSegments&quot;: 42, # Specifies whether to also include adjacent segments from each selected segment. Return at most `num_previous_segments` segments before each selected segment.
          &quot;returnExtractiveSegmentScore&quot;: True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
        },
        &quot;searchResultMode&quot;: &quot;A String&quot;, # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
        &quot;snippetSpec&quot;: { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
          &quot;maxSnippetCount&quot;: 42, # [DEPRECATED] This field is deprecated. To control snippet return, use `return_snippet` field. For backwards compatibility, we will return snippet if max_snippet_count &gt; 0.
          &quot;referenceOnly&quot;: True or False, # [DEPRECATED] This field is deprecated and will have no effect on the snippet.
          &quot;returnSnippet&quot;: True or False, # If `true`, then return snippet. If no snippet can be generated, we return &quot;No snippet is available for this page.&quot; A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
        },
        &quot;summarySpec&quot;: { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
          &quot;ignoreAdversarialQuery&quot;: True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
          &quot;ignoreJailBreakingQuery&quot;: True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. &quot;Reply in the tone of a competing company&#x27;s CEO&quot;. If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
          &quot;ignoreLowRelevantContent&quot;: True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
          &quot;ignoreNonSummarySeekingQuery&quot;: True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
          &quot;includeCitations&quot;: True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud&#x27;s fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
          &quot;languageCode&quot;: &quot;A String&quot;, # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
          &quot;modelPromptSpec&quot;: { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
            &quot;preamble&quot;: &quot;A String&quot;, # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
          },
          &quot;modelSpec&quot;: { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
            &quot;version&quot;: &quot;A String&quot;, # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
          },
          &quot;multimodalSpec&quot;: { # Multimodal specification: Will return an image from specified source. If multiple sources are specified, the pick is a quality based decision. # Optional. Multimodal specification.
            &quot;imageSource&quot;: &quot;A String&quot;, # Optional. Source of image returned in the answer.
          },
          &quot;summaryResultCount&quot;: 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
          &quot;useSemanticChunks&quot;: True or False, # If true, the answer will be generated from the most relevant chunks of the top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
        },
      },
      &quot;dataStoreSpecs&quot;: [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. For engines with a single data store, the specs directly under SearchRequest should be used.
        { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
          &quot;boostSpec&quot;: { # Boost specification to boost certain documents. # Optional. Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
            &quot;conditionBoostSpecs&quot;: [ # Condition boost specifications. If a document matches multiple conditions in the specifications, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
              { # Boost applies to documents which match a condition.
                &quot;boost&quot;: 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an uphill battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below should be set. If both are set, then the global boost is ignored and the more fine-grained boost_control_spec is applied.
                &quot;boostControlSpec&quot;: { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
                  &quot;attributeType&quot;: &quot;A String&quot;, # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the numerical case it is straightforward, i.e. attribute_value = numerical_field_value. In the case of freshness, however, attribute_value = (time.now() - datetime_field_value).
                  &quot;controlPoints&quot;: [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
                    { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
                      &quot;attributeValue&quot;: &quot;A String&quot;, # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM`.
                      &quot;boostAmount&quot;: 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
                    },
                  ],
                  &quot;fieldName&quot;: &quot;A String&quot;, # The name of the field whose value will be used to determine the boost amount.
                  &quot;interpolationType&quot;: &quot;A String&quot;, # The interpolation type to be applied to connect the control points listed below.
                },
                &quot;condition&quot;: &quot;A String&quot;, # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID &quot;doc_1&quot; or &quot;doc_2&quot;, and color &quot;Red&quot; or &quot;Blue&quot;: `(document_id: ANY(&quot;doc_1&quot;, &quot;doc_2&quot;)) AND (color: ANY(&quot;Red&quot;, &quot;Blue&quot;))`
              },
            ],
          },
          &quot;customSearchOperators&quot;: &quot;A String&quot;, # Optional. Custom search operators which if specified will be used to filter results from workspace data stores. For more information on custom search operators, see [SearchOperators](https://support.google.com/cloudsearch/answer/6172299).
          &quot;dataStore&quot;: &quot;A String&quot;, # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. The path must include the project number, project id is not supported for this field.
          &quot;filter&quot;: &quot;A String&quot;, # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
        },
      ],
      &quot;displaySpec&quot;: { # Specifies features for display, like match highlighting. # Optional. Config for display feature, like match highlighting on search results.
        &quot;matchHighlightingCondition&quot;: &quot;A String&quot;, # The condition under which match highlighting should occur.
      },
      &quot;embeddingSpec&quot;: { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
        &quot;embeddingVectors&quot;: [ # The embedding vector used for retrieval. Limit to 1.
          { # Embedding vector.
            &quot;fieldPath&quot;: &quot;A String&quot;, # Embedding field path in schema.
            &quot;vector&quot;: [ # Query embedding vector.
              3.14,
            ],
          },
        ],
      },
      &quot;facetSpecs&quot;: [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
        { # A facet specification to perform faceted search.
          &quot;enableDynamicPosition&quot;: True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together with the dynamic facets. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enabled and all dynamic facets. For example, you may always want to have the rating facet returned in the response, but it&#x27;s not necessary to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of the rating facet in the response is determined automatically. As another example, assume you have the following facets in the request: * &quot;rating&quot;, enable_dynamic_position = true * &quot;price&quot;, enable_dynamic_position = false * &quot;brands&quot;, enable_dynamic_position = false Suppose you also have dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be (&quot;price&quot;, &quot;brands&quot;, &quot;rating&quot;, &quot;gender&quot;) or (&quot;price&quot;, &quot;brands&quot;, &quot;gender&quot;, &quot;rating&quot;), depending on how the API orders the &quot;gender&quot; and &quot;rating&quot; facets. However, notice that &quot;price&quot; and &quot;brands&quot; are always ranked in the first and second positions because their enable_dynamic_position is false.
          &quot;excludedFilterKeys&quot;: [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet &quot;Red&quot; and 200 documents with the color facet &quot;Blue&quot;. A query containing the filter &quot;color:ANY(&quot;Red&quot;)&quot; and having &quot;color&quot; as FacetKey.key would by default return only &quot;Red&quot; documents in the search results, and also return &quot;Red&quot; with count 100 as the only color facet. Although there are also blue documents available, &quot;Blue&quot; would not be shown as an available facet value. If &quot;color&quot; is listed in &quot;excludedFilterKeys&quot;, then the query returns the facet values &quot;Red&quot; with count 100 and &quot;Blue&quot; with count 200, because the &quot;color&quot; key is now excluded from the filter. Because this field doesn&#x27;t affect search results, the search results are still correctly filtered to return only &quot;Red&quot; documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
            &quot;A String&quot;,
          ],
          &quot;facetKey&quot;: { # Specifies how a facet is computed. # Required. The facet key specification.
            &quot;caseInsensitive&quot;: True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
            &quot;contains&quot;: [ # Only get facet values that contain the given strings. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If set &quot;contains&quot; to &quot;2022&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot; and &quot;Sci-Fi &gt; 2022&quot;. Only supported on textual fields. Maximum is 10.
              &quot;A String&quot;,
            ],
            &quot;intervals&quot;: [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facet with text values. Maximum number of intervals is 30.
              { # A floating point interval.
                &quot;exclusiveMaximum&quot;: 3.14, # Exclusive upper bound.
                &quot;exclusiveMinimum&quot;: 3.14, # Exclusive lower bound.
                &quot;maximum&quot;: 3.14, # Inclusive upper bound.
                &quot;minimum&quot;: 3.14, # Inclusive lower bound.
              },
            ],
            &quot;key&quot;: &quot;A String&quot;, # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
            &quot;orderBy&quot;: &quot;A String&quot;, # The order in which documents are returned. Allowed values are: * &quot;count desc&quot;, which means order by SearchResponse.Facet.values.count descending. * &quot;value desc&quot;, which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
            &quot;prefixes&quot;: [ # Only get facet values that start with the given string prefix. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If set &quot;prefixes&quot; to &quot;Action&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot; and &quot;Action &gt; 2021&quot;. Only supported on textual fields. Maximum is 10.
              &quot;A String&quot;,
            ],
            &quot;restrictedValues&quot;: [ # Only get facet values for the given restricted values. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If set &quot;restricted_values&quot; to &quot;Action &gt; 2022&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot;. Only supported on textual fields. Maximum is 10.
              &quot;A String&quot;,
            ],
          },
          &quot;limit&quot;: 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is &quot;healthcare_aggregation_key&quot;, the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
        },
      ],
      &quot;filter&quot;: &quot;A String&quot;, # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field &#x27;name&#x27; in their schema. In this case the filter would look like this: filter --&gt; name:&#x27;ANY(&quot;king kong&quot;)&#x27; For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
      &quot;imageQuery&quot;: { # Specifies the image query input. # Raw image query.
        &quot;imageBytes&quot;: &quot;A String&quot;, # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
      },
      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn&#x27;t specified, the query language code is automatically detected, which may not be accurate.
      &quot;naturalLanguageQueryUnderstandingSpec&quot;: { # Specification to enable natural language understanding capabilities for search requests. # Config for natural language query understanding capabilities, such as extracting structured field filters from the query. Refer to [this documentation](https://cloud.google.com/generative-ai-app-builder/docs/natural-language-queries) for more information. If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
        &quot;extractedFilterBehavior&quot;: &quot;A String&quot;, # Optional. Controls behavior of how extracted filters are applied to the search. The default behavior depends on the request. For single datastore structured search, the default is `HARD_FILTER`. For multi-datastore search, the default behavior is `SOFT_BOOST`. Location-based filters are always applied as hard filters, and the `SOFT_BOOST` setting will not affect them. This field is only used if SearchRequest.natural_language_query_understanding_spec.filter_extraction_condition is set to FilterExtractionCondition.ENABLED.
        &quot;filterExtractionCondition&quot;: &quot;A String&quot;, # The condition under which filter extraction should occur. Server behavior defaults to `DISABLED`.
        &quot;geoSearchQueryDetectionFieldNames&quot;: [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
          &quot;A String&quot;,
        ],
      },
      &quot;offset&quot;: 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
      &quot;oneBoxPageSize&quot;: 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
      &quot;orderBy&quot;: &quot;A String&quot;, # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
      &quot;pageSize&quot;: 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
      &quot;pageToken&quot;: &quot;A String&quot;, # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
      &quot;params&quot;: { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: &quot;au&quot;` For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
        &quot;a_key&quot;: &quot;&quot;,
      },
      &quot;personalizationSpec&quot;: { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
        &quot;mode&quot;: &quot;A String&quot;, # The personalization mode of the search request. Defaults to Mode.AUTO.
      },
      &quot;query&quot;: &quot;A String&quot;, # Raw search query.
      &quot;queryExpansionSpec&quot;: { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
        &quot;condition&quot;: &quot;A String&quot;, # The condition under which query expansion should occur. Default to Condition.DISABLED.
        &quot;pinUnexpandedResults&quot;: True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
      },
      &quot;rankingExpression&quot;: &quot;A String&quot;, # Optional. The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The syntax and supported features depend on the `ranking_expression_backend` value. If `ranking_expression_backend` is not provided, it defaults to `RANK_BY_EMBEDDING`. If ranking_expression_backend is not provided or set to `RANK_BY_EMBEDDING`, it should be a single function or multiple functions that are joined by &quot;+&quot;. * ranking_expression = function, { &quot; + &quot;, function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: a predefined keyword used to measure relevance between query and document. * `embedding_field_path`: the document embedding field used with query embedding vector. * `dotProduct`: embedding function between `embedding_field_path` and query embedding vector. Example ranking expression: If a document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. If ranking_expression_backend is set to `RANK_BY_FORMULA`, the following expression types (and combinations of those chained using + or * operators) are supported: * `double` * `signal` * `log(signal)` * `exp(signal)` * `rr(signal, double &gt; 0)` -- reciprocal rank transformation with second argument being a denominator constant. * `is_nan(signal)` -- returns 0 if signal is NaN, 1 otherwise. * `fill_nan(signal1, signal2 | double)` -- if signal1 is NaN, returns signal2 | double, else returns signal1. Here are a few examples of ranking formulas that use the supported ranking expression types: - `0.2 * semantic_similarity_score + 0.8 * log(keyword_similarity_score)` -- mostly rank by the logarithm of `keyword_similarity_score` with a slight `semantic_similarity_score` adjustment. - `0.2 * exp(fill_nan(semantic_similarity_score, 0)) + 0.3 * is_nan(keyword_similarity_score)` -- rank by the exponent of `semantic_similarity_score`, filling the value with 0 if it&#x27;s NaN, and also add a constant 0.3 adjustment to the final score if `semantic_similarity_score` is NaN. - `0.2 * rr(semantic_similarity_score, 16) + 0.8 * rr(keyword_similarity_score, 16)` -- mostly rank by the reciprocal rank of `keyword_similarity_score` with a slight adjustment of the reciprocal rank of `semantic_similarity_score`. The following signals are supported: * `semantic_similarity_score`: semantic similarity adjustment that is calculated using the embeddings generated by a proprietary Google model. This score determines how semantically similar a search query is to a document. * `keyword_similarity_score`: keyword match adjustment that uses the Best Match 25 (BM25) ranking function. This score is calculated using a probabilistic model to estimate the probability that a document is relevant to a given query. * `relevance_score`: semantic relevance adjustment that uses a proprietary Google model to determine the meaning and intent behind a user&#x27;s query in context with the content in the documents. * `pctr_rank`: predicted conversion rate adjustment as a rank; uses predicted click-through rate (pCTR) to gauge the relevance and attractiveness of a search result from a user&#x27;s perspective. A higher pCTR suggests that the result is more likely to satisfy the user&#x27;s query and intent, making it a valuable signal for ranking. 
* `freshness_rank`: freshness adjustment as a rank * `document_age`: The time in hours elapsed since the document was last updated, a floating-point number (e.g., 0.25 means 15 minutes). * `topicality_rank`: topicality adjustment as a rank. Uses proprietary Google model to determine the keyword-based overlap between the query and the document. * `base_rank`: the default rank of the result
      &quot;rankingExpressionBackend&quot;: &quot;A String&quot;, # Optional. The backend to use for the ranking expression evaluation.
      &quot;regionCode&quot;: &quot;A String&quot;, # The Unicode country/region code (CLDR) of a location, such as &quot;US&quot; and &quot;419&quot;. For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
      &quot;relevanceScoreSpec&quot;: { # The specification for returning the document relevance score. # Optional. The specification for returning the relevance score.
        &quot;returnRelevanceScore&quot;: True or False, # Optional. Whether to return the relevance score for search results. The higher the score, the more relevant the document is to the query.
      },
      &quot;relevanceThreshold&quot;: &quot;A String&quot;, # The relevance threshold of the search results. Default to Google defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information. This feature is not supported for healthcare search.
      &quot;safeSearch&quot;: True or False, # Whether to turn on safe search. This is only supported for website search.
      &quot;searchAsYouTypeSpec&quot;: { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
        &quot;condition&quot;: &quot;A String&quot;, # The condition under which search as you type should occur. Default to Condition.DISABLED.
      },
      &quot;servingConfig&quot;: &quot;A String&quot;, # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search.
      &quot;session&quot;: &quot;A String&quot;, # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query understanding. I.e., if the first query is &quot;How did Alphabet do in 2022?&quot; and the current query is &quot;How about 2023?&quot;, the current query will be interpreted as &quot;How did Alphabet do in 2023?&quot;. Example #2 (coordination between /search API calls and /answer API calls): Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. The multi-turn Search feature is currently at the private GA stage. Please use the v1alpha or v1beta version instead before this feature is launched to public GA, or ask for allowlisting through the Google Support team.
      &quot;sessionSpec&quot;: { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
        &quot;queryId&quot;: &quot;A String&quot;, # If set, the search result gets stored to the &quot;turn&quot; specified by this query ID. Example: Let&#x27;s say the session looks like this: session { name: &quot;.../sessions/xxx&quot; turns { query { text: &quot;What is foo?&quot; query_id: &quot;.../questions/yyy&quot; } answer: &quot;Foo is ...&quot; } turns { query { text: &quot;How about bar then?&quot; query_id: &quot;.../questions/zzz&quot; } } } The user can call /search API with a request like this: session: &quot;.../sessions/xxx&quot; session_spec { query_id: &quot;.../questions/zzz&quot; } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID &amp; query ID.
        &quot;searchResultPersistenceCount&quot;: 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer api call. This field is similar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
      },
      &quot;spellCorrectionSpec&quot;: { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
        &quot;mode&quot;: &quot;A String&quot;, # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
      },
      &quot;userInfo&quot;: { # Information of an end user. # Information about the end user. Highly recommended for analytics and personalization. UserInfo.user_agent is used to deduce `device_type` for analytics.
        &quot;timeZone&quot;: &quot;A String&quot;, # Optional. IANA time zone, e.g. Europe/Budapest.
        &quot;userAgent&quot;: &quot;A String&quot;, # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
        &quot;userId&quot;: &quot;A String&quot;, # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don&#x27;t set for anonymous users. Always use a hashed value for this ID. Don&#x27;t set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
      },
      &quot;userLabels&quot;: { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
        &quot;a_key&quot;: &quot;A String&quot;,
      },
      &quot;userPseudoId&quot;: &quot;A String&quot;, # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
    },
  },
  &quot;name&quot;: &quot;A String&quot;, # Identifier. The full resource name of the Evaluation, in the format of `projects/{project}/locations/{location}/evaluations/{evaluation}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
  &quot;qualityMetrics&quot;: { # Describes the metrics produced by the evaluation. # Output only. The metrics produced by the evaluation, averaged across all SampleQuerys in the SampleQuerySet. Only populated when the evaluation&#x27;s state is SUCCEEDED.
    &quot;docNdcg&quot;: { # Stores the metric values at specific top-k levels. # Normalized discounted cumulative gain (NDCG) per document, at various top-k cutoff levels. NDCG measures the ranking quality, giving higher relevance to top results. Example (top-3): Suppose SampleQuery with three retrieved documents (D1, D2, D3) and binary relevance judgements (1 for relevant, 0 for not relevant): Retrieved: [D3 (0), D1 (1), D2 (1)] Ideal: [D1 (1), D2 (1), D3 (0)] Calculate NDCG@3 for each SampleQuery: * DCG@3: 0/log2(1+1) + 1/log2(2+1) + 1/log2(3+1) = 1.13 * Ideal DCG@3: 1/log2(1+1) + 1/log2(2+1) + 0/log2(3+1) = 1.63 * NDCG@3: 1.13/1.63 = 0.693
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
    &quot;docPrecision&quot;: { # Stores the metric values at specific top-k levels. # Precision per document, at various top-k cutoff levels. Precision is the fraction of retrieved documents that are relevant. Example (top-5): * For a single SampleQuery, if 4 out of 5 retrieved documents in the top-5 are relevant, precision@5 = 4/5 = 0.8
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
    &quot;docRecall&quot;: { # Stores the metric values at specific top-k levels. # Recall per document, at various top-k cutoff levels. Recall is the fraction of relevant documents retrieved out of all relevant documents. Example (top-5): * For a single SampleQuery, if 3 out of 5 relevant documents are retrieved in the top-5, recall@5 = 3/5 = 0.6
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
    &quot;pageNdcg&quot;: { # Stores the metric values at specific top-k levels. # Normalized discounted cumulative gain (NDCG) per page, at various top-k cutoff levels. NDCG measures the ranking quality, giving higher relevance to top results. Example (top-3): Suppose SampleQuery with three retrieved pages (P1, P2, P3) and binary relevance judgements (1 for relevant, 0 for not relevant): Retrieved: [P3 (0), P1 (1), P2 (1)] Ideal: [P1 (1), P2 (1), P3 (0)] Calculate NDCG@3 for SampleQuery: * DCG@3: 0/log2(1+1) + 1/log2(2+1) + 1/log2(3+1) = 1.13 * Ideal DCG@3: 1/log2(1+1) + 1/log2(2+1) + 0/log2(3+1) = 1.63 * NDCG@3: 1.13/1.63 = 0.693
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
    &quot;pageRecall&quot;: { # Stores the metric values at specific top-k levels. # Recall per page, at various top-k cutoff levels. Recall is the fraction of relevant pages retrieved out of all relevant pages. Example (top-5): * For a single SampleQuery, if 3 out of 5 relevant pages are retrieved in the top-5, recall@5 = 3/5 = 0.6
      &quot;top1&quot;: 3.14, # The top-1 value.
      &quot;top10&quot;: 3.14, # The top-10 value.
      &quot;top3&quot;: 3.14, # The top-3 value.
      &quot;top5&quot;: 3.14, # The top-5 value.
    },
  },
  &quot;state&quot;: &quot;A String&quot;, # Output only. The state of the evaluation.
}</pre>
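<p>The NDCG@3 arithmetic described under <code>docNdcg</code> and <code>pageNdcg</code> above can be reproduced in a few lines of Python. This is a minimal sketch: the binary relevance judgements and the 0.693 result are the hypothetical D1/D2/D3 values from those descriptions, not output of this API.</p>
<pre>
import math

# Binary relevance judgements from the docNdcg example (hypothetical values).
retrieved = [0, 1, 1]   # [D3 (0), D1 (1), D2 (1)] in retrieved order
ideal = [1, 1, 0]       # [D1 (1), D2 (1), D3 (0)] in ideal order

def dcg(relevances):
    # DCG@k = sum over 1-indexed positions i of rel_i / log2(i + 1).
    return sum(rel / math.log2(i + 1) for i, rel in enumerate(relevances, start=1))

ndcg_at_3 = dcg(retrieved) / dcg(ideal)   # 1.13 / 1.63
print(round(ndcg_at_3, 3))                # 0.693
</pre>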
</div>

<div class="method">
    <code class="details" id="list">list(parent, pageSize=None, pageToken=None, x__xgafv=None)</code>
  <pre>Gets a list of Evaluations.

Args:
  parent: string, Required. The parent location resource name, such as `projects/{project}/locations/{location}`. If the caller does not have permission to list Evaluations under this location, regardless of whether or not this location exists, a `PERMISSION_DENIED` error is returned. (required)
  pageSize: integer, Optional. Maximum number of Evaluations to return. If unspecified, defaults to 100. The maximum allowed value is 1000. Values above 1000 will be coerced to 1000. If this field is negative, an `INVALID_ARGUMENT` error is returned.
  pageToken: string, Optional. A page token ListEvaluationsResponse.next_page_token, received from a previous EvaluationService.ListEvaluations call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to EvaluationService.ListEvaluations must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

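Example:
  A minimal paging sketch. It assumes a `service` object created with
  googleapiclient.discovery.build(&#x27;discoveryengine&#x27;, &#x27;v1beta&#x27;) using default
  credentials; the parent value below is a hypothetical placeholder.

    evaluations = service.projects().locations().evaluations()
    request = evaluations.list(parent=&#x27;projects/my-project/locations/global&#x27;)
    while request is not None:
      response = request.execute()
      for evaluation in response.get(&#x27;evaluations&#x27;, []):
        print(evaluation[&#x27;name&#x27;], evaluation.get(&#x27;state&#x27;))
      # Page through results with the standard google-api-python-client pattern.
      request = evaluations.list_next(previous_request=request, previous_response=response)
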
Returns:
  An object of the form:

    { # Response message for EvaluationService.ListEvaluations method.
  &quot;evaluations&quot;: [ # The Evaluations.
    { # An evaluation is a single execution (or run) of an evaluation process. It encapsulates the state of the evaluation and the resulting data.
      &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp the Evaluation was created at.
      &quot;endTime&quot;: &quot;A String&quot;, # Output only. Timestamp the Evaluation was completed at.
      &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. The error that occurred during evaluation. Only populated when the evaluation&#x27;s state is FAILED.
        &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
        &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
          {
            &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
          },
        ],
        &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
      },
      &quot;errorSamples&quot;: [ # Output only. A sample of errors encountered while processing the request.
        { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
          &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
          &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
            {
              &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
            },
          ],
          &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
        },
      ],
      &quot;evaluationSpec&quot;: { # Describes the specification of the evaluation. # Required. The specification of the evaluation.
        &quot;querySetSpec&quot;: { # Describes the specification of the query set. # Optional. The specification of the query set.
          &quot;sampleQuerySet&quot;: &quot;A String&quot;, # Optional. The full resource name of the SampleQuerySet used for the evaluation, in the format of `projects/{project}/locations/{location}/sampleQuerySets/{sampleQuerySet}`.
        },
        &quot;searchRequest&quot;: { # Request message for SearchService.Search method. # Required. The search request that is used to perform the evaluation. Only the following fields within SearchRequest are supported; if any other fields are provided, an UNSUPPORTED error will be returned: * SearchRequest.serving_config * SearchRequest.branch * SearchRequest.canonical_filter * SearchRequest.query_expansion_spec * SearchRequest.spell_correction_spec * SearchRequest.content_search_spec * SearchRequest.user_pseudo_id
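          # A minimal evaluation search request might set only a few of the supported
          # fields listed above, for example (hypothetical resource name):
          #   {
          #     &quot;servingConfig&quot;: &quot;projects/my-project/locations/global/collections/default_collection/engines/my-engine/servingConfigs/default_serving_config&quot;,
          #     &quot;contentSearchSpec&quot;: {&quot;searchResultMode&quot;: &quot;DOCUMENTS&quot;},
          #   }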
          &quot;boostSpec&quot;: { # Boost specification to boost certain documents. # Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
            &quot;conditionBoostSpecs&quot;: [ # Condition boost specifications. If a document matches multiple conditions in the specifications, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
              { # Boost applies to documents which match a condition.
                &quot;boost&quot;: 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an uphill battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below should be set. If both are set, then the global boost is ignored and the more fine-grained boost_control_spec is applied.
                &quot;boostControlSpec&quot;: { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
                  &quot;attributeType&quot;: &quot;A String&quot;, # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
                  &quot;controlPoints&quot;: [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
                    { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
                      &quot;attributeValue&quot;: &quot;A String&quot;, # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM`.
                      &quot;boostAmount&quot;: 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
                    },
                  ],
                  &quot;fieldName&quot;: &quot;A String&quot;, # The name of the field whose value will be used to determine the boost amount.
                  &quot;interpolationType&quot;: &quot;A String&quot;, # The interpolation type to be applied to connect the control points listed below.
                },
                &quot;condition&quot;: &quot;A String&quot;, # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID &quot;doc_1&quot; or &quot;doc_2&quot;, and color &quot;Red&quot; or &quot;Blue&quot;: `(document_id: ANY(&quot;doc_1&quot;, &quot;doc_2&quot;)) AND (color: ANY(&quot;Red&quot;, &quot;Blue&quot;))`
              },
            ],
          },
          &quot;branch&quot;: &quot;A String&quot;, # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
          &quot;canonicalFilter&quot;: &quot;A String&quot;, # The default filter that is applied when a user performs a search without checking any filters on the search page. This filter is applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results, this filter is used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
          &quot;contentSearchSpec&quot;: { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
            &quot;chunkSpec&quot;: { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
              &quot;numNextChunks&quot;: 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
              &quot;numPreviousChunks&quot;: 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
            },
            &quot;extractiveContentSpec&quot;: { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
              &quot;maxExtractiveAnswerCount&quot;: 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
              &quot;maxExtractiveSegmentCount&quot;: 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.
              &quot;numNextSegments&quot;: 42, # Return at most `num_next_segments` segments after each selected segment.
              &quot;numPreviousSegments&quot;: 42, # Specifies whether to also include adjacent segments for each selected segment. Return at most `num_previous_segments` segments before each selected segment.
              &quot;returnExtractiveSegmentScore&quot;: True or False, # Specifies whether to return the confidence score from the extractive segments in each search result. This feature is available only for new or allowlisted data stores. To allowlist your data store, contact your Customer Engineer. The default value is `false`.
            },
            &quot;searchResultMode&quot;: &quot;A String&quot;, # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`.
            &quot;snippetSpec&quot;: { # A specification for configuring snippets in a search response. # If `snippetSpec` is not specified, snippets are not included in the search response.
              &quot;maxSnippetCount&quot;: 42, # [DEPRECATED] This field is deprecated. To control snippet return, use the `return_snippet` field. For backwards compatibility, we will return a snippet if max_snippet_count &gt; 0.
              &quot;referenceOnly&quot;: True or False, # [DEPRECATED] This field is deprecated and will have no effect on the snippet.
              &quot;returnSnippet&quot;: True or False, # If `true`, then return the snippet. If no snippet can be generated, we return &quot;No snippet is available for this page.&quot; A `snippet_status` with `SUCCESS` or `NO_SNIPPET_AVAILABLE` will also be returned.
            },
            &quot;summarySpec&quot;: { # A specification for configuring a summary returned in a search response. # If `summarySpec` is not specified, summaries are not included in the search response.
              &quot;ignoreAdversarialQuery&quot;: True or False, # Specifies whether to filter out adversarial queries. The default value is `false`. Google employs search-query classification to detect adversarial queries. No summary is returned if the search query is classified as an adversarial query. For example, a user might ask a question regarding negative comments about the company or submit a query designed to generate unsafe, policy-violating output. If this field is set to `true`, we skip generating summaries for adversarial queries and return fallback messages instead.
              &quot;ignoreJailBreakingQuery&quot;: True or False, # Optional. Specifies whether to filter out jail-breaking queries. The default value is `false`. Google employs search-query classification to detect jail-breaking queries. No summary is returned if the search query is classified as a jail-breaking query. A user might add instructions to the query to change the tone, style, language, content of the answer, or ask the model to act as a different entity, e.g. &quot;Reply in the tone of a competing company&#x27;s CEO&quot;. If this field is set to `true`, we skip generating summaries for jail-breaking queries and return fallback messages instead.
              &quot;ignoreLowRelevantContent&quot;: True or False, # Specifies whether to filter out queries that have low relevance. The default value is `false`. If this field is set to `false`, all search results are used regardless of relevance to generate answers. If set to `true`, only queries with high relevance search results will generate answers.
              &quot;ignoreNonSummarySeekingQuery&quot;: True or False, # Specifies whether to filter out queries that are not summary-seeking. The default value is `false`. Google employs search-query classification to detect summary-seeking queries. No summary is returned if the search query is classified as a non-summary seeking query. For example, `why is the sky blue` and `Who is the best soccer player in the world?` are summary-seeking queries, but `SFO airport` and `world cup 2026` are not. They are most likely navigational queries. If this field is set to `true`, we skip generating summaries for non-summary seeking queries and return fallback messages instead.
              &quot;includeCitations&quot;: True or False, # Specifies whether to include citations in the summary. The default value is `false`. When this field is set to `true`, summaries include in-line citation numbers. Example summary including citations: BigQuery is Google Cloud&#x27;s fully managed and completely serverless enterprise data warehouse [1]. BigQuery supports all data types, works across clouds, and has built-in machine learning and business intelligence, all within a unified platform [2, 3]. The citation numbers refer to the returned search results and are 1-indexed. For example, [1] means that the sentence is attributed to the first search result. [2, 3] means that the sentence is attributed to both the second and third search results.
              &quot;languageCode&quot;: &quot;A String&quot;, # Language code for Summary. Use language tags defined by [BCP47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Note: This is an experimental feature.
              &quot;modelPromptSpec&quot;: { # Specification of the prompt to use with the model. # If specified, the spec will be used to modify the prompt provided to the LLM.
                &quot;preamble&quot;: &quot;A String&quot;, # Text at the beginning of the prompt that instructs the assistant. Examples are available in the user guide.
              },
              &quot;modelSpec&quot;: { # Specification of the model. # If specified, the spec will be used to modify the model specification provided to the LLM.
                &quot;version&quot;: &quot;A String&quot;, # The model version used to generate the summary. Supported values are: * `stable`: string. Default value when no value is specified. Uses a generally available, fine-tuned model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models). * `preview`: string. (Public preview) Uses a preview model. For more information, see [Answer generation model versions and lifecycle](https://cloud.google.com/generative-ai-app-builder/docs/answer-generation-models).
              },
              &quot;multimodalSpec&quot;: { # Multimodal specification: Will return an image from specified source. If multiple sources are specified, the pick is a quality based decision. # Optional. Multimodal specification.
                &quot;imageSource&quot;: &quot;A String&quot;, # Optional. Source of image returned in the answer.
              },
              &quot;summaryResultCount&quot;: 42, # The number of top results to generate the summary from. If the number of results returned is less than `summaryResultCount`, the summary is generated from all of the results. At most 10 results for documents mode, or 50 for chunks mode, can be used to generate a summary. The chunks mode is used when SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
              &quot;useSemanticChunks&quot;: True or False, # If true, the answer will be generated from the most relevant chunks of the top search results. This feature will improve summary quality. Note that with this feature enabled, not all top search results will be referenced and included in the reference list, so the citation source index only points to the search results listed in the reference list.
            },
          },
          &quot;dataStoreSpecs&quot;: [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. For engines with a single data store, the specs directly under SearchRequest should be used.
            { # A struct to define data stores to filter on in a search call and configurations for those data stores. Otherwise, an `INVALID_ARGUMENT` error is returned.
              &quot;boostSpec&quot;: { # Boost specification to boost certain documents. # Optional. Boost specification to boost certain documents. For more information on boosting, see [Boosting](https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results)
                &quot;conditionBoostSpecs&quot;: [ # Condition boost specifications. If a document matches multiple conditions in the specifications, boost scores from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20.
                  { # Boost applies to documents which match a condition.
                    &quot;boost&quot;: 3.14, # Strength of the condition boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the document a big promotion. However, it does not necessarily mean that the boosted document will be the top result at all times, nor that other documents will be excluded. Results could still be shown even when none of them matches the condition. And results that are significantly more relevant to the search query can still trump your heavily favored but irrelevant documents. Setting to -1.0 gives the document a big demotion. However, results that are deeply relevant might still be shown. The document will have an uphill battle to get a fairly high ranking, but it is not blocked out completely. Setting to 0.0 means no boost applied. The boosting condition is ignored. Only one of the (condition, boost) combination or the boost_control_spec below should be set. If both are set, then the global boost is ignored and the more fine-grained boost_control_spec is applied.
                    &quot;boostControlSpec&quot;: { # Specification for custom ranking based on customer specified attribute value. It provides more controls for customized ranking than the simple (condition, boost) combination above. # Complex specification for custom ranking based on customer defined attribute value.
                      &quot;attributeType&quot;: &quot;A String&quot;, # The attribute type to be used to determine the boost amount. The attribute value can be derived from the field value of the specified field_name. In the case of numerical it is straightforward i.e. attribute_value = numerical_field_value. In the case of freshness however, attribute_value = (time.now() - datetime_field_value).
                      &quot;controlPoints&quot;: [ # The control points used to define the curve. The monotonic function (defined through the interpolation_type above) passes through the control points listed here.
                        { # The control points used to define the curve. The curve defined through these control points can only be monotonically increasing or decreasing(constant values are acceptable).
                          &quot;attributeValue&quot;: &quot;A String&quot;, # Can be one of: 1. The numerical field value. 2. The duration spec for freshness: The value must be formatted as an XSD `dayTimeDuration` value (a restricted subset of an ISO 8601 duration value). The pattern for this is: `nDnM`.
                          &quot;boostAmount&quot;: 3.14, # The value between -1 to 1 by which to boost the score if the attribute_value evaluates to the value specified above.
                        },
                      ],
                      &quot;fieldName&quot;: &quot;A String&quot;, # The name of the field whose value will be used to determine the boost amount.
                      &quot;interpolationType&quot;: &quot;A String&quot;, # The interpolation type to be applied to connect the control points listed below.
                    },
                    &quot;condition&quot;: &quot;A String&quot;, # An expression which specifies a boost condition. The syntax and supported fields are the same as a filter expression. See SearchRequest.filter for detail syntax and limitations. Examples: * To boost documents with document ID &quot;doc_1&quot; or &quot;doc_2&quot;, and color &quot;Red&quot; or &quot;Blue&quot;: `(document_id: ANY(&quot;doc_1&quot;, &quot;doc_2&quot;)) AND (color: ANY(&quot;Red&quot;, &quot;Blue&quot;))`
                  },
                ],
              },
              &quot;customSearchOperators&quot;: &quot;A String&quot;, # Optional. Custom search operators which if specified will be used to filter results from workspace data stores. For more information on custom search operators, see [SearchOperators](https://support.google.com/cloudsearch/answer/6172299).
              &quot;dataStore&quot;: &quot;A String&quot;, # Required. Full resource name of DataStore, such as `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. The path must include the project number; the project ID is not supported for this field.
              &quot;filter&quot;: &quot;A String&quot;, # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
            },
          ],
          &quot;displaySpec&quot;: { # Specifies features for display, like match highlighting. # Optional. Config for display feature, like match highlighting on search results.
            &quot;matchHighlightingCondition&quot;: &quot;A String&quot;, # The condition under which match highlighting should occur.
          },
          &quot;embeddingSpec&quot;: { # The specification that uses customized query embedding vector to do semantic document retrieval. # Uses the provided embedding to do additional semantic document retrieval. The retrieval is based on the dot product of SearchRequest.EmbeddingSpec.EmbeddingVector.vector and the document embedding that is provided in SearchRequest.EmbeddingSpec.EmbeddingVector.field_path. If SearchRequest.EmbeddingSpec.EmbeddingVector.field_path is not provided, it will use ServingConfig.EmbeddingConfig.field_path.
            &quot;embeddingVectors&quot;: [ # The embedding vector used for retrieval. Limit to 1.
              { # Embedding vector.
                &quot;fieldPath&quot;: &quot;A String&quot;, # Embedding field path in schema.
                &quot;vector&quot;: [ # Query embedding vector.
                  3.14,
                ],
              },
            ],
          },
          &quot;facetSpecs&quot;: [ # Facet specifications for faceted search. If empty, no facets are returned. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
            { # A facet specification to perform faceted search.
              &quot;enableDynamicPosition&quot;: True or False, # Enables dynamic position for this facet. If set to true, the position of this facet among all facets in the response is determined automatically. If dynamic facets are enabled, it is ordered together with them. If set to false, the position of this facet in the response is the same as in the request, and it is ranked before the facets with dynamic position enabled and all dynamic facets. For example, you may always want to have the rating facet returned in the response, but it&#x27;s not necessary to always display the rating facet at the top. In that case, you can set enable_dynamic_position to true so that the position of the rating facet in the response is determined automatically. As another example, assume you have the following facets in the request: * &quot;rating&quot;, enable_dynamic_position = true * &quot;price&quot;, enable_dynamic_position = false * &quot;brands&quot;, enable_dynamic_position = false You also have dynamic facets enabled, which generates a facet `gender`. Then the final order of the facets in the response can be (&quot;price&quot;, &quot;brands&quot;, &quot;rating&quot;, &quot;gender&quot;) or (&quot;price&quot;, &quot;brands&quot;, &quot;gender&quot;, &quot;rating&quot;), depending on how the API orders the &quot;gender&quot; and &quot;rating&quot; facets. However, notice that &quot;price&quot; and &quot;brands&quot; are always ranked in the first and second positions because their enable_dynamic_position is false.
              &quot;excludedFilterKeys&quot;: [ # List of keys to exclude when faceting. By default, FacetKey.key is not excluded from the filter unless it is listed in this field. Listing a facet key in this field allows its values to appear as facet results, even when they are filtered out of search results. Using this field does not affect what search results are returned. For example, suppose there are 100 documents with the color facet &quot;Red&quot; and 200 documents with the color facet &quot;Blue&quot;. A query containing the filter &quot;color:ANY(&quot;Red&quot;)&quot; and having &quot;color&quot; as FacetKey.key would by default return only &quot;Red&quot; documents in the search results, and also return &quot;Red&quot; with count 100 as the only color facet. Although there are also blue documents available, &quot;Blue&quot; would not be shown as an available facet value. If &quot;color&quot; is listed in &quot;excludedFilterKeys&quot;, then the query returns the facet values &quot;Red&quot; with count 100 and &quot;Blue&quot; with count 200, because the &quot;color&quot; key is now excluded from the filter. Because this field doesn&#x27;t affect search results, the search results are still correctly filtered to return only &quot;Red&quot; documents. A maximum of 100 values are allowed. Otherwise, an `INVALID_ARGUMENT` error is returned.
                &quot;A String&quot;,
              ],
              &quot;facetKey&quot;: { # Specifies how a facet is computed. # Required. The facet key specification.
                &quot;caseInsensitive&quot;: True or False, # True to make facet keys case insensitive when getting faceting values with prefixes or contains; false otherwise.
                &quot;contains&quot;: [ # Only get facet values that contain the given strings. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If &quot;contains&quot; is set to &quot;2022&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot; and &quot;Sci-Fi &gt; 2022&quot;. Only supported on textual fields. Maximum is 10.
                  &quot;A String&quot;,
                ],
                &quot;intervals&quot;: [ # Set only if values should be bucketed into intervals. Must be set for facets with numerical values. Must not be set for facets with text values. Maximum number of intervals is 30.
                  { # A floating point interval.
                    &quot;exclusiveMaximum&quot;: 3.14, # Exclusive upper bound.
                    &quot;exclusiveMinimum&quot;: 3.14, # Exclusive lower bound.
                    &quot;maximum&quot;: 3.14, # Inclusive upper bound.
                    &quot;minimum&quot;: 3.14, # Inclusive lower bound.
                  },
                ],
                &quot;key&quot;: &quot;A String&quot;, # Required. Supported textual and numerical facet keys in Document object, over which the facet values are computed. Facet key is case-sensitive.
                &quot;orderBy&quot;: &quot;A String&quot;, # The order in which documents are returned. Allowed values are: * &quot;count desc&quot;, which means order by SearchResponse.Facet.values.count descending. * &quot;value desc&quot;, which means order by SearchResponse.Facet.values.value descending. Only applies to textual facets. If not set, textual values are sorted in [natural order](https://en.wikipedia.org/wiki/Natural_sort_order); numerical intervals are sorted in the order given by FacetSpec.FacetKey.intervals.
                &quot;prefixes&quot;: [ # Only get facet values that start with the given string prefix. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If &quot;prefixes&quot; is set to &quot;Action&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot; and &quot;Action &gt; 2021&quot;. Only supported on textual fields. Maximum is 10.
                  &quot;A String&quot;,
                ],
                &quot;restrictedValues&quot;: [ # Only get facet values for the given restricted values. For example, suppose &quot;category&quot; has three values &quot;Action &gt; 2022&quot;, &quot;Action &gt; 2021&quot; and &quot;Sci-Fi &gt; 2022&quot;. If &quot;restricted_values&quot; is set to &quot;Action &gt; 2022&quot;, the &quot;category&quot; facet only contains &quot;Action &gt; 2022&quot;. Only supported on textual fields. Maximum is 10.
                  &quot;A String&quot;,
                ],
              },
              &quot;limit&quot;: 42, # Maximum facet values that are returned for this facet. If unspecified, defaults to 20. The maximum allowed value is 300. Values above 300 are coerced to 300. For aggregation in healthcare search, when the [FacetKey.key] is &quot;healthcare_aggregation_key&quot;, the limit will be overridden to 10,000 internally, regardless of the value set here. If this field is negative, an `INVALID_ARGUMENT` is returned.
            },
          ],
          &quot;filter&quot;: &quot;A String&quot;, # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field &#x27;name&#x27; in their schema. In this case the filter would look like this: filter --&gt; name:&#x27;ANY(&quot;king kong&quot;)&#x27; For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
          &quot;imageQuery&quot;: { # Specifies the image query input. # Raw image query.
            &quot;imageBytes&quot;: &quot;A String&quot;, # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP.
          },
          &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). This field helps to better interpret the query. If a value isn&#x27;t specified, the query language code is automatically detected, which may not be accurate.
          &quot;naturalLanguageQueryUnderstandingSpec&quot;: { # Specification to enable natural language understanding capabilities for search requests. # Config for natural language query understanding capabilities, such as extracting structured field filters from the query. Refer to [this documentation](https://cloud.google.com/generative-ai-app-builder/docs/natural-language-queries) for more information. If `naturalLanguageQueryUnderstandingSpec` is not specified, no additional natural language query understanding will be done.
            &quot;extractedFilterBehavior&quot;: &quot;A String&quot;, # Optional. Controls behavior of how extracted filters are applied to the search. The default behavior depends on the request. For single datastore structured search, the default is `HARD_FILTER`. For multi-datastore search, the default behavior is `SOFT_BOOST`. Location-based filters are always applied as hard filters, and the `SOFT_BOOST` setting will not affect them. This field is only used if SearchRequest.natural_language_query_understanding_spec.filter_extraction_condition is set to FilterExtractionCondition.ENABLED.
            &quot;filterExtractionCondition&quot;: &quot;A String&quot;, # The condition under which filter extraction should occur. Server behavior defaults to `DISABLED`.
            &quot;geoSearchQueryDetectionFieldNames&quot;: [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names.
              &quot;A String&quot;,
            ],
          },
          &quot;offset&quot;: 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned.
          &quot;oneBoxPageSize&quot;: 42, # The maximum number of results to return for OneBox. This applies to each OneBox type individually. Default number is 10.
          &quot;orderBy&quot;: &quot;A String&quot;, # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering the website search results, see [Order web search results](https://cloud.google.com/generative-ai-app-builder/docs/order-web-search-results). For more information on ordering the healthcare search results, see [Order healthcare search results](https://cloud.google.com/generative-ai-app-builder/docs/order-hc-results). If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.
          &quot;pageSize&quot;: 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned.
          &quot;pageToken&quot;: &quot;A String&quot;, # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
          &quot;params&quot;: { # Additional search parameters. For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. For example, `user_country_code: &quot;au&quot;`. For available codes, see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. For example, `search_type: 1`
            &quot;a_key&quot;: &quot;&quot;,
          },
          &quot;personalizationSpec&quot;: { # The specification for personalization. # The specification for personalization. Notice that if both ServingConfig.personalization_spec and SearchRequest.personalization_spec are set, SearchRequest.personalization_spec overrides ServingConfig.personalization_spec.
            &quot;mode&quot;: &quot;A String&quot;, # The personalization mode of the search request. Defaults to Mode.AUTO.
          },
          &quot;query&quot;: &quot;A String&quot;, # Raw search query.
          &quot;queryExpansionSpec&quot;: { # Specification to determine under which conditions query expansion should occur. # The query expansion specification that specifies the conditions under which query expansion occurs.
            &quot;condition&quot;: &quot;A String&quot;, # The condition under which query expansion should occur. Defaults to Condition.DISABLED.
            &quot;pinUnexpandedResults&quot;: True or False, # Whether to pin unexpanded results. If this field is set to true, unexpanded products are always at the top of the search results, followed by the expanded results.
          },
          &quot;rankingExpression&quot;: &quot;A String&quot;, # Optional. The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The syntax and supported features depend on the `ranking_expression_backend` value. If `ranking_expression_backend` is not provided, it defaults to `RANK_BY_EMBEDDING`. If ranking_expression_backend is not provided or set to `RANK_BY_EMBEDDING`, it should be a single function or multiple functions that are joined by &quot;+&quot;. * ranking_expression = function, { &quot; + &quot;, function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: * `relevance_score`: a pre-defined keyword used to measure relevance between the query and the document. * `embedding_field_path`: the document embedding field used with the query embedding vector. * `dotProduct`: the embedding function between `embedding_field_path` and the query embedding vector. Example ranking expression: If a document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. If ranking_expression_backend is set to `RANK_BY_FORMULA`, the following expression types (and combinations of those chained using + or * operators) are supported: * `double` * `signal` * `log(signal)` * `exp(signal)` * `rr(signal, double &gt; 0)` -- reciprocal rank transformation with second argument being a denominator constant. * `is_nan(signal)` -- returns 0 if signal is NaN, 1 otherwise. * `fill_nan(signal1, signal2 | double)` -- if signal1 is NaN, returns signal2 | double, else returns signal1. Here are a few examples of ranking formulas that use the supported ranking expression types: - `0.2 * semantic_similarity_score + 0.8 * log(keyword_similarity_score)` -- mostly rank by the logarithm of `keyword_similarity_score` with slight `semantic_similarity_score` adjustment. - `0.2 * exp(fill_nan(semantic_similarity_score, 0)) + 0.3 * is_nan(keyword_similarity_score)` -- rank by the exponent of `semantic_similarity_score` filling the value with 0 if it&#x27;s NaN, also add constant 0.3 adjustment to the final score if `semantic_similarity_score` is NaN. - `0.2 * rr(semantic_similarity_score, 16) + 0.8 * rr(keyword_similarity_score, 16)` -- mostly rank by the reciprocal rank of `keyword_similarity_score` with slight adjustment of the reciprocal rank of `semantic_similarity_score`. The following signals are supported: * `semantic_similarity_score`: semantic similarity adjustment that is calculated using the embeddings generated by a proprietary Google model. This score determines how semantically similar a search query is to a document. * `keyword_similarity_score`: keyword match adjustment that uses the Best Match 25 (BM25) ranking function. This score is calculated using a probabilistic model to estimate the probability that a document is relevant to a given query. * `relevance_score`: semantic relevance adjustment that uses a proprietary Google model to determine the meaning and intent behind a user&#x27;s query in context with the content in the documents. * `pctr_rank`: predicted conversion rate adjustment as a rank; uses predicted click-through rate (pCTR) to gauge the relevance and attractiveness of a search result from a user&#x27;s perspective. A higher pCTR suggests that the result is more likely to satisfy the user&#x27;s query and intent, making it a valuable signal for ranking.
* `freshness_rank`: freshness adjustment as a rank * `document_age`: The time in hours elapsed since the document was last updated, a floating-point number (e.g., 0.25 means 15 minutes). * `topicality_rank`: topicality adjustment as a rank. Uses a proprietary Google model to determine the keyword-based overlap between the query and the document. * `base_rank`: the default rank of the result
          &quot;rankingExpressionBackend&quot;: &quot;A String&quot;, # Optional. The backend to use for the ranking expression evaluation.
          &quot;regionCode&quot;: &quot;A String&quot;, # The Unicode country/region code (CLDR) of a location, such as &quot;US&quot; and &quot;419&quot;. For more information, see [Standard fields](https://cloud.google.com/apis/design/standard_fields). If set, then results will be boosted based on the region_code provided.
          &quot;relevanceScoreSpec&quot;: { # The specification for returning the document relevance score. # Optional. The specification for returning the relevance score.
            &quot;returnRelevanceScore&quot;: True or False, # Optional. Whether to return the relevance score for search results. The higher the score, the more relevant the document is to the query.
          },
          &quot;relevanceThreshold&quot;: &quot;A String&quot;, # The relevance threshold of the search results. Defaults to the Google-defined threshold, leveraging a balance of precision and recall to deliver both highly accurate results and comprehensive coverage of relevant information. This feature is not supported for healthcare search.
          &quot;safeSearch&quot;: True or False, # Whether to turn on safe search. This is only supported for website search.
          &quot;searchAsYouTypeSpec&quot;: { # Specification for search as you type in search requests. # Search as you type configuration. Only supported for the IndustryVertical.MEDIA vertical.
            &quot;condition&quot;: &quot;A String&quot;, # The condition under which search as you type should occur. Defaults to Condition.DISABLED.
          },
          &quot;servingConfig&quot;: &quot;A String&quot;, # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name and the set of models used to serve the search.
          &quot;session&quot;: &quot;A String&quot;, # The session resource name. Optional. Session allows users to do multi-turn /search API calls or coordination between /search API calls and /answer API calls. Example #1 (multi-turn /search API calls): Call /search API with the session ID generated in the first call. Here, the previous search query gets considered in query understanding. I.e., if the first query is &quot;How did Alphabet do in 2022?&quot; and the current query is &quot;How about 2023?&quot;, the current query will be interpreted as &quot;How did Alphabet do in 2023?&quot;. Example #2 (coordination between /search API calls and /answer API calls): Call /answer API with the session ID generated in the first call. Here, the answer generation happens in the context of the search results from the first search call. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team.
          &quot;sessionSpec&quot;: { # Session specification. Multi-turn Search feature is currently at private GA stage. Please use v1alpha or v1beta version instead before we launch this feature to public GA. Or ask for allowlisting through Google Support team. # Session specification. Can be used only when `session` is set.
            &quot;queryId&quot;: &quot;A String&quot;, # If set, the search result gets stored to the &quot;turn&quot; specified by this query ID. Example: Let&#x27;s say the session looks like this: session { name: &quot;.../sessions/xxx&quot; turns { query { text: &quot;What is foo?&quot; query_id: &quot;.../questions/yyy&quot; } answer: &quot;Foo is ...&quot; } turns { query { text: &quot;How about bar then?&quot; query_id: &quot;.../questions/zzz&quot; } } } The user can call /search API with a request like this: session: &quot;.../sessions/xxx&quot; session_spec { query_id: &quot;.../questions/zzz&quot; } Then, the API stores the search result, associated with the last turn. The stored search result can be used by a subsequent /answer API call (with the session ID and the query ID specified). Also, it is possible to call /search and /answer in parallel with the same session ID &amp; query ID.
            &quot;searchResultPersistenceCount&quot;: 42, # The number of top search results to persist. The persisted search results can be used for the subsequent /answer API call. This field is similar to the `summary_result_count` field in SearchRequest.ContentSearchSpec.SummarySpec.summary_result_count. At most 10 results for documents mode, or 50 for chunks mode.
          },
          &quot;spellCorrectionSpec&quot;: { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect.
            &quot;mode&quot;: &quot;A String&quot;, # The mode under which spell correction replaces the original search query. Defaults to Mode.AUTO.
          },
          &quot;userInfo&quot;: { # Information of an end user. # Information about the end user. Highly recommended for analytics and personalization. UserInfo.user_agent is used to deduce `device_type` for analytics.
            &quot;timeZone&quot;: &quot;A String&quot;, # Optional. IANA time zone, e.g. Europe/Budapest.
            &quot;userAgent&quot;: &quot;A String&quot;, # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an `INVALID_ARGUMENT` error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if UserEvent.direct_user_request is set.
            &quot;userId&quot;: &quot;A String&quot;, # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. Don&#x27;t set for anonymous users. Always use a hashed value for this ID. Don&#x27;t set the field to the same fixed ID for different users. This mixes the event history of those users together, which results in degraded model quality. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
          },
          &quot;userLabels&quot;: { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details.
            &quot;a_key&quot;: &quot;A String&quot;,
          },
          &quot;userPseudoId&quot;: &quot;A String&quot;, # A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor logs in or out of the website. This field should NOT have a fixed value such as `unknown_visitor`. This should be the same identifier as UserEvent.user_pseudo_id and CompleteQueryRequest.user_pseudo_id. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an `INVALID_ARGUMENT` error is returned.
        },
      },
      &quot;name&quot;: &quot;A String&quot;, # Identifier. The full resource name of the Evaluation, in the format of `projects/{project}/locations/{location}/evaluations/{evaluation}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
      &quot;qualityMetrics&quot;: { # Describes the metrics produced by the evaluation. # Output only. The metrics produced by the evaluation, averaged across all SampleQuerys in the SampleQuerySet. Only populated when the evaluation&#x27;s state is SUCCEEDED.
        &quot;docNdcg&quot;: { # Stores the metric values at specific top-k levels. # Normalized discounted cumulative gain (NDCG) per document, at various top-k cutoff levels. NDCG measures the ranking quality, giving higher relevance to top results. Example (top-3): Suppose SampleQuery with three retrieved documents (D1, D2, D3) and binary relevance judgements (1 for relevant, 0 for not relevant): Retrieved: [D3 (0), D1 (1), D2 (1)] Ideal: [D1 (1), D2 (1), D3 (0)] Calculate NDCG@3 for each SampleQuery: * DCG@3: 0/log2(1+1) + 1/log2(2+1) + 1/log2(3+1) = 1.13 * Ideal DCG@3: 1/log2(1+1) + 1/log2(2+1) + 0/log2(3+1) = 1.63 * NDCG@3: 1.13/1.63 = 0.693
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
        &quot;docPrecision&quot;: { # Stores the metric values at specific top-k levels. # Precision per document, at various top-k cutoff levels. Precision is the fraction of retrieved documents that are relevant. Example (top-5): * For a single SampleQuery, if 4 out of 5 retrieved documents in the top-5 are relevant, precision@5 = 4/5 = 0.8
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
        &quot;docRecall&quot;: { # Stores the metric values at specific top-k levels. # Recall per document, at various top-k cutoff levels. Recall is the fraction of relevant documents retrieved out of all relevant documents. Example (top-5): * For a single SampleQuery, if 3 out of 5 relevant documents are retrieved in the top-5, recall@5 = 3/5 = 0.6
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
        &quot;pageNdcg&quot;: { # Stores the metric values at specific top-k levels. # Normalized discounted cumulative gain (NDCG) per page, at various top-k cutoff levels. NDCG measures the ranking quality, giving higher relevance to top results. Example (top-3): Suppose SampleQuery with three retrieved pages (P1, P2, P3) and binary relevance judgements (1 for relevant, 0 for not relevant): Retrieved: [P3 (0), P1 (1), P2 (1)] Ideal: [P1 (1), P2 (1), P3 (0)] Calculate NDCG@3 for SampleQuery: * DCG@3: 0/log2(1+1) + 1/log2(2+1) + 1/log2(3+1) = 1.13 * Ideal DCG@3: 1/log2(1+1) + 1/log2(2+1) + 0/log2(3+1) = 1.63 * NDCG@3: 1.13/1.63 = 0.693
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
        &quot;pageRecall&quot;: { # Stores the metric values at specific top-k levels. # Recall per page, at various top-k cutoff levels. Recall is the fraction of relevant pages retrieved out of all relevant pages. Example (top-5): * For a single SampleQuery, if 3 out of 5 relevant pages are retrieved in the top-5, recall@5 = 3/5 = 0.6
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
      },
      &quot;state&quot;: &quot;A String&quot;, # Output only. The state of the evaluation.
    },
  ],
  &quot;nextPageToken&quot;: &quot;A String&quot;, # A token that can be sent as ListEvaluationsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
}</pre>
</div>
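
<p>Illustrative usage sketch (not part of the generated reference): one way to page through evaluations with <code>list()</code> and read the averaged quality metrics from the response shown above. It assumes Application Default Credentials are available to <code>build()</code>, that the list call takes the parent collection name in the form <code>projects/{project}/locations/{location}</code>, and that the repeated response field is named <code>evaluations</code>; the project and location IDs are placeholders.</p>
<pre>
# Illustrative sketch only. Assumes Application Default Credentials and
# placeholder resource IDs; the parent form follows the Evaluation name
# format documented above (projects/{project}/locations/{location}/...).
from googleapiclient.discovery import build

service = build(&quot;discoveryengine&quot;, &quot;v1beta&quot;)
evaluations = service.projects().locations().evaluations()

request = evaluations.list(
    parent=&quot;projects/my-project/locations/global&quot;, pageSize=100)
while request is not None:
    response = request.execute()
    for evaluation in response.get(&quot;evaluations&quot;, []):
        # qualityMetrics is only populated when the state is SUCCEEDED.
        metrics = evaluation.get(&quot;qualityMetrics&quot;, {})
        ndcg = metrics.get(&quot;docNdcg&quot;, {})
        print(evaluation[&quot;name&quot;], evaluation.get(&quot;state&quot;), ndcg.get(&quot;top3&quot;))
    # list_next() returns None once the last page has been fetched.
    request = evaluations.list_next(previous_request=request,
                                    previous_response=response)
</pre>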

<div class="method">
    <code class="details" id="listResults">listResults(evaluation, pageSize=None, pageToken=None, x__xgafv=None)</code>
  <pre>Gets a list of results for a given Evaluation.

Args:
  evaluation: string, Required. The evaluation resource name, such as `projects/{project}/locations/{location}/evaluations/{evaluation}`. If the caller does not have permission to list ListEvaluationResultsResponse.EvaluationResult under this evaluation, regardless of whether or not this evaluation exists, a `PERMISSION_DENIED` error is returned. (required)
  pageSize: integer, Optional. Maximum number of ListEvaluationResultsResponse.EvaluationResult to return. If unspecified, defaults to 100. The maximum allowed value is 1000. Values above 1000 will be coerced to 1000. If this field is negative, an `INVALID_ARGUMENT` error is returned.
  pageToken: string, Optional. A page token ListEvaluationResultsResponse.next_page_token, received from a previous EvaluationService.ListEvaluationResults call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to EvaluationService.ListEvaluationResults must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # Response message for EvaluationService.ListEvaluationResults method.
  &quot;evaluationResults&quot;: [ # The evaluation results for the SampleQuerys.
    { # Represents the results of an evaluation for a single SampleQuery.
      &quot;qualityMetrics&quot;: { # Describes the metrics produced by the evaluation. # Output only. The metrics produced by the evaluation, for a given SampleQuery.
        &quot;docNdcg&quot;: { # Stores the metric values at specific top-k levels. # Normalized discounted cumulative gain (NDCG) per document, at various top-k cutoff levels. NDCG measures the ranking quality, giving higher relevance to top results. Example (top-3): Suppose SampleQuery with three retrieved documents (D1, D2, D3) and binary relevance judgements (1 for relevant, 0 for not relevant): Retrieved: [D3 (0), D1 (1), D2 (1)] Ideal: [D1 (1), D2 (1), D3 (0)] Calculate NDCG@3 for each SampleQuery: * DCG@3: 0/log2(1+1) + 1/log2(2+1) + 1/log2(3+1) = 1.13 * Ideal DCG@3: 1/log2(1+1) + 1/log2(2+1) + 0/log2(3+1) = 1.63 * NDCG@3: 1.13/1.63 = 0.693
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
        &quot;docPrecision&quot;: { # Stores the metric values at specific top-k levels. # Precision per document, at various top-k cutoff levels. Precision is the fraction of retrieved documents that are relevant. Example (top-5): * For a single SampleQuery, if 4 out of 5 retrieved documents in the top-5 are relevant, precision@5 = 4/5 = 0.8
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
        &quot;docRecall&quot;: { # Stores the metric values at specific top-k levels. # Recall per document, at various top-k cutoff levels. Recall is the fraction of relevant documents retrieved out of all relevant documents. Example (top-5): * For a single SampleQuery, if 3 out of 5 relevant documents are retrieved in the top-5, recall@5 = 3/5 = 0.6
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
        &quot;pageNdcg&quot;: { # Stores the metric values at specific top-k levels. # Normalized discounted cumulative gain (NDCG) per page, at various top-k cutoff levels. NDCG measures the ranking quality, giving higher relevance to top results. Example (top-3): Suppose SampleQuery with three retrieved pages (P1, P2, P3) and binary relevance judgements (1 for relevant, 0 for not relevant): Retrieved: [P3 (0), P1 (1), P2 (1)] Ideal: [P1 (1), P2 (1), P3 (0)] Calculate NDCG@3 for SampleQuery: * DCG@3: 0/log2(1+1) + 1/log2(2+1) + 1/log2(3+1) = 1.13 * Ideal DCG@3: 1/log2(1+1) + 1/log2(2+1) + 0/log2(3+1) = 1.63 * NDCG@3: 1.13/1.63 = 0.693
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
        &quot;pageRecall&quot;: { # Stores the metric values at specific top-k levels. # Recall per page, at various top-k cutoff levels. Recall is the fraction of relevant pages retrieved out of all relevant pages. Example (top-5): * For a single SampleQuery, if 3 out of 5 relevant pages are retrieved in the top-5, recall@5 = 3/5 = 0.6
          &quot;top1&quot;: 3.14, # The top-1 value.
          &quot;top10&quot;: 3.14, # The top-10 value.
          &quot;top3&quot;: 3.14, # The top-3 value.
          &quot;top5&quot;: 3.14, # The top-5 value.
        },
      },
      &quot;sampleQuery&quot;: { # Sample Query captures metadata to be used for evaluation. # Output only. The SampleQuery that was evaluated.
        &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp when the SampleQuery was created.
        &quot;name&quot;: &quot;A String&quot;, # Identifier. The full resource name of the sample query, in the format of `projects/{project}/locations/{location}/sampleQuerySets/{sample_query_set}/sampleQueries/{sample_query}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
        &quot;queryEntry&quot;: { # Query Entry captures metadata to be used for search evaluation. # The query entry.
          &quot;query&quot;: &quot;A String&quot;, # Required. The query.
          &quot;targets&quot;: [ # List of targets for the query.
            { # Defines the parameters of the query&#x27;s expected outcome.
              &quot;pageNumbers&quot;: [ # Expected page numbers of the target. Each page number must be non-negative.
                42,
              ],
              &quot;score&quot;: 3.14, # Relevance score of the target.
              &quot;uri&quot;: &quot;A String&quot;, # Expected URI of the target. This field must be a UTF-8 encoded string with a length limit of 2048 characters. Examples of valid URIs: `https://example.com/abc`, `gcs://example/example.pdf`.
            },
          ],
        },
      },
    },
  ],
  &quot;nextPageToken&quot;: &quot;A String&quot;, # A token that can be sent as ListEvaluationResultsRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
}</pre>
</div>
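
<p>Illustrative usage sketch (not part of the generated reference): fetching the per-SampleQuery results for one evaluation with <code>listResults()</code> and reading each query together with its document recall, using the field names from the response documented above. The evaluation resource name is a placeholder, and <code>service</code> is assumed to be built as in the earlier sketch.</p>
<pre>
# Illustrative sketch only; reuses the placeholder `service` from the sketch
# above and a made-up evaluation resource name.
evaluations = service.projects().locations().evaluations()
evaluation_name = (&quot;projects/my-project/locations/global&quot;
                   &quot;/evaluations/my-evaluation&quot;)

response = evaluations.listResults(
    evaluation=evaluation_name, pageSize=100).execute()
for result in response.get(&quot;evaluationResults&quot;, []):
    entry = result[&quot;sampleQuery&quot;][&quot;queryEntry&quot;]
    recall = result.get(&quot;qualityMetrics&quot;, {}).get(&quot;docRecall&quot;, {})
    print(entry[&quot;query&quot;], &quot;recall@5:&quot;, recall.get(&quot;top5&quot;))
</pre>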

<div class="method">
    <code class="details" id="listResults_next">listResults_next()</code>
  <pre>Retrieves the next page of results.

        Args:
          previous_request: The request for the previous page. (required)
          previous_response: The response from the request for the previous page. (required)

        Returns:
          A request object that you can call &#x27;execute()&#x27; on to request the next
          page. Returns None if there are no more items in the collection.
        </pre>
</div>
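
<p>Illustrative pagination sketch (not part of the generated reference): the standard googleapiclient idiom for <code>listResults_next()</code>, which keeps requesting pages until the helper returns <code>None</code>. It reuses the placeholder <code>evaluations</code> collection and <code>evaluation_name</code> from the sketch above; <code>list_next()</code> below follows the same pattern for the list method.</p>
<pre>
# Illustrative sketch only; placeholders carried over from the sketch above.
request = evaluations.listResults(evaluation=evaluation_name)
all_results = []
while request is not None:
    response = request.execute()
    all_results.extend(response.get(&quot;evaluationResults&quot;, []))
    # listResults_next() returns None when there are no more pages.
    request = evaluations.listResults_next(previous_request=request,
                                           previous_response=response)
print(len(all_results), &quot;evaluation results in total&quot;)
</pre>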

<div class="method">
    <code class="details" id="list_next">list_next()</code>
  <pre>Retrieves the next page of results.

        Args:
          previous_request: The request for the previous page. (required)
          previous_response: The response from the request for the previous page. (required)

        Returns:
          A request object that you can call &#x27;execute()&#x27; on to request the next
          page. Returns None if there are no more items in the collection.
        </pre>
</div>

</body></html>