File: window.h

/* Copyright (c) 2017, 2025, Oracle and/or its affiliates.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is designed to work with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation.  The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have either included with
   the program or referenced in the documentation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
*/

#ifndef WINDOWS_INCLUDED
#define WINDOWS_INCLUDED

#include <assert.h>
#include <sys/types.h>
#include <cstring>  // std::memcpy

#include "my_inttypes.h"
#include "sql/enum_query_type.h"
#include "sql/handler.h"
#include "sql/mem_root_array.h"
#include "sql/sql_lex.h"
#include "sql/sql_list.h"
#include "sql/table.h"

/*
  Some Window-related symbols must be known to sql_lex.h which is a frequently
  included header.
  To avoid that any change to window.h causes a recompilation of the whole
  Server, those symbols go into this header:
*/
#include "sql/window_lex.h"

class Cached_item;
class Item;
class Item_func;
class Item_string;
class Item_sum;
class PT_border;
class PT_frame;
class PT_order_list;
class PT_window;
class String;
class THD;
class Temp_table_param;

/**
  Position hints for the frame buffer are saved for these kinds of row
  accesses, cf. #Window::m_frame_buffer_positions.
*/
enum class Window_retrieve_cached_row_reason {
  WONT_UPDATE_HINT = -1,  // special value when using restore_special_row
  FIRST_IN_PARTITION = 0,
  CURRENT = 1,
  FIRST_IN_FRAME = 2,
  LAST_IN_FRAME = 3,
  LAST_IN_PEERSET = 4,
  MISC_POSITIONS = 5  // NTH_VALUE, LEAD/LAG have dynamic indexes 5..N
};

/**
  The number of windows is limited to avoid the stack blowing up
  when constructing iterators recursively. There used to be no limit, but
  it would be unsafe since QEP_shared::m_idx of tmp tables assigned for windows
  would exceed the old range of its type plan_idx (int8). It
  has now been widened, so the max number of windows could be increased
  if need be, modulo other problems. We could also make it a system variable.
*/
constexpr int kMaxWindows = 127;

/**
  Represents the (explicit) window of a SQL 2003 section 7.11 \<window clause\>,
  or the implicit (inlined) window of a window function call, or a reference to
  a named window in a window function call (instead of the inlined definition)
  before resolution.  After resolution, referencing instances become unused,
  having been replaced with the window resolved to in the w.f. call.

  @verbatim
  Cf. 7.11 <window definition> and <existing window name>
      6.10 <window name or specification> and
           <in-line window specification>
      5.4 <window name>
  @endverbatim

  See also PT_window (which wraps Window as a parse_tree_node), and the related
  classes PT_frame, PT_border and PT_exclusion in parse_tree_nodes.

  Currently includes both prepared query and execution state information.
  The latter is marked as such for ease of separation later.
*/
class Window {
  /*------------------------------------------------------------------------
   *
   * Variables stable during execution
   *
   *------------------------------------------------------------------------*/
 protected:
  Query_block *m_query_block;           ///< The SELECT the window is on
  PT_order_list *const m_partition_by;  ///< \<window partition clause\>
  PT_order_list *const m_order_by;      ///< \<window order clause\>
  ORDER *m_sorting_order;               ///< merged partition/order by
  PT_frame *const m_frame;              ///< \<window frame clause\>
  Item_string *m_name;                  ///< \<window name\>
  /**
    Position of definition in query's text, 1 for leftmost. References don't
    count. Thus, anonymous windows in SELECT list, then windows of WINDOW
    clause, then anonymous windows in ORDER BY.
  */
  uint m_def_pos;
  Item_string *const m_inherit_from;  ///< \<existing window name\>
  /**
    If true, m_name is an unbound window reference, other fields
    are unused.
  */
  const bool m_is_reference;

  /**
    (At least) one window function needs to buffer frame rows for evaluation
    i.e. it cannot be evaluated on the fly just from previous rows seen
  */
  bool m_needs_frame_buffering;

  /**
    (At least) one window function needs the peer set of the current row to
    evaluate the wf for the current row
  */
  bool m_needs_peerset;

  /**
    (At least) one window function (currently JSON_OBJECTAGG) needs the
    last peer for the current row to evaluate the wf for the current row.
    (This is used only during inversion/optimization)
  */
  bool m_needs_last_peer_in_frame;

  /**
    (At least) one window function needs the cardinality of the partition of
    the current row to evaluate the wf for the current row
  */
  bool m_needs_partition_cardinality;

  /**
    The functions are optimizable with ROW unit. For example SUM is, MAX is
    not always optimizable. Optimized means we can use the optimized evaluation
    path in process_buffered_windowing_record which uses inversion to avoid
    revisiting all frame rows for every row being evaluated.
  */
  bool m_row_optimizable;

  /**
    The functions are optimizable with RANGE unit. For example SUM is, MAX is
    not always optimizable. Optimized means we can use the optimized evaluation
    path in process_buffered_windowing_record which uses inversion to avoid
    revisiting all frame rows for every row being evaluated.
  */
  bool m_range_optimizable;

  /**
    The aggregates (SUM, etc) can be evaluated once for a partition, since it
    is static, i.e. all rows will have the same value for the aggregates, e.g.
    ROWS/RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING.
  */
  bool m_static_aggregates;

  /**
    Window requires re-evaluation of the first row in optimized moving frame mode
    e.g. FIRST_VALUE.
  */
  bool m_opt_first_row;

  /**
    Window requires re-evaluation of the last row in optimized moving frame mode
    e.g. LAST_VALUE.
  */
  bool m_opt_last_row;

  /**
    The last window to be evaluated at execution time.
  */
  bool m_last;

 public:
  struct st_offset {
    int64 m_rowno;
    bool m_from_last;
    st_offset() : m_rowno(0), m_from_last(false) {}
    /**
      Used for sorting offsets in ascending order for faster traversal of
      frame buffer tmp file
    */
    bool operator<(const st_offset &a) const { return m_rowno < a.m_rowno; }
  };

  struct st_ll_offset {
    int64 m_rowno;  ///< negative values mean LEAD
    st_ll_offset() : m_rowno(INT_MIN64 /* uninitialized marker */) {}
    /**
      Used for sorting offsets in ascending order for faster traversal of
      frame buffer tmp file
    */
    bool operator<(const st_ll_offset &a) const { return m_rowno < a.m_rowno; }
  };

  struct st_nth {
    Mem_root_array_YY<st_offset>
        m_offsets;  ///< sorted set of NTH_VALUE offsets
  };

  struct st_lead_lag {
    Mem_root_array_YY<st_ll_offset>
        m_offsets;  ///< sorted set of LEAD/LAG offsets
  };

 protected:
  /**
    Window requires re-evaluation of the Nth row in optimized moving frame mode
    e.g. NTH_VALUE.
  */
  st_nth m_opt_nth_row;
  st_lead_lag m_opt_lead_lag;

 protected:
  const Window *m_ancestor;    ///< resolved from existing window name
  List<Item_sum> m_functions;  ///< window functions based on 'this'
  Mem_root_array<Cached_item *>
      m_partition_items;  ///< items for the PARTITION BY columns
  Mem_root_array<Cached_item *>
      m_order_by_items;  ///< items for the ORDER BY exprs.

  /*------------------------------------------------------------------------
   *
   * Execution state variables
   *
   *------------------------------------------------------------------------*/
 public:
  /**
    Cardinality of m_frame_buffer_positions if no NTH_VALUE, LEAD/LAG
  */
  static constexpr int FRAME_BUFFER_POSITIONS_CARD =
      static_cast<int>(Window_retrieve_cached_row_reason::MISC_POSITIONS);

  /**
    Holds information about a position in the frame buffer as stored in a
    temporary file (cf. m_frame_buffer). We save a number of such positions
    to speed up lookup when we move the window frame, cf.
    m_frame_buffer_positions
  */
  struct Frame_buffer_position {
    /// The size of the file position is determined by handler::ref_length
    uchar *m_position;
    /// Row number in partition, 1-based
    int64 m_rowno;
    Frame_buffer_position(uchar *position, int64 rowno)
        : m_position(position), m_rowno(rowno) {}
  };

  /**
    Execution state: used iff m_needs_frame_buffering. Holds pointers to
    positions in the file in m_frame_buffer. We use these saved positions to
    avoid having to position to the first row in the partition and then
    making many read calls to find the desired row. By repositioning to a
    suitably(*) saved position we normally (**) need to do only one positioned
    read and one ha_rnd_next call to get at a desired row.
    @verbatim

      [0] the first row in the partition: at the
          beginning of a new partition that is the first row in the partition.
          Its rowno == 1 by definition.
      [1] position of the current row N  (for jump-back to current row or next
                                          current row in combo with ha_rnd_next)
      [2] position of the current first row M in frame (for aggregation looping
          jump-back)
      [3] position of the current last row in a frame
      [4] position and line number of the row last read
   and optionally:
      [5..X] positions of Nth row of X-5+1 NTH_VALUE functions invoked on window
      [X+1..Y] position of last row of lead/lag functions invoked on window

    @endverbatim
    Pointers are lazily initialized if needed.

    (*) We use the position closest below the desired position, cf. logic in
    read_frame_buffer_row.

    (**) Unless we have a frame beyond the current row, in which case we need
    to do some scanning for the first row in the partition.
    Also NTH_VALUE with RANGE might sometimes need to read several rows, since
    the frame start can jump several rows ahead when the current row moves
    forward.
  */
  Mem_root_array_YY<Frame_buffer_position> m_frame_buffer_positions;
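
  /*
    Illustrative sketch (not the actual read logic, cf. read_frame_buffer_row)
    of how a saved hint lets us jump close to a wanted row instead of scanning
    from the start of the partition; ha_rnd_pos()/ha_rnd_next() are the
    handler's positioned-read and sequential-read calls:

      TABLE *fb = frame_buffer();
      Frame_buffer_position &hint = m_frame_buffer_positions[static_cast<int>(
          Window_retrieve_cached_row_reason::FIRST_IN_FRAME)];
      fb->file->ha_rnd_pos(fb->record[0], hint.m_position);  // jump to hint
      // ... then ha_rnd_next(fb->record[0]) forward until the wanted row
      // number (relative to hint.m_rowno) is reached.
  */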

  /**
    Sometimes we read one row too many, so that the saved position will
    be too far out because we subsequently need to read an earlier (previous)
    row of the same kind (reason). For such cases, we first save the
    current position, read, and if we see we read too far, restore the old
    position. See #save_pos and #restore_pos.
  */
  Frame_buffer_position m_tmp_pos;

  /**
    See #m_tmp_pos
  */
  void save_pos(Window_retrieve_cached_row_reason reason) {
    int reason_index = static_cast<int>(reason);
    m_tmp_pos.m_rowno = m_frame_buffer_positions[reason_index].m_rowno;
    std::memcpy(m_tmp_pos.m_position,
                m_frame_buffer_positions[reason_index].m_position,
                frame_buffer()->file->ref_length);
  }

  /**
    See #m_tmp_pos
  */
  void restore_pos(Window_retrieve_cached_row_reason reason) {
    int reason_index = static_cast<int>(reason);
    m_frame_buffer_positions[reason_index].m_rowno = m_tmp_pos.m_rowno;
    std::memcpy(m_frame_buffer_positions[reason_index].m_position,
                m_tmp_pos.m_position, frame_buffer()->file->ref_length);
  }

  /**
    Copy frame buffer position hint from one to another.
  */
  void copy_pos(Window_retrieve_cached_row_reason from_reason,
                Window_retrieve_cached_row_reason to_reason) {
    int from_index = static_cast<int>(from_reason);
    int to_index = static_cast<int>(to_reason);
    m_frame_buffer_positions[to_index].m_rowno =
        m_frame_buffer_positions[from_index].m_rowno;

    std::memcpy(m_frame_buffer_positions[to_index].m_position,
                m_frame_buffer_positions[from_index].m_position,
                frame_buffer()->file->ref_length);
  }
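
  /*
    Illustrative sketch (not part of the execution code) of the save/read/
    restore pattern described for #m_tmp_pos above; `read_next_row_of_kind` is
    a hypothetical stand-in for the actual frame buffer read logic:

      save_pos(Window_retrieve_cached_row_reason::LAST_IN_FRAME);
      bool went_too_far = read_next_row_of_kind(
          Window_retrieve_cached_row_reason::LAST_IN_FRAME);
      if (went_too_far)
        restore_pos(Window_retrieve_cached_row_reason::LAST_IN_FRAME);
  */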

  /**
     Keys for m_special_rows_cache, for special
     rows (see the comment on m_special_rows_cache). Note that they are negative,
     so that they will never collide with actual row numbers in the frame.
     This allows us to treat them interchangeably with real row numbers
     as function arguments; e.g., bring_back_frame_row() can restore either
     a “normal” row from the frame, or one of the special rows, and does not
     need to take in separate flags for the two.
     TODO(Chaithra): We have only one special key. Do we need the enum?
     Also m_special_rows_cache/cache_length/max_cache_length should be looked
     into.
  */
  enum Special_keys {
    /**
      We read an incoming row. We notice it is the start of a new
      partition. We must thus process the just-finished partition, but that
      processing uses this row's buffer; so, we save this row first, process
      the partition, and restore it later.
    */
    FBC_FIRST_IN_NEXT_PARTITION = -1,
    // Insert new values here.
    // And keep the ones below up to date.
    FBC_FIRST_KEY = FBC_FIRST_IN_NEXT_PARTITION,
    FBC_LAST_KEY = FBC_FIRST_IN_NEXT_PARTITION,
  };

 protected:
  /**
    Execution state: used iff m_needs_frame_buffering. Holds the temporary
    file (used for the frame buffering) parameters
  */
  Temp_table_param *m_frame_buffer_param;

  /**
    Holds whether this window should be “short-circuit”, i.e., go directly
    to the query output instead of to a temporary table.
  */
  bool m_short_circuit = false;

  /**
    Execution state: used iff m_needs_frame_buffering. Holds the TABLE
    object for the temporary file used for the frame buffering.
  */
  TABLE *m_frame_buffer;

  /**
    Execution state: The frame buffer tmp file is not truncated for each new
    partition.  We need to keep track of where a partition starts in case we
    need to switch from heap to innodb tmp file on overflow, in order to
    re-initialize m_frame_buffer_positions with the current partition's row 1
    (which is the minimum hint required) as we cross over.  This number is
    incremented for each write.
  */
  int64 m_frame_buffer_total_rows;

  /**
    Execution state: Snapshot of m_frame_buffer_total_rows when we start a new
    partition, i.e. for the first row in the first partition we will have a
    value of 1.
  */
  int64 m_frame_buffer_partition_offset;

  /**
     If >=1: the row with this number (1-based, relative to start of
     partition) currently has its fields in the record buffer of the IN table
     and of the OUT table. 0 means "unset".
     Usable only with buffering. Set and read by bring_back_frame_row(), so
     that multiple successive calls to it for the same row do only one read
     from FB (optimization).
  */
  int64 m_row_has_fields_in_out_table;

  /**
    Holds a fixed number of copies of special rows; each copy can use up to
    #m_special_rows_cache_max_length bytes. Special rows are those that are
    not part of our frame, but that we need to store away nevertheless, because
    they might be in the table's buffers, which we need for our own purposes
    during window processing.
    cf. the Special_keys enumeration.
  */
  uchar *m_special_rows_cache;
  /// Length of each copy in #m_special_rows_cache, in bytes
  size_t m_special_rows_cache_length[FBC_FIRST_KEY - FBC_LAST_KEY + 1];
  /// Maximum allocated size in #m_special_rows_cache
  size_t m_special_rows_cache_max_length;

  /**
    Execution state: used iff m_needs_frame_buffering. Holds the row
    number (in the partition) of the last row (hitherto) saved in the frame
    buffer
  */
  int64 m_last_rowno_in_cache;

  /**
    Execution state: used iff m_needs_peerset. Holds the rowno
    for the last row in this peer set.
  */
  int64 m_last_rowno_in_peerset;

  /**
    Execution state: used iff m_needs_last_peer_in_frame. True if a row
    leaving the frame is the last row in the peer set within the frame.
  */
  int64 m_is_last_row_in_peerset_within_frame;

  /**
    Execution state: the current row number in the current partition.
    Set in check_partition_boundary. Used while reading input rows, in contrast
    to m_rowno_in_partition, which is used when processing buffered rows.
    Cf. check_partition_boundary.
  */
  int64 m_part_row_number;

  /**
    Execution state: the current row starts a new partition.
    Set in check_partition_boundary.
  */
  bool m_partition_border;

  /**
    Execution state: The number, in the current partition, of the last output
    row, i.e. the row number of the last row hitherto evaluated and output to
    the next phase.
  */
  int64 m_last_row_output;

  /**
    Execution state: The number of the row being visited for its contribution
    to a window function, relative to the start of the partition. Note that
    this will often be different from the current row for which we are
    processing the window function, readying it for output. That is given by
    \c m_rowno_in_partition, q.v. Contrast also with \c m_rowno_in_frame
    which is frame relative.
  */
  int64 m_rowno_being_visited;

  /**
    Execution state: the row number of the row we are looking at for evaluating
    its contribution to some window function(s).  It is relative to start of
    the frame and 1-based. It is typically set after fetching a row from the
    frame buffer using \c bring_back_frame_row and before calling
    \c copy_funcs.  Cf. also \c m_is_last_row_in_frame.
  */
  int64 m_rowno_in_frame;

  /**
    Execution state: The row number of the current row being readied for
    output within the partition. 1-based.
    In \c process_buffered_windowing_record this is known as \c current_row.
  */
  int64 m_rowno_in_partition;
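
  /*
    Illustrative example of how the three row counters relate (derived from
    the definitions above, not from the execution code): with frame
    ROWS BETWEEN 2 PRECEDING AND CURRENT ROW and current row number 5 in the
    partition, m_rowno_in_partition == 5 while the rows visited for their
    contribution have m_rowno_being_visited == 3, 4, 5 and, correspondingly,
    m_rowno_in_frame == 1, 2, 3 (frame relative, 1-based).
  */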

  /**
   Execution state: for optimizable aggregates, cf. m_row_optimizable and
   m_range_optimizable, we need to keep track of when we have computed the
   first aggregate, since aggregates for rows 2..N are computed in an optimized
   way by inverse aggregation of the row moving out of the frame.
   */
  bool m_aggregates_primed;

  /*------------------------------------------------------------------------
   *
   * RANGE boundary frame state variables.
   *
   *------------------------------------------------------------------------*/
 public:
  /*
    For RANGE bounds, we need to be able to check if a given row is
    before the lower bound, or after the upper bound, as we don't know
    ahead of time how many rows to skip (unlike for ROW bounds).
    Often, the lower and upper bounds are the same, as they are
    inclusive.

    We solve this by having an array of comparators of the type

       value ⋛ <cache>(ref_value)

    where ⋛ is a three-way comparator, and <cache>(ref_value) is an
    Item_cache where we can store the value of the reference row
    before we invoke the comparison. For instance, if we have a
    row bound such as

        ... OVER (ORDER BY a,b) FROM t1

     we will have an array with the two comparators

        t1.a ⋛ <cache>(a)
        t1.b ⋛ <cache>(b)

     and when we evaluate the frame bounds for a given row, we insert
     the reference values in the caches and then do the comparison to
     figure out if we're before, in or after the bound. As this is a
     lexicographic comparison, we only need to evaluate the second
     comparator if the first one returns equality.

     The expressions on the right-hand side can be somewhat more
     complicated; say that we have a window specification like

        ... OVER (ORDER BY a RANGE BETWEEN 2 PRECEDING AND 3 FOLLOWING)

     In this case, the lower bound and upper bound are different
     (both arrays will always contain only a single element):

        Lower: t1.a ⋛ <cache>(a) - 2
        Upper: t1.a ⋛ <cache>(a) + 3

     i.e., there is an Item_func_plus or Item_func_minus with some
     constant expression as the second argument.

     The comparators for the lower bound are stored in [0], and for
     the upper bound in [1].
   */
  Bounds_checked_array<Arg_comparator> m_comparators[2];
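
  /*
    Illustrative sketch of the lexicographic short-circuit described above
    (assumes Arg_comparator::compare() yields a three-way result; the real
    logic lives in before_or_after_frame()):

      // bound_idx: 0 for the lower bound, 1 for the upper bound
      for (Arg_comparator &cmp : m_comparators[bound_idx]) {
        const int res = cmp.compare();
        if (res != 0) return res;  // decided by this ORDER BY column
      }
      return 0;  // all columns compare equal: the row is exactly at the bound
  */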

  /**
    The logical ordering index (into LogicalOrderings) needed by this window's
    PARTITION BY and ORDER BY clauses together (if any; else, 0). Used only by
    the hypergraph join optimizer.
   */
  int m_ordering_idx;

  /**
    Used temporarily by the hypergraph join optimizer to mark which windows
    are referred to by a given ordering (so that one doesn't try to satisfy
    a window's ordering by an ordering referring to that window).
   */
  bool m_mark;

 protected:
  /**
    Execution state: the row number of the first row in a frame when evaluating
    RANGE based frame bounds. When using RANGE bounds, we don't know a priori
    when moving the frame which row number will be the next lower bound, but
    we know it will have to be a row number higher than the lower bound of
    the previous frame, since the bounds increase monotonically as long
    as the frame bounds are static within the query (current limitation).
    So, it makes sense to remember the first row number in a frame until
    we have determined the start of the next frame.

    If the frame for the previous current row in the partition was empty (cf.
    "current_row" in process_buffered_windowing_record), this should point
    to the next possible frame start. Relative to partition start, 1-based.
  */
  int64 m_first_rowno_in_range_frame;

  /**
    Execution state: used for RANGE bounds frame evaluation
    for the continued evaluation for current row > 2 in a partition.
    If the frame for the current row visited (cf "current_row" in
    process_buffered_windowing_record) was empty, the invariant

         m_last_rowno_in_range_frame < m_first_rowno_in_range_frame

    should hold after the visit. Relative to partition start. 1-based.
  */
  int64 m_last_rowno_in_range_frame;

  /**
    Execution state. Only used for ROWS frame optimized MIN/MAX.
    The (1-based) number in the partition of first row in the current frame.
  */
  int64 m_first_rowno_in_rows_frame;

  /*------------------------------------------------------------------------
   *
   * Window function special behaviour toggles. These boolean flags influence
   * the action taken when a window function is evaluated, i.e.
   *
   *     Item_xxx::val_xxx
   *
   * They are set locally just before and after a call to evaluate functions,
   * i.e.
   *
   *     w.set_<toggle>(true)
   *     copy_<kind>_window_functions()  [see process_buffered_windowing_record]
   *     w.set_<toggle>(false)
   *
   * to achieve a special semantic, since we cannot pass down extra parameters.
   *
   *------------------------------------------------------------------------*/

  /**
    Execution state: the current row is the last row in a window frame
    For some aggregate functions, e.g. AVG, we can save computation by not
    evaluating the entire function value before the last row has been read.
    For AVG, do the summation for each row in the frame, but the division only
    for the last row, at which time the result is needed for the wf.
    Probably only useful for ROW based or static frames.
    For frame with peer set computation, determining the last row in the
    peer set ahead of processing is not possible, so we use a pessimistic
    assumption.
  */
  bool m_is_last_row_in_frame;

  /**
    Execution state: make a framing wf produce a NULL (or 0, e.g. for COUNT)
    value because no rows are available for aggregation: e.g. the first row in
    a partition with frame ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING has no
    rows for which aggregation can happen
  */
  bool m_do_copy_null;

  /**
    Execution state: do inverse, e.g. subtract rather than add in aggregates.
    Used for optimizing computation of sliding frames for eligible aggregates,
    cf. Item_sum::check_wf_semantics.
  */
  bool m_inverse_aggregation;
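
  /*
    Illustrative sketch of the toggle protocol (cf. the comment block above;
    this is not the actual executor code, and copy_funcs is defined elsewhere):

      w.set_inverse(true);
      // bring the row that just left the sliding frame back into the record
      // buffers, then let each eligible Item_sum subtract its contribution:
      copy_funcs(...);   // hypothetical call site
      w.set_inverse(false);
  */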

  /*------------------------------------------------------------------------
   *
   * Constructors
   *
   *------------------------------------------------------------------------*/
 private:
  /**
    Generic window constructor, shared
  */
  Window(Item_string *name, PT_order_list *part, PT_order_list *ord,
         PT_frame *frame, bool is_reference, Item_string *inherit)
      : m_query_block(nullptr),
        m_partition_by(part),
        m_order_by(ord),
        m_sorting_order(nullptr),
        m_frame(frame),
        m_name(name),
        m_def_pos(0),
        m_inherit_from(inherit),
        m_is_reference(is_reference),
        m_needs_frame_buffering(false),
        m_needs_peerset(false),
        m_needs_last_peer_in_frame(false),
        m_needs_partition_cardinality(false),
        m_row_optimizable(true),
        m_range_optimizable(true),
        m_static_aggregates(false),
        m_opt_first_row(false),
        m_opt_last_row(false),
        m_last(false),
        m_ancestor(nullptr),
        m_partition_items(*THR_MALLOC),
        m_order_by_items(*THR_MALLOC),
        m_tmp_pos(nullptr, -1),
        m_frame_buffer_param(nullptr),
        m_frame_buffer(nullptr),
        m_frame_buffer_total_rows(0),
        m_frame_buffer_partition_offset(0),
        m_row_has_fields_in_out_table(0),
        m_special_rows_cache_max_length(0),
        m_last_rowno_in_cache(0),
        m_last_rowno_in_peerset(0),
        m_is_last_row_in_peerset_within_frame(false),
        m_part_row_number(0),
        m_partition_border(true),
        m_last_row_output(0),
        m_rowno_being_visited(0),
        m_rowno_in_frame(0),
        m_rowno_in_partition(0),
        m_aggregates_primed(false),
        m_first_rowno_in_range_frame(1),
        m_last_rowno_in_range_frame(0),
        m_is_last_row_in_frame(false),
        m_do_copy_null(false),
        m_inverse_aggregation(false) {
    m_opt_nth_row.m_offsets.init_empty_const();
    m_opt_lead_lag.m_offsets.init_empty_const();
    m_frame_buffer_positions.init_empty_const();
  }

 public:
  /**
    Reference to a named window. This kind is only used before resolution,
    references to it being replaced by the referenced window object thereafter.
  */
  Window(Item_string *name)
      : Window(name, nullptr, nullptr, nullptr, true, nullptr) {}

  /**
    Unnamed window. If the window turns out to be named, the name will be set
    later, cf. #set_name().
  */
  Window(PT_order_list *partition_by, PT_order_list *order_by, PT_frame *frame)
      : Window(nullptr, partition_by, order_by, frame, false, nullptr) {}

  /**
    Unnamed window based on a named window. If the window turns out to be
    named, the name will be set later, cf. #set_name().
  */
  Window(PT_order_list *partition_by, PT_order_list *order_by, PT_frame *frame,
         Item_string *inherit)
      : Window(nullptr, partition_by, order_by, frame, false, inherit) {}
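
  /*
    Illustrative mapping (hypothetical parser call sites) from window syntax
    to the constructors above:

      OVER w                              -> Window(name)          // reference
      OVER (PARTITION BY a ORDER BY b)    -> Window(part, ord, frame)
      OVER (w ORDER BY b)                 -> Window(part, ord, frame, inherit)
  */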

  /*------------------------------------------------------------------------
   *
   * Methods
   *
   *------------------------------------------------------------------------*/

  /**
    We have a named window. Now set its name. Used once, if at all, for a
    window as part of parsing.
  */
  void set_name(Item_string *name) { m_name = name; }

  /**
    After resolving an existing window name reference in a window definition,
    we set the ancestor pointer for easy access later.
  */
  void set_ancestor(Window *a) { m_ancestor = a; }

  /**
    Get the name of a window. Can be empty, cf. #printable_name which is not.
  */
  Item_string *name() const { return m_name; }

  uint def_pos() const { return m_def_pos; }       ///< @see #m_def_pos
  void set_def_pos(uint pos) { m_def_pos = pos; }  ///< @see #m_def_pos

  /**
    Get the frame, if any. SQL 2011 7.11 GR 1.b.i.6
  */
  const PT_frame *frame() const { return m_frame; }

  /**
    Get the ORDER BY, if any. That is, the first we find along
    the ancestor chain. Uniqueness checked in #setup_windows1
    SQL 2011 7.11 GR 1.b.i.5.A-C
  */
  const PT_order_list *effective_order_by() const {
    const PT_order_list *o = m_order_by;
    const Window *w = m_ancestor;

    while (o == nullptr && w != nullptr) {
      o = w->m_order_by;
      w = w->m_ancestor;
    }
    return o;
  }

  /**
    Get the first argument of the ORDER BY clause for this window
    if any. "ORDER BY" is not checked in ancestor unlike
    effective_order_by().
    Use when the goal is to operate on the set of item clauses for
    all windows of a query. When interrogating the effective order
    by for a window (specified for it or inherited from another
    window) use effective_order_by().
  */
  ORDER *first_order_by() const;

  /**
    Get partition, if any. That is, the partition if any, of the
    root window. SQL 2011 7.11 GR 1.b.i.4.A-C
  */
  const PT_order_list *effective_partition_by() const {
    const PT_order_list *p = m_partition_by;
    const Window *w = m_ancestor;
    while (w != nullptr) {
      if (w->m_ancestor == nullptr) {
        /* root  */
        p = w->m_partition_by;
      } else {
        /* See #setup_windows for checking */
        assert(w->m_partition_by == nullptr);
      }
      w = w->m_ancestor;
    }
    return p;
  }
  /**
    Get the first argument of the PARTITION clause for this window
    if any. "PARTITION BY" is not checked in ancestor unlike
    effective_partition_by().
    Use when the goal is to operate on the set of item clauses for
    all windows of a query. When interrogating the effective
    partition by for a window (specified for it or inherited from
    another window) use effective_partition_by().
  */
  ORDER *first_partition_by() const;

  /**
    Get the list of functions invoked on this window.
  */
  List<Item_sum> &functions() { return m_functions; }

  /**
    Concatenation of columns in PARTITION BY and ORDER BY.
    Columns present in both lists (redundancies) are eliminated, while
    making sure the order of columns in the ORDER BY is maintained
    in the merged list.

    @param thd                Optional. Session state. If not nullptr,
                              initialize the cache.

    @param implicit_grouping  Optional. If true, we won't sort (single row
                              result set). Presence implies thd != nullptr for
                              the first call when we lazily set up this
                              information.  Succeeding calls return the cached
                              value.

    @returns The start of the concatenated ordering expressions, or nullptr
  */
  ORDER *sorting_order(THD *thd = nullptr, bool implicit_grouping = false);

  /**
    Check that the semantic requirements for window functions over this
    window are fulfilled, and accumulate evaluation requirements.
    This is run at resolution.
  */
  bool check_window_functions1(THD *thd, Query_block *select);
  /**
    Like check_window_functions1() but contains checks which must wait until
    the start of the execution phase.
  */
  bool check_window_functions2(THD *thd);

  /**
    For RANGE frames we need to do computations involving add/subtract and
    comparisons (less than, greater than). To make this work across types, we
    construct item trees to do the computations, so we can reuse all the
    special case handling, e.g. for signed/unsigned int wrap-around, overflow
    etc.
  */
  bool setup_range_expressions(THD *thd);

  /**
    Return true if this window represents an unresolved window reference seen
    in a window function OVER clause.
  */
  bool is_reference() const { return m_is_reference; }

  /**
    Check if the just read input row marks the start of a new partition.
    Sets the member variables:

    m_partition_border and m_part_row_number

  */
  void check_partition_boundary();

  /**
    Reset the current row's ORDER BY expressions when starting a new
    peer set.
  */
  void reset_order_by_peer_set();

  /**
    Determine if the current row is not in the same peer set as the previous
    row. Used for RANGE frame and implicit RANGE frame (the latter is used by
    aggregates in the presence of ORDER BY).

    The current row is in the same peer set if all ORDER BY columns
    have the same value as in the previous row.

    For JSON_OBJECTAGG only the first order by column needs to be
    compared to check if a row is in the peer set.

    @param compare_all_order_by_items If true, compare all the order by items
                                      to determine if a row is in peer set.
                                      Else, compare only the first order by
                                      item to determine peer set.

    @return true if current row is in a new peer set
  */
  bool in_new_order_by_peer_set(bool compare_all_order_by_items = true);

  /**
    While processing buffered rows in RANGE frame mode we determine
    if the present row revisited from the buffer is before the row
    being processed; i.e. the current row.

    @return true if the present row is before the RANGE, i.e. not to
    be included
  */
  bool before_frame() { return before_or_after_frame(true); }

  /// See before_frame()
  bool after_frame() { return before_or_after_frame(false); }

  /**
    Check if we have read all the rows in a partition, possibly
    having buffered them for further processing

    @returns true if this is the case
  */
  bool at_partition_border() const { return m_partition_border; }

  void save_special_row(uint64 special_rowno, TABLE *t);
  void restore_special_row(uint64 special_rowno, uchar *record);
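
  /*
    Illustrative sketch (not the actual windowing loop) of the save/restore
    pattern for the special row at a partition boundary, cf. the comment on
    FBC_FIRST_IN_NEXT_PARTITION; `in_table` is a hypothetical stand-in for the
    window's input table:

      if (w.at_partition_border()) {
        w.save_special_row(Window::FBC_FIRST_IN_NEXT_PARTITION, in_table);
        // ... finish processing the partition that just ended ...
        w.restore_special_row(Window::FBC_FIRST_IN_NEXT_PARTITION,
                              in_table->record[0]);
      }
  */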

  /**
    Resolve any named window to its definition
    and update m_window to point to the definition instead
  */
  static bool resolve_reference(THD *thd, Item_sum *wf, PT_window **m_window);

  /**
    Semantic checking of windows. Run at resolution.

    * Process any window inheritance, that is, a window that in its
    specification refers to another named window.

    Rules:
    1) There should be no loops
    2) The inheriting window can not specify partitioning
    3) The inheriting window can not specify an ordering if one is already
       specified by an ancestor.
    4) An ancestor can not specify a window framing clause.

    Cf. SQL 2011 7.11 window clause SR 10-11.

    * Check the requirements placed on the window by the window functions that
    use it, and make a note of those so we know at execution time, for example
    whether we need to buffer rows to process the window functions, whether
    inversion optimization will be used for moving frames etc.

    * Prepare the physical ordering lists used by sorting at execution time.

    * Set up cached items for partition determination and for range/peer
    determination based on order by columns.

    * Check any frame semantics and for RANGE frames, set up bounds computation
    item trees.

    @param thd              The session's execution thread
    @param select           The select for which we are doing windowing
    @param ref_item_array   The base ref items
    @param tables           The list of tables involved
    @param fields           The list of all fields, including hidden ones
    @param windows          The list of windows defined for this select

    @return false if success, true if error
  */
  static bool setup_windows1(THD *thd, Query_block *select,
                             Ref_item_array ref_item_array, Table_ref *tables,
                             mem_root_deque<Item *> *fields,
                             List<Window> *windows);
  /**
    Like setup_windows1() but contains operations which must wait until
    the start of the execution phase.

    @param thd              The session's execution thread
    @param windows          The list of windows defined for this select

    @return false if success, true if error
  */
  static bool setup_windows2(THD *thd, List<Window> *windows);

  /**
    Check window definitions to remove unused windows. We do this
    only after syntactic and semantic checking for errors has been performed.
    Eliminate redundant sorts after unused windows are removed.

    @param windows         The list of windows defined for this select
  */
  static void eliminate_unused_objects(List<Window> *windows);

  /**
    Resolve and set up the PARTITION BY or an ORDER BY list of a window.

    @param thd              The session's execution thread
    @param ref_item_array   The base ref items
    @param tables           The list of tables involved
    @param fields           The list of all fields, including hidden ones
    @param o                A list of order by expressions
    @param partition_order  If true, o represents a windowing PARTITION BY,
           else it represents a windowing ORDER BY
    @returns false if success, true if error
  */
  bool resolve_window_ordering(THD *thd, Ref_item_array ref_item_array,
                               Table_ref *tables,
                               mem_root_deque<Item *> *fields, ORDER *o,
                               bool partition_order);
  /**
    Return true if this window's name is not unique in windows
  */
  bool check_unique_name(const List<Window> &windows);

  /**
    Specify that this window is to be evaluated after a given
    temporary table. This means that all expressions that have been
    materialized (as given by items_to_copy) will be replaced with the
    given temporary table fields. (If there are multiple materializations,
    this function must be called for each of them in order.)

    Only the hypergraph join optimizer uses this currently; the old
    join optimizer instead uses Item_ref objects that point to the
    base slice, which is then replaced at runtime depending on which
    temporary table we are to evaluate from.
  */
  void apply_temp_table(THD *thd, const Func_ptr_array &items_to_copy);

  /**
    Set up cached items for a partition or an order by list,
    updating m_partition_items or m_order_by_items respectively.

    @param thd              The session's execution thread
    @param select           The select for which we are doing windowing
    @param o                The list of ordering expressions
    @param partition_order  If true, o represents a partition order list,
                            else an ORDER BY list.

    @returns false if success, true if error
  */
  bool setup_ordering_cached_items(THD *thd, Query_block *select,
                                   const PT_order_list *o,
                                   bool partition_order);

  /**
    Determine if the window has either a partition clause or an ORDER BY
    clause (or both), either defined by itself or inherited from another
    window.

    @return true if we have such a clause, which means we need to sort the
            input table before evaluating the window functions, unless it has
            been made redundant by a previous windowing step, cf.
            reorder_and_eliminate_sorts, or due to a single row result set,
            cf. Query_block::is_implicitly_grouped().
  */
  bool needs_sorting() const { return m_sorting_order != nullptr; }

  /**
    If we cannot compute one of the window functions without looking at
    succeeding rows, return true, else false.
  */
  bool needs_buffering() const { return m_needs_frame_buffering; }

  /**
    If we cannot compute one of the window functions without looking at all
    rows in the peerset of the current row, return true, else
    false. E.g. CUME_DIST.
  */
  bool needs_peerset() const { return m_needs_peerset; }

  /**
    If we cannot compute one of the window functions without looking at all
    rows in the peerset of the current row in this frame, return true, else
    false. E.g. JSON_OBJECTAGG.
  */
  bool needs_last_peer_in_frame() const { return m_needs_last_peer_in_frame; }
  /**
    If we need to read the entire partition before we can evaluate
    some window function(s) on this window,
    @returns true if that is the case, else false
  */
  bool needs_partition_cardinality() const {
    return m_needs_partition_cardinality;
  }

  /**
    Return true if the set of window functions are all ROW unit optimizable.
    Only relevant if m_needs_buffering and m_row_optimizable are true.
  */
  bool optimizable_row_aggregates() const { return m_row_optimizable; }

  /**
    Return true if the set of window functions are all RANGE unit optimizable.
    Only relevant if m_needs_buffering and m_range_optimizable are true.
  */
  bool optimizable_range_aggregates() const { return m_range_optimizable; }

  /**
    Return true if the aggregates are static, i.e. the same aggregate values for
    all rows in the partition. Only relevant if m_needs_buffering is true.
  */
  bool static_aggregates() const { return m_static_aggregates; }

  /**
    See #m_opt_first_row
  */
  bool opt_first_row() const { return m_opt_first_row; }

  /**
    See #m_opt_last_row
  */
  bool opt_last_row() const { return m_opt_last_row; }

  /**
    See #m_last
  */
  bool is_last() const { return m_last; }

  /**
    An override used by the hypergraph join optimizer only.
  */
  void set_is_last(bool last) { m_last = last; }

  /**
    See #m_opt_nth_row
  */
  const st_nth &opt_nth_row() const { return m_opt_nth_row; }

  /**
    See #m_opt_lead_lag
  */
  const st_lead_lag &opt_lead_lag() const { return m_opt_lead_lag; }

  /**
    Getter for m_frame_buffer_param, q.v.
  */
  Temp_table_param *frame_buffer_param() const { return m_frame_buffer_param; }

  /**
    Setter for m_frame_buffer_param, q.v.
  */
  void set_frame_buffer_param(Temp_table_param *p) { m_frame_buffer_param = p; }

  /**
    Getter for m_frame_buffer, q.v.
  */
  TABLE *frame_buffer() const { return m_frame_buffer; }

  /**
    Setter for m_frame_buffer, q.v.
  */
  void set_frame_buffer(TABLE *tab) { m_frame_buffer = tab; }

  bool short_circuit() const { return m_short_circuit; }
  void set_short_circuit(bool short_circuit) {
    m_short_circuit = short_circuit;
  }

  /**
    Getter for m_part_row_number, q.v., the current row number within the
    partition.
  */
  int64 partition_rowno() const { return m_part_row_number; }

  /**
    Allocate the cache for special rows
    @param thd      thread handle
    @param out_tbl  The table where this window function's value is written to
    @returns true if error.
  */
  bool make_special_rows_cache(THD *thd, TABLE *out_tbl);

  /**
    See #m_last_row_output
  */
  int64 last_row_output() const { return m_last_row_output; }

  /**
    See #m_last_row_output
  */
  void set_last_row_output(int64 rno) { m_last_row_output = rno; }

  /**
   See #m_rowno_being_visited
   */
  int64 rowno_being_visited() const { return m_rowno_being_visited; }

  /**
   See #m_rowno_being_visited
   */
  void set_rowno_being_visited(int64 rno) { m_rowno_being_visited = rno; }

  /**
    See #m_last_rowno_in_cache
  */
  int64 last_rowno_in_cache() const { return m_last_rowno_in_cache; }

  /**
    See #m_last_rowno_in_cache
  */
  void set_last_rowno_in_cache(uint64 rno) { m_last_rowno_in_cache = rno; }

  /**
    See #m_last_rowno_in_range_frame
  */
  int64 last_rowno_in_range_frame() const {
    return m_last_rowno_in_range_frame;
  }

  /**
    See #m_last_rowno_in_range_frame
  */
  void set_last_rowno_in_range_frame(uint64 rno) {
    m_last_rowno_in_range_frame = rno;
  }

  /**
    See #m_last_rowno_in_peerset
  */
  int64 last_rowno_in_peerset() const { return m_last_rowno_in_peerset; }

  /**
    See #m_last_rowno_in_peerset
  */
  void set_last_rowno_in_peerset(uint64 rno) { m_last_rowno_in_peerset = rno; }

  /**
    See #m_is_last_row_in_peerset_within_frame
  */
  int64 is_last_row_in_peerset_within_frame() const {
    return m_is_last_row_in_peerset_within_frame;
  }

  /**
    See #m_is_last_row_in_peerset_within_frame
  */
  void set_is_last_row_in_peerset_within_frame(bool value) {
    m_is_last_row_in_peerset_within_frame = value;
  }

  /**
    See #m_do_copy_null
  */
  bool do_copy_null() const { return m_do_copy_null; }

  /**
    See #m_do_copy_null
  */
  void set_do_copy_null(bool b) { m_do_copy_null = b; }

  /**
    See #m_inverse_aggregation
  */
  bool do_inverse() const { return m_inverse_aggregation; }

  /**
    See #m_inverse_aggregation
  */
  Window &set_inverse(bool b) {
    m_inverse_aggregation = b;
    return *this;
  }

  /**
    See #m_aggregates_primed
  */
  bool aggregates_primed() const { return m_aggregates_primed; }

  /**
    See #m_aggregates_primed
  */
  void set_aggregates_primed(bool b) { m_aggregates_primed = b; }

  /**
    See #m_is_last_row_in_frame
  */
  bool is_last_row_in_frame() const {
    return m_is_last_row_in_frame || m_query_block->m_table_list.elements == 0;
  }

  /**
    See #m_is_last_row_in_frame
  */
  void set_is_last_row_in_frame(bool b) { m_is_last_row_in_frame = b; }

  /**
    Return the size of the frame in number of rows
    @returns frame size
  */
  // int64 frame_cardinality();

  /**
    See #m_rowno_in_frame
  */
  int64 rowno_in_frame() const { return m_rowno_in_frame; }

  /**
    See #m_rowno_in_frame
  */
  Window &set_rowno_in_frame(int64 rowno) {
    m_rowno_in_frame = rowno;
    return *this;
  }

  /**
    See #m_rowno_in_partition
  */
  int64 rowno_in_partition() const { return m_rowno_in_partition; }

  /**
    See #m_rowno_in_partition
  */
  void set_rowno_in_partition(int64 rowno) { m_rowno_in_partition = rowno; }

  /**
    See #m_first_rowno_in_rows_frame
  */
  void set_first_rowno_in_rows_frame(int64 rowno) {
    m_first_rowno_in_rows_frame = rowno;
  }

  int64 first_rowno_in_rows_frame() const {
    return m_first_rowno_in_rows_frame;
  }

  /**
    See #m_first_rowno_in_range_frame
  */
  void set_first_rowno_in_range_frame(int64 rowno) {
    m_first_rowno_in_range_frame = rowno;
  }

  /**
    See #m_first_rowno_in_range_frame
  */
  int64 first_rowno_in_range_frame() const {
    return m_first_rowno_in_range_frame;
  }

  /**
    See #m_frame_buffer_total_rows
  */
  void set_frame_buffer_total_rows(int64 rows) {
    m_frame_buffer_total_rows = rows;
  }

  /**
    See #m_frame_buffer_total_rows
  */
  int64 frame_buffer_total_rows() const { return m_frame_buffer_total_rows; }

  /**
    See #m_frame_buffer_partition_offset
  */
  void set_frame_buffer_partition_offset(int64 offset) {
    m_frame_buffer_partition_offset = offset;
  }

  /**
    See #m_frame_buffer_partition_offset
  */
  int64 frame_buffer_partition_offset() const {
    return m_frame_buffer_partition_offset;
  }

  /**
    See #m_row_has_fields_in_out_table
  */
  int64 row_has_fields_in_out_table() const {
    return m_row_has_fields_in_out_table;
  }

  /**
    See #m_row_has_fields_in_out_table
  */
  void set_row_has_fields_in_out_table(int64 rowno) {
    m_row_has_fields_in_out_table = rowno;
  }

  /**
    Free up any resource used to process the window functions of this window,
    e.g. temporary files and in-memory data structures. Called when done
    with all window processing steps from Query_block::cleanup.
  */
  void cleanup();

  /**
    Free structures that were set up during preparation of window functions
  */
  void destroy();

  /**
   Reset window state for a new partition.

   Reset the temporary storage used for window frames, typically when we find
   a new partition. The rows in the buffer are then no longer needed.
   */
  void reset_partition_state() { reset_execution_state(RL_PARTITION); }

  /**
    Reset execution state for next call to JOIN::exec, cf. JOIN::reset,
    or using [Buffering]WindowingIterator::Init.
  */
  void reset_round() { reset_execution_state(RL_ROUND); }

  /**
    Reset execution state for LEAD/LAG for the current row in partition.
  */
  void reset_lead_lag();

 private:
  enum Reset_level { RL_ROUND, RL_PARTITION };

  /// Common function for all types of resetting
  void reset_execution_state(Reset_level level);

 public:
  /**
    Reset the execution state for all window functions defined on this window.
  */
  void reset_all_wf_state();

  const char *printable_name() const;

  void print(const THD *thd, String *str, enum_query_type qt,
             bool expand_definition) const;

  bool has_windowing_steps() const;

  /**
    Compute sorting costs for windowing.

    @param cost Cost of sorting result set once
    @param windows The set of windows

    @returns the aggregated sorting costs of the windowing
  */
  static double compute_cost(double cost, const List<Window> &windows);

 private:
  /**
    Common implementation of before_frame() and after_frame().
    @param  before  True if 'before' is wanted; false if 'after' is.
  */
  bool before_or_after_frame(bool before);
  void print_frame(const THD *thd, String *str, enum_query_type qt) const;
  void print_border(const THD *thd, String *str, PT_border *b,
                    enum_query_type qt) const;

  /**
    Reorder windows and eliminate redundant ordering. If a window has the
    same ordering requirements as another, we will move them next to each other
    in the evaluation sequence, so we can sort only once, i.e. before the
    first window step. This allows us to fulfill the guarantee given by the
    SQL standard when it comes to repeatability of non-deterministic (partially
    ordered) result sets for windowing inside a query, cf. #equal_sort.
    If more than two have the same ordering, the same applies: we only sort
    before the first (sort equivalent) window. The hypergraph optimizer uses
    the interesting order framework instead, eliding all sorts eliminated by
    this function and possibly more.

    Note that we do not merge windows that have the same ordering requirements,
    so we may still get the extra materialization and multiple rounds of
    buffering. Yet, we should get _correctness_, as long as materialization
    preserves row ordering (which it does for all of our supported temporary
    table types).

    If the result set is implicitly grouped, we also skip any sorting for
    windows.

    @param windows     list of windows
  */
  static void reorder_and_eliminate_sorts(List<Window> *windows);

  /**
    Return true if the physical[1] sort orderings for the two windows are the
    same, cf. guarantee of
    SQL 2014 4.15.15 Windowed tables bullet two: The windowing functions are
    computed using the same row ordering if they specify the same ordering.

    Collation and null handling are not supported, so moot.

    The two other bullet points are also covered by this test.

    [1] After concatenating effective PARTITION BY and ORDER BY (including
    inheritance) expressions.
  */
  static bool equal_sort(Window *w1, Window *w2);

  /**
    Check that a frame border is constant during execution and that it does
    not contain subqueries (relevant for INTERVAL only): implementation
    limitation.

    @param thd    Session thread
    @param border The border to check

    @return false if OK, true if error
  */
  bool check_constant_bound(THD *thd, PT_border *border);

  /**
    Check that frame borders are sane; resolution phase.

    @param thd      Session thread

    @returns true if error
  */
  bool check_border_sanity1(THD *thd);
  /**
    Like check_border_sanity1() but contains checks which must wait until
    the start of the execution phase.

    @param thd      Session thread

    @returns true if error
  */
  bool check_border_sanity2(THD *thd);
};

/**
  Collects evaluation requirements from a window function,
  used by Item_sum::check_wf_semantics and its overrides.
*/
struct Window_evaluation_requirements {
  /**
    Set to true if window function requires row buffering
  */
  bool needs_buffer;
  /**
    Set to true if we need peerset for evaluation (e.g. CUME_DIST)
  */
  bool needs_peerset;
  /**
    Set to true if we need last peer for evaluation within a frame
    (e.g. JSON_OBJECTAGG)
  */
  bool needs_last_peer_in_frame;
  /**
    Set to true if we need FIRST_VALUE or optimized MIN/MAX
  */
  bool opt_first_row;
  /**
    Set to true if we need LAST_VALUE or optimized MIN/MAX
  */
  bool opt_last_row;
  Window::st_offset opt_nth_row;    ///< Used if we have NTH_VALUE
  Window::st_ll_offset opt_ll_row;  ///< Used if we have LEAD or LAG
  /**
    Set to true if we can compute a sliding window by a combination of
    undoing the contribution of rows going out of the frame and adding the
    contribution of rows coming into the frame.  For example, SUM and AVG
    allows this, but MAX/MIN do not. Only applicable if the frame has ROW
    bounds unit.
  */
  bool row_optimizable;
  /**
    Similar to row_optimizable but for RANGE frame bounds unit
  */
  bool range_optimizable;

  Window_evaluation_requirements()
      : needs_buffer(false),
        needs_peerset(false),
        needs_last_peer_in_frame(false),
        opt_first_row(false),
        opt_last_row(false),
        row_optimizable(true),
        range_optimizable(true) {}
};

#endif /* WINDOWS_INCLUDED */