File: types.go

Package: golang-github-aws-aws-sdk-go-v2 1.24.1-2~bpo12+1 (area: main, suite: bookworm-backports)
// Code generated by smithy-go-codegen DO NOT EDIT.

package types

import (
	smithydocument "github.com/aws/smithy-go/document"
)

// An object that represents a Batch array job.
type ArrayProperties struct {

	// The size of the array job.
	Size *int32

	noSmithyDocumentSerde
}

// An object that represents the array properties of a job.
type ArrayPropertiesDetail struct {

	// The job index within the array that's associated with this job. This parameter
	// is returned for array job children.
	Index *int32

	// The size of the array job. This parameter is returned for parent array jobs.
	Size *int32

	// A summary of the number of array job children in each available job status.
	// This parameter is returned for parent array jobs.
	StatusSummary map[string]int32

	noSmithyDocumentSerde
}
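
// Illustrative sketch, not part of the generated file: StatusSummary above maps
// each job status (for example, "SUCCEEDED" or "FAILED") to a count of array
// child jobs in that status, so the children accounted for so far are the sum
// of its values. The helper name totalArrayChildren is hypothetical.
func totalArrayChildren(d ArrayPropertiesDetail) int32 {
	var total int32
	for _, count := range d.StatusSummary {
		total += count
	}
	return total
}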

// An object that represents the array properties of a job.
type ArrayPropertiesSummary struct {

	// The job index within the array that's associated with this job. This parameter
	// is returned for children of array jobs.
	Index *int32

	// The size of the array job. This parameter is returned for parent array jobs.
	Size *int32

	noSmithyDocumentSerde
}

// An object that represents the details of a container that's part of a job
// attempt.
type AttemptContainerDetail struct {

	// The Amazon Resource Name (ARN) of the Amazon ECS container instance that hosts
	// the job attempt.
	ContainerInstanceArn *string

	// The exit code for the job attempt. A non-zero exit code indicates failure.
	ExitCode *int32

	// The name of the CloudWatch Logs log stream that's associated with the
	// container. The log group for Batch jobs is /aws/batch/job . Each container
	// attempt receives a log stream name when it reaches the RUNNING status.
	LogStreamName *string

	// The network interfaces that are associated with the job attempt.
	NetworkInterfaces []NetworkInterface

	// A short (255 max characters) human-readable string to provide additional
	// details for a running or stopped container.
	Reason *string

	// The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with
	// the job attempt. Each container attempt receives a task ARN when it reaches
	// the STARTING status.
	TaskArn *string

	noSmithyDocumentSerde
}

// An object that represents a job attempt.
type AttemptDetail struct {

	// The details for the container in this job attempt.
	Container *AttemptContainerDetail

	// The Unix timestamp (in milliseconds) for when the attempt was started (when the
	// attempt transitioned from the STARTING state to the RUNNING state).
	StartedAt *int64

	// A short, human-readable string to provide additional details for the current
	// status of the job attempt.
	StatusReason *string

	// The Unix timestamp (in milliseconds) for when the attempt was stopped (when the
	// attempt transitioned from the RUNNING state to a terminal state, such as
	// SUCCEEDED or FAILED ).
	StoppedAt *int64

	noSmithyDocumentSerde
}
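
// Illustrative sketch, not part of the generated file: StartedAt and StoppedAt
// above are Unix timestamps in milliseconds, so the wall-clock duration of a
// finished attempt can be derived with the standard library. Assumes "time" is
// imported; the helper name attemptDuration is hypothetical.
func attemptDuration(a AttemptDetail) (time.Duration, bool) {
	if a.StartedAt == nil || a.StoppedAt == nil {
		// The attempt hasn't started yet, or hasn't reached a terminal state.
		return 0, false
	}
	started := time.UnixMilli(*a.StartedAt)
	stopped := time.UnixMilli(*a.StoppedAt)
	return stopped.Sub(started), true
}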

// An object that represents a Batch compute environment.
type ComputeEnvironmentDetail struct {

	// The Amazon Resource Name (ARN) of the compute environment.
	//
	// This member is required.
	ComputeEnvironmentArn *string

	// The name of the compute environment. It can be up to 128 characters long. It
	// can contain uppercase and lowercase letters, numbers, hyphens (-), and
	// underscores (_).
	//
	// This member is required.
	ComputeEnvironmentName *string

	// The compute resources defined for the compute environment. For more
	// information, see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
	// in the Batch User Guide.
	ComputeResources *ComputeResource

	// The orchestration type of the compute environment. The valid values are ECS
	// (default) or EKS .
	ContainerOrchestrationType OrchestrationType

	// The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster that the
	// compute environment uses.
	EcsClusterArn *string

	// The configuration for the Amazon EKS cluster that supports the Batch compute
	// environment. Only specify this parameter if the containerOrchestrationType is
	// EKS .
	EksConfiguration *EksConfiguration

	// The service role that's associated with the compute environment that allows
	// Batch to make calls to Amazon Web Services API operations on your behalf. For
	// more information, see Batch service IAM role (https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html)
	// in the Batch User Guide.
	ServiceRole *string

	// The state of the compute environment. The valid values are ENABLED or DISABLED .
	// If the state is ENABLED , then the Batch scheduler can attempt to place jobs
	// from an associated job queue on the compute resources within the environment. If
	// the compute environment is managed, then it can scale its instances out or in
	// automatically based on the job queue demand. If the state is DISABLED , then the
	// Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a
	// STARTING or RUNNING state continue to progress normally. Managed compute
	// environments in the DISABLED state don't scale out. Compute environments in a
	// DISABLED state may continue to incur billing charges. To prevent additional
	// charges, turn off and then delete the compute environment. For more information,
	// see State (https://docs.aws.amazon.com/batch/latest/userguide/compute_environment_parameters.html#compute_environment_state)
	// in the Batch User Guide. When an instance is idle, the instance scales down to
	// the minvCpus value. However, the instance size doesn't change. For example,
	// consider a c5.8xlarge instance with a minvCpus value of 4 and a desiredvCpus
	// value of 36 . This instance doesn't scale down to a c5.large instance.
	State CEState

	// The current status of the compute environment (for example, CREATING or VALID ).
	Status CEStatus

	// A short, human-readable string to provide additional details for the current
	// status of the compute environment.
	StatusReason *string

	// The tags applied to the compute environment.
	Tags map[string]string

	// The type of the compute environment: MANAGED or UNMANAGED . For more
	// information, see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
	// in the Batch User Guide.
	Type CEType

	// The maximum number of vCPUs expected to be used for an unmanaged compute
	// environment.
	UnmanagedvCpus *int32

	// Specifies the infrastructure update policy for the compute environment. For
	// more information about infrastructure updates, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide.
	UpdatePolicy *UpdatePolicy

	// Unique identifier for the compute environment.
	Uuid *string

	noSmithyDocumentSerde
}
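
// Illustrative sketch, not part of the generated file: per the State and Status
// documentation above, the Batch scheduler only places new jobs in a compute
// environment that is ENABLED and VALID. CEState and CEStatus are string-backed
// enums defined in this package's enums.go; raw string comparisons are used
// below to keep the sketch self-contained.
func canPlaceJobs(ce ComputeEnvironmentDetail) bool {
	return ce.State == CEState("ENABLED") && ce.Status == CEStatus("VALID")
}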

// The order that compute environments are tried in for job placement within a
// queue. Compute environments are tried in ascending order. For example, if two
// compute environments are associated with a job queue, the compute environment
// with a lower order integer value is tried for job placement first. Compute
// environments must be in the VALID state before you can associate them with a
// job queue. All of the compute environments must be either EC2 ( EC2 or SPOT ) or
// Fargate ( FARGATE or FARGATE_SPOT ); EC2 and Fargate compute environments can't
// be mixed. All compute environments that are associated with a job queue must
// share the same architecture. Batch doesn't support mixing compute environment
// architecture types in a single job queue.
type ComputeEnvironmentOrder struct {

	// The Amazon Resource Name (ARN) of the compute environment.
	//
	// This member is required.
	ComputeEnvironment *string

	// The order of the compute environment. Compute environments are tried in
	// ascending order. For example, if two compute environments are associated with a
	// job queue, the compute environment with a lower order integer value is tried
	// for job placement first.
	//
	// This member is required.
	Order *int32

	noSmithyDocumentSerde
}
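
// Illustrative sketch, not part of the generated file: compute environments are
// tried in ascending Order, so the environment assigned Order 1 below is tried
// for job placement before the one assigned Order 2. The ARNs are placeholders.
func exampleComputeEnvironmentOrder() []ComputeEnvironmentOrder {
	primary := "arn:aws:batch:us-east-1:111122223333:compute-environment/primary"
	overflow := "arn:aws:batch:us-east-1:111122223333:compute-environment/overflow"
	first, second := int32(1), int32(2)
	return []ComputeEnvironmentOrder{
		{ComputeEnvironment: &primary, Order: &first},   // tried first
		{ComputeEnvironment: &overflow, Order: &second}, // tried only if the first can't place the job
	}
}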

// An object that represents a Batch compute resource. For more information, see
// Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
// in the Batch User Guide.
type ComputeResource struct {

	// The maximum number of vCPUs that a compute environment can support. With
	// BEST_FIT_PROGRESSIVE , SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED
	// allocation strategies using On-Demand or Spot Instances, and the BEST_FIT
	// strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your
	// capacity requirements. In this event, Batch never exceeds maxvCpus by more than
	// a single instance. That is, no more than a single instance from among those
	// specified in your compute environment is allocated.
	//
	// This member is required.
	MaxvCpus *int32

	// The VPC subnets where the compute resources are launched. These subnets must be
	// within the same VPC. Fargate compute resources can contain up to 16 subnets. For
	// more information, see VPCs and subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
	// in the Amazon VPC User Guide. Batch on Amazon EC2 and Batch on Amazon EKS
	// support Local Zones. For more information, see Local Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones)
	// in the Amazon EC2 User Guide for Linux Instances, Amazon EKS and Amazon Web
	// Services Local Zones (https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html)
	// in the Amazon EKS User Guide and Amazon ECS clusters in Local Zones, Wavelength
	// Zones, and Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones)
	// in the Amazon ECS Developer Guide. Batch on Fargate doesn't currently support
	// Local Zones.
	//
	// This member is required.
	Subnets []string

	// The type of compute environment: EC2 , SPOT , FARGATE , or FARGATE_SPOT . For
	// more information, see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
	// in the Batch User Guide. If you choose SPOT , you must also specify an Amazon
	// EC2 Spot Fleet role with the spotIamFleetRole parameter. For more information,
	// see Amazon EC2 spot fleet role (https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html)
	// in the Batch User Guide.
	//
	// This member is required.
	Type CRType

	// The allocation strategy to use for the compute resource if not enough instances
	// of the best fitting instance type can be allocated. This might be because of
	// availability of the instance type in the Region or Amazon EC2 service limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
	// . For more information, see Allocation strategies (https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. BEST_FIT (default) Batch selects
	// an instance type that best fits the needs of the jobs with a preference for the
	// lowest-cost instance type. If additional instances of the selected instance type
	// aren't available, Batch waits for the additional instances to be available. If
	// there aren't enough instances available or the user is reaching Amazon EC2
	// service limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
	// , additional jobs aren't run until the currently running jobs are completed.
	// This allocation strategy keeps costs lower but can limit scaling. If you're
	// using Spot Fleets with BEST_FIT , the Spot Fleet IAM Role must be specified.
	// Compute resources that use a BEST_FIT allocation strategy don't support
	// infrastructure updates and can't update some parameters. For more information,
	// see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. BEST_FIT_PROGRESSIVE Batch selects additional instance
	// types that are large enough to meet the requirements of the jobs in the queue.
	// Its preference is for instance types with lower cost vCPUs. If additional
	// instances of the previously selected instance types aren't available, Batch
	// selects new instance types. SPOT_CAPACITY_OPTIMIZED Batch selects one or more
	// instance types that are large enough to meet the requirements of the jobs in the
	// queue. Its preference is for instance types that are less likely to be
	// interrupted. This allocation strategy is only available for Spot Instance
	// compute resources. SPOT_PRICE_CAPACITY_OPTIMIZED The price and capacity
	// optimized allocation strategy looks at both price and capacity to select the
	// Spot Instance pools that are the least likely to be interrupted and have the
	// lowest possible price. This allocation strategy is only available for Spot
	// Instance compute resources. With BEST_FIT_PROGRESSIVE , SPOT_CAPACITY_OPTIMIZED
	// and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances,
	// and the BEST_FIT strategy using Spot Instances, Batch might need to exceed
	// maxvCpus to meet your capacity requirements. In this event, Batch never exceeds
	// maxvCpus by more than a single instance.
	AllocationStrategy CRAllocationStrategy

	// The maximum percentage that a Spot Instance price can be when compared with the
	// On-Demand price for that instance type before instances are launched. For
	// example, if your maximum percentage is 20%, then the Spot price must be less
	// than 20% of the current On-Demand price for that Amazon EC2 instance. You always
	// pay the lowest (market) price and never more than your maximum percentage. If
	// you leave this field empty, the default value is 100% of the On-Demand price.
	// For most use cases, we recommend leaving this field empty. This parameter isn't
	// applicable to jobs that are running on Fargate resources. Don't specify it.
	BidPercentage *int32

	// The desired number of vCPUs in the compute environment. Batch modifies this
	// value between the minimum and maximum values based on job queue demand. This
	// parameter isn't applicable to jobs that are running on Fargate resources. Don't
	// specify it.
	DesiredvCpus *int32

	// Provides information that's used to select Amazon Machine Images (AMIs) for EC2
	// instances in the compute environment. If Ec2Configuration isn't specified, the
	// default is ECS_AL2 . One or two values can be provided. This parameter isn't
	// applicable to jobs that are running on Fargate resources. Don't specify it.
	Ec2Configuration []Ec2Configuration

	// The Amazon EC2 key pair that's used for instances launched in the compute
	// environment. You can use this key pair to log in to your instances with SSH.
	// This parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't specify it.
	Ec2KeyPair *string

	// The Amazon Machine Image (AMI) ID used for instances launched in the compute
	// environment. This parameter is overridden by the imageIdOverride member of the
	// Ec2Configuration structure. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. The AMI that you choose for a
	// compute environment must match the architecture of the instance types that you
	// intend to use for that compute environment. For example, if your compute
	// environment uses A1 instance types, the compute resource AMI that you choose
	// must support ARM instances. Amazon ECS vends both x86 and ARM versions of the
	// Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon
	// ECS-optimized Amazon Linux 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html)
	// in the Amazon Elastic Container Service Developer Guide.
	//
	// Deprecated: This field is deprecated, use ec2Configuration[].imageIdOverride
	// instead.
	ImageId *string

	// The Amazon ECS instance profile applied to Amazon EC2 instances in a compute
	// environment. You can specify the short name or full Amazon Resource Name (ARN)
	// of an instance profile. For example, ecsInstanceRole  or
	// arn:aws:iam:::instance-profile/ecsInstanceRole . For more information, see
	// Amazon ECS instance role (https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	InstanceRole *string

	// The instance types that can be launched. You can specify instance families to
	// launch any instance type within those families (for example, c5 or p3 ), or you
	// can specify specific sizes within a family (such as c5.8xlarge ). You can also
	// choose optimal to select instance types (from the C4, M4, and R4 instance
	// families) that match the demand of your job queues. This parameter isn't
	// applicable to jobs that are running on Fargate resources. Don't specify it. When
	// you create a compute environment, the instance types that you select for the
	// compute environment must share the same architecture. For example, you can't mix
	// x86 and ARM instances in the same compute environment. Currently, optimal uses
	// instance types from the C4, M4, and R4 instance families. In Regions that don't
	// have instance types from those instance families, instance types from the C5,
	// M5, and R5 instance families are used.
	InstanceTypes []string

	// The launch template to use for your compute resources. Any other compute
	// resource parameters that you specify in a CreateComputeEnvironment API
	// operation override the same parameters in the launch template. You must specify
	// either the launch template ID or launch template name in the request, but not
	// both. For more information, see Launch template support (https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	LaunchTemplate *LaunchTemplateSpecification

	// The minimum number of vCPUs that a compute environment should maintain (even if
	// the compute environment is DISABLED ). This parameter isn't applicable to jobs
	// that are running on Fargate resources. Don't specify it.
	MinvCpus *int32

	// The Amazon EC2 placement group to associate with your compute resources. If you
	// intend to submit multi-node parallel jobs to your compute environment, you
	// should consider creating a cluster placement group and associate it with your
	// compute resources. This keeps your multi-node parallel job on a logical grouping
	// of instances within a single Availability Zone with high network flow potential.
	// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
	// in the Amazon EC2 User Guide for Linux Instances. This parameter isn't
	// applicable to jobs that are running on Fargate resources. Don't specify it.
	PlacementGroup *string

	// The Amazon EC2 security groups that are associated with instances launched in
	// the compute environment. One or more security groups must be specified, either
	// in securityGroupIds or using a launch template referenced in launchTemplate .
	// This parameter is required for jobs that are running on Fargate resources and
	// must contain at least one security group. Fargate doesn't support launch
	// templates. If security groups are specified using both securityGroupIds and
	// launchTemplate , the values in securityGroupIds are used.
	SecurityGroupIds []string

	// The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to
	// a SPOT compute environment. This role is required if the allocation strategy is
	// set to BEST_FIT or if the allocation strategy isn't specified. For more
	// information, see Amazon EC2 spot fleet role (https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. To tag your Spot Instances on
	// creation, the Spot Fleet IAM role specified here must use the newer
	// AmazonEC2SpotFleetTaggingRole managed policy. The previously recommended
	// AmazonEC2SpotFleetRole managed policy doesn't have the required permissions to
	// tag Spot Instances. For more information, see Spot instances not tagged on
	// creation (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#spot-instance-no-tag)
	// in the Batch User Guide.
	SpotIamFleetRole *string

	// Key-value pair tags to be applied to EC2 resources that are launched in the
	// compute environment. For Batch, these take the form of "String1": "String2" ,
	// where String1 is the tag key and String2 is the tag value. For example, {
	// "Name": "Batch Instance - C4OnDemand" } . This is helpful for recognizing your
	// Batch instances in the Amazon EC2 console. Updating these tags requires an
	// infrastructure update to the compute environment. For more information, see
	// Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. These tags aren't seen when using the Batch
	// ListTagsForResource API operation. This parameter isn't applicable to jobs that
	// are running on Fargate resources. Don't specify it.
	Tags map[string]string

	noSmithyDocumentSerde
}
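
// Illustrative sketch, not part of the generated file: a minimal managed EC2
// compute resource that sets only the members marked required above (Type,
// MaxvCpus, Subnets) plus an allocation strategy and instance-type list. CRType
// and CRAllocationStrategy are string-backed enums from this package's
// enums.go; the subnet ID and sizing values are placeholders.
func exampleComputeResource() ComputeResource {
	maxvCpus := int32(256)
	return ComputeResource{
		Type:               CRType("EC2"),
		AllocationStrategy: CRAllocationStrategy("BEST_FIT_PROGRESSIVE"),
		MaxvCpus:           &maxvCpus,
		Subnets:            []string{"subnet-0123456789abcdef0"},
		InstanceTypes:      []string{"optimal"},
	}
}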

// An object that represents the attributes of a compute environment that can be
// updated. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
// in the Batch User Guide.
type ComputeResourceUpdate struct {

	// The allocation strategy to use for the compute resource if not enough
	// instances of the best fitting instance type can be allocated. This might be
	// because of availability of the instance type in the Region or Amazon EC2
	// service limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
	// . For more information, see Allocation strategies (https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html)
	// in the Batch User Guide. When updating a compute environment, changing the
	// allocation strategy requires an infrastructure update of the compute
	// environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. BEST_FIT isn't supported when updating a compute
	// environment. This parameter isn't applicable to jobs that are running on Fargate
	// resources. Don't specify it. BEST_FIT_PROGRESSIVE Batch selects additional
	// instance types that are large enough to meet the requirements of the jobs in the
	// queue. Its preference is for instance types with lower cost vCPUs. If additional
	// instances of the previously selected instance types aren't available, Batch
	// selects new instance types. SPOT_CAPACITY_OPTIMIZED Batch selects one or more
	// instance types that are large enough to meet the requirements of the jobs in the
	// queue. Its preference is for instance types that are less likely to be
	// interrupted. This allocation strategy is only available for Spot Instance
	// compute resources. SPOT_PRICE_CAPACITY_OPTIMIZED The price and capacity
	// optimized allocation strategy looks at both price and capacity to select the
	// Spot Instance pools that are the least likely to be interrupted and have the
	// lowest possible price. This allocation strategy is only available for Spot
	// Instance compute resources. With the BEST_FIT_PROGRESSIVE ,
	// SPOT_CAPACITY_OPTIMIZED , and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using
	// On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances,
	// Batch might need to exceed maxvCpus to meet your capacity requirements. In this
	// event, Batch never exceeds maxvCpus by more than a single instance.
	AllocationStrategy CRUpdateAllocationStrategy

	// The maximum percentage that a Spot Instance price can be when compared with the
	// On-Demand price for that instance type before instances are launched. For
	// example, if your maximum percentage is 20%, the Spot price must be less than 20%
	// of the current On-Demand price for that Amazon EC2 instance. You always pay the
	// lowest (market) price and never more than your maximum percentage. For most use
	// cases, we recommend leaving this field empty. When updating a compute
	// environment, changing the bid percentage requires an infrastructure update of
	// the compute environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	BidPercentage *int32

	// The desired number of vCPUs in the compute environment. Batch modifies this
	// value between the minimum and maximum values based on job queue demand. This
	// parameter isn't applicable to jobs that are running on Fargate resources. Don't
	// specify it. Batch doesn't support changing the desired number of vCPUs of an
	// existing compute environment. Don't specify this parameter for compute
	// environments using Amazon EKS clusters. When you update the desiredvCpus
	// setting, the value must be between the minvCpus and maxvCpus values.
	// Additionally, the updated desiredvCpus value must be greater than or equal to
	// the current desiredvCpus value. For more information, see Troubleshooting Batch (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#error-desired-vcpus-update)
	// in the Batch User Guide.
	DesiredvCpus *int32

	// Provides information used to select Amazon Machine Images (AMIs) for EC2
	// instances in the compute environment. If Ec2Configuration isn't specified, the
	// default is ECS_AL2 . When updating a compute environment, changing this setting
	// requires an infrastructure update of the compute environment. For more
	// information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. To remove the EC2 configuration and any custom AMI ID
	// specified in imageIdOverride , set this value to an empty string. One or two
	// values can be provided. This parameter isn't applicable to jobs that are running
	// on Fargate resources. Don't specify it.
	Ec2Configuration []Ec2Configuration

	// The Amazon EC2 key pair that's used for instances launched in the compute
	// environment. You can use this key pair to log in to your instances with SSH. To
	// remove the Amazon EC2 key pair, set this value to an empty string. When updating
	// a compute environment, changing the EC2 key pair requires an infrastructure
	// update of the compute environment. For more information, see Updating compute
	// environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	Ec2KeyPair *string

	// The Amazon Machine Image (AMI) ID used for instances launched in the compute
	// environment. This parameter is overridden by the imageIdOverride member of the
	// Ec2Configuration structure. To remove the custom AMI ID and use the default AMI
	// ID, set this value to an empty string. When updating a compute environment,
	// changing the AMI ID requires an infrastructure update of the compute
	// environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. The AMI that you choose for a
	// compute environment must match the architecture of the instance types that you
	// intend to use for that compute environment. For example, if your compute
	// environment uses A1 instance types, the compute resource AMI that you choose
	// must support ARM instances. Amazon ECS vends both x86 and ARM versions of the
	// Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon
	// ECS-optimized Amazon Linux 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html)
	// in the Amazon Elastic Container Service Developer Guide.
	ImageId *string

	// The Amazon ECS instance profile applied to Amazon EC2 instances in a compute
	// environment. You can specify the short name or full Amazon Resource Name (ARN)
	// of an instance profile. For example, ecsInstanceRole  or
	// arn:aws:iam:::instance-profile/ecsInstanceRole . For more information, see
	// Amazon ECS instance role (https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html)
	// in the Batch User Guide. When updating a compute environment, changing this
	// setting requires an infrastructure update of the compute environment. For more
	// information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	InstanceRole *string

	// The instance types that can be launched. You can specify instance families to
	// launch any instance type within those families (for example, c5 or p3 ), or you
	// can specify specific sizes within a family (such as c5.8xlarge ). You can also
	// choose optimal to select instance types (from the C4, M4, and R4 instance
	// families) that match the demand of your job queues. When updating a compute
	// environment, changing this setting requires an infrastructure update of the
	// compute environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it. When you create a compute
	// environment, the instance types that you select for the compute environment must
	// share the same architecture. For example, you can't mix x86 and ARM instances in
	// the same compute environment. Currently, optimal uses instance types from the
	// C4, M4, and R4 instance families. In Regions that don't have instance types from
	// those instance families, instance types from the C5, M5, and R5 instance
	// families are used.
	InstanceTypes []string

	// The updated launch template to use for your compute resources. You must specify
	// either the launch template ID or launch template name in the request, but not
	// both. For more information, see Launch template support (https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html)
	// in the Batch User Guide. To remove the custom launch template and use the
	// default launch template, set the launchTemplateId or launchTemplateName member of
	// the launch template specification to an empty string. Removing the launch
	// template from a compute environment will not remove the AMI specified in the
	// launch template. In order to update the AMI specified in a launch template, the
	// updateToLatestImageVersion parameter must be set to true . When updating a
	// compute environment, changing the launch template requires an infrastructure
	// update of the compute environment. For more information, see Updating compute
	// environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	LaunchTemplate *LaunchTemplateSpecification

	// The maximum number of Amazon EC2 vCPUs that an environment can reach. With
	// BEST_FIT_PROGRESSIVE , SPOT_CAPACITY_OPTIMIZED , and
	// SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot
	// Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to
	// exceed maxvCpus to meet your capacity requirements. In this event, Batch never
	// exceeds maxvCpus by more than a single instance. That is, no more than a single
	// instance from among those specified in your compute environment.
	MaxvCpus *int32

	// The minimum number of vCPUs that an environment should maintain (even if the
	// compute environment is DISABLED ). This parameter isn't applicable to jobs that
	// are running on Fargate resources. Don't specify it.
	MinvCpus *int32

	// The Amazon EC2 placement group to associate with your compute resources. If you
	// intend to submit multi-node parallel jobs to your compute environment, you
	// should consider creating a cluster placement group and associate it with your
	// compute resources. This keeps your multi-node parallel job on a logical grouping
	// of instances within a single Availability Zone with high network flow potential.
	// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
	// in the Amazon EC2 User Guide for Linux Instances. When updating a compute
	// environment, changing the placement group requires an infrastructure update of
	// the compute environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	PlacementGroup *string

	// The Amazon EC2 security groups that are associated with instances launched in
	// the compute environment. This parameter is required for Fargate compute
	// resources, where it can contain up to 5 security groups. For Fargate compute
	// resources, providing an empty list is handled as if this parameter wasn't
	// specified and no change is made. For EC2 compute resources, providing an empty
	// list removes the security groups from the compute resource. When updating a
	// compute environment, changing the EC2 security groups requires an infrastructure
	// update of the compute environment. For more information, see Updating compute
	// environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide.
	SecurityGroupIds []string

	// The VPC subnets where the compute resources are launched. Fargate compute
	// resources can contain up to 16 subnets. For Fargate compute resources, providing
	// an empty list will be handled as if this parameter wasn't specified and no
	// change is made. For EC2 compute resources, providing an empty list removes the
	// VPC subnets from the compute resource. For more information, see VPCs and
	// subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in
	// the Amazon VPC User Guide. When updating a compute environment, changing the VPC
	// subnets requires an infrastructure update of the compute environment. For more
	// information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. Batch on Amazon EC2 and Batch on Amazon EKS support
	// Local Zones. For more information, see Local Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones)
	// in the Amazon EC2 User Guide for Linux Instances, Amazon EKS and Amazon Web
	// Services Local Zones (https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html)
	// in the Amazon EKS User Guide and Amazon ECS clusters in Local Zones, Wavelength
	// Zones, and Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones)
	// in the Amazon ECS Developer Guide. Batch on Fargate doesn't currently support
	// Local Zones.
	Subnets []string

	// Key-value pair tags to be applied to EC2 resources that are launched in the
	// compute environment. For Batch, these take the form of "String1": "String2" ,
	// where String1 is the tag key and String2 is the tag value. For example, {
	// "Name": "Batch Instance - C4OnDemand" } . This is helpful for recognizing your
	// Batch instances in the Amazon EC2 console. These tags aren't seen when using the
	// Batch ListTagsForResource API operation. When updating a compute environment,
	// changing this setting requires an infrastructure update of the compute
	// environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. This parameter isn't applicable to jobs that are
	// running on Fargate resources. Don't specify it.
	Tags map[string]string

	// The type of compute environment: EC2 , SPOT , FARGATE , or FARGATE_SPOT . For
	// more information, see Compute environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
	// in the Batch User Guide. If you choose SPOT , you must also specify an Amazon
	// EC2 Spot Fleet role with the spotIamFleetRole parameter. For more information,
	// see Amazon EC2 spot fleet role (https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html)
	// in the Batch User Guide. When updating a compute environment, changing the type
	// of a compute environment requires an infrastructure update of the compute
	// environment. For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide.
	Type CRType

	// Specifies whether the AMI ID is updated to the latest one that's supported by
	// Batch when the compute environment has an infrastructure update. The default
	// value is false . An AMI ID can either be specified in the imageId or
	// imageIdOverride parameters or be determined by the launch template that's
	// specified in the launchTemplate parameter. If an AMI ID is specified in any of
	// these ways, this parameter is ignored. For more information about how to update AMI
	// IDs during an infrastructure update, see Updating the AMI ID (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html#updating-compute-environments-ami)
	// in the Batch User Guide. When updating a compute environment, changing this
	// setting requires an infrastructure update of the compute environment. For more
	// information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide.
	UpdateToLatestImageVersion *bool

	noSmithyDocumentSerde
}
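
// Illustrative sketch, not part of the generated file: an update that only
// raises MaxvCpus. Members left nil are assumed to be left unchanged by the
// service (an assumption about update semantics, not stated in the comments
// above). Note that a new desiredvCpus, if supplied, must fall between minvCpus
// and maxvCpus and can't be lower than the current desired value. The limit
// chosen here is a placeholder.
func exampleComputeResourceUpdate() ComputeResourceUpdate {
	newMax := int32(512)
	return ComputeResourceUpdate{
		MaxvCpus: &newMax, // only the maximum vCPU limit is changed
	}
}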

// An object that represents the details of a container that's part of a job.
type ContainerDetail struct {

	// The command that's passed to the container.
	Command []string

	// The Amazon Resource Name (ARN) of the container instance that the container is
	// running on.
	ContainerInstanceArn *string

	// The environment variables to pass to a container. Environment variables cannot
	// start with " AWS_BATCH ". This naming convention is reserved for variables that
	// Batch sets.
	Environment []KeyValuePair

	// The amount of ephemeral storage allocated for the task. This parameter is used
	// to expand the total amount of ephemeral storage available, beyond the default
	// amount, for tasks hosted on Fargate.
	EphemeralStorage *EphemeralStorage

	// The Amazon Resource Name (ARN) of the execution role that Batch can assume. For
	// more information, see Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html)
	// in the Batch User Guide.
	ExecutionRoleArn *string

	// The exit code to return upon completion.
	ExitCode *int32

	// The platform configuration for jobs that are running on Fargate resources. Jobs
	// that are running on EC2 resources must not specify this parameter.
	FargatePlatformConfiguration *FargatePlatformConfiguration

	// The image used to start the container.
	Image *string

	// The instance type of the underlying host infrastructure of a multi-node
	// parallel job. This parameter isn't applicable to jobs that are running on
	// Fargate resources.
	InstanceType *string

	// The Amazon Resource Name (ARN) that's associated with the job when run.
	JobRoleArn *string

	// Linux-specific modifications that are applied to the container, such as details
	// for device mappings.
	LinuxParameters *LinuxParameters

	// The log configuration specification for the container. This parameter maps to
	// LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/)
	// . By default, containers use the same logging driver that the Docker daemon
	// uses. However, the container might use a different logging driver than the
	// Docker daemon by specifying a log driver with this parameter in the container
	// definition. To use a different logging driver for a container, the log system
	// must be configured properly on the container instance. Or, alternatively, it
	// must be configured on a different log server for remote logging options. For
	// more information on the options for different supported log drivers, see
	// Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/)
	// in the Docker documentation. Batch currently supports a subset of the logging
	// drivers available to the Docker daemon (shown in the LogConfiguration data
	// type). Additional log drivers might be available in future releases of the
	// Amazon ECS container agent. This parameter requires version 1.18 of the Docker
	// Remote API or greater on your container instance. To check the Docker Remote API
	// version on your container instance, log in to your container instance and run
	// the following command: sudo docker version | grep "Server API version" The
	// Amazon ECS container agent running on a container instance must register the
	// logging drivers available on that instance with the
	// ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on
	// that instance can use these log configuration options. For more information, see
	// Amazon ECS container agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html)
	// in the Amazon Elastic Container Service Developer Guide.
	LogConfiguration *LogConfiguration

	// The name of the Amazon CloudWatch Logs log stream that's associated with the
	// container. The log group for Batch jobs is /aws/batch/job . Each container
	// attempt receives a log stream name when they reach the RUNNING status.
	LogStreamName *string

	// For jobs running on EC2 resources that didn't specify memory requirements using
	// resourceRequirements , the number of MiB of memory reserved for the job. For
	// other jobs, including all jobs that run on Fargate resources, see
	// resourceRequirements .
	Memory *int32

	// The mount points for data volumes in your container.
	MountPoints []MountPoint

	// The network configuration for jobs that are running on Fargate resources. Jobs
	// that are running on EC2 resources must not specify this parameter.
	NetworkConfiguration *NetworkConfiguration

	// The network interfaces that are associated with the job.
	NetworkInterfaces []NetworkInterface

	// When this parameter is true, the container is given elevated permissions on the
	// host container instance (similar to the root user). The default value is false .
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided, or specified as false .
	Privileged *bool

	// When this parameter is true, the container is given read-only access to its
	// root file system. This parameter maps to ReadonlyRootfs in the Create a
	// container (https://docs.docker.com/engine/api/v1.23/#create-a-container) section
	// of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
	// --read-only option to docker run (https://docs.docker.com/engine/reference/commandline/run/)
	// .
	ReadonlyRootFilesystem *bool

	// A short (255 max characters) human-readable string to provide additional
	// details for a running or stopped container.
	Reason *string

	// The type and amount of resources to assign to a container. The supported
	// resources include GPU , MEMORY , and VCPU .
	ResourceRequirements []ResourceRequirement

	// An object that represents the compute environment architecture for Batch jobs
	// on Fargate.
	RuntimePlatform *RuntimePlatform

	// The secrets to pass to the container. For more information, see Specifying
	// sensitive data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
	// in the Batch User Guide.
	Secrets []Secret

	// The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with
	// the container job. Each container attempt receives a task ARN when they reach
	// the STARTING status.
	TaskArn *string

	// A list of ulimit values to set in the container. This parameter maps to Ulimits
	// in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/)
	// . This parameter isn't applicable to jobs that are running on Fargate resources.
	Ulimits []Ulimit

	// The user name to use inside the container. This parameter maps to User in the
	// Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --user option to docker run (https://docs.docker.com/engine/reference/run/)
	// .
	User *string

	// The number of vCPUs reserved for the container. For jobs that run on EC2
	// resources, you can specify the vCPU requirement for the job using
	// resourceRequirements , but you can't specify the vCPU requirements in both the
	// vcpus and resourceRequirements object. This parameter maps to CpuShares in the
	// Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/)
	// . Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one
	// vCPU. This is required but can be specified in several places. It must be
	// specified for each node at least once. This parameter isn't applicable to jobs
	// that run on Fargate resources. For jobs that run on Fargate resources, you must
	// specify the vCPU requirement for the job using resourceRequirements .
	Vcpus *int32

	// A list of volumes that are associated with the job.
	Volumes []Volume

	noSmithyDocumentSerde
}
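
// Illustrative sketch, not part of the generated API: ContainerDetail is an
// output-only type returned by DescribeJobs. This hypothetical helper shows
// how its pointer fields are typically inspected.
func summarizeContainerDetail(detail ContainerDetail) (logStreamName string, failed bool) {
	// LogStreamName is only set once the attempt reaches the RUNNING status.
	if detail.LogStreamName != nil {
		logStreamName = *detail.LogStreamName
	}
	// A nil ExitCode means the container hasn't reported an exit code yet.
	if detail.ExitCode != nil && *detail.ExitCode != 0 {
		failed = true
	}
	return logStreamName, failed
}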

// The overrides that should be sent to a container. For information about using
// Batch overrides when you connect event sources to targets, see
// BatchContainerOverrides (https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_BatchContainerOverrides.html)
// .
type ContainerOverrides struct {

	// The command to send to the container that overrides the default command from
	// the Docker image or the job definition. This parameter can't contain an empty
	// string.
	Command []string

	// The environment variables to send to the container. You can add new environment
	// variables, which are added to the container at launch, or you can override the
	// existing environment variables from the Docker image or the job definition.
	// Environment variables cannot start with " AWS_BATCH ". This naming convention is
	// reserved for variables that Batch sets.
	Environment []KeyValuePair

	// The instance type to use for a multi-node parallel job. This parameter isn't
	// applicable to single-node container jobs or jobs that run on Fargate resources,
	// and shouldn't be provided.
	InstanceType *string

	// This parameter is deprecated, use resourceRequirements to override the memory
	// requirements specified in the job definition. It's not supported for jobs
	// running on Fargate resources. For jobs that run on EC2 resources, it overrides
	// the memory parameter set in the job definition, but doesn't override any memory
	// requirement that's specified in the resourceRequirements structure in the job
	// definition. To override memory requirements that are specified in the
	// resourceRequirements structure in the job definition, resourceRequirements must
	// be specified in the SubmitJob request, with type set to MEMORY and value set to
	// the new value. For more information, see Can't override job definition resource
	// requirements (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements)
	// in the Batch User Guide.
	//
	// Deprecated: This field is deprecated, use resourceRequirements instead.
	Memory *int32

	// The type and amount of resources to assign to a container. This overrides the
	// settings in the job definition. The supported resources include GPU , MEMORY ,
	// and VCPU .
	ResourceRequirements []ResourceRequirement

	// This parameter is deprecated, use resourceRequirements to override the vcpus
	// parameter that's set in the job definition. It's not supported for jobs running
	// on Fargate resources. For jobs that run on EC2 resources, it overrides the vcpus
	// parameter set in the job definition, but doesn't override any vCPU requirement
	// specified in the resourceRequirements structure in the job definition. To
	// override vCPU requirements that are specified in the resourceRequirements
	// structure in the job definition, resourceRequirements must be specified in the
	// SubmitJob request, with type set to VCPU and value set to the new value. For
	// more information, see Can't override job definition resource requirements (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements)
	// in the Batch User Guide.
	//
	// Deprecated: This field is deprecated, use resourceRequirements instead.
	Vcpus *int32

	noSmithyDocumentSerde
}
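
// Illustrative sketch, not part of the generated API: building a
// ContainerOverrides value for a SubmitJob request that replaces the command
// and overrides memory through resourceRequirements (rather than the
// deprecated Memory field). All literal values are hypothetical.
func exampleContainerOverrides() ContainerOverrides {
	memory := "4096" // MiB, expressed as a string per ResourceRequirement.Value
	envName := "STAGE"
	envValue := "test"
	return ContainerOverrides{
		Command: []string{"python", "process.py", "--verbose"},
		Environment: []KeyValuePair{
			{Name: &envName, Value: &envValue},
		},
		ResourceRequirements: []ResourceRequirement{
			{Type: ResourceTypeMemory, Value: &memory},
		},
	}
}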

// Container properties are used for Amazon ECS-based job definitions. These
// properties describe the container that's launched as part of a job.
type ContainerProperties struct {

	// The command that's passed to the container. This parameter maps to Cmd in the
	// Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the COMMAND parameter to docker run (https://docs.docker.com/engine/reference/run/)
	// . For more information, see
	// https://docs.docker.com/engine/reference/builder/#cmd (https://docs.docker.com/engine/reference/builder/#cmd)
	// .
	Command []string

	// The environment variables to pass to a container. This parameter maps to Env in
	// the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --env option to docker run (https://docs.docker.com/engine/reference/run/)
	// . We don't recommend using plaintext environment variables for sensitive
	// information, such as credential data. Environment variables cannot start with "
	// AWS_BATCH ". This naming convention is reserved for variables that Batch sets.
	Environment []KeyValuePair

	// The amount of ephemeral storage to allocate for the task. This parameter is
	// used to expand the total amount of ephemeral storage available, beyond the
	// default amount, for tasks hosted on Fargate.
	EphemeralStorage *EphemeralStorage

	// The Amazon Resource Name (ARN) of the execution role that Batch can assume. For
	// jobs that run on Fargate resources, you must provide an execution role. For more
	// information, see Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html)
	// in the Batch User Guide.
	ExecutionRoleArn *string

	// The platform configuration for jobs that are running on Fargate resources. Jobs
	// that are running on EC2 resources must not specify this parameter.
	FargatePlatformConfiguration *FargatePlatformConfiguration

	// The image used to start a container. This string is passed directly to the
	// Docker daemon. Images in the Docker Hub registry are available by default. Other
	// repositories are specified with repository-url/image:tag . It can be up to
	// 255 characters long. It can contain uppercase and lowercase letters, numbers,
	// hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and
	// number signs (#). This parameter maps to Image in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the IMAGE parameter of docker run (https://docs.docker.com/engine/reference/run/)
	// . Docker image architecture must match the processor architecture of the compute
	// resources that they're scheduled on. For example, ARM-based Docker images can
	// only run on ARM-based compute resources.
	//   - Images in Amazon ECR Public repositories use the full
	//   registry/repository[:tag] or registry/repository[@digest] naming conventions.
	//   For example, public.ecr.aws/registry_alias/my-web-app:latest .
	//   - Images in Amazon ECR repositories use the full registry and repository URI
	//   (for example, 123456789012.dkr.ecr..amazonaws.com/ ).
	//   - Images in official repositories on Docker Hub use a single name (for
	//   example, ubuntu or mongo ).
	//   - Images in other repositories on Docker Hub are qualified with an
	//   organization name (for example, amazon/amazon-ecs-agent ).
	//   - Images in other online repositories are qualified further by a domain name
	//   (for example, quay.io/assemblyline/ubuntu ).
	Image *string

	// The instance type to use for a multi-node parallel job. All node groups in a
	// multi-node parallel job must use the same instance type. This parameter isn't
	// applicable to single-node container jobs or jobs that run on Fargate resources,
	// and shouldn't be provided.
	InstanceType *string

	// The Amazon Resource Name (ARN) of the IAM role that the container can assume
	// for Amazon Web Services permissions. For more information, see IAM roles for
	// tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)
	// in the Amazon Elastic Container Service Developer Guide.
	JobRoleArn *string

	// Linux-specific modifications that are applied to the container, such as details
	// for device mappings.
	LinuxParameters *LinuxParameters

	// The log configuration specification for the container. This parameter maps to
	// LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/)
	// . By default, containers use the same logging driver that the Docker daemon
	// uses. However, the container might use a different logging driver than the Docker
	// daemon by specifying a log driver with this parameter in the container
	// definition. To use a different logging driver for a container, the log system
	// must be configured properly on the container instance (or on a different log
	// server for remote logging options). For more information on the options for
	// different supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/)
	// in the Docker documentation. Batch currently supports a subset of the logging
	// drivers available to the Docker daemon (shown in the LogConfiguration data
	// type). This parameter requires version 1.18 of the Docker Remote API or greater
	// on your container instance. To check the Docker Remote API version on your
	// container instance, log in to your container instance and run the following
	// command: sudo docker version | grep "Server API version" The Amazon ECS
	// container agent running on a container instance must register the logging
	// drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
	// environment variable before containers placed on that instance can use these log
	// configuration options. For more information, see Amazon ECS container agent
	// configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html)
	// in the Amazon Elastic Container Service Developer Guide.
	LogConfiguration *LogConfiguration

	// This parameter is deprecated, use resourceRequirements to specify the memory
	// requirements for the job definition. It's not supported for jobs running on
	// Fargate resources. For jobs that run on EC2 resources, it specifies the memory
	// hard limit (in MiB) for a container. If your container attempts to exceed the
	// specified number, it's terminated. You must specify at least 4 MiB of memory for
	// a job using this parameter. The memory hard limit can be specified in several
	// places. It must be specified for each node at least once.
	//
	// Deprecated: This field is deprecated, use resourceRequirements instead.
	Memory *int32

	// The mount points for data volumes in your container. This parameter maps to
	// Volumes in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --volume option to docker run (https://docs.docker.com/engine/reference/run/)
	// .
	MountPoints []MountPoint

	// The network configuration for jobs that are running on Fargate resources. Jobs
	// that are running on EC2 resources must not specify this parameter.
	NetworkConfiguration *NetworkConfiguration

	// When this parameter is true, the container is given elevated permissions on the
	// host container instance (similar to the root user). This parameter maps to
	// Privileged in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --privileged option to docker run (https://docs.docker.com/engine/reference/run/)
	// . The default value is false. This parameter isn't applicable to jobs that are
	// running on Fargate resources and shouldn't be provided, or specified as false.
	Privileged *bool

	// When this parameter is true, the container is given read-only access to its
	// root file system. This parameter maps to ReadonlyRootfs in the Create a
	// container (https://docs.docker.com/engine/api/v1.23/#create-a-container) section
	// of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
	// --read-only option to docker run .
	ReadonlyRootFilesystem *bool

	// The type and amount of resources to assign to a container. The supported
	// resources include GPU , MEMORY , and VCPU .
	ResourceRequirements []ResourceRequirement

	// An object that represents the compute environment architecture for Batch jobs
	// on Fargate.
	RuntimePlatform *RuntimePlatform

	// The secrets for the container. For more information, see Specifying sensitive
	// data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
	// in the Batch User Guide.
	Secrets []Secret

	// A list of ulimits to set in the container. This parameter maps to Ulimits in
	// the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/)
	// . This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided.
	Ulimits []Ulimit

	// The user name to use inside the container. This parameter maps to User in the
	// Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --user option to docker run (https://docs.docker.com/engine/reference/run/)
	// .
	User *string

	// This parameter is deprecated, use resourceRequirements to specify the vCPU
	// requirements for the job definition. It's not supported for jobs running on
	// Fargate resources. For jobs running on EC2 resources, it specifies the number of
	// vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares. This
	// parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/)
	// . The number of vCPUs must be specified but can be specified in several places.
	// You must specify it at least once for each node.
	//
	// Deprecated: This field is deprecated, use resourceRequirements instead.
	Vcpus *int32

	// A list of data volumes used in a job.
	Volumes []Volume

	noSmithyDocumentSerde
}
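
// Illustrative sketch, not part of the generated API: a minimal
// ContainerProperties value for registering an ECS-based job definition that
// runs on Fargate. The image, role ARN, and resource values are hypothetical.
func exampleContainerProperties() ContainerProperties {
	image := "my-registry/my-app:1.0"                                      // hypothetical image
	executionRole := "arn:aws:iam::111122223333:role/ecsTaskExecutionRole" // hypothetical ARN
	vcpu := "1"
	memory := "2048"
	return ContainerProperties{
		Image:            &image,
		Command:          []string{"./run-job.sh"},
		ExecutionRoleArn: &executionRole,
		ResourceRequirements: []ResourceRequirement{
			{Type: ResourceTypeVcpu, Value: &vcpu},
			{Type: ResourceTypeMemory, Value: &memory},
		},
	}
}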

// An object that represents summary details of a container within a job.
type ContainerSummary struct {

	// The exit code to return upon completion.
	ExitCode *int32

	// A short (255 max characters) human-readable string to provide additional
	// details for a running or stopped container.
	Reason *string

	noSmithyDocumentSerde
}

// An object that represents a container instance host device. This object isn't
// applicable to jobs that are running on Fargate resources and shouldn't be
// provided.
type Device struct {

	// The path for the device on the host container instance.
	//
	// This member is required.
	HostPath *string

	// The path inside the container that's used to expose the host device. By
	// default, the hostPath value is used.
	ContainerPath *string

	// The explicit permissions to provide to the container for the device. By
	// default, the container has permissions for read , write , and mknod for the
	// device.
	Permissions []DeviceCgroupPermission

	noSmithyDocumentSerde
}
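
// Illustrative sketch, not part of the generated API: exposing a host device
// to the container with read and write (but not mknod) permissions. The
// device paths are hypothetical, and this isn't applicable to Fargate jobs.
func exampleDevice() Device {
	hostPath := "/dev/xvdf"
	containerPath := "/dev/scratch"
	return Device{
		HostPath:      &hostPath,
		ContainerPath: &containerPath,
		Permissions: []DeviceCgroupPermission{
			DeviceCgroupPermissionRead,
			DeviceCgroupPermissionWrite,
		},
	}
}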

// Provides information used to select Amazon Machine Images (AMIs) for instances
// in the compute environment. If Ec2Configuration isn't specified, the default is
// ECS_AL2 ( Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)
// ). This object isn't applicable to jobs that are running on Fargate resources.
type Ec2Configuration struct {

	// The image type to match with the instance type to select an AMI. The
	// supported values are different for ECS and EKS resources.
	//
	// ECS: If the imageIdOverride parameter isn't specified, then a recent Amazon
	// ECS-optimized Amazon Linux 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)
	// ( ECS_AL2 ) is used. If a new image type is specified in an update, but
	// neither an imageId nor an imageIdOverride parameter is specified, then the
	// latest Amazon ECS optimized AMI for that image type that's supported by Batch
	// is used.
	//   - ECS_AL2 ( Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)
	//   ): Default for all non-GPU instance families.
	//   - ECS_AL2_NVIDIA ( Amazon Linux 2 (GPU) (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#gpuami)
	//   ): Default for all GPU instance families (for example, P4 and G4 ) and can
	//   be used for all non Amazon Web Services Graviton-based instance types.
	//   - ECS_AL1 ( Amazon Linux (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami)
	//   ): Amazon Linux has reached the end-of-life of standard support. For more
	//   information, see Amazon Linux AMI (http://aws.amazon.com/amazon-linux-ami/) .
	//
	// EKS: If the imageIdOverride parameter isn't specified, then a recent Amazon
	// EKS-optimized Amazon Linux AMI (https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html)
	// ( EKS_AL2 ) is used. If a new image type is specified in an update, but
	// neither an imageId nor an imageIdOverride parameter is specified, then the
	// latest Amazon EKS optimized AMI for that image type that Batch supports is
	// used.
	//   - EKS_AL2 ( Amazon Linux 2 (https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html)
	//   ): Default for all non-GPU instance families.
	//   - EKS_AL2_NVIDIA ( Amazon Linux 2 (accelerated) (https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html)
	//   ): Default for all GPU instance families (for example, P4 and G4 ) and can
	//   be used for all non Amazon Web Services Graviton-based instance types.
	//
	// This member is required.
	ImageType *string

	// The AMI ID used for instances launched in the compute environment that match
	// the image type. This setting overrides the imageId set in the computeResource
	// object. The AMI that you choose for a compute environment must match the
	// architecture of the instance types that you intend to use for that compute
	// environment. For example, if your compute environment uses A1 instance types,
	// the compute resource AMI that you choose must support ARM instances. Amazon ECS
	// vends both x86 and ARM versions of the Amazon ECS-optimized Amazon Linux 2 AMI.
	// For more information, see Amazon ECS-optimized Amazon Linux 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html)
	// in the Amazon Elastic Container Service Developer Guide.
	ImageIdOverride *string

	// The Kubernetes version for the compute environment. If you don't specify a
	// value, the latest version that Batch supports is used.
	ImageKubernetesVersion *string

	noSmithyDocumentSerde
}
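
// Illustrative sketch, not part of the generated API: selecting the
// GPU-enabled ECS-optimized AMI family and pinning a specific AMI ID. The AMI
// ID is hypothetical.
func exampleEc2Configuration() Ec2Configuration {
	imageType := "ECS_AL2_NVIDIA"
	imageID := "ami-0123456789abcdef0" // hypothetical override
	return Ec2Configuration{
		ImageType:       &imageType,
		ImageIdOverride: &imageID,
	}
}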

// The authorization configuration details for the Amazon EFS file system.
type EFSAuthorizationConfig struct {

	// The Amazon EFS access point ID to use. If an access point is specified, the
	// root directory value specified in the EFSVolumeConfiguration must either be
	// omitted or set to / which enforces the path set on the EFS access point. If an
	// access point is used, transit encryption must be enabled in the
	// EFSVolumeConfiguration . For more information, see Working with Amazon EFS
	// access points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html)
	// in the Amazon Elastic File System User Guide.
	AccessPointId *string

	// Whether or not to use the Batch job IAM role defined in a job definition when
	// mounting the Amazon EFS file system. If enabled, transit encryption must be
	// enabled in the EFSVolumeConfiguration . If this parameter is omitted, the
	// default value of DISABLED is used. For more information, see Using Amazon EFS
	// access points (https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints)
	// in the Batch User Guide. EFS IAM authorization requires that TransitEncryption
	// be ENABLED and that a JobRoleArn is specified.
	Iam EFSAuthorizationConfigIAM

	noSmithyDocumentSerde
}

// This is used when you're using an Amazon Elastic File System file system for
// job storage. For more information, see Amazon EFS Volumes (https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html)
// in the Batch User Guide.
type EFSVolumeConfiguration struct {

	// The Amazon EFS file system ID to use.
	//
	// This member is required.
	FileSystemId *string

	// The authorization configuration details for the Amazon EFS file system.
	AuthorizationConfig *EFSAuthorizationConfig

	// The directory within the Amazon EFS file system to mount as the root directory
	// inside the host. If this parameter is omitted, the root of the Amazon EFS volume
	// is used instead. Specifying / has the same effect as omitting this parameter.
	// The maximum length is 4,096 characters. If an EFS access point is specified in
	// the authorizationConfig , the root directory parameter must either be omitted or
	// set to / , which enforces the path set on the Amazon EFS access point.
	RootDirectory *string

	// Determines whether to enable encryption for Amazon EFS data in transit between
	// the Amazon ECS host and the Amazon EFS server. Transit encryption must be
	// enabled if Amazon EFS IAM authorization is used. If this parameter is omitted,
	// the default value of DISABLED is used. For more information, see Encrypting
	// data in transit (https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html)
	// in the Amazon Elastic File System User Guide.
	TransitEncryption EFSTransitEncryption

	// The port to use when sending encrypted data between the Amazon ECS host and the
	// Amazon EFS server. If you don't specify a transit encryption port, it uses the
	// port selection strategy that the Amazon EFS mount helper uses. The value must be
	// between 0 and 65,535. For more information, see EFS mount helper (https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html)
	// in the Amazon Elastic File System User Guide.
	TransitEncryptionPort *int32

	noSmithyDocumentSerde
}
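
// Illustrative sketch, not part of the generated API: an EFS volume that
// mounts through an access point with IAM authorization, which requires
// transit encryption to be enabled. The file system and access point IDs are
// hypothetical.
func exampleEFSVolumeConfiguration() EFSVolumeConfiguration {
	fileSystemID := "fs-0123456789abcdef0"
	accessPointID := "fsap-0123456789abcdef0"
	return EFSVolumeConfiguration{
		FileSystemId:      &fileSystemID,
		TransitEncryption: EFSTransitEncryptionEnabled,
		AuthorizationConfig: &EFSAuthorizationConfig{
			AccessPointId: &accessPointID,
			Iam:           EFSAuthorizationConfigIAMEnabled,
		},
	}
}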

// An object that represents the details of a container that's part of a job
// attempt on Amazon EKS resources.
type EksAttemptContainerDetail struct {

	// The exit code for the job attempt. A non-zero exit code is considered failed.
	ExitCode *int32

	// A short (255 max characters) human-readable string to provide additional
	// details for a running or stopped container.
	Reason *string

	noSmithyDocumentSerde
}

// An object that represents the details of a job attempt for a job that runs
// on Amazon EKS resources.
type EksAttemptDetail struct {

	// The details for the final status of the containers for this job attempt.
	Containers []EksAttemptContainerDetail

	// The name of the node for this job attempt.
	NodeName *string

	// The name of the pod for this job attempt.
	PodName *string

	// The Unix timestamp (in milliseconds) for when the attempt was started (when the
	// attempt transitioned from the STARTING state to the RUNNING state).
	StartedAt *int64

	// A short, human-readable string to provide additional details for the current
	// status of the job attempt.
	StatusReason *string

	// The Unix timestamp (in milliseconds) for when the attempt was stopped. This
	// happens when the attempt transitioned from the RUNNING state to a terminal
	// state, such as SUCCEEDED or FAILED .
	StoppedAt *int64

	noSmithyDocumentSerde
}

// Configuration for the Amazon EKS cluster that supports the Batch compute
// environment. The cluster must exist before the compute environment can be
// created.
type EksConfiguration struct {

	// The Amazon Resource Name (ARN) of the Amazon EKS cluster. An example is
	// arn:aws:eks:us-east-1:123456789012:cluster/ClusterForBatch .
	//
	// This member is required.
	EksClusterArn *string

	// The namespace of the Amazon EKS cluster. Batch manages pods in this namespace.
	// The value can't be left empty or null. It must be fewer than 64 characters long,
	// can't be set to default , can't start with " kube- ," and must match this
	// regular expression: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ . For more information, see
	// Namespaces (https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/)
	// in the Kubernetes documentation.
	//
	// This member is required.
	KubernetesNamespace *string

	noSmithyDocumentSerde
}
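
// Illustrative sketch, not part of the generated API: pointing a compute
// environment at an existing EKS cluster and a dedicated namespace. The ARN
// and namespace are hypothetical and must satisfy the constraints described
// above (not default, not prefixed with kube-).
func exampleEksConfiguration() EksConfiguration {
	clusterArn := "arn:aws:eks:us-east-1:111122223333:cluster/ClusterForBatch"
	namespace := "batch-jobs"
	return EksConfiguration{
		EksClusterArn:       &clusterArn,
		KubernetesNamespace: &namespace,
	}
}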

// EKS container properties are used in Amazon EKS-based job definitions to
// describe the properties for a container node in the pod that's launched as
// part of a job. This can't be specified for Amazon ECS-based job definitions.
type EksContainer struct {

	// The Docker image used to start the container.
	//
	// This member is required.
	Image *string

	// An array of arguments to the entrypoint. If this isn't specified, the CMD of
	// the container image is used. This corresponds to the args member in the
	// Entrypoint (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint)
	// portion of the Pod (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/)
	// in Kubernetes. Environment variable references are expanded using the
	// container's environment. If the referenced environment variable doesn't exist,
	// the reference in the command isn't changed. For example, if the reference is to
	// " $(NAME1) " and the NAME1 environment variable doesn't exist, the command
	// string will remain " $(NAME1) ." $$ is replaced with $ , and the resulting
	// string isn't expanded. For example, $$(VAR_NAME) is passed as $(VAR_NAME)
	// whether or not the VAR_NAME environment variable exists. For more information,
	// see CMD (https://docs.docker.com/engine/reference/builder/#cmd) in the
	// Dockerfile reference and Define a command and arguments for a pod (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/)
	// in the Kubernetes documentation.
	Args []string

	// The entrypoint for the container. This isn't run within a shell. If this isn't
	// specified, the ENTRYPOINT of the container image is used. Environment variable
	// references are expanded using the container's environment. If the referenced
	// environment variable doesn't exist, the reference in the command isn't changed.
	// For example, if the reference is to " $(NAME1) " and the NAME1 environment
	// variable doesn't exist, the command string will remain " $(NAME1) ." $$ is
	// replaced with $ and the resulting string isn't expanded. For example,
	// $$(VAR_NAME) will be passed as $(VAR_NAME) whether or not the VAR_NAME
	// environment variable exists. The entrypoint can't be updated. For more
	// information, see ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
	// in the Dockerfile reference and Define a command and arguments for a container (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/)
	// and Entrypoint (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint)
	// in the Kubernetes documentation.
	Command []string

	// The environment variables to pass to a container. Environment variables cannot
	// start with " AWS_BATCH ". This naming convention is reserved for variables that
	// Batch sets.
	Env []EksContainerEnvironmentVariable

	// The image pull policy for the container. Supported values are Always ,
	// IfNotPresent , and Never . This parameter defaults to IfNotPresent . However, if
	// the :latest tag is specified, it defaults to Always . For more information, see
	// Updating images (https://kubernetes.io/docs/concepts/containers/images/#updating-images)
	// in the Kubernetes documentation.
	ImagePullPolicy *string

	// The name of the container. If the name isn't specified, the default name "
	// Default " is used. Each container in a pod must have a unique name.
	Name *string

	// The type and amount of resources to assign to a container. The supported
	// resources include memory , cpu , and nvidia.com/gpu . For more information, see
	// Resource management for pods and containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
	// in the Kubernetes documentation.
	Resources *EksContainerResourceRequirements

	// The security context for a job. For more information, see Configure a security
	// context for a pod or container (https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
	// in the Kubernetes documentation.
	SecurityContext *EksContainerSecurityContext

	// The volume mounts for the container. Batch supports emptyDir , hostPath , and
	// secret volume types. For more information about volumes and volume mounts in
	// Kubernetes, see Volumes (https://kubernetes.io/docs/concepts/storage/volumes/)
	// in the Kubernetes documentation.
	VolumeMounts []EksContainerVolumeMount

	noSmithyDocumentSerde
}
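
// Illustrative sketch, not part of the generated API: a single EKS container
// with an explicit entrypoint, arguments, one environment variable, and
// cpu/memory requests. All values are hypothetical.
func exampleEksContainer() EksContainer {
	image := "public.ecr.aws/amazonlinux/amazonlinux:2"
	name := "main"
	envName := "STAGE"
	envValue := "test"
	return EksContainer{
		Image:   &image,
		Name:    &name,
		Command: []string{"/bin/sh", "-c"},
		Args:    []string{"echo hello from $(STAGE)"},
		Env: []EksContainerEnvironmentVariable{
			{Name: &envName, Value: &envValue},
		},
		Resources: &EksContainerResourceRequirements{
			Requests: map[string]string{"cpu": "0.25", "memory": "512Mi"},
		},
	}
}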

// The details for container properties that are returned by DescribeJobs for jobs
// that use Amazon EKS.
type EksContainerDetail struct {

	// An array of arguments to the entrypoint. If this isn't specified, the CMD of
	// the container image is used. This corresponds to the args member in the
	// Entrypoint (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint)
	// portion of the Pod (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/)
	// in Kubernetes. Environment variable references are expanded using the
	// container's environment. If the referenced environment variable doesn't exist,
	// the reference in the command isn't changed. For example, if the reference is to
	// " $(NAME1) " and the NAME1 environment variable doesn't exist, the command
	// string will remain " $(NAME1) ". $$ is replaced with $ and the resulting string
	// isn't expanded. For example, $$(VAR_NAME) is passed as $(VAR_NAME) whether or
	// not the VAR_NAME environment variable exists. For more information, see CMD (https://docs.docker.com/engine/reference/builder/#cmd)
	// in the Dockerfile reference and Define a command and arguments for a pod (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/)
	// in the Kubernetes documentation.
	Args []string

	// The entrypoint for the container. For more information, see Entrypoint (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint)
	// in the Kubernetes documentation.
	Command []string

	// The environment variables to pass to a container. Environment variables cannot
	// start with " AWS_BATCH ". This naming convention is reserved for variables that
	// Batch sets.
	Env []EksContainerEnvironmentVariable

	// The exit code for the job attempt. A non-zero exit code is considered failed.
	ExitCode *int32

	// The Docker image used to start the container.
	Image *string

	// The image pull policy for the container. Supported values are Always ,
	// IfNotPresent , and Never . This parameter defaults to Always if the :latest tag
	// is specified, IfNotPresent otherwise. For more information, see Updating images (https://kubernetes.io/docs/concepts/containers/images/#updating-images)
	// in the Kubernetes documentation.
	ImagePullPolicy *string

	// The name of the container. If the name isn't specified, the default name "
	// Default " is used. Each container in a pod must have a unique name.
	Name *string

	// A short human-readable string to provide additional details for a running or
	// stopped container. It can be up to 255 characters long.
	Reason *string

	// The type and amount of resources to assign to a container. The supported
	// resources include memory , cpu , and nvidia.com/gpu . For more information, see
	// Resource management for pods and containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
	// in the Kubernetes documentation.
	Resources *EksContainerResourceRequirements

	// The security context for a job. For more information, see Configure a security
	// context for a pod or container (https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
	// in the Kubernetes documentation.
	SecurityContext *EksContainerSecurityContext

	// The volume mounts for the container. Batch supports emptyDir , hostPath , and
	// secret volume types. For more information about volumes and volume mounts in
	// Kubernetes, see Volumes (https://kubernetes.io/docs/concepts/storage/volumes/)
	// in the Kubernetes documentation.
	VolumeMounts []EksContainerVolumeMount

	noSmithyDocumentSerde
}

// An environment variable.
type EksContainerEnvironmentVariable struct {

	// The name of the environment variable.
	//
	// This member is required.
	Name *string

	// The value of the environment variable.
	Value *string

	noSmithyDocumentSerde
}

// Object representing any Kubernetes overrides to a job definition that's used in
// a SubmitJob API operation.
type EksContainerOverride struct {

	// The arguments to the entrypoint to send to the container that overrides the
	// default arguments from the Docker image or the job definition. For more
	// information, see CMD (https://docs.docker.com/engine/reference/builder/#cmd) in
	// the Dockerfile reference and Define a command and arguments for a pod (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/)
	// in the Kubernetes documentation.
	Args []string

	// The command to send to the container that overrides the default command from
	// the Docker image or the job definition.
	Command []string

	// The environment variables to send to the container. You can add new environment
	// variables, which are added to the container at launch. Or, you can override the
	// existing environment variables from the Docker image or the job definition.
	// Environment variables cannot start with " AWS_BATCH ". This naming convention is
	// reserved for variables that Batch sets.
	Env []EksContainerEnvironmentVariable

	// The override of the Docker image that's used to start the container.
	Image *string

	// The type and amount of resources to assign to a container. These override the
	// settings in the job definition. The supported resources include memory , cpu ,
	// and nvidia.com/gpu . For more information, see Resource management for pods and
	// containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
	// in the Kubernetes documentation.
	Resources *EksContainerResourceRequirements

	noSmithyDocumentSerde
}

// The type and amount of resources to assign to a container. The supported
// resources include memory , cpu , and nvidia.com/gpu . For more information, see
// Resource management for pods and containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
// in the Kubernetes documentation.
type EksContainerResourceRequirements struct {

	// The type and quantity of the resources to reserve for the container. The values
	// vary based on the name that's specified. Resources can be requested using
	// either the limits or the requests objects. memory The memory hard limit (in
	// MiB) for the container, using whole integers, with a "Mi" suffix. If your
	// container attempts to exceed the memory specified, the container is terminated.
	// You must specify at least 4 MiB of memory for a job. memory can be specified in
	// limits , requests , or both. If memory is specified in both places, then the
	// value that's specified in limits must be equal to the value that's specified in
	// requests . To maximize your resource utilization, provide your jobs with as much
	// memory as possible for the specific instance type that you are using. To learn
	// how, see Memory management (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html)
	// in the Batch User Guide. cpu The number of CPUs that's reserved for the
	// container. Values must be an even multiple of 0.25 . cpu can be specified in
	// limits , requests , or both. If cpu is specified in both places, then the value
	// that's specified in limits must be at least as large as the value that's
	// specified in requests . nvidia.com/gpu The number of GPUs that's reserved for
	// the container. Values must be a whole integer. nvidia.com/gpu can be specified
	// in limits , requests , or both. If nvidia.com/gpu is specified in both places, then the value
	// that's specified in limits must be equal to the value that's specified in
	// requests .
	Limits map[string]string

	// The type and quantity of the resources to request for the container. The values
	// vary based on the name that's specified. Resources can be requested by using
	// either the limits or the requests objects. memory The memory hard limit (in
	// MiB) for the container, using whole integers, with a "Mi" suffix. If your
	// container attempts to exceed the memory specified, the container is terminated.
	// You must specify at least 4 MiB of memory for a job. memory can be specified in
	// limits , requests , or both. If memory is specified in both, then the value
	// that's specified in limits must be equal to the value that's specified in
	// requests . If you're trying to maximize your resource utilization by providing
	// your jobs as much memory as possible for a particular instance type, see Memory
	// management (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html)
	// in the Batch User Guide. cpu The number of CPUs that are reserved for the
	// container. Values must be an even multiple of 0.25 . cpu can be specified in
	// limits , requests , or both. If cpu is specified in both, then the value that's
	// specified in limits must be at least as large as the value that's specified in
	// requests . nvidia.com/gpu The number of GPUs that are reserved for the
	// container. Values must be a whole integer. nvidia.com/gpu can be specified in
	// limits , requests , or both. If nvidia.com/gpu is specified in both, then the
	// value that's specified in limits must be equal to the value that's specified in
	// requests .
	Requests map[string]string

	noSmithyDocumentSerde
}
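
// Illustrative sketch, not part of the generated API: requesting cpu, memory,
// and one GPU, with limits matching requests where the rules above require
// equality (memory and nvidia.com/gpu) and a higher cpu limit, which only has
// to be at least as large as the request. The quantities are hypothetical.
func exampleEksResourceRequirements() EksContainerResourceRequirements {
	return EksContainerResourceRequirements{
		Requests: map[string]string{
			"cpu":            "1",
			"memory":         "2048Mi",
			"nvidia.com/gpu": "1",
		},
		Limits: map[string]string{
			"cpu":            "2",
			"memory":         "2048Mi",
			"nvidia.com/gpu": "1",
		},
	}
}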

// The security context for a job. For more information, see Configure a security
// context for a pod or container (https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
// in the Kubernetes documentation.
type EksContainerSecurityContext struct {

	// When this parameter is true , the container is given elevated permissions on the
	// host container instance. The level of permissions are similar to the root user
	// permissions. The default value is false . This parameter maps to privileged
	// policy in the Privileged pod security policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#privileged)
	// in the Kubernetes documentation.
	Privileged *bool

	// When this parameter is true , the container is given read-only access to its
	// root file system. The default value is false . This parameter maps to
	// ReadOnlyRootFilesystem policy in the Volumes and file systems pod security
	// policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#volumes-and-file-systems)
	// in the Kubernetes documentation.
	ReadOnlyRootFilesystem *bool

	// When this parameter is specified, the container is run as the specified group
	// ID ( gid ). If this parameter isn't specified, the default is the group that's
	// specified in the image metadata. This parameter maps to RunAsGroup and MustRunAs
	// policy in the Users and groups pod security policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#users-and-groups)
	// in the Kubernetes documentation.
	RunAsGroup *int64

	// When this parameter is specified, the container is run as a user with a uid
	// other than 0. If this parameter isn't specified, no such rule is enforced. This
	// parameter maps to RunAsUser and MustRunAsNonRoot policy in the Users and groups
	// pod security policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#users-and-groups)
	// in the Kubernetes documentation.
	RunAsNonRoot *bool

	// When this parameter is specified, the container is run as the specified user ID
	// ( uid ). If this parameter isn't specified, the default is the user that's
	// specified in the image metadata. This parameter maps to RunAsUser and MustRunAs
	// policy in the Users and groups pod security policies (https://kubernetes.io/docs/concepts/security/pod-security-policy/#users-and-groups)
	// in the Kubernetes documentation.
	RunAsUser *int64

	noSmithyDocumentSerde
}
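
// Illustrative sketch, not part of the generated API: a locked-down security
// context that runs as a non-root user with a read-only root filesystem. The
// uid and gid values are hypothetical.
func exampleEksSecurityContext() EksContainerSecurityContext {
	readOnly := true
	nonRoot := true
	uid := int64(1000)
	gid := int64(1000)
	return EksContainerSecurityContext{
		ReadOnlyRootFilesystem: &readOnly,
		RunAsNonRoot:           &nonRoot,
		RunAsUser:              &uid,
		RunAsGroup:             &gid,
	}
}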

// The volume mounts for a container for an Amazon EKS job. For more information
// about volumes and volume mounts in Kubernetes, see Volumes (https://kubernetes.io/docs/concepts/storage/volumes/)
// in the Kubernetes documentation.
type EksContainerVolumeMount struct {

	// The path on the container where the volume is mounted.
	MountPath *string

	// The name of the volume mount. This must match the name of one of the volumes in
	// the pod.
	Name *string

	// If this value is true , the container has read-only access to the volume.
	// Otherwise, the container can write to the volume. The default value is false .
	ReadOnly *bool

	noSmithyDocumentSerde
}

// Specifies the configuration of a Kubernetes emptyDir volume. An emptyDir volume
// is first created when a pod is assigned to a node. It exists as long as that pod
// is running on that node. The emptyDir volume is initially empty. All containers
// in the pod can read and write the files in the emptyDir volume. However, the
// emptyDir volume can be mounted at the same or different paths in each container.
// When a pod is removed from a node for any reason, the data in the emptyDir is
// deleted permanently. For more information, see emptyDir (https://kubernetes.io/docs/concepts/storage/volumes/#emptydir)
// in the Kubernetes documentation.
type EksEmptyDir struct {

	// The medium to store the volume. The default value is an empty string, which
	// uses the storage of the node. "" (Default) Use the disk storage of the node.
	// "Memory" Use the tmpfs volume that's backed by the RAM of the node. Contents of
	// the volume are lost when the node reboots, and any storage on the volume counts
	// against the container's memory limit.
	Medium *string

	// The maximum size of the volume. By default, there's no maximum size defined.
	SizeLimit *string

	noSmithyDocumentSerde
}

// Specifies the configuration of a Kubernetes hostPath volume. A hostPath volume
// mounts an existing file or directory from the host node's filesystem into your
// pod. For more information, see hostPath (https://kubernetes.io/docs/concepts/storage/volumes/#hostpath)
// in the Kubernetes documentation.
type EksHostPath struct {

	// The path of the file or directory on the host to mount into containers on the
	// pod.
	Path *string

	noSmithyDocumentSerde
}

// Describes and uniquely identifies Kubernetes resources. For example, the
// compute environment that a pod runs in or the jobID for a job running in the
// pod. For more information, see Understanding Kubernetes Objects (https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/)
// in the Kubernetes documentation.
type EksMetadata struct {

	// Key-value pairs used to identify, sort, and organize Kubernetes resources. Can
	// contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and
	// underscores (_). Labels can be added or modified at any time. Each resource can
	// have multiple labels, but each key must be unique for a given object.
	Labels map[string]string

	noSmithyDocumentSerde
}

// The properties for the pod.
type EksPodProperties struct {

	// The properties of the container that's used on the Amazon EKS pod.
	Containers []EksContainer

	// The DNS policy for the pod. The default value is ClusterFirst . If the
	// hostNetwork parameter is not specified, the default is ClusterFirstWithHostNet .
	// ClusterFirst indicates that any DNS query that does not match the configured
	// cluster domain suffix is forwarded to the upstream nameserver inherited from the
	// node. For more information, see Pod's DNS policy (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy)
	// in the Kubernetes documentation. Valid values: Default | ClusterFirst |
	// ClusterFirstWithHostNet
	DnsPolicy *string

	// Indicates if the pod uses the host's network IP address. The default value is
	// true . Setting this to false enables the Kubernetes pod networking model. Most
	// Batch workloads are egress-only and don't require the overhead of IP allocation
	// for each pod for incoming connections. For more information, see Host namespaces (https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces)
	// and Pod networking (https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking)
	// in the Kubernetes documentation.
	HostNetwork *bool

	// Metadata about the Kubernetes pod. For more information, see Understanding
	// Kubernetes Objects (https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/)
	// in the Kubernetes documentation.
	Metadata *EksMetadata

	// The name of the service account that's used to run the pod. For more
	// information, see Kubernetes service accounts (https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html)
	// and Configure a Kubernetes service account to assume an IAM role (https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html)
	// in the Amazon EKS User Guide and Configure service accounts for pods (https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
	// in the Kubernetes documentation.
	ServiceAccountName *string

	// Specifies the volumes for a job definition that uses Amazon EKS resources.
	Volumes []EksVolume

	noSmithyDocumentSerde
}
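
// Illustrative sketch, not part of the generated API: pod properties that
// disable host networking, run under a service account, and mount an emptyDir
// volume into a single container. Names and the image are hypothetical; the
// volume name must match the container's volume mount name.
func exampleEksPodProperties() EksPodProperties {
	hostNetwork := false
	serviceAccount := "batch-job-sa"
	volumeName := "scratch"
	mountPath := "/scratch"
	image := "public.ecr.aws/amazonlinux/amazonlinux:2"
	return EksPodProperties{
		HostNetwork:        &hostNetwork,
		ServiceAccountName: &serviceAccount,
		Containers: []EksContainer{
			{
				Image:   &image,
				Command: []string{"sleep", "60"},
				VolumeMounts: []EksContainerVolumeMount{
					{Name: &volumeName, MountPath: &mountPath},
				},
			},
		},
		Volumes: []EksVolume{
			{Name: &volumeName, EmptyDir: &EksEmptyDir{}},
		},
	}
}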

// The details for the pod.
type EksPodPropertiesDetail struct {

	// The properties of the container that's used on the Amazon EKS pod.
	Containers []EksContainerDetail

	// The DNS policy for the pod. The default value is ClusterFirst . If the
	// hostNetwork parameter is not specified, the default is ClusterFirstWithHostNet .
	// ClusterFirst indicates that any DNS query that does not match the configured
	// cluster domain suffix is forwarded to the upstream nameserver inherited from the
	// node. If no value was specified for dnsPolicy in the RegisterJobDefinition (https://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html)
	// API operation, then no value will be returned for dnsPolicy by either of
	// DescribeJobDefinitions (https://docs.aws.amazon.com/batch/latest/APIReference/API_DescribeJobDefinitions.html)
	// or DescribeJobs (https://docs.aws.amazon.com/batch/latest/APIReference/API_DescribeJobs.html)
	// API operations. The pod spec setting will contain either ClusterFirst or
	// ClusterFirstWithHostNet , depending on the value of the hostNetwork parameter.
	// For more information, see Pod's DNS policy (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy)
	// in the Kubernetes documentation. Valid values: Default | ClusterFirst |
	// ClusterFirstWithHostNet
	DnsPolicy *string

	// Indicates if the pod uses the host's network IP address. The default value is
	// true . Setting this to false enables the Kubernetes pod networking model. Most
	// Batch workloads are egress-only and don't require the overhead of IP allocation
	// for each pod for incoming connections. For more information, see Host namespaces (https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces)
	// and Pod networking (https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking)
	// in the Kubernetes documentation.
	HostNetwork *bool

	// Describes and uniquely identifies Kubernetes resources. For example, the
	// compute environment that a pod runs in or the jobID for a job running in the
	// pod. For more information, see Understanding Kubernetes Objects (https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/)
	// in the Kubernetes documentation.
	Metadata *EksMetadata

	// The name of the node for this job.
	NodeName *string

	// The name of the pod for this job.
	PodName *string

	// The name of the service account that's used to run the pod. For more
	// information, see Kubernetes service accounts (https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html)
	// and Configure a Kubernetes service account to assume an IAM role (https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html)
	// in the Amazon EKS User Guide and Configure service accounts for pods (https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
	// in the Kubernetes documentation.
	ServiceAccountName *string

	// Specifies the volumes for a job definition using Amazon EKS resources.
	Volumes []EksVolume

	noSmithyDocumentSerde
}

// An object that contains overrides for the Kubernetes pod properties of a job.
type EksPodPropertiesOverride struct {

	// The overrides for the container that's used on the Amazon EKS pod.
	Containers []EksContainerOverride

	// Metadata about the overrides for the container that's used on the Amazon EKS
	// pod.
	Metadata *EksMetadata

	noSmithyDocumentSerde
}

// An object that contains the properties for the Kubernetes resources of a job.
type EksProperties struct {

	// The properties for the Kubernetes pod resources of a job.
	PodProperties *EksPodProperties

	noSmithyDocumentSerde
}

// An object that contains the details for the Kubernetes resources of a job.
type EksPropertiesDetail struct {

	// The properties for the Kubernetes pod resources of a job.
	PodProperties *EksPodPropertiesDetail

	noSmithyDocumentSerde
}

// An object that contains overrides for the Kubernetes resources of a job.
type EksPropertiesOverride struct {

	// The overrides for the Kubernetes pod resources of a job.
	PodProperties *EksPodPropertiesOverride

	noSmithyDocumentSerde
}
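
// Illustrative sketch, not part of the generated API: the nesting used to
// override a single EKS container's image and arguments in a SubmitJob
// request. The image and arguments are hypothetical.
func exampleEksPropertiesOverride() EksPropertiesOverride {
	image := "my-registry/my-app:override" // hypothetical image
	return EksPropertiesOverride{
		PodProperties: &EksPodPropertiesOverride{
			Containers: []EksContainerOverride{
				{
					Image: &image,
					Args:  []string{"--dry-run"},
				},
			},
		},
	}
}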

// Specifies the configuration of a Kubernetes secret volume. For more
// information, see secret (https://kubernetes.io/docs/concepts/storage/volumes/#secret)
// in the Kubernetes documentation.
type EksSecret struct {

	// The name of the secret. The name must be allowed as a DNS subdomain name. For
	// more information, see DNS subdomain names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names)
	// in the Kubernetes documentation.
	//
	// This member is required.
	SecretName *string

	// Specifies whether the secret or the secret's keys must be defined.
	Optional *bool

	noSmithyDocumentSerde
}

// Specifies an Amazon EKS volume for a job definition.
type EksVolume struct {

	// The name of the volume. The name must be allowed as a DNS subdomain name. For
	// more information, see DNS subdomain names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names)
	// in the Kubernetes documentation.
	//
	// This member is required.
	Name *string

	// Specifies the configuration of a Kubernetes emptyDir volume. For more
	// information, see emptyDir (https://kubernetes.io/docs/concepts/storage/volumes/#emptydir)
	// in the Kubernetes documentation.
	EmptyDir *EksEmptyDir

	// Specifies the configuration of a Kubernetes hostPath volume. For more
	// information, see hostPath (https://kubernetes.io/docs/concepts/storage/volumes/#hostpath)
	// in the Kubernetes documentation.
	HostPath *EksHostPath

	// Specifies the configuration of a Kubernetes secret volume. For more
	// information, see secret (https://kubernetes.io/docs/concepts/storage/volumes/#secret)
	// in the Kubernetes documentation.
	Secret *EksSecret

	noSmithyDocumentSerde
}
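
// exampleEksSecretVolume is an illustrative sketch, not part of the generated
// API, showing an EksVolume backed by a Kubernetes secret. The volume and
// secret names are assumptions chosen for the example.
func exampleEksSecretVolume() EksVolume {
	name := "config-volume"
	secretName := "app-config"
	optional := false
	return EksVolume{
		Name:   &name,
		Secret: &EksSecret{SecretName: &secretName, Optional: &optional},
	}
}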

// The amount of ephemeral storage to allocate for the task. This parameter is
// used to expand the total amount of ephemeral storage available, beyond the
// default amount, for tasks hosted on Fargate.
type EphemeralStorage struct {

	// The total amount, in GiB, of ephemeral storage to set for the task. The minimum
	// supported value is 21 GiB and the maximum supported value is 200 GiB.
	//
	// This member is required.
	SizeInGiB *int32

	noSmithyDocumentSerde
}

// Specifies an array of up to 5 conditions to be met, and an action to take ( RETRY
// or EXIT ) if all conditions are met. If none of the EvaluateOnExit conditions
// in a RetryStrategy match, then the job is retried.
type EvaluateOnExit struct {

	// Specifies the action to take if all of the specified conditions ( onStatusReason
	// , onReason , and onExitCode ) are met. The values aren't case sensitive.
	//
	// This member is required.
	Action RetryAction

	// Contains a glob pattern to match against the decimal representation of the
	// ExitCode returned for a job. The pattern can be up to 512 characters long. It
	// can contain only numbers, and can end with an asterisk (*) so that only the
	// start of the string needs to be an exact match.
	OnExitCode *string

	// Contains a glob pattern to match against the Reason returned for a job. The
	// pattern can contain up to 512 characters. It can contain letters, numbers,
	// periods (.), colons (:), and white space (including spaces and tabs). It can
	// optionally end with an asterisk (*) so that only the start of the string needs
	// to be an exact match.
	OnReason *string

	// Contains a glob pattern to match against the StatusReason returned for a job.
	// The pattern can contain up to 512 characters. It can contain letters, numbers,
	// periods (.), colons (:), and white spaces (including spaces or tabs). It can
	// optionally end with an asterisk (*) so that only the start of the string needs
	// to be an exact match.
	OnStatusReason *string

	noSmithyDocumentSerde
}
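
// exampleEvaluateOnExit is an illustrative sketch, not part of the generated
// API. It builds a single condition that retries a job whose exit code's
// decimal representation matches the glob 137* ; the pattern is an assumption
// for the example. RetryAction is assumed to be the string-backed enum defined
// elsewhere in this package, so the documented "RETRY" value converts directly.
func exampleEvaluateOnExit() EvaluateOnExit {
	onExitCode := "137*"
	return EvaluateOnExit{
		Action:     RetryAction("RETRY"),
		OnExitCode: &onExitCode,
	}
}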

// The fair share policy for a scheduling policy.
type FairsharePolicy struct {

	// A value used to reserve some of the available maximum vCPU for fair share
	// identifiers that aren't already used. The reserved ratio is
	// (computeReservation/100)^ActiveFairShares where  ActiveFairShares  is the
	// number of active fair share identifiers. For example, a computeReservation
	// value of 50 indicates that Batch reserves 50% of the maximum available vCPU if
	// there's only one fair share identifier. It reserves 25% if there are two fair
	// share identifiers. It reserves 12.5% if there are three fair share identifiers.
	// A computeReservation value of 25 indicates that Batch should reserve 25% of the
	// maximum available vCPU if there's only one fair share identifier, 6.25% if there
	// are two fair share identifiers, and 1.56% if there are three fair share
	// identifiers. The minimum value is 0 and the maximum value is 99.
	ComputeReservation *int32

	// The amount of time (in seconds) to use to calculate a fair share percentage for
	// each fair share identifier in use. A value of zero (0) indicates that only
	// current usage is measured. The decay allows for more recently run jobs to have
	// more weight than jobs that ran earlier. The maximum supported value is 604800 (1
	// week).
	ShareDecaySeconds *int32

	// An array of SharedIdentifier objects that contain the weights for the fair
	// share identifiers for the fair share policy. Fair share identifiers that aren't
	// included have a default weight of 1.0 .
	ShareDistribution []ShareAttributes

	noSmithyDocumentSerde
}
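
// exampleComputeReservation is an illustrative sketch, not part of the
// generated API, of the reservation formula described above:
// (computeReservation/100)^ActiveFairShares. With computeReservation = 50 it
// returns 0.5, 0.25, and 0.125 for one, two, and three active fair share
// identifiers, matching the percentages in the field documentation.
func exampleComputeReservation(computeReservation int32, activeFairShares int) float64 {
	ratio := float64(computeReservation) / 100
	reserved := 1.0
	for i := 0; i < activeFairShares; i++ {
		reserved *= ratio
	}
	return reserved
}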

// The platform configuration for jobs that are running on Fargate resources. Jobs
// that run on EC2 resources must not specify this parameter.
type FargatePlatformConfiguration struct {

	// The Fargate platform version where the jobs are running. A platform version is
	// specified only for jobs that are running on Fargate resources. If one isn't
	// specified, the LATEST platform version is used by default. This uses a recent,
	// approved version of the Fargate platform for compute resources. For more
	// information, see Fargate platform versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html)
	// in the Amazon Elastic Container Service Developer Guide.
	PlatformVersion *string

	noSmithyDocumentSerde
}

// Determines whether your data volume persists on the host container instance and
// where it's stored. If this parameter is empty, then the Docker daemon assigns a
// host path for your data volume. However, the data isn't guaranteed to persist
// after the containers that are associated with it stop running.
type Host struct {

	// The path on the host container instance that's presented to the container. If
	// this parameter is empty, then the Docker daemon has assigned a host path for
	// you. If this parameter contains a file location, then the data volume persists
	// at the specified location on the host container instance until you delete it
	// manually. If the source path location doesn't exist on the host container
	// instance, the Docker daemon creates it. If the location does exist, the contents
	// of the source path folder are exported. This parameter isn't applicable to jobs
	// that run on Fargate resources. Don't provide this for these jobs.
	SourcePath *string

	noSmithyDocumentSerde
}

// An object that represents a Batch job definition.
type JobDefinition struct {

	// The Amazon Resource Name (ARN) for the job definition.
	//
	// This member is required.
	JobDefinitionArn *string

	// The name of the job definition.
	//
	// This member is required.
	JobDefinitionName *string

	// The revision of the job definition.
	//
	// This member is required.
	Revision *int32

	// The type of job definition. It's either container or multinode . If the job is
	// run on Fargate resources, then multinode isn't supported. For more information
	// about multi-node parallel jobs, see Creating a multi-node parallel job
	// definition (https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html)
	// in the Batch User Guide.
	//
	// This member is required.
	Type *string

	// The orchestration type of the compute environment. The valid values are ECS
	// (default) or EKS .
	ContainerOrchestrationType OrchestrationType

	// An object with various properties specific to Amazon ECS based jobs. Valid
	// values are containerProperties , eksProperties , and nodeProperties . Only one
	// can be specified.
	ContainerProperties *ContainerProperties

	// An object with various properties that are specific to Amazon EKS based jobs.
	// Valid values are containerProperties , eksProperties , and nodeProperties . Only
	// one can be specified.
	EksProperties *EksProperties

	// An object with various properties that are specific to multi-node parallel
	// jobs. Valid values are containerProperties , eksProperties , and nodeProperties
	// . Only one can be specified. If the job runs on Fargate resources, don't specify
	// nodeProperties . Use containerProperties instead.
	NodeProperties *NodeProperties

	// Default parameters or parameter substitution placeholders that are set in the
	// job definition. Parameters are specified as a key-value pair mapping. Parameters
	// in a SubmitJob request override any corresponding parameter defaults from the
	// job definition. For more information about specifying parameters, see Job
	// definition parameters (https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html)
	// in the Batch User Guide.
	Parameters map[string]string

	// The platform capabilities required by the job definition. If no value is
	// specified, it defaults to EC2 . Jobs run on Fargate resources specify FARGATE .
	PlatformCapabilities []PlatformCapability

	// Specifies whether to propagate the tags from the job or job definition to the
	// corresponding Amazon ECS task. If no value is specified, the tags aren't
	// propagated. Tags can only be propagated to the tasks when the tasks are created.
	// For tags with the same name, job tags are given priority over job definition
	// tags. If the total number of combined tags from the job and job definition is
	// over 50, the job is moved to the FAILED state.
	PropagateTags *bool

	// The retry strategy to use for failed jobs that are submitted with this job
	// definition.
	RetryStrategy *RetryStrategy

	// The scheduling priority of the job definition. This only affects jobs in job
	// queues with a fair share policy. Jobs with a higher scheduling priority are
	// scheduled before jobs with a lower scheduling priority.
	SchedulingPriority *int32

	// The status of the job definition.
	Status *string

	// The tags that are applied to the job definition.
	Tags map[string]string

	// The timeout time for jobs that are submitted with this job definition. After
	// the amount of time you specify passes, Batch terminates your jobs if they aren't
	// finished.
	Timeout *JobTimeout

	noSmithyDocumentSerde
}

// An object that represents a Batch job dependency.
type JobDependency struct {

	// The job ID of the Batch job that's associated with this dependency.
	JobId *string

	// The type of the job dependency.
	Type ArrayJobDependency

	noSmithyDocumentSerde
}

// An object that represents a Batch job.
type JobDetail struct {

	// The Amazon Resource Name (ARN) of the job definition that this job uses.
	//
	// This member is required.
	JobDefinition *string

	// The job ID.
	//
	// This member is required.
	JobId *string

	// The job name.
	//
	// This member is required.
	JobName *string

	// The Amazon Resource Name (ARN) of the job queue that the job is associated with.
	//
	// This member is required.
	JobQueue *string

	// The Unix timestamp (in milliseconds) for when the job was started. More
	// specifically, it's when the job transitioned from the STARTING state to the
	// RUNNING state. This parameter isn't provided for child jobs of array jobs or
	// multi-node parallel jobs.
	//
	// This member is required.
	StartedAt *int64

	// The current status for the job. If your jobs don't progress to STARTING , see
	// Jobs stuck in RUNNABLE status (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable)
	// in the troubleshooting section of the Batch User Guide.
	//
	// This member is required.
	Status JobStatus

	// The array properties of the job, if it's an array job.
	ArrayProperties *ArrayPropertiesDetail

	// A list of job attempts that are associated with this job.
	Attempts []AttemptDetail

	// An object that represents the details for the container that's associated with
	// the job.
	Container *ContainerDetail

	// The Unix timestamp (in milliseconds) for when the job was created. For
	// non-array jobs and parent array jobs, this is when the job entered the SUBMITTED
	// state. This is specifically at the time SubmitJob was called. For array child
	// jobs, this is when the child job was spawned by its parent and entered the
	// PENDING state.
	CreatedAt *int64

	// A list of job IDs that this job depends on.
	DependsOn []JobDependency

	// A list of job attempts that are associated with this job.
	EksAttempts []EksAttemptDetail

	// An object with various properties that are specific to Amazon EKS based jobs.
	// Only one of container , eksProperties , or nodeDetails is specified.
	EksProperties *EksPropertiesDetail

	// Indicates whether the job is canceled.
	IsCancelled *bool

	// Indicates whether the job is terminated.
	IsTerminated *bool

	// The Amazon Resource Name (ARN) of the job.
	JobArn *string

	// An object that represents the details of a node that's associated with a
	// multi-node parallel job.
	NodeDetails *NodeDetails

	// An object that represents the node properties of a multi-node parallel job.
	// This isn't applicable to jobs that are running on Fargate resources.
	NodeProperties *NodeProperties

	// Additional parameters that are passed to the job that replace parameter
	// substitution placeholders or override any corresponding parameter defaults from
	// the job definition.
	Parameters map[string]string

	// The platform capabilities required by the job definition. If no value is
	// specified, it defaults to EC2 . Jobs run on Fargate resources specify FARGATE .
	PlatformCapabilities []PlatformCapability

	// Specifies whether to propagate the tags from the job or job definition to the
	// corresponding Amazon ECS task. If no value is specified, the tags aren't
	// propagated. Tags can only be propagated to the tasks when the tasks are created.
	// For tags with the same name, job tags are given priority over job definition
	// tags. If the total number of combined tags from the job and job definition is
	// over 50, the job is moved to the FAILED state.
	PropagateTags *bool

	// The retry strategy to use for this job if an attempt fails.
	RetryStrategy *RetryStrategy

	// The scheduling priority of the job definition. This only affects jobs in job
	// queues with a fair share policy. Jobs with a higher scheduling priority are
	// scheduled before jobs with a lower scheduling priority.
	SchedulingPriority *int32

	// The share identifier for the job.
	ShareIdentifier *string

	// A short, human-readable string to provide more details for the current status
	// of the job.
	StatusReason *string

	// The Unix timestamp (in milliseconds) for when the job was stopped. More
	// specifically, it's when the job transitioned from the RUNNING state to a
	// terminal state, such as SUCCEEDED or FAILED .
	StoppedAt *int64

	// The tags that are applied to the job.
	Tags map[string]string

	// The timeout configuration for the job.
	Timeout *JobTimeout

	noSmithyDocumentSerde
}

// An object that represents the details for a Batch job queue.
type JobQueueDetail struct {

	// The compute environments that are attached to the job queue and the order that
	// job placement is preferred. Compute environments are selected for job placement
	// in ascending order.
	//
	// This member is required.
	ComputeEnvironmentOrder []ComputeEnvironmentOrder

	// The Amazon Resource Name (ARN) of the job queue.
	//
	// This member is required.
	JobQueueArn *string

	// The job queue name.
	//
	// This member is required.
	JobQueueName *string

	// The priority of the job queue. Job queues with a higher priority (or a higher
	// integer value for the priority parameter) are evaluated first when associated
	// with the same compute environment. Priority is determined in descending order.
	// For example, a job queue with a priority value of 10 is given scheduling
	// preference over a job queue with a priority value of 1 . All of the compute
	// environments must be either EC2 ( EC2 or SPOT ) or Fargate ( FARGATE or
	// FARGATE_SPOT ). EC2 and Fargate compute environments can't be mixed.
	//
	// This member is required.
	Priority *int32

	// Describes the ability of the queue to accept new jobs. If the job queue state
	// is ENABLED , it can accept jobs. If the job queue state is DISABLED , new jobs
	// can't be added to the queue, but jobs already in the queue can finish.
	//
	// This member is required.
	State JQState

	// The Amazon Resource Name (ARN) of the scheduling policy. The format is
	// arn:Partition:batch:Region:Account:scheduling-policy/Name . For example,
	// arn:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy .
	SchedulingPolicyArn *string

	// The status of the job queue (for example, CREATING or VALID ).
	Status JQStatus

	// A short, human-readable string to provide additional details for the current
	// status of the job queue.
	StatusReason *string

	// The tags that are applied to the job queue. For more information, see Tagging
	// your Batch resources (https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html)
	// in the Batch User Guide.
	Tags map[string]string

	noSmithyDocumentSerde
}

// An object that represents summary details of a job.
type JobSummary struct {

	// The job ID.
	//
	// This member is required.
	JobId *string

	// The job name.
	//
	// This member is required.
	JobName *string

	// The array properties of the job, if it's an array job.
	ArrayProperties *ArrayPropertiesSummary

	// An object that represents the details of the container that's associated with
	// the job.
	Container *ContainerSummary

	// The Unix timestamp (in milliseconds) for when the job was created. For
	// non-array jobs and parent array jobs, this is when the job entered the SUBMITTED
	// state (at the time SubmitJob was called). For array child jobs, this is when
	// the child job was spawned by its parent and entered the PENDING state.
	CreatedAt *int64

	// The Amazon Resource Name (ARN) of the job.
	JobArn *string

	// The Amazon Resource Name (ARN) of the job definition.
	JobDefinition *string

	// The node properties for a single node in a job summary list. This isn't
	// applicable to jobs that are running on Fargate resources.
	NodeProperties *NodePropertiesSummary

	// The Unix timestamp for when the job was started. More specifically, it's when
	// the job transitioned from the STARTING state to the RUNNING state.
	StartedAt *int64

	// The current status for the job.
	Status JobStatus

	// A short, human-readable string to provide more details for the current status
	// of the job.
	StatusReason *string

	// The Unix timestamp for when the job was stopped. More specifically, it's when
	// the job transitioned from the RUNNING state to a terminal state, such as
	// SUCCEEDED or FAILED .
	StoppedAt *int64

	noSmithyDocumentSerde
}

// An object that represents a job timeout configuration.
type JobTimeout struct {

	// The job timeout time (in seconds) that's measured from the job attempt's
	// startedAt timestamp. After this time passes, Batch terminates your jobs if they
	// aren't finished. The minimum value for the timeout is 60 seconds. For array
	// jobs, the timeout applies to the child jobs, not to the parent array job. For
	// multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the
	// individual nodes.
	AttemptDurationSeconds *int32

	noSmithyDocumentSerde
}

// A key-value pair object.
type KeyValuePair struct {

	// The name of the key-value pair. For environment variables, this is the name of
	// the environment variable.
	Name *string

	// The value of the key-value pair. For environment variables, this is the value
	// of the environment variable.
	Value *string

	noSmithyDocumentSerde
}

// A filter name and value pair that's used to return a more specific list of
// results from a ListJobs API operation.
type KeyValuesPair struct {

	// The name of the filter. Filter names are case sensitive.
	Name *string

	// The filter values.
	Values []string

	noSmithyDocumentSerde
}

// An object that represents a launch template that's associated with a compute
// resource. You must specify either the launch template ID or launch template name
// in the request, but not both. If security groups are specified using both the
// securityGroupIds parameter of CreateComputeEnvironment and the launch template,
// the values in the securityGroupIds parameter of CreateComputeEnvironment will
// be used. This object isn't applicable to jobs that are running on Fargate
// resources.
type LaunchTemplateSpecification struct {

	// The ID of the launch template.
	LaunchTemplateId *string

	// The name of the launch template.
	LaunchTemplateName *string

	// The version number of the launch template, $Latest , or $Default . If the value
	// is $Latest , the latest version of the launch template is used. If the value is
	// $Default , the default version of the launch template is used. If the AMI ID
	// that's used in a compute environment is from the launch template, the AMI isn't
	// changed when the compute environment is updated. It's only changed if the
	// updateToLatestImageVersion parameter for the compute environment is set to true
	// . During an infrastructure update, if either $Latest or $Default is specified,
	// Batch re-evaluates the launch template version, and it might use a different
	// version of the launch template. This is the case even if the launch template
	// isn't specified in the update. When updating a compute environment, changing the
	// launch template requires an infrastructure update of the compute environment.
	// For more information, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
	// in the Batch User Guide. Default: $Default .
	Version *string

	noSmithyDocumentSerde
}
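
// exampleLaunchTemplate is an illustrative sketch, not part of the generated
// API. It references a launch template by name only (the ID and name are
// mutually exclusive) and pins the documented $Default version behavior. The
// template name is an assumption for the example.
func exampleLaunchTemplate() LaunchTemplateSpecification {
	name := "batch-compute-template"
	version := "$Default"
	return LaunchTemplateSpecification{
		LaunchTemplateName: &name,
		Version:            &version,
	}
}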

// Linux-specific modifications that are applied to the container, such as details
// for device mappings.
type LinuxParameters struct {

	// Any of the host devices to expose to the container. This parameter maps to
	// Devices in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --device option to docker run (https://docs.docker.com/engine/reference/run/)
	// . This parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't provide it for these jobs.
	Devices []Device

	// If true, run an init process inside the container that forwards signals and
	// reaps processes. This parameter maps to the --init option to docker run (https://docs.docker.com/engine/reference/run/)
	// . This parameter requires version 1.25 of the Docker Remote API or greater on
	// your container instance. To check the Docker Remote API version on your
	// container instance, log in to your container instance and run the following
	// command: sudo docker version | grep "Server API version"
	InitProcessEnabled *bool

	// The total amount of swap memory (in MiB) a container can use. This parameter is
	// translated to the --memory-swap option to docker run (https://docs.docker.com/engine/reference/run/)
	// where the value is the sum of the container memory plus the maxSwap value. For
	// more information, see --memory-swap details (https://docs.docker.com/config/containers/resource_constraints/#--memory-swap-details)
	// in the Docker documentation. If a maxSwap value of 0 is specified, the
	// container doesn't use swap. Accepted values are 0 or any positive integer. If
	// the maxSwap parameter is omitted, the container doesn't use the swap
	// configuration for the container instance that it's running on. A maxSwap value
	// must be set for the swappiness parameter to be used. This parameter isn't
	// applicable to jobs that are running on Fargate resources. Don't provide it for
	// these jobs.
	MaxSwap *int32

	// The value for the size (in MiB) of the /dev/shm volume. This parameter maps to
	// the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/)
	// . This parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't provide it for these jobs.
	SharedMemorySize *int32

	// You can use this parameter to tune a container's memory swappiness behavior. A
	// swappiness value of 0 causes swapping to not occur unless absolutely necessary.
	// A swappiness value of 100 causes pages to be swapped aggressively. Valid values
	// are whole numbers between 0 and 100 . If the swappiness parameter isn't
	// specified, a default value of 60 is used. If a value isn't specified for maxSwap
	// , then this parameter is ignored. If maxSwap is set to 0, the container doesn't
	// use swap. This parameter maps to the --memory-swappiness option to docker run (https://docs.docker.com/engine/reference/run/)
	// . Consider the following when you use a per-container swap configuration.
	//   - Swap space must be enabled and allocated on the container instance for the
	//   containers to use. By default, the Amazon ECS optimized AMIs don't have swap
	//   enabled. You must enable swap on the instance to use this feature. For more
	//   information, see Instance store swap volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-store-swap-volumes.html)
	//   in the Amazon EC2 User Guide for Linux Instances or How do I allocate memory
	//   to work as swap space in an Amazon EC2 instance by using a swap file? (http://aws.amazon.com/premiumsupport/knowledge-center/ec2-memory-swap-file/)
	//   - The swap space parameters are only supported for job definitions using EC2
	//   resources.
	//   - If the maxSwap and swappiness parameters are omitted from a job definition,
	//   each container has a default swappiness value of 60. Moreover, the total swap
	//   usage is limited to two times the memory reservation of the container.
	// This parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't provide it for these jobs.
	Swappiness *int32

	// The container path, mount options, and size (in MiB) of the tmpfs mount. This
	// parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/)
	// . This parameter isn't applicable to jobs that are running on Fargate resources.
	// Don't provide this parameter for this resource type.
	Tmpfs []Tmpfs

	noSmithyDocumentSerde
}
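
// exampleSwapConfiguration is an illustrative sketch, not part of the
// generated API, of the per-container swap settings described above: maxSwap
// must be set for swappiness to take effect, and both apply only to jobs on
// EC2 resources. The specific values are assumptions for the example.
func exampleSwapConfiguration() LinuxParameters {
	maxSwap := int32(2048)  // allow up to 2048 MiB of swap on top of the container memory
	swappiness := int32(10) // swap reluctantly; valid range is 0-100, default 60
	return LinuxParameters{
		MaxSwap:    &maxSwap,
		Swappiness: &swappiness,
	}
}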

// Log configuration options to send to a custom log driver for the container.
type LogConfiguration struct {

	// The log driver to use for the container. The valid values that are listed for
	// this parameter are log drivers that the Amazon ECS container agent can
	// communicate with by default. The supported log drivers are awslogs , fluentd ,
	// gelf , json-file , journald , logentries , syslog , and splunk . Jobs that are
	// running on Fargate resources are restricted to the awslogs and splunk log
	// drivers.
	//   - awslogs : Specifies the Amazon CloudWatch Logs logging driver. For more
	//   information, see Using the awslogs log driver (https://docs.aws.amazon.com/batch/latest/userguide/using_awslogs.html)
	//   in the Batch User Guide and Amazon CloudWatch Logs logging driver (https://docs.docker.com/config/containers/logging/awslogs/)
	//   in the Docker documentation.
	//   - fluentd : Specifies the Fluentd logging driver. For more information
	//   including usage and options, see Fluentd logging driver (https://docs.docker.com/config/containers/logging/fluentd/)
	//   in the Docker documentation.
	//   - gelf : Specifies the Graylog Extended Format (GELF) logging driver. For
	//   more information including usage and options, see Graylog Extended Format
	//   logging driver (https://docs.docker.com/config/containers/logging/gelf/) in
	//   the Docker documentation.
	//   - journald : Specifies the journald logging driver. For more information
	//   including usage and options, see Journald logging driver (https://docs.docker.com/config/containers/logging/journald/)
	//   in the Docker documentation.
	//   - json-file : Specifies the JSON file logging driver. For more information
	//   including usage and options, see JSON File logging driver (https://docs.docker.com/config/containers/logging/json-file/)
	//   in the Docker documentation.
	//   - splunk : Specifies the Splunk logging driver. For more information
	//   including usage and options, see Splunk logging driver (https://docs.docker.com/config/containers/logging/splunk/)
	//   in the Docker documentation.
	//   - syslog : Specifies the syslog logging driver. For more information
	//   including usage and options, see Syslog logging driver (https://docs.docker.com/config/containers/logging/syslog/)
	//   in the Docker documentation.
	// If you have a custom driver that's not listed earlier that you want to work
	// with the Amazon ECS container agent, you can fork the Amazon ECS container
	// agent project that's available on GitHub (https://github.com/aws/amazon-ecs-agent)
	// and customize it to work with that driver. We encourage you to submit pull
	// requests for changes that you want to have included. However, Amazon Web
	// Services doesn't currently support running modified copies of this software.
	// This parameter requires version 1.18 of the Docker Remote API or greater on
	// your container instance. To check the Docker Remote API version on your
	// container instance, log in to your container instance and run the following
	// command: sudo docker version | grep "Server API version"
	//
	// This member is required.
	LogDriver LogDriver

	// The configuration options to send to the log driver. This parameter requires
	// version 1.19 of the Docker Remote API or greater on your container instance. To
	// check the Docker Remote API version on your container instance, log in to your
	// container instance and run the following command: sudo docker version | grep
	// "Server API version"
	Options map[string]string

	// The secrets to pass to the log configuration. For more information, see
	// Specifying sensitive data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
	// in the Batch User Guide.
	SecretOptions []Secret

	noSmithyDocumentSerde
}
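
// exampleAwslogsConfiguration is an illustrative sketch, not part of the
// generated API, of a log configuration that uses the awslogs driver. The
// option keys follow the Docker awslogs driver; the log group and Region
// values are assumptions for the example. LogDriver is assumed to be the
// string-backed enum defined elsewhere in this package.
func exampleAwslogsConfiguration() LogConfiguration {
	return LogConfiguration{
		LogDriver: LogDriver("awslogs"),
		Options: map[string]string{
			"awslogs-group":  "/batch/example-job", // assumed log group name
			"awslogs-region": "us-east-1",          // assumed Region
		},
	}
}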

// Details for a Docker volume mount point that's used in a job's container
// properties. This parameter maps to Volumes in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.19/#create-a-container)
// section of the Docker Remote API and the --volume option to docker run.
type MountPoint struct {

	// The path on the container where the host volume is mounted.
	ContainerPath *string

	// If this value is true , the container has read-only access to the volume.
	// Otherwise, the container can write to the volume. The default value is false .
	ReadOnly *bool

	// The name of the volume to mount.
	SourceVolume *string

	noSmithyDocumentSerde
}

// The network configuration for jobs that are running on Fargate resources. Jobs
// that are running on EC2 resources must not specify this parameter.
type NetworkConfiguration struct {

	// Indicates whether the job has a public IP address. For a job that's running on
	// Fargate resources in a private subnet to send outbound traffic to the internet
	// (for example, to pull container images), the private subnet requires a NAT
	// gateway be attached to route requests to the internet. For more information, see
	// Amazon ECS task networking (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html)
	// in the Amazon Elastic Container Service Developer Guide. The default value is "
	// DISABLED ".
	AssignPublicIp AssignPublicIp

	noSmithyDocumentSerde
}

// An object that represents the elastic network interface for a multi-node
// parallel job node.
type NetworkInterface struct {

	// The attachment ID for the network interface.
	AttachmentId *string

	// The private IPv6 address for the network interface.
	Ipv6Address *string

	// The private IPv4 address for the network interface.
	PrivateIpv4Address *string

	noSmithyDocumentSerde
}

// An object that represents the details of a multi-node parallel job node.
type NodeDetails struct {

	// Specifies whether the current node is the main node for a multi-node parallel
	// job.
	IsMainNode *bool

	// The node index for the node. Node index numbering starts at zero. This index is
	// also available on the node with the AWS_BATCH_JOB_NODE_INDEX environment
	// variable.
	NodeIndex *int32

	noSmithyDocumentSerde
}

// An object that represents any node overrides to a job definition that's used in
// a SubmitJob API operation. This parameter isn't applicable to jobs that are
// running on Fargate resources. Don't provide it for these jobs. Rather, use
// containerOverrides instead.
type NodeOverrides struct {

	// The node property overrides for the job.
	NodePropertyOverrides []NodePropertyOverride

	// The number of nodes to use with a multi-node parallel job. This value overrides
	// the number of nodes that are specified in the job definition. To use this
	// override, you must meet the following conditions:
	//   - There must be at least one node range in your job definition that has an
	//   open upper boundary, such as : or n: .
	//   - The lower boundary of the node range that's specified in the job definition
	//   must be less than the number of nodes specified in the override.
	//   - The main node index that's specified in the job definition must be less
	//   than the number of nodes specified in the override.
	NumNodes *int32

	noSmithyDocumentSerde
}

// An object that represents the node properties of a multi-node parallel job.
// Node properties can't be specified for Amazon EKS based job definitions.
type NodeProperties struct {

	// Specifies the node index for the main node of a multi-node parallel job. This
	// node index value must be less than the number of nodes.
	//
	// This member is required.
	MainNode *int32

	// A list of node ranges and their properties that are associated with a
	// multi-node parallel job.
	//
	// This member is required.
	NodeRangeProperties []NodeRangeProperty

	// The number of nodes that are associated with a multi-node parallel job.
	//
	// This member is required.
	NumNodes *int32

	noSmithyDocumentSerde
}

// An object that represents the properties of a node that's associated with a
// multi-node parallel job.
type NodePropertiesSummary struct {

	// Specifies whether the current node is the main node for a multi-node parallel
	// job.
	IsMainNode *bool

	// The node index for the node. Node index numbering begins at zero. This index is
	// also available on the node with the AWS_BATCH_JOB_NODE_INDEX environment
	// variable.
	NodeIndex *int32

	// The number of nodes that are associated with a multi-node parallel job.
	NumNodes *int32

	noSmithyDocumentSerde
}

// The object that represents any node overrides to a job definition that's used
// in a SubmitJob API operation.
type NodePropertyOverride struct {

	// The range of nodes, using node index values, that's used to override. A range
	// of 0:3 indicates nodes with index values of 0 through 3 . If the starting range
	// value is omitted ( :n ), then 0 is used to start the range. If the ending range
	// value is omitted ( n: ), then the highest possible node index is used to end the
	// range.
	//
	// This member is required.
	TargetNodes *string

	// The overrides that are sent to a node range.
	ContainerOverrides *ContainerOverrides

	noSmithyDocumentSerde
}

// An object that represents the properties of the node range for a multi-node
// parallel job.
type NodeRangeProperty struct {

	// The range of nodes, using node index values. A range of 0:3 indicates nodes
	// with index values of 0 through 3 . If the starting range value is omitted ( :n
	// ), then 0 is used to start the range. If the ending range value is omitted ( n:
	// ), then the highest possible node index is used to end the range. Your
	// accumulative node ranges must account for all nodes ( 0:n ). You can nest node
	// ranges (for example, 0:10 and 4:5 ). In this case, the 4:5 range properties
	// override the 0:10 properties.
	//
	// This member is required.
	TargetNodes *string

	// The container details for the node range.
	Container *ContainerProperties

	noSmithyDocumentSerde
}
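
// exampleNodeProperties is an illustrative sketch, not part of the generated
// API, of a four-node multi-node parallel job. Node 0 is the main node, the
// 0:0 range covers it, and the open-ended 1: range covers the remaining nodes,
// so the ranges together account for all nodes as required above. Container
// details are omitted for brevity.
func exampleNodeProperties() NodeProperties {
	mainNode := int32(0)
	numNodes := int32(4)
	mainRange := "0:0"
	workerRange := "1:"
	return NodeProperties{
		MainNode: &mainNode,
		NumNodes: &numNodes,
		NodeRangeProperties: []NodeRangeProperty{
			{TargetNodes: &mainRange},
			{TargetNodes: &workerRange},
		},
	}
}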

// The type and amount of a resource to assign to a container. The supported
// resources include GPU , MEMORY , and VCPU .
type ResourceRequirement struct {

	// The type of resource to assign to a container. The supported resources include
	// GPU , MEMORY , and VCPU .
	//
	// This member is required.
	Type ResourceType

	// The quantity of the specified resource to reserve for the container. The
	// values vary based on the type specified.
	//
	// type="GPU" The number of physical GPUs to reserve for the container. Make
	// sure that the number of GPUs reserved for all containers in a job doesn't
	// exceed the number of available GPUs on the compute resource that the job is
	// launched on. GPUs aren't available for jobs that are running on Fargate
	// resources.
	//
	// type="MEMORY" The memory hard limit (in MiB) present to the container. This
	// parameter is supported for jobs that are running on EC2 resources. If your
	// container attempts to exceed the memory specified, the container is
	// terminated. This parameter maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --memory option to docker run (https://docs.docker.com/engine/reference/run/)
	// . You must specify at least 4 MiB of memory for a job. This is required but
	// can be specified in several places for multi-node parallel (MNP) jobs. It must
	// be specified for each node at least once. If you're trying to maximize your
	// resource utilization by providing your jobs as much memory as possible for a
	// particular instance type, see Memory management (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html)
	// in the Batch User Guide. For jobs that are running on Fargate resources, the
	// value is the hard limit (in MiB) and must match one of the supported values,
	// and the VCPU value must be one of the values supported for that memory value:
	//   - value = 512 : VCPU = 0.25
	//   - value = 1024 : VCPU = 0.25 or 0.5
	//   - value = 2048 : VCPU = 0.25, 0.5, or 1
	//   - value = 3072 : VCPU = 0.5 or 1
	//   - value = 4096 : VCPU = 0.5, 1, or 2
	//   - value = 5120, 6144, or 7168 : VCPU = 1 or 2
	//   - value = 8192 : VCPU = 1, 2, or 4
	//   - value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360 : VCPU = 2 or 4
	//   - value = 16384 : VCPU = 2, 4, or 8
	//   - value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648,
	//   29696, or 30720 : VCPU = 4
	//   - value = 20480, 24576, or 28672 : VCPU = 4 or 8
	//   - value = 36864, 45056, 53248, or 61440 : VCPU = 8
	//   - value = 32768, 40960, 49152, or 57344 : VCPU = 8 or 16
	//   - value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880 :
	//   VCPU = 16
	//
	// type="VCPU" The number of vCPUs reserved for the container. This parameter
	// maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/)
	// . Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must
	// specify at least one vCPU. This is required but can be specified in several
	// places; it must be specified for each node at least once. The default for the
	// Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information
	// about Fargate quotas, see Fargate quotas (https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate)
	// in the Amazon Web Services General Reference. For jobs that are running on
	// Fargate resources, the value must match one of the supported values, and the
	// MEMORY value must be one of the values supported for that VCPU value. The
	// supported values are 0.25, 0.5, 1, 2, 4, 8, and 16:
	//   - value = 0.25 : MEMORY = 512, 1024, or 2048
	//   - value = 0.5 : MEMORY = 1024, 2048, 3072, or 4096
	//   - value = 1 : MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192
	//   - value = 2 : MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264,
	//   12288, 13312, 14336, 15360, or 16384
	//   - value = 4 : MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360,
	//   16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624,
	//   27648, 28672, 29696, or 30720
	//   - value = 8 : MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960,
	//   45056, 49152, 53248, 57344, or 61440
	//   - value = 16 : MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920,
	//   90112, 98304, 106496, 114688, or 122880
	//
	// This member is required.
	Value *string

	noSmithyDocumentSerde
}
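
// exampleFargateResources is an illustrative sketch, not part of the generated
// API, of a MEMORY/VCPU pair taken from the supported Fargate combinations
// listed above: 1 vCPU may be paired with 2048 MiB. ResourceType is assumed to
// be the string-backed enum defined elsewhere in this package.
func exampleFargateResources() []ResourceRequirement {
	vcpu := "1"
	memory := "2048"
	return []ResourceRequirement{
		{Type: ResourceType("VCPU"), Value: &vcpu},
		{Type: ResourceType("MEMORY"), Value: &memory},
	}
}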

// The retry strategy that's associated with a job. For more information, see
// Automated job retries (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html)
// in the Batch User Guide.
type RetryStrategy struct {

	// The number of times to move a job to the RUNNABLE status. You can specify
	// between 1 and 10 attempts. If the value of attempts is greater than one, the
	// job is retried on failure the same number of times as the value.
	Attempts *int32

	// Array of up to 5 objects that specify the conditions where jobs are retried or
	// failed. If this parameter is specified, then the attempts parameter must also
	// be specified. If none of the listed conditions match, then the job is retried.
	EvaluateOnExit []EvaluateOnExit

	noSmithyDocumentSerde
}
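
// exampleRetryStrategy is an illustrative sketch, not part of the generated
// API. Attempts is set because it's required whenever evaluateOnExit is
// specified. The first condition retries when the status reason starts with
// "Host EC2", and the catch-all second condition exits on any other reason;
// both glob values are assumptions for the example.
func exampleRetryStrategy() RetryStrategy {
	attempts := int32(3)
	spotReclaim := "Host EC2*"
	anyReason := "*"
	return RetryStrategy{
		Attempts: &attempts,
		EvaluateOnExit: []EvaluateOnExit{
			{Action: RetryAction("RETRY"), OnStatusReason: &spotReclaim},
			{Action: RetryAction("EXIT"), OnReason: &anyReason},
		},
	}
}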

// An object that represents the compute environment architecture for Batch jobs
// on Fargate.
type RuntimePlatform struct {

	// The vCPU architecture. The default value is X86_64 . Valid values are X86_64
	// and ARM64 . This parameter must be set to X86_64 for Windows containers.
	CpuArchitecture *string

	// The operating system for the compute environment. Valid values are: LINUX
	// (default), WINDOWS_SERVER_2019_CORE , WINDOWS_SERVER_2019_FULL ,
	// WINDOWS_SERVER_2022_CORE , and WINDOWS_SERVER_2022_FULL . The following
	// parameters can’t be set for Windows containers: linuxParameters , privileged ,
	// user , ulimits , readonlyRootFilesystem , and efsVolumeConfiguration . The Batch
	// Scheduler checks the compute environments before registering a task definition
	// with Fargate. If the job
	// requires a Windows container and the first compute environment is LINUX , the
	// compute environment is skipped and the next is checked until a Windows-based
	// compute environment is found. Fargate Spot is not supported for Windows-based
	// containers on Fargate. A job queue will be blocked if a Fargate Windows job is
	// submitted to a job queue with only Fargate Spot compute environments. However,
	// you can attach both FARGATE and FARGATE_SPOT compute environments to the same
	// job queue.
	OperatingSystemFamily *string

	noSmithyDocumentSerde
}
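
// exampleRuntimePlatform is an illustrative sketch, not part of the generated
// API, pairing a Windows operating system family with the X86_64 architecture
// that Windows containers require, using values from the field documentation
// above.
func exampleRuntimePlatform() RuntimePlatform {
	cpu := "X86_64"
	osFamily := "WINDOWS_SERVER_2022_CORE"
	return RuntimePlatform{
		CpuArchitecture:       &cpu,
		OperatingSystemFamily: &osFamily,
	}
}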

// An object that represents a scheduling policy.
type SchedulingPolicyDetail struct {

	// The Amazon Resource Name (ARN) of the scheduling policy. An example is
	// arn:aws:batch:us-east-1:123456789012:scheduling-policy/HighPriority .
	//
	// This member is required.
	Arn *string

	// The name of the scheduling policy.
	//
	// This member is required.
	Name *string

	// The fair share policy for the scheduling policy.
	FairsharePolicy *FairsharePolicy

	// The tags that you apply to the scheduling policy to categorize and organize
	// your resources. Each tag consists of a key and an optional value. For more
	// information, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
	// in Amazon Web Services General Reference.
	Tags map[string]string

	noSmithyDocumentSerde
}

// An object that contains the details of a scheduling policy that's returned in a
// ListSchedulingPolicies action.
type SchedulingPolicyListingDetail struct {

	// Amazon Resource Name (ARN) of the scheduling policy.
	//
	// This member is required.
	Arn *string

	noSmithyDocumentSerde
}

// An object that represents the secret to expose to your container. Secrets can
// be exposed to a container in the following ways:
//   - To inject sensitive data into your containers as environment variables, use
//     the secrets container definition parameter.
//   - To reference sensitive information in the log configuration of a container,
//     use the secretOptions container definition parameter.
//
// For more information, see Specifying sensitive data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
// in the Batch User Guide.
type Secret struct {

	// The name of the secret.
	//
	// This member is required.
	Name *string

	// The secret to expose to the container. The supported values are either the full
	// Amazon Resource Name (ARN) of the Secrets Manager secret or the full ARN of the
	// parameter in the Amazon Web Services Systems Manager Parameter Store. If the
	// Amazon Web Services Systems Manager Parameter Store parameter exists in the same
	// Region as the job you're launching, then you can use either the full Amazon
	// Resource Name (ARN) or name of the parameter. If the parameter exists in a
	// different Region, then the full ARN must be specified.
	//
	// This member is required.
	ValueFrom *string

	noSmithyDocumentSerde
}
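
// exampleSecret is an illustrative sketch, not part of the generated API, of a
// secret injected by the full ARN of a Secrets Manager secret, as described
// above. The environment variable name, account ID, and secret ARN are
// placeholder assumptions.
func exampleSecret() Secret {
	name := "DB_PASSWORD"
	valueFrom := "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-db-password"
	return Secret{
		Name:      &name,
		ValueFrom: &valueFrom,
	}
}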

// Specifies the weights for the fair share identifiers for the fair share policy.
// Fair share identifiers that aren't included have a default weight of 1.0 .
type ShareAttributes struct {

	// A fair share identifier or fair share identifier prefix. If the string ends
	// with an asterisk (*), this entry specifies the weight factor to use for fair
	// share identifiers that start with that prefix. The list of fair share
	// identifiers in a fair share policy can't overlap. For example, you can't have
	// one that specifies a shareIdentifier of UserA* and another that specifies a
	// shareIdentifier of UserA-1 . There can be no more than 500 fair share
	// identifiers active in a job queue. The string is limited to 255 alphanumeric
	// characters, and can be followed by an asterisk (*).
	//
	// This member is required.
	ShareIdentifier *string

	// The weight factor for the fair share identifier. The default value is 1.0. A
	// lower value has a higher priority for compute resources. For example, jobs that
	// use a share identifier with a weight factor of 0.125 (1/8) get 8 times the
	// compute resources of jobs that use a share identifier with a weight factor of 1.
	// The smallest supported value is 0.0001, and the largest supported value is
	// 999.9999.
	WeightFactor *float32

	noSmithyDocumentSerde
}
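
// exampleShareDistribution is an illustrative sketch, not part of the
// generated API. The prefix entry applies its weight to every fair share
// identifier that starts with "highpriority", and its lower weight factor
// gives those jobs a larger share of compute resources, as described above.
// The identifiers don't overlap, and their names and weights are assumptions
// for the example.
func exampleShareDistribution() []ShareAttributes {
	highPriority := "highpriority*"
	defaultShare := "default"
	highWeight := float32(0.5)
	defaultWeight := float32(1.0)
	return []ShareAttributes{
		{ShareIdentifier: &highPriority, WeightFactor: &highWeight},
		{ShareIdentifier: &defaultShare, WeightFactor: &defaultWeight},
	}
}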

// The container path, mount options, and size of the tmpfs mount. This object
// isn't applicable to jobs that are running on Fargate resources.
type Tmpfs struct {

	// The absolute file path in the container where the tmpfs volume is mounted.
	//
	// This member is required.
	ContainerPath *string

	// The size (in MiB) of the tmpfs volume.
	//
	// This member is required.
	Size *int32

	// The list of tmpfs volume mount options. Valid values: "defaults" | "ro" |
	// "rw" | "suid" | "nosuid" | "dev" | "nodev" | "exec" | "noexec" | "sync" |
	// "async" | "dirsync" | "remount" | "mand" | "nomand" | "atime" | "noatime" |
	// "diratime" | "nodiratime" | "bind" | "rbind" | "unbindable" | "runbindable" |
	// "private" | "rprivate" | "shared" | "rshared" | "slave" | "rslave" |
	// "relatime" | "norelatime" | "strictatime" | "nostrictatime" | "mode" | "uid" |
	// "gid" | "nr_inodes" | "nr_blocks" | "mpol"
	MountOptions []string

	noSmithyDocumentSerde
}

// The ulimit settings to pass to the container. This object isn't applicable to
// jobs that are running on Fargate resources.
type Ulimit struct {

	// The hard limit for the ulimit type.
	//
	// This member is required.
	HardLimit *int32

	// The type of the ulimit .
	//
	// This member is required.
	Name *string

	// The soft limit for the ulimit type.
	//
	// This member is required.
	SoftLimit *int32

	noSmithyDocumentSerde
}

// Specifies the infrastructure update policy for the compute environment. For
// more information about infrastructure updates, see Updating compute environments (https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html)
// in the Batch User Guide.
type UpdatePolicy struct {

	// Specifies the job timeout (in minutes) when the compute environment
	// infrastructure is updated. The default value is 30.
	JobExecutionTimeoutMinutes *int64

	// Specifies whether jobs are automatically terminated when the compute
	// environment infrastructure is updated. The default value is false .
	TerminateJobsOnUpdate *bool

	noSmithyDocumentSerde
}
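
// exampleUpdatePolicy is an illustrative sketch, not part of the generated
// API, of an infrastructure update policy that gives running jobs 60 minutes
// to finish rather than terminating them. The timeout value is an assumption
// for the example.
func exampleUpdatePolicy() UpdatePolicy {
	timeoutMinutes := int64(60)
	terminate := false
	return UpdatePolicy{
		JobExecutionTimeoutMinutes: &timeoutMinutes,
		TerminateJobsOnUpdate:      &terminate,
	}
}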

// A data volume that's used in a job's container properties.
type Volume struct {

	// This parameter is specified when you're using an Amazon Elastic File System
	// file system for job storage. Jobs that are running on Fargate resources must
	// specify a platformVersion of at least 1.4.0 .
	EfsVolumeConfiguration *EFSVolumeConfiguration

	// The contents of the host parameter determine whether your data volume persists
	// on the host container instance and where it's stored. If the host parameter is
	// empty, then the Docker daemon assigns a host path for your data volume. However,
	// the data isn't guaranteed to persist after the containers that are associated
	// with it stop running. This parameter isn't applicable to jobs that are running
	// on Fargate resources and shouldn't be provided.
	Host *Host

	// The name of the volume. It can be up to 255 characters long. It can contain
	// uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). This
	// name is referenced in the sourceVolume parameter of container definition
	// mountPoints .
	Name *string

	noSmithyDocumentSerde
}
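
// exampleHostVolume is an illustrative sketch, not part of the generated API,
// showing how a Volume's name links to a MountPoint's sourceVolume, with the
// data persisted at a host path as described above. The path and names are
// assumptions for the example.
func exampleHostVolume() (Volume, MountPoint) {
	volumeName := "scratch"
	sourcePath := "/mnt/scratch"
	containerPath := "/scratch"
	readOnly := false
	volume := Volume{
		Name: &volumeName,
		Host: &Host{SourcePath: &sourcePath},
	}
	mountPoint := MountPoint{
		SourceVolume:  &volumeName,
		ContainerPath: &containerPath,
		ReadOnly:      &readOnly,
	}
	return volume, mountPoint
}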

type noSmithyDocumentSerde = smithydocument.NoSerde