File: dataflow_v1b3.projects.jobs.html (from python-googleapi 1.5.5-1)

<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method  {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="dataflow_v1b3.html">Google Dataflow API</a> . <a href="dataflow_v1b3.projects.html">projects</a> . <a href="dataflow_v1b3.projects.jobs.html">jobs</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="dataflow_v1b3.projects.jobs.debug.html">debug()</a></code>
</p>
<p class="firstline">Returns the debug Resource.</p>

<p class="toc_element">
  <code><a href="dataflow_v1b3.projects.jobs.messages.html">messages()</a></code>
</p>
<p class="firstline">Returns the messages Resource.</p>

<p class="toc_element">
  <code><a href="dataflow_v1b3.projects.jobs.workItems.html">workItems()</a></code>
</p>
<p class="firstline">Returns the workItems Resource.</p>

<p class="toc_element">
  <code><a href="#create">create(projectId, body, x__xgafv=None, replaceJobId=None, view=None)</a></code></p>
<p class="firstline">Creates a dataflow job.</p>
<p class="toc_element">
  <code><a href="#get">get(projectId, jobId, x__xgafv=None, view=None)</a></code></p>
<p class="firstline">Gets the state of the specified dataflow job.</p>
<p class="toc_element">
  <code><a href="#getMetrics">getMetrics(projectId, jobId, startTime=None, x__xgafv=None)</a></code></p>
<p class="firstline">Request the job status.</p>
<p class="toc_element">
  <code><a href="#list">list(projectId, pageSize=None, filter=None, pageToken=None, x__xgafv=None, view=None)</a></code></p>
<p class="firstline">List the jobs of a project</p>
<p class="toc_element">
  <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
<p class="firstline">Retrieves the next page of results.</p>
<p class="toc_element">
  <code><a href="#update">update(projectId, jobId, body, x__xgafv=None)</a></code></p>
<p class="firstline">Updates the state of an existing dataflow job.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="create">create(projectId, body, x__xgafv=None, replaceJobId=None, view=None)</code>
  <pre>Creates a dataflow job.

Args:
  projectId: string, The project which owns the job. (required)
  body: object, The request body. (required)
    The object takes the form of:

{ # Defines a job to be run by the Dataflow service.
    "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
    "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
    "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
    "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
    "projectId": "A String", # The project which owns the job.
    "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
      "a_key": "A String",
    },
    "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
      "a_key": "A String",
    },
    "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
    "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
      "version": { # A structure describing which components and their versions of the service are required in order to run the job.
        "a_key": "", # Properties of the object.
      },
      "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
      "internalExperiments": { # Experimental settings.
        "a_key": "", # Properties of the object. Contains field @type with type URL.
      },
      "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
      "experiments": [ # The list of experiments to enable.
        "A String",
      ],
      "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
      "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
        "a_key": "", # Properties of the object.
      },
      "userAgent": { # A description of the process that generated the request.
        "a_key": "", # Properties of the object.
      },
      "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
      "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
        { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
          "diskSourceImage": "A String", # Fully qualified source image for disks.
          "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
            "workflowFileName": "A String", # Store the workflow in this file.
            "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "commandlinesFileName": "A String", # Store preprocessing commands in this file.
            "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
              "reportingEnabled": True or False, # Send work progress updates to service.
              "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
              "workerId": "A String", # ID of the worker running this pipeline.
              "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
              "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            },
            "vmId": "A String", # ID string of VM.
            "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
            "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
            "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
            "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
            "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
            "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
              "A String",
            ],
            "languageHint": "A String", # Suggested backend language.
            "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
            "streamingWorkerMainClass": "A String", # Streaming worker main class name.
            "logDir": "A String", # Directory on the VM to store logs.
            "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
            "harnessCommand": "A String", # Command to launch the worker harness.
            "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
          },
          "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
          "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
          "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
          "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
          "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
          "ipConfiguration": "A String", # Configuration for VM IPs.
          "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
          "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
          "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
          "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
          "metadata": { # Metadata to set on the Google Compute Engine VMs.
            "a_key": "A String",
          },
          "poolArgs": { # Extra arguments for this worker pool.
            "a_key": "", # Properties of the object. Contains field @type with type URL.
          },
          "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
          "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
          "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
          "packages": [ # Packages to be installed on workers.
            { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
              "name": "A String", # The name of the package.
              "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
            },
          ],
          "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
            "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
            "algorithm": "A String", # The algorithm to use for autoscaling.
          },
          "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
          "dataDisks": [ # Data disks that are used by a VM in this workflow.
            { # Describes the data disk used by a workflow job.
              "mountPoint": "A String", # Directory in a VM where disk is mounted.
              "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
              "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                  # /zones//diskTypes/pd-standard
            },
          ],
        },
      ],
    },
    "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
    "steps": [ # The top-level steps that constitute the entire job.
      { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
        "kind": "A String", # The kind of step in the dataflow Job.
        "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
        "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
          "a_key": "", # Properties of the object.
        },
      },
    ],
    "currentStateTime": "A String", # The timestamp associated with the current state.
    "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
      "A String",
    ],
    "type": "A String", # The type of dataflow job.
    "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
    "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
    "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
      "stages": { # A mapping from each stage to the information about that stage.
        "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
          "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
            "A String",
          ],
        },
      },
    },
  }

  x__xgafv: string, V1 error format.
  replaceJobId: string, DEPRECATED. This field is now on the Job message.
  view: string, Level of information requested in response.

Returns:
  An object of the form:

    { # Defines a job to be run by the Dataflow service.
      "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
      "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
      "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
      "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
      "projectId": "A String", # The project which owns the job.
      "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
        "a_key": "A String",
      },
      "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
        "a_key": "A String",
      },
      "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
      "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
        "version": { # A structure describing which components and their versions of the service are required in order to run the job.
          "a_key": "", # Properties of the object.
        },
        "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "internalExperiments": { # Experimental settings.
          "a_key": "", # Properties of the object. Contains field @type with type URL.
        },
        "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
        "experiments": [ # The list of experiments to enable.
          "A String",
        ],
        "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
        "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
          "a_key": "", # Properties of the object.
        },
        "userAgent": { # A description of the process that generated the request.
          "a_key": "", # Properties of the object.
        },
        "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
        "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
          { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
            "diskSourceImage": "A String", # Fully qualified source image for disks.
            "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
              "workflowFileName": "A String", # Store the workflow in this file.
              "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "commandlinesFileName": "A String", # Store preprocessing commands in this file.
              "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
                "reportingEnabled": True or False, # Send work progress updates to service.
                "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
                "workerId": "A String", # ID of the worker running this pipeline.
                "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
                "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              },
              "vmId": "A String", # ID string of VM.
              "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
              "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
              "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
              "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
              "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
                "A String",
              ],
              "languageHint": "A String", # Suggested backend language.
              "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
              "streamingWorkerMainClass": "A String", # Streaming worker main class name.
              "logDir": "A String", # Directory on the VM to store logs.
              "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
              "harnessCommand": "A String", # Command to launch the worker harness.
              "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
            },
            "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
            "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
            "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
            "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
            "ipConfiguration": "A String", # Configuration for VM IPs.
            "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
            "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
            "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
            "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
            "metadata": { # Metadata to set on the Google Compute Engine VMs.
              "a_key": "A String",
            },
            "poolArgs": { # Extra arguments for this worker pool.
              "a_key": "", # Properties of the object. Contains field @type with type URL.
            },
            "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
            "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
            "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
            "packages": [ # Packages to be installed on workers.
              { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
                "name": "A String", # The name of the package.
                "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
              },
            ],
            "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
              "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
              "algorithm": "A String", # The algorithm to use for autoscaling.
            },
            "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
            "dataDisks": [ # Data disks that are used by a VM in this workflow.
              { # Describes the data disk used by a workflow job.
                "mountPoint": "A String", # Directory in a VM where disk is mounted.
                "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                    # /zones//diskTypes/pd-standard
              },
            ],
          },
        ],
      },
      "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
      "steps": [ # The top-level steps that constitute the entire job.
        { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
          "kind": "A String", # The kind of step in the dataflow Job.
          "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
          "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
            "a_key": "", # Properties of the object.
          },
        },
      ],
      "currentStateTime": "A String", # The timestamp associated with the current state.
      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "A String",
      ],
      "type": "A String", # The type of dataflow job.
      "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
      "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
      "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
        "stages": { # A mapping from each stage to the information about that stage.
          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
            "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
              "A String",
            ],
          },
        },
      },
    }</pre>
</div>
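<p>A hedged sketch of calling this method with the <code>service</code> object built in the earlier example. The job body below is illustrative only: in practice Dataflow jobs are constructed and submitted by an SDK rather than written by hand, and the "JOB_TYPE_BATCH" value, bucket name, and project ID are assumptions not confirmed by this page.</p>
<pre>
# Minimal create() call, assuming `service` from the earlier sketch.
body = {
    'name': 'example-job',  # must match [a-z]([-a-z0-9]{0,38}[a-z0-9])?
    'type': 'JOB_TYPE_BATCH',  # assumed enum value; this page only says "The type of dataflow job"
    'environment': {
        # placeholder bucket, in the documented storage.googleapis.com/{bucket}/{object} form
        'tempStoragePrefix': 'storage.googleapis.com/my-bucket/temp',
    },
    # 'steps' omitted here; an SDK normally generates the per-step definitions.
}

job = service.projects().jobs().create(projectId='my-project', body=body).execute()
print(job['id'], job['currentState'])

# The returned Job can then be polled with get(), documented below.
job = service.projects().jobs().get(projectId='my-project', jobId=job['id']).execute()
</pre>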

<div class="method">
    <code class="details" id="get">get(projectId, jobId, x__xgafv=None, view=None)</code>
  <pre>Gets the state of the specified dataflow job.

Args:
  projectId: string, The project which owns the job. (required)
  jobId: string, Identifies a single job. (required)
  x__xgafv: string, V1 error format.
  view: string, Level of information requested in response.

Returns:
  An object of the form:

    { # Defines a job to be run by the Dataflow service.
      "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
      "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
      "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
      "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
      "projectId": "A String", # The project which owns the job.
      "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
        "a_key": "A String",
      },
      "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
        "a_key": "A String",
      },
      "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
      "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
        "version": { # A structure describing which components and their versions of the service are required in order to run the job.
          "a_key": "", # Properties of the object.
        },
        "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "internalExperiments": { # Experimental settings.
          "a_key": "", # Properties of the object. Contains field @type with type URL.
        },
        "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
        "experiments": [ # The list of experiments to enable.
          "A String",
        ],
        "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
        "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
          "a_key": "", # Properties of the object.
        },
        "userAgent": { # A description of the process that generated the request.
          "a_key": "", # Properties of the object.
        },
        "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
        "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
          { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
            "diskSourceImage": "A String", # Fully qualified source image for disks.
            "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
              "workflowFileName": "A String", # Store the workflow in this file.
              "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "commandlinesFileName": "A String", # Store preprocessing commands in this file.
              "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
                "reportingEnabled": True or False, # Send work progress updates to service.
                "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
                "workerId": "A String", # ID of the worker running this pipeline.
                "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
                "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              },
              "vmId": "A String", # ID string of VM.
              "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
              "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
              "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
              "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
              "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
                "A String",
              ],
              "languageHint": "A String", # Suggested backend language.
              "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
              "streamingWorkerMainClass": "A String", # Streaming worker main class name.
              "logDir": "A String", # Directory on the VM to store logs.
              "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
              "harnessCommand": "A String", # Command to launch the worker harness.
              "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
            },
            "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
            "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
            "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
            "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
            "ipConfiguration": "A String", # Configuration for VM IPs.
            "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
            "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
            "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
            "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
            "metadata": { # Metadata to set on the Google Compute Engine VMs.
              "a_key": "A String",
            },
            "poolArgs": { # Extra arguments for this worker pool.
              "a_key": "", # Properties of the object. Contains field @type with type URL.
            },
            "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
            "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
            "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
            "packages": [ # Packages to be installed on workers.
              { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
                "name": "A String", # The name of the package.
                "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
              },
            ],
            "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
              "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
              "algorithm": "A String", # The algorithm to use for autoscaling.
            },
            "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
            "dataDisks": [ # Data disks that are used by a VM in this workflow.
              { # Describes the data disk used by a workflow job.
                "mountPoint": "A String", # Directory in a VM where disk is mounted.
                "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                    # /zones//diskTypes/pd-standard
              },
            ],
          },
        ],
      },
      "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
      "steps": [ # The top-level steps that constitute the entire job.
        { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
          "kind": "A String", # The kind of step in the dataflow Job.
          "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
          "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
            "a_key": "", # Properties of the object.
          },
        },
      ],
      "currentStateTime": "A String", # The timestamp associated with the current state.
      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "A String",
      ],
      "type": "A String", # The type of dataflow job.
      "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
      "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
      "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
        "stages": { # A mapping from each stage to the information about that stage.
          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
            "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
              "A String",
            ],
          },
        },
      },
    }</pre>
</div>

<div class="method">
    <code class="details" id="getMetrics">getMetrics(projectId, jobId, startTime=None, x__xgafv=None)</code>
  <pre>Request the job metrics.

Args:
  projectId: string, A project id. (required)
  jobId: string, The job to get metrics for. (required)
  startTime: string, Return only metric data that has changed since this time. Default is to return all information about all metrics for the job.
  x__xgafv: string, V1 error format.

Returns:
  An object of the form:

    { # JobMetrics contains a collection of metrics describing the detailed progress of a Dataflow job. Metrics correspond to user-defined and system-defined metrics in the job. This resource captures only the most recent values of each metric; time-series data can be queried for them (under the same metric names) from Cloud Monitoring.
    "metrics": [ # All metrics for this job.
      { # Describes the state of a metric.
        "meanCount": "", # Worker-computed aggregate value for the "Mean" aggregation kind. This holds the count of the aggregated values and is used in combination with mean_sum above to obtain the actual mean aggregate value. The only possible value type is Long.
        "kind": "A String", # Metric aggregation kind. The possible metric aggregation kinds are "Sum", "Max", "Min", "Mean", "Set", "And", and "Or". The specified aggregation kind is case-insensitive. If omitted, this is not an aggregated value but instead a single metric sample value.
        "set": "", # Worker-computed aggregate value for the "Set" aggregation kind. The only possible value type is a list of Values whose type can be Long, Double, or String, according to the metric's type. All Values in the list must be of the same type.
        "name": { # Identifies a metric, by describing the source which generated the metric. # Name of the metric.
          "origin": "A String", # Origin (namespace) of metric name. May be blank for user-define metrics; will be "dataflow" for metrics defined by the Dataflow service or SDK.
          "name": "A String", # Worker-defined metric name.
          "context": { # Zero or more labeled fields which identify the part of the job this metric is associated with, such as the name of a step or collection. For example, built-in counters associated with steps will have context['step'] = . Counters associated with PCollections in the SDK will have context['pcollection'] =
              # .
            "a_key": "A String",
          },
        },
        "cumulative": True or False, # True if this metric is reported as the total cumulative aggregate value accumulated since the worker started working on this WorkItem. By default this is false, indicating that this metric is reported as a delta that is not associated with any WorkItem.
        "updateTime": "A String", # Timestamp associated with the metric value. Optional when workers are reporting work progress; it will be filled in responses from the metrics API.
        "scalar": "", # Worker-computed aggregate value for aggregation kinds "Sum", "Max", "Min", "And", and "Or". The possible value types are Long, Double, and Boolean.
        "meanSum": "", # Worker-computed aggregate value for the "Mean" aggregation kind. This holds the sum of the aggregated values and is used in combination with mean_count below to obtain the actual mean aggregate value. The only possible value types are Long and Double.
        "internal": "", # Worker-computed aggregate value for internal use by the Dataflow service.
      },
    ],
    "metricTime": "A String", # Timestamp as of which metric values are current.
  }</pre>
</div>
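
<p>For orientation only, a minimal usage sketch of this call (not part of the generated reference; the project ID, job ID, and credential setup are placeholders/assumptions):</p>
<pre>
# Hedged illustration: fetch the most recent metric values for a job.
# Assumes default application credentials; IDs below are placeholders.
from googleapiclient.discovery import build

service = build('dataflow', 'v1b3')
metrics = service.projects().jobs().getMetrics(
    projectId='my-project',      # placeholder project id
    jobId='my-job-id',           # placeholder job id
).execute()
for metric in metrics.get('metrics', []):
    print(metric['name']['name'], metric.get('scalar'))
</pre>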

<div class="method">
    <code class="details" id="list">list(projectId, pageSize=None, filter=None, pageToken=None, x__xgafv=None, view=None)</code>
  <pre>List the jobs of a project.

Args:
  projectId: string, The project which owns the jobs. (required)
  pageSize: integer, If there are many jobs, limit response to at most this many. The actual number of jobs returned will be the lesser of max_responses and an unspecified server-defined limit.
  filter: string, The kind of filter to use.
  pageToken: string, Set this to the 'next_page_token' field of a previous response to request additional results in a long list.
  x__xgafv: string, V1 error format.
  view: string, Level of information requested in response. Default is SUMMARY.

Returns:
  An object of the form:

    { # Response to a request to list Dataflow jobs. This may be a partial response, depending on the page size in the ListJobsRequest.
    "nextPageToken": "A String", # Set if there may be more results than fit in this response.
    "jobs": [ # A subset of the requested job information.
      { # Defines a job to be run by the Dataflow service.
          "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
          "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
          "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
          "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
          "projectId": "A String", # The project which owns the job.
          "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
            "a_key": "A String",
          },
          "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
            "a_key": "A String",
          },
          "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
          "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
            "version": { # A structure describing which components and their versions of the service are required in order to run the job.
              "a_key": "", # Properties of the object.
            },
            "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "internalExperiments": { # Experimental settings.
              "a_key": "", # Properties of the object. Contains field @type with type URL.
            },
            "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
            "experiments": [ # The list of experiments to enable.
              "A String",
            ],
            "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
            "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
              "a_key": "", # Properties of the object.
            },
            "userAgent": { # A description of the process that generated the request.
              "a_key": "", # Properties of the object.
            },
            "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
            "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
              { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
                "diskSourceImage": "A String", # Fully qualified source image for disks.
                "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
                  "workflowFileName": "A String", # Store the workflow in this file.
                  "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
                  "commandlinesFileName": "A String", # Store preprocessing commands in this file.
                  "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
                    "reportingEnabled": True or False, # Send work progress updates to service.
                    "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
                    "workerId": "A String", # ID of the worker running this pipeline.
                    "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                    "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
                    "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
                  },
                  "vmId": "A String", # ID string of VM.
                  "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
                  "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
                  "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                  "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
                  "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
                  "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
                    "A String",
                  ],
                  "languageHint": "A String", # Suggested backend language.
                  "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
                  "streamingWorkerMainClass": "A String", # Streaming worker main class name.
                  "logDir": "A String", # Directory on the VM to store logs.
                  "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
                  "harnessCommand": "A String", # Command to launch the worker harness.
                  "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
                  "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
                },
                "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
                "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
                "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
                "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
                "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
                "ipConfiguration": "A String", # Configuration for VM IPs.
                "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
                "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
                "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
                "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                "metadata": { # Metadata to set on the Google Compute Engine VMs.
                  "a_key": "A String",
                },
                "poolArgs": { # Extra arguments for this worker pool.
                  "a_key": "", # Properties of the object. Contains field @type with type URL.
                },
                "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
                "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
                "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
                "packages": [ # Packages to be installed on workers.
                  { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
                    "name": "A String", # The name of the package.
                    "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
                  },
                ],
                "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
                  "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
                  "algorithm": "A String", # The algorithm to use for autoscaling.
                },
                "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
                "dataDisks": [ # Data disks that are used by a VM in this workflow.
                  { # Describes the data disk used by a workflow job.
                    "mountPoint": "A String", # Directory in a VM where disk is mounted.
                    "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                    "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                        # /zones//diskTypes/pd-standard
                  },
                ],
              },
            ],
          },
          "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
          "steps": [ # The top-level steps that constitute the entire job.
            { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
              "kind": "A String", # The kind of step in the dataflow Job.
              "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
              "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
                "a_key": "", # Properties of the object.
              },
            },
          ],
          "currentStateTime": "A String", # The timestamp associated with the current state.
          "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "A String",
          ],
          "type": "A String", # The type of dataflow job.
          "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
          "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
          "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
            "stages": { # A mapping from each stage to the information about that stage.
              "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
                "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
                  "A String",
                ],
              },
            },
          },
        },
    ],
  }</pre>
</div>
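
<p>For orientation only, a minimal usage sketch (not generated output; the project ID is a placeholder and the JOB_VIEW_SUMMARY value is an assumption used to illustrate the view parameter described above):</p>
<pre>
# Hedged illustration: list jobs in a project, one page at a time.
from googleapiclient.discovery import build

service = build('dataflow', 'v1b3')
response = service.projects().jobs().list(
    projectId='my-project',      # placeholder project id
    pageSize=25,                 # cap the number of jobs per page
    view='JOB_VIEW_SUMMARY',     # assumed enum value; see the view parameter above
).execute()
for job in response.get('jobs', []):
    print(job.get('id'), job.get('name'), job.get('currentState'))
</pre>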

<div class="method">
    <code class="details" id="list_next">list_next(previous_request, previous_response)</code>
  <pre>Retrieves the next page of results.

Args:
  previous_request: The request for the previous page. (required)
  previous_response: The response from the request for the previous page. (required)

Returns:
  A request object that you can call 'execute()' on to request the next
  page. Returns None if there are no more items in the collection.
    </pre>
</div>
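
<p>For orientation only, a sketch of paging through all results by combining list() and list_next() as described above (the project ID is a placeholder):</p>
<pre>
# Hedged illustration: iterate over every page of jobs until list_next()
# returns None, signalling that the collection is exhausted.
from googleapiclient.discovery import build

service = build('dataflow', 'v1b3')
jobs = service.projects().jobs()
request = jobs.list(projectId='my-project', pageSize=100)  # placeholder id
while request is not None:
    response = request.execute()
    for job in response.get('jobs', []):
        print(job.get('id'), job.get('currentState'))
    request = jobs.list_next(request, response)
</pre>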

<div class="method">
    <code class="details" id="update">update(projectId, jobId, body, x__xgafv=None)</code>
  <pre>Updates the state of an existing dataflow job.

Args:
  projectId: string, The project which owns the job. (required)
  jobId: string, Identifies a single job. (required)
  body: object, The request body. (required)
    The object takes the form of:

{ # Defines a job to be run by the Dataflow service.
    "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
    "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
    "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
    "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
    "projectId": "A String", # The project which owns the job.
    "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
      "a_key": "A String",
    },
    "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
      "a_key": "A String",
    },
    "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
    "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
      "version": { # A structure describing which components and their versions of the service are required in order to run the job.
        "a_key": "", # Properties of the object.
      },
      "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
      "internalExperiments": { # Experimental settings.
        "a_key": "", # Properties of the object. Contains field @type with type URL.
      },
      "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
      "experiments": [ # The list of experiments to enable.
        "A String",
      ],
      "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
      "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
        "a_key": "", # Properties of the object.
      },
      "userAgent": { # A description of the process that generated the request.
        "a_key": "", # Properties of the object.
      },
      "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
      "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
        { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
          "diskSourceImage": "A String", # Fully qualified source image for disks.
          "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
            "workflowFileName": "A String", # Store the workflow in this file.
            "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "commandlinesFileName": "A String", # Store preprocessing commands in this file.
            "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
              "reportingEnabled": True or False, # Send work progress updates to service.
              "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
              "workerId": "A String", # ID of the worker running this pipeline.
              "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
              "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            },
            "vmId": "A String", # ID string of VM.
            "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
            "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
            "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
            "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
            "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
            "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
              "A String",
            ],
            "languageHint": "A String", # Suggested backend language.
            "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
            "streamingWorkerMainClass": "A String", # Streaming worker main class name.
            "logDir": "A String", # Directory on the VM to store logs.
            "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
            "harnessCommand": "A String", # Command to launch the worker harness.
            "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
            "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
          },
          "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
          "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
          "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
          "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
          "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
          "ipConfiguration": "A String", # Configuration for VM IPs.
          "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
          "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
          "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
          "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
          "metadata": { # Metadata to set on the Google Compute Engine VMs.
            "a_key": "A String",
          },
          "poolArgs": { # Extra arguments for this worker pool.
            "a_key": "", # Properties of the object. Contains field @type with type URL.
          },
          "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
          "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
          "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
          "packages": [ # Packages to be installed on workers.
            { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
              "name": "A String", # The name of the package.
              "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
            },
          ],
          "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
            "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
            "algorithm": "A String", # The algorithm to use for autoscaling.
          },
          "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
          "dataDisks": [ # Data disks that are used by a VM in this workflow.
            { # Describes the data disk used by a workflow job.
              "mountPoint": "A String", # Directory in a VM where disk is mounted.
              "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
              "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                  # /zones//diskTypes/pd-standard
            },
          ],
        },
      ],
    },
    "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
    "steps": [ # The top-level steps that constitute the entire job.
      { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
        "kind": "A String", # The kind of step in the dataflow Job.
        "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
        "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
          "a_key": "", # Properties of the object.
        },
      },
    ],
    "currentStateTime": "A String", # The timestamp associated with the current state.
    "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
      "A String",
    ],
    "type": "A String", # The type of dataflow job.
    "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
    "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
    "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
      "stages": { # A mapping from each stage to the information about that stage.
        "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
          "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
            "A String",
          ],
        },
      },
    },
  }

  x__xgafv: string, V1 error format.

Returns:
  An object of the form:

    { # Defines a job to be run by the Dataflow service.
      "clientRequestId": "A String", # Client's unique identifier of the job, re-used by SDK across retried attempts. If this field is set, the service will ensure its uniqueness. That is, the request to create a job will fail if the service has knowledge of a previously submitted job with the same client's id and job name. The caller may, for example, use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
      "requestedState": "A String", # The job's requested state. UpdateJob may be used to switch between the JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. UpdateJob may also be used to directly set a job's requested state to JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if it has not already reached a terminal state.
      "name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
      "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
      "projectId": "A String", # The project which owns the job.
      "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
        "a_key": "A String",
      },
      "transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
        "a_key": "A String",
      },
      "createTime": "A String", # Timestamp when job was initially created. Immutable, set by the Dataflow service.
      "environment": { # Describes the environment in which a Dataflow Job runs. # Environment for the job.
        "version": { # A structure describing which components and their versions of the service are required in order to run the job.
          "a_key": "", # Properties of the object.
        },
        "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "internalExperiments": { # Experimental settings.
          "a_key": "", # Properties of the object. Contains field @type with type URL.
        },
        "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
        "experiments": [ # The list of experiments to enable.
          "A String",
        ],
        "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
        "sdkPipelineOptions": { # The Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
          "a_key": "", # Properties of the object.
        },
        "userAgent": { # A description of the process that generated the request.
          "a_key": "", # Properties of the object.
        },
        "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
        "workerPools": [ # Worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
          { # Describes one particular pool of Dataflow workers to be instantiated by the Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
            "diskSourceImage": "A String", # Fully qualified source image for disks.
            "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
              "workflowFileName": "A String", # Store the workflow in this file.
              "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "commandlinesFileName": "A String", # Store preprocessing commands in this file.
              "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # Settings to pass to the parallel worker harness.
                "reportingEnabled": True or False, # Send work progress updates to service.
                "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
                "workerId": "A String", # ID of the worker running this pipeline.
                "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
                "servicePath": "A String", # The Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
                "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              },
              "vmId": "A String", # ID string of VM.
              "baseTaskDir": "A String", # Location on the worker for task-specific subdirectories.
              "continueOnException": True or False, # Do we continue taskrunner if an exception is hit?
              "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
              "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
              "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
              "oauthScopes": [ # OAuth2 scopes to be requested by the taskrunner in order to access the dataflow API.
                "A String",
              ],
              "languageHint": "A String", # Suggested backend language.
              "logToSerialconsole": True or False, # Send taskrunner log into to Google Compute Engine VM serial console?
              "streamingWorkerMainClass": "A String", # Streaming worker main class name.
              "logDir": "A String", # Directory on the VM to store logs.
              "dataflowApiVersion": "A String", # API version of endpoint, e.g. "v1b3"
              "harnessCommand": "A String", # Command to launch the worker harness.
              "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
              "alsologtostderr": True or False, # Also send taskrunner log info to stderr?
            },
            "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
            "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
            "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
            "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
            "ipConfiguration": "A String", # Configuration for VM IPs.
            "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
            "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
            "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
            "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
            "metadata": { # Metadata to set on the Google Compute Engine VMs.
              "a_key": "A String",
            },
            "poolArgs": { # Extra arguments for this worker pool.
              "a_key": "", # Properties of the object. Contains field @type with type URL.
            },
            "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
            "workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
            "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
            "packages": [ # Packages to be installed on workers.
              { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
                "name": "A String", # The name of the package.
                "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
              },
            ],
            "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
              "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
              "algorithm": "A String", # The algorithm to use for autoscaling.
            },
            "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
            "dataDisks": [ # Data disks that are used by a VM in this workflow.
              { # Describes the data disk used by a workflow job.
                "mountPoint": "A String", # Directory in a VM where disk is mounted.
                "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
                "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/
                    # /zones//diskTypes/pd-standard
              },
            ],
          },
        ],
      },
      "replaceJobId": "A String", # If this job is an update of an existing job, this field will be the ID of the job it replaced. When sending a CreateJobRequest, you can update a job by specifying it here. The job named here will be stopped, and its intermediate state transferred to this job.
      "steps": [ # The top-level steps that constitute the entire job.
        { # Defines a particular step within a Dataflow job. A job consists of multiple steps, each of which performs some specific operation as part of the overall job. Data is typically passed from one step to another as part of the job. Here's an example of a sequence of steps which together implement a Map-Reduce job: * Read a collection of data from some source, parsing the collection's elements. * Validate the elements. * Apply a user-defined function to map each element to some value and extract an element-specific key value. * Group elements with the same key into a single element with that key, transforming a multiply-keyed collection into a uniquely-keyed collection. * Write the elements out to some data sink. (Note that the Dataflow service may be used to run many different types of jobs, not just Map-Reduce).
          "kind": "A String", # The kind of step in the dataflow Job.
          "name": "A String", # Name identifying the step. This must be unique for each step with respect to all other steps in the dataflow Job.
          "properties": { # Named properties associated with the step. Each kind of predefined step has its own required set of properties.
            "a_key": "", # Properties of the object.
          },
        },
      ],
      "currentStateTime": "A String", # The timestamp associated with the current state.
      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
        "A String",
      ],
      "type": "A String", # The type of dataflow job.
      "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
      "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
      "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
        "stages": { # A mapping from each stage to the information about that stage.
          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
            "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
              "A String",
            ],
          },
        },
      },
    }</pre>
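<p>The sketch below is illustrative and is not part of the generated reference above. It shows how a job body using the "environment.workerPools" fields documented here might be assembled and submitted with this client library. The project ID, job name, zone, machine type, and worker counts are placeholder assumptions, and a real submission would also carry the "steps" documented above; only the field names and the projects().jobs().create() method come from this reference.</p>
<pre>
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials

# Build the Dataflow v1b3 service with Application Default Credentials.
credentials = GoogleCredentials.get_application_default()
dataflow = build('dataflow', 'v1b3', credentials=credentials)

# Job body mirroring the schema above; all values are illustrative placeholders.
job_body = {
    'name': 'example-job',               # hypothetical job name
    'type': 'JOB_TYPE_BATCH',
    'environment': {
        'workerPools': [
            {
                'kind': 'harness',       # only 'harness' and 'shuffle' are supported
                'numWorkers': 3,
                'machineType': 'n1-standard-1',
                'zone': 'us-central1-f',
                'autoscalingSettings': {
                    'algorithm': 'AUTOSCALING_ALGORITHM_BASIC',
                    'maxNumWorkers': 10,
                },
            },
        ],
    },
    # 'steps': [...]  # the top-level steps documented above would go here
}

response = dataflow.projects().jobs().create(
    projectId='my-project', body=job_body).execute()  # 'my-project' is a placeholder
print(response['id'], response.get('currentState'))
</pre>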
</div>
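<p>A second illustrative sketch, also not part of the generated reference, showing how the "currentState" and "executionInfo" fields documented above might be read back later through projects().jobs().get(); the project and job IDs are placeholders.</p>
<pre>
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials

credentials = GoogleCredentials.get_application_default()
dataflow = build('dataflow', 'v1b3', credentials=credentials)

# projectId and jobId are placeholder assumptions.
job = dataflow.projects().jobs().get(
    projectId='my-project', jobId='YOUR_JOB_ID').execute()

# currentState is set by the Dataflow service and cannot be mutated by callers.
print('state:', job.get('currentState'))

# executionInfo.stages maps each execution stage to the steps it runs;
# a step may appear in more than one stage.
stages = job.get('executionInfo', {}).get('stages', {})
for stage, info in stages.items():
    print(stage, '->', info.get('stepName', []))
</pre>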

</body></html>