File: job_filtering.py

package info (click to toggle)
nordugrid-arc 7.1.0-1
  • links: PTS, VCS
  • area: main
  • in suites: forky
  • size: 29,364 kB
  • sloc: cpp: 136,663; python: 12,452; perl: 12,313; php: 11,408; sh: 10,878; ansic: 3,305; makefile: 3,161; xml: 180; sql: 130; javascript: 53; sed: 30
file content (46 lines) | stat: -rwxr-xr-x 1,490 bytes parent folder | download | duplicates (7)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
#! /usr/bin/env python
import arc
import sys

def example():
    """Query one ARC computing element for its jobs and print the failed ones."""
    user_config = arc.UserConfig()

    # The JobSupervisor collects every job the retriever discovers.
    supervisor = arc.JobSupervisor(user_config)

    # Point a job-list retriever at a single A-REX endpoint and wait for results.
    endpoint = arc.Endpoint("https://piff.hep.lu.se:443/arex", arc.Endpoint.JOBLIST)
    sys.stdout.write("Querying %s for jobs...\n" % endpoint.str())
    job_list_retriever = arc.JobListRetriever(user_config)
    job_list_retriever.addConsumer(supervisor)
    job_list_retriever.addEndpoint(endpoint)
    job_list_retriever.wait()

    sys.stdout.write("%s jobs found\n" % len(supervisor.GetAllJobs()))

    sys.stdout.write("Getting job states...\n")
    # Refresh the status of every collected job.
    supervisor.Update()

    # Report the general state of each job after the update.
    states = ", ".join(job.State.GetGeneralState() for job in supervisor.GetAllJobs())
    sys.stdout.write("The jobs have the following states: %s\n" % (states,))

    # Keep only the jobs whose state is "Failed" and dump their details.
    supervisor.SelectByStatus(["Failed"])
    failed_jobs = supervisor.GetSelectedJobs()

    sys.stdout.write("The failed jobs:\n")
    for failed_job in failed_jobs:
        failed_job.SaveToStream(arc.CPyOstream(sys.stdout), True)

# Before interpreter shutdown, wait for ARC's background threads to finish
# so they do not touch objects that are already being destroyed.
import atexit

def wait_exit():
    arc.ThreadInitializer().waitExit()

atexit.register(wait_exit)

# Uncomment these two lines to get verbose ARC logging on stderr:
# arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr))
# arc.Logger.getRootLogger().setThreshold(arc.DEBUG)

# Run the example only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    example()