File: chrome_trace_parser.py

#!/usr/bin/env python3
import argparse
import logging
import os

import pandas as pd

from torch._functorch.benchmark_utils import compute_utilization


# Process the chrome traces output by the PyTorch profiler.
# Requires each input JSON file's name to be in the format {model_name}_chrome_trace_*.json.
# The runtime file should be a CSV with columns (name, runtime).
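# Example invocation (file and model names below are hypothetical):
#   ./chrome_trace_parser.py --runtime runtimes.csv --folder ./traces
# where runtimes.csv contains rows such as:
#   name,runtime
#   resnet50,0.0123
# and ./traces contains files such as resnet50_chrome_trace_0.json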


def get_model_name(filename):
    """
    Get model name from a file in format {model_name}_chrome_trace_*.json
    """
    _, tail = os.path.split(filename)
    modelname = tail[: tail.find("_chrome_trace")]
    return modelname


def get_total_length(run_times_df, modelname):
    # float() on a one-element Series is deprecated in recent pandas; select the scalar explicitly
    return float(run_times_df.loc[run_times_df["name"] == modelname, "runtime"].iloc[0])


def main():
    parser = argparse.ArgumentParser()
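    # exactly one of --filename / --folder must be supplied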
    group = parser.add_mutually_exclusive_group(required=True)
    parser.add_argument(
        "--runtime", "-runf", help="file name of the runtime file", required=True
    )
    group.add_argument(
        "--filename",
        "-f",
        action="append",
        help="a filename of the json file to process",
    )
    group.add_argument("--folder", "-fd", help="a folder of the json files to process")
    args = parser.parse_args()

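    # Collect the chrome trace JSON files to process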
    if args.filename:
        filenames = args.filename
    elif args.folder:
        filenames = []
        directory = args.folder
        for filename in os.listdir(directory):
            f = os.path.join(directory, filename)
            if os.path.isfile(f) and f.endswith(".json"):
                filenames.append(f)
    else:
        # argparse already enforces the mutually exclusive group, but bail out
        # explicitly rather than falling through with `filenames` undefined
        parser.error("Please provide a filename or a folder name")

    print("modelname, GPU Utilization, MM and Conv time")

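    # the runtime CSV is expected to have columns "name" and "runtime"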
    run_times_df = pd.read_csv(args.runtime)
    for filename in filenames:
        try:
            modelname = get_model_name(filename)
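            # runtime in the CSV is assumed to be in seconds; chrome trace
            # timestamps are in microseconds, hence the 1e6 scaling below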
            total_length = get_total_length(run_times_df, modelname) * 1e6
            utilization, mm_conv_utilization = compute_utilization(
                filename, total_length
            )
            print(f"{modelname}, {utilization}, {mm_conv_utilization}")
        except Exception:  # keep going even if one trace fails to parse
            logging.exception("%s, ERROR", filename)
            print(f"{filename}, ERROR")


if __name__ == "__main__":
    main()