File: testtf.py

package info
scalene 1.5.54-2
  • area: main
  • in suites: forky, sid
  • size: 15,980 kB
  • sloc: cpp: 22,870; python: 14,493; javascript: 12,297; ansic: 817; makefile: 196; sh: 45
file content: 38 lines, 1,097 bytes, mode -rwxr-xr-x
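# Small MNIST training benchmark built on TensorFlow/Keras, shipped as a test
# script in the scalene source tree.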
import tensorflow as tf
from time import perf_counter


def config():
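    # Pin TensorFlow's inter-op and intra-op thread pools to a fixed size.
    # Note that this helper is defined but never called in this script.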
    num_threads = 16
    tf.config.threading.set_inter_op_parallelism_threads(num_threads)
    tf.config.threading.set_intra_op_parallelism_threads(num_threads)


def run_benchmark():
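    # Load MNIST, build a small dense classifier (Flatten -> Dense(128, relu)
    # -> Dropout -> Dense(10) logits), print the untrained model's logits for
    # one example as a sanity check, then train for 5 epochs and evaluate,
    # timing the fit/evaluate phase with perf_counter().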
    mnist = tf.keras.datasets.mnist

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    model = tf.keras.models.Sequential(
        [
            tf.keras.layers.Flatten(input_shape=(28, 28)),
            tf.keras.layers.Dense(128, activation="relu"),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(10),
        ]
    )

    predictions = model(x_train[:1]).numpy()
    print("predictions", predictions)

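    # The final Dense layer emits raw logits, so the loss is configured with
    # from_logits=True; the timer below covers both training and evaluation.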
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
    t0 = perf_counter()
    model.fit(x_train, y_train, epochs=5)
    model.evaluate(x_test, y_test, verbose=2)
    dt = perf_counter() - t0
    print(f"Total time: {dt}")


run_benchmark()
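
A rough usage sketch, assuming TensorFlow and scalene are installed: the script can be run directly with Python or profiled under scalene, for example:

    python3 testtf.py
    scalene testtf.py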