from __future__ import print_function
import numpy as np
import pytest
import string
from keras.utils.test_utils import get_test_data
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras import layers, optimizers
import keras.backend as K
import keras
def test_temporal_classification():
    """Classify length-3 temporal sequences of floats into 2 classes.

    A single GRU layer feeds a softmax dense layer applied to the last
    hidden state; the fitted model must reach at least 80% train accuracy,
    and the architecture must round-trip through get_config/from_config.
    """
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=200,
        num_test=20,
        input_shape=(3, 4),
        classification=True,
        num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.GRU(8, input_shape=x_train.shape[1:]),
        layers.Dense(y_train.shape[-1], activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()

    history = model.fit(x_train, y_train, epochs=5, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['accuracy'][-1] >= 0.8

    # Verify the trained architecture survives a config round-trip.
    config = model.get_config()
    model = Sequential.from_config(config)
def test_temporal_classification_functional():
    """Classify length-3 temporal sequences of floats into 2 classes.

    Same task as the Sequential variant, but built with the functional
    API around a SimpleRNN; must reach at least 75% train accuracy.
    """
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=200,
        num_test=20,
        input_shape=(3, 4),
        classification=True,
        num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Functional graph: Input -> SimpleRNN -> softmax Dense.
    inputs = layers.Input(shape=x_train.shape[1:])
    hidden = layers.SimpleRNN(8)(inputs)
    outputs = layers.Dense(y_train.shape[-1], activation='softmax')(hidden)
    model = keras.models.Model(inputs, outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train, epochs=5, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['accuracy'][-1] >= 0.75
def test_temporal_regression():
    """Regress 2 float targets from length-3 sequences with one LSTM layer.

    The LSTM output dimension equals the target dimension, so no extra
    Dense head is needed; final hinge loss must drop below 1.
    """
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=200,
        num_test=20,
        input_shape=(3, 5),
        output_shape=(2,),
        classification=False)

    model = Sequential([
        layers.LSTM(y_train.shape[-1], input_shape=x_train.shape[1:]),
    ])
    model.compile(loss='hinge', optimizer='adam')

    history = model.fit(x_train, y_train, epochs=5, batch_size=16,
                        validation_data=(x_test, y_test), verbose=0)
    assert history.history['loss'][-1] < 1.
def test_3d_to_3d():
    """Map a 3D input sequence to a 3D output sequence element-wise.

    A TimeDistributed Dense applies the same projection to every timestep
    independently — the temporal structure itself is not modeled. Final
    hinge loss must drop below 1 after 20 epochs.
    """
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=100,
        num_test=20,
        input_shape=(3, 5),
        output_shape=(3, 5),
        classification=False)

    model = Sequential([
        layers.TimeDistributed(layers.Dense(y_train.shape[-1]),
                               input_shape=x_train.shape[1:3]),
    ])
    model.compile(loss='hinge', optimizer='rmsprop')

    history = model.fit(x_train, y_train, epochs=20, batch_size=16,
                        validation_data=(x_test, y_test), verbose=0)
    assert history.history['loss'][-1] < 1.
def test_stacked_lstm_char_prediction():
    """Learn the alphabet as a char sequence with a stacked LSTM.

    Trains on 2-char windows -> next char (e.g. 'ab' -> 'c'), then primes
    the model with 'ab' and asserts it generates the full alphabet.
    See non-toy example in examples/lstm_text_generation.py.
    """
    # generate alphabet:
    # http://stackoverflow.com/questions/16060899/alphabet-range-python
    alphabet = string.ascii_lowercase
    number_of_chars = len(alphabet)

    # Generate char windows of length `sequence_length` out of the alphabet
    # and store the next char as the label (e.g. 'ab' -> 'c').
    sequence_length = 2
    sentences = [alphabet[i: i + sequence_length]
                 for i in range(len(alphabet) - sequence_length)]
    next_chars = [alphabet[i + sequence_length]
                  for i in range(len(alphabet) - sequence_length)]

    # One-hot encode sequences and labels.
    # NOTE: the builtin `bool` is used here — `np.bool` was deprecated in
    # NumPy 1.20 and removed in 1.24, so `dtype=np.bool` raises on modern
    # NumPy installs.
    x = np.zeros((len(sentences), sequence_length, number_of_chars),
                 dtype=bool)
    y = np.zeros((len(sentences), number_of_chars), dtype=bool)
    for i, sentence in enumerate(sentences):
        for t, char in enumerate(sentence):
            x[i, t, ord(char) - ord('a')] = 1
        y[i, ord(next_chars[i]) - ord('a')] = 1

    # Learn the alphabet with a 2-layer (stacked) LSTM.
    model = Sequential([
        layers.LSTM(16, return_sequences=True,
                    input_shape=(sequence_length, number_of_chars)),
        layers.LSTM(16, return_sequences=False),
        layers.Dense(number_of_chars, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(x, y, batch_size=1, epochs=60, verbose=1)

    # Prime the model with the first `sequence_length` chars and let it
    # generate the remainder of the alphabet greedily (argmax decoding).
    sentence = alphabet[:sequence_length]
    generated = sentence
    for iteration in range(number_of_chars - sequence_length):
        # Fresh buffer per step (renamed from `x` to avoid shadowing the
        # training matrix above).
        window = np.zeros((1, sequence_length, number_of_chars))
        for t, char in enumerate(sentence):
            window[0, t, ord(char) - ord('a')] = 1.
        preds = model.predict(window, verbose=0)[0]
        next_char = chr(np.argmax(preds) + ord('a'))
        generated += next_char
        sentence = sentence[1:] + next_char

    # Check that the model generated the alphabet correctly.
    assert generated == alphabet
@pytest.mark.skipif(K.backend() != 'tensorflow', reason='Requires TF backend')
def test_embedding_with_clipnorm():
    """Smoke-test fitting an Embedding layer under SGD gradient clipping.

    Regression guard: clipnorm must not choke on the sparse/embedding
    gradient path of the TF backend.
    """
    model = Sequential([
        layers.Embedding(input_dim=1, output_dim=1),
    ])
    sgd = optimizers.SGD(clipnorm=0.1)
    model.compile(optimizer=sgd, loss='mse')
    model.fit(np.array([[0]]), np.array([[[0.5]]]), epochs=1)
# Allow running this test module directly (outside a pytest invocation).
if __name__ == '__main__':
    pytest.main([__file__])