初心者のRNN(LSTM) | Kerasで試してみる
上記記事のコードを実行してみる

In [30]:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np
import matplotlib.pyplot as plt
In [31]:
from tensorflow.keras.layers import LSTM
In [32]:
def sin(x, T=100):
    """Evaluate a sine wave of period ``T`` at the point(s) ``x``."""
    angle = 2.0 * np.pi * x / T
    return np.sin(angle)


# Build a noisy sine wave: the clean signal plus uniform noise in [-ampl, ampl].
def toy_problem(T=100, ampl=0.05):
    """Return ``2 * T + 1`` samples of a period-``T`` sine wave with noise."""
    xs = np.arange(0, 2 * T + 1)
    jitter = ampl * np.random.uniform(low=-1.0, high=1.0, size=len(xs))
    return sin(xs) + jitter


f = toy_problem()
In [33]:
def make_dataset(low_data, n_prev=25):
    """Slice a 1-D series into sliding windows for sequence learning.

    Bug fix: the original ignored ``n_prev`` entirely and hard-coded the
    window length to a local ``maxlen = 25``. The parameter is now used as
    the window length; its default is 25 so calls that relied on the old
    hard-coded behavior (e.g. ``make_dataset(f)``) are unchanged.

    Parameters
    ----------
    low_data : array-like
        Raw 1-D time series.
    n_prev : int, default 25
        Window length. Each sample is ``n_prev`` consecutive points; its
        target is the single point immediately following the window.

    Returns
    -------
    tuple of np.ndarray
        ``re_data`` of shape (n_samples, n_prev, 1) and
        ``re_target`` of shape (n_samples, 1).
    """
    data, target = [], []

    # Slide the window across the series; the point just after each window
    # becomes that window's prediction target.
    for i in range(len(low_data) - n_prev):
        data.append(low_data[i:i + n_prev])
        target.append(low_data[i + n_prev])

    re_data = np.array(data).reshape(len(data), n_prev, 1)
    re_target = np.array(target).reshape(len(data), 1)

    return re_data, re_target


# g -> training data (windows), h -> training labels (next point per window)
g, h = make_dataset(f)
In [34]:
# Build the model.

# Number of timesteps in one training sample (25 here).
length_of_sequence = g.shape[1]
in_out_neurons = 1
n_hidden = 300

# Simply wrapping the LSTM layer in Bidirectional() makes it bidirectional.
model = Sequential(
    [
        Bidirectional(
            LSTM(
                n_hidden,
                batch_input_shape=(None, length_of_sequence, in_out_neurons),
                return_sequences=False,
            )
        ),
        Dense(in_out_neurons),
        Activation("linear"),
    ]
)
optimizer = Adam(learning_rate=0.001)
model.compile(loss="mean_squared_error", optimizer=optimizer)
In [35]:
# Stop training once val_loss has not improved for 20 consecutive epochs.
early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=20)
model.fit(
    g,
    h,
    batch_size=300,
    epochs=100,
    validation_split=0.1,
    callbacks=[early_stopping],
)
# Early stopping ended this run before the full 100 epochs
# (epoch 50 in the captured log below).
Epoch 1/100
1/1 [==============================] - 6s 6s/step - loss: 0.5961 - val_loss: 0.1524
Epoch 2/100
1/1 [==============================] - 1s 520ms/step - loss: 0.3491 - val_loss: 0.0599
Epoch 3/100
1/1 [==============================] - 1s 519ms/step - loss: 0.2150 - val_loss: 0.2243
Epoch 4/100
1/1 [==============================] - 1s 515ms/step - loss: 0.1984 - val_loss: 0.3404
Epoch 5/100
1/1 [==============================] - 1s 529ms/step - loss: 0.1875 - val_loss: 0.2224
Epoch 6/100
1/1 [==============================] - 1s 537ms/step - loss: 0.1272 - val_loss: 0.0953
Epoch 7/100
1/1 [==============================] - 1s 538ms/step - loss: 0.0886 - val_loss: 0.0361
Epoch 8/100
1/1 [==============================] - 1s 532ms/step - loss: 0.0791 - val_loss: 0.0220
Epoch 9/100
1/1 [==============================] - 1s 532ms/step - loss: 0.0765 - val_loss: 0.0217
Epoch 10/100
1/1 [==============================] - 1s 543ms/step - loss: 0.0648 - val_loss: 0.0190
Epoch 11/100
1/1 [==============================] - 1s 548ms/step - loss: 0.0416 - val_loss: 0.0122
Epoch 12/100
1/1 [==============================] - 1s 538ms/step - loss: 0.0178 - val_loss: 0.0081
Epoch 13/100
1/1 [==============================] - 1s 519ms/step - loss: 0.0152 - val_loss: 0.0092
Epoch 14/100
1/1 [==============================] - 1s 551ms/step - loss: 0.0261 - val_loss: 0.0048
Epoch 15/100
1/1 [==============================] - 1s 535ms/step - loss: 0.0114 - val_loss: 0.0030
Epoch 16/100
1/1 [==============================] - 1s 535ms/step - loss: 0.0038 - val_loss: 0.0086
Epoch 17/100
1/1 [==============================] - 1s 523ms/step - loss: 0.0122 - val_loss: 0.0111
Epoch 18/100
1/1 [==============================] - 1s 529ms/step - loss: 0.0165 - val_loss: 0.0062
Epoch 19/100
1/1 [==============================] - 1s 528ms/step - loss: 0.0115 - val_loss: 0.0015
Epoch 20/100
1/1 [==============================] - 1s 544ms/step - loss: 0.0050 - val_loss: 0.0029
Epoch 21/100
1/1 [==============================] - 1s 517ms/step - loss: 0.0032 - val_loss: 0.0065
Epoch 22/100
1/1 [==============================] - 1s 539ms/step - loss: 0.0046 - val_loss: 0.0055
Epoch 23/100
1/1 [==============================] - 1s 528ms/step - loss: 0.0050 - val_loss: 0.0022
Epoch 24/100
1/1 [==============================] - 1s 545ms/step - loss: 0.0040 - val_loss: 0.0015
Epoch 25/100
1/1 [==============================] - 1s 533ms/step - loss: 0.0038 - val_loss: 0.0038
Epoch 26/100
1/1 [==============================] - 1s 524ms/step - loss: 0.0047 - val_loss: 0.0063
Epoch 27/100
1/1 [==============================] - 1s 540ms/step - loss: 0.0057 - val_loss: 0.0069
Epoch 28/100
1/1 [==============================] - 1s 516ms/step - loss: 0.0058 - val_loss: 0.0052
Epoch 29/100
1/1 [==============================] - 1s 528ms/step - loss: 0.0047 - val_loss: 0.0025
Epoch 30/100
1/1 [==============================] - 1s 537ms/step - loss: 0.0031 - val_loss: 0.0013
Epoch 31/100
1/1 [==============================] - 1s 515ms/step - loss: 0.0020 - val_loss: 0.0026
Epoch 32/100
1/1 [==============================] - 1s 520ms/step - loss: 0.0021 - val_loss: 0.0049
Epoch 33/100
1/1 [==============================] - 1s 561ms/step - loss: 0.0029 - val_loss: 0.0054
Epoch 34/100
1/1 [==============================] - 1s 514ms/step - loss: 0.0031 - val_loss: 0.0035
Epoch 35/100
1/1 [==============================] - 1s 541ms/step - loss: 0.0025 - val_loss: 0.0017
Epoch 36/100
1/1 [==============================] - 1s 524ms/step - loss: 0.0017 - val_loss: 0.0016
Epoch 37/100
1/1 [==============================] - 1s 542ms/step - loss: 0.0015 - val_loss: 0.0029
Epoch 38/100
1/1 [==============================] - 1s 538ms/step - loss: 0.0018 - val_loss: 0.0041
Epoch 39/100
1/1 [==============================] - 1s 520ms/step - loss: 0.0019 - val_loss: 0.0041
Epoch 40/100
1/1 [==============================] - 1s 508ms/step - loss: 0.0017 - val_loss: 0.0032
Epoch 41/100
1/1 [==============================] - 1s 535ms/step - loss: 0.0014 - val_loss: 0.0022
Epoch 42/100
1/1 [==============================] - 1s 530ms/step - loss: 0.0013 - val_loss: 0.0016
Epoch 43/100
1/1 [==============================] - 1s 527ms/step - loss: 0.0016 - val_loss: 0.0014
Epoch 44/100
1/1 [==============================] - 1s 525ms/step - loss: 0.0017 - val_loss: 0.0014
Epoch 45/100
1/1 [==============================] - 1s 549ms/step - loss: 0.0015 - val_loss: 0.0014
Epoch 46/100
1/1 [==============================] - 1s 523ms/step - loss: 0.0011 - val_loss: 0.0014
Epoch 47/100
1/1 [==============================] - 1s 532ms/step - loss: 9.4307e-04 - val_loss: 0.0015
Epoch 48/100
1/1 [==============================] - 1s 525ms/step - loss: 0.0010 - val_loss: 0.0015
Epoch 49/100
1/1 [==============================] - 1s 524ms/step - loss: 0.0012 - val_loss: 0.0015
Epoch 50/100
1/1 [==============================] - 1s 570ms/step - loss: 0.0012 - val_loss: 0.0015
Out[35]:
<keras.callbacks.History at 0x7f0d848dfe90>
In [36]:
# Prediction: run the model back over the training windows themselves
# (no separate held-out test set in this notebook).
predicted = model.predict(g)
In [37]:
# Overlay the model's predictions on the raw noisy series.
# Predictions start at x = 25 because each one consumes the 25 preceding points.
plt.figure()
plt.plot(range(25, len(predicted) + 25), predicted, color="r", label="predict_data")
plt.plot(range(0, len(f)), f, color="b", label="raw_data")  # fixed label typo: was "row_data"
plt.legend()
plt.show()