RNN (LSTM) for Beginners | Trying It Out with Keras
Let's run the code from the article above.

In [1]:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np
import matplotlib.pyplot as plt
In [2]:
def sin(x, T=100):
    return np.sin(2.0 * np.pi * x / T)

# Add noise to the sine wave
def toy_problem(T=100, ampl=0.05):
    x = np.arange(0, 2 * T + 1)
    noise = ampl * np.random.uniform(low=-1.0, high=1.0, size=len(x))
    return sin(x) + noise

f = toy_problem()
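
To see what the training signal looks like, a quick plot helps (this check is my addition, not part of the original article):

plt.figure()
plt.plot(f)  # noisy sine wave over 201 points (T=100, two periods)
plt.title("toy_problem(): sin + uniform noise")
plt.show()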
In [3]:
def make_dataset(raw_data, maxlen=25):
    """Slice raw_data into sliding windows of length maxlen,
    each paired with the next value as its prediction target."""
    data, target = [], []

    for i in range(len(raw_data) - maxlen):
        data.append(raw_data[i:i + maxlen])
        target.append(raw_data[i + maxlen])

    # Reshape to (samples, time steps, features) as the LSTM expects
    re_data = np.array(data).reshape(len(data), maxlen, 1)
    re_target = np.array(target).reshape(len(data), 1)

    return re_data, re_target


# g -> training data, h -> training labels
g, h = make_dataset(f)
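
As a sanity check (my addition): with len(f) == 201 and maxlen == 25, make_dataset yields 201 - 25 = 176 windows, already shaped as (samples, time steps, features):

print(g.shape)  # (176, 25, 1)
print(h.shape)  # (176, 1)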
In [4]:
# Build the model

# Number of time steps in one training sample (25 here)
length_of_sequence = g.shape[1]
in_out_neurons = 1
n_hidden = 300

model = Sequential()
model.add(LSTM(n_hidden, batch_input_shape=(None, length_of_sequence, in_out_neurons), return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
optimizer = Adam(learning_rate=0.001)  # "lr" is deprecated in favor of "learning_rate"
model.compile(loss="mean_squared_error", optimizer=optimizer)
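A side note (my addition): batch_input_shape=(None, ...) leaves the batch size unspecified anyway, so the same model can be written with the more common input_shape argument. A minimal equivalent sketch:

model = Sequential()
model.add(LSTM(n_hidden, input_shape=(length_of_sequence, in_out_neurons)))
model.add(Dense(in_out_neurons, activation="linear"))
model.compile(loss="mean_squared_error", optimizer=Adam(learning_rate=0.001))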
In [5]:
early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=20)
model.fit(g, h,
          batch_size=300,
          epochs=100,
          validation_split=0.1,
          callbacks=[early_stopping]
          )
# [Note] Early stopping halted training at epoch 38: the best val_loss (0.0014) came at epoch 18, and patience=20 allows 20 further epochs without improvement.
Epoch 1/100
1/1 [==============================] - 5s 5s/step - loss: 0.5185 - val_loss: 0.1123
Epoch 2/100
1/1 [==============================] - 0s 471ms/step - loss: 0.3079 - val_loss: 0.0381
Epoch 3/100
1/1 [==============================] - 0s 459ms/step - loss: 0.1710 - val_loss: 0.1221
Epoch 4/100
1/1 [==============================] - 0s 428ms/step - loss: 0.1240 - val_loss: 0.2379
Epoch 5/100
1/1 [==============================] - 1s 571ms/step - loss: 0.1226 - val_loss: 0.1722
Epoch 6/100
1/1 [==============================] - 0s 441ms/step - loss: 0.0821 - val_loss: 0.0817
Epoch 7/100
1/1 [==============================] - 0s 463ms/step - loss: 0.0501 - val_loss: 0.0340
Epoch 8/100
1/1 [==============================] - 0s 336ms/step - loss: 0.0408 - val_loss: 0.0168
Epoch 9/100
1/1 [==============================] - 0s 459ms/step - loss: 0.0411 - val_loss: 0.0119
Epoch 10/100
1/1 [==============================] - 0s 468ms/step - loss: 0.0403 - val_loss: 0.0101
Epoch 11/100
1/1 [==============================] - 0s 459ms/step - loss: 0.0347 - val_loss: 0.0087
Epoch 12/100
1/1 [==============================] - 0s 477ms/step - loss: 0.0251 - val_loss: 0.0078
Epoch 13/100
1/1 [==============================] - 0s 449ms/step - loss: 0.0151 - val_loss: 0.0090
Epoch 14/100
1/1 [==============================] - 1s 573ms/step - loss: 0.0094 - val_loss: 0.0130
Epoch 15/100
1/1 [==============================] - 0s 408ms/step - loss: 0.0105 - val_loss: 0.0156
Epoch 16/100
1/1 [==============================] - 0s 451ms/step - loss: 0.0114 - val_loss: 0.0116
Epoch 17/100
1/1 [==============================] - 0s 392ms/step - loss: 0.0066 - val_loss: 0.0046
Epoch 18/100
1/1 [==============================] - 1s 517ms/step - loss: 0.0029 - val_loss: 0.0014
Epoch 19/100
1/1 [==============================] - 0s 447ms/step - loss: 0.0042 - val_loss: 0.0027
Epoch 20/100
1/1 [==============================] - 1s 671ms/step - loss: 0.0065 - val_loss: 0.0043
Epoch 21/100
1/1 [==============================] - 1s 560ms/step - loss: 0.0063 - val_loss: 0.0038
Epoch 22/100
1/1 [==============================] - 0s 470ms/step - loss: 0.0043 - val_loss: 0.0024
Epoch 23/100
1/1 [==============================] - 0s 442ms/step - loss: 0.0025 - val_loss: 0.0021
Epoch 24/100
1/1 [==============================] - 0s 332ms/step - loss: 0.0026 - val_loss: 0.0029
Epoch 25/100
1/1 [==============================] - 0s 455ms/step - loss: 0.0040 - val_loss: 0.0033
Epoch 26/100
1/1 [==============================] - 0s 442ms/step - loss: 0.0053 - val_loss: 0.0026
Epoch 27/100
1/1 [==============================] - 0s 391ms/step - loss: 0.0053 - val_loss: 0.0017
Epoch 28/100
1/1 [==============================] - 0s 440ms/step - loss: 0.0045 - val_loss: 0.0017
Epoch 29/100
1/1 [==============================] - 0s 435ms/step - loss: 0.0039 - val_loss: 0.0024
Epoch 30/100
1/1 [==============================] - 0s 366ms/step - loss: 0.0036 - val_loss: 0.0031
Epoch 31/100
1/1 [==============================] - 1s 507ms/step - loss: 0.0033 - val_loss: 0.0033
Epoch 32/100
1/1 [==============================] - 0s 425ms/step - loss: 0.0028 - val_loss: 0.0028
Epoch 33/100
1/1 [==============================] - 0s 410ms/step - loss: 0.0023 - val_loss: 0.0021
Epoch 34/100
1/1 [==============================] - 0s 375ms/step - loss: 0.0022 - val_loss: 0.0017
Epoch 35/100
1/1 [==============================] - 0s 404ms/step - loss: 0.0025 - val_loss: 0.0016
Epoch 36/100
1/1 [==============================] - 0s 397ms/step - loss: 0.0027 - val_loss: 0.0017
Epoch 37/100
1/1 [==============================] - 0s 401ms/step - loss: 0.0025 - val_loss: 0.0016
Epoch 38/100
1/1 [==============================] - 0s 434ms/step - loss: 0.0021 - val_loss: 0.0015
Out[5]:
<keras.callbacks.History at 0x7feb737edfd0>
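
As Out[5] shows, model.fit returns a History object. Capturing it makes it easy to plot the learning curves; a minimal sketch (my addition, using the same fit call as above; note that calling fit again continues from the already-trained weights, so in a fresh run you would capture the History on the first call):

history = model.fit(g, h, batch_size=300, epochs=100,
                    validation_split=0.1, callbacks=[early_stopping])
plt.plot(history.history["loss"], label="loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.legend()
plt.show()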
In [6]:
# Predict on the training windows
predicted = model.predict(g)
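
Each prediction consumes a 25-step window, so predicted[i] estimates f[i + 25]; that offset is why the plot below shifts the predicted curve by 25. A quick check (my addition):

print(predicted.shape)  # (176, 1): one value per window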
In [7]:
plt.figure()
plt.plot(range(25, len(predicted) + 25), predicted, color="r", label="predicted_data")
plt.plot(range(0, len(f)), f, color="b", label="raw_data")
plt.legend()
plt.show()