One-to-one LSTM
# one input and one output
from numpy import array
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# prepare sequence
length = 5
seq = array([i/float(length) for i in range(length)])
X = seq.reshape(len(seq), 1, 1)
y = seq.reshape(len(seq), 1)
# define LSTM configuration
n_neurons = length
n_batch = length
n_epoch = 1000
# create LSTM
model = Sequential()
model.add(LSTM(n_neurons, input_shape=(1, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
print(model.summary())
# train LSTM
model.fit(X, y, epochs=n_epoch, batch_size=n_batch, verbose=2)
# evaluate
result = model.predict(X, batch_size=n_batch, verbose=0)
for value in result:
    print('%.1f' % value)
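Here the 5-value sequence is reshaped into 5 samples of 1 time step and 1 feature each, so the network simply learns to echo one input value as one output value; after training, the predictions should closely approximate the original sequence 0.0, 0.2, 0.4, 0.6, 0.8. The summary printed above should report 140 parameters for the LSTM layer and 6 for the Dense layer. A minimal sanity-check sketch of the standard Keras counting rule (4 gates, each with input weights, recurrent weights, and a bias); the variable names below are only for illustration:

units = 5      # n_neurons
features = 1   # one input feature per time step
lstm_params = 4 * (units * (features + units) + units)   # 4 * (5*6 + 5) = 140
dense_params = units * 1 + 1                             # Dense(1): 5 weights + 1 bias = 6
print(lstm_params, dense_params)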
Many-to-one LSTM
# multiple inputs to one output
from numpy import array
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# prepare sequence
length = 5
seq = array([i/float(length) for i in range(length)])
X = seq.reshape(1, length, 1)
y = seq.reshape(1, length)
# define LSTM configuration
n_neurons = length
n_batch = 1
n_epoch = 500
# create LSTM
model = Sequential()
model.add(LSTM(n_neurons, input_shape=(length, 1)))
model.add(Dense(length))
model.compile(loss='mean_squared_error', optimizer='adam')
print(model.summary())
# train LSTM
model.fit(X, y, epochs=n_epoch, batch_size=n_batch, verbose=2)
# evaluate
result = model.predict(X, batch_size=n_batch, verbose=0)
for value in result[0,:]:
    print('%.1f' % value)
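In this many-to-one framing the same 5 values become a single sample of 5 time steps, the LSTM returns only its final hidden state, and Dense(length) maps that one 5-unit vector to all 5 outputs at once. The LSTM layer keeps its 140 parameters, but the output layer now needs 5 * 5 + 5 = 30, and it would grow further with longer sequences; this is the cost the TimeDistributed variant below avoids.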
Many-to-many LSTM
# multiple inputs and multiple outputs
from numpy import array
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import TimeDistributed
from keras.layers import LSTM
# prepare sequence
length = 5
seq = array([i/float(length) for i in range(length)])
X = seq.reshape(1, length, 1)
y = seq.reshape(1, length, 1)
# define LSTM configuration
n_neurons = length
n_batch = 1
n_epoch = 1000
# create LSTM
model = Sequential()
model.add(LSTM(n_neurons, input_shape=(length, 1), return_sequences=True))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mean_squared_error', optimizer='adam')
print(model.summary())
# train LSTM
model.fit(X, y, epochs=n_epoch, batch_size=n_batch, verbose=2)
# evaluate
result = model.predict(X, batch_size=n_batch, verbose=0)
for value in result[0,:,0]:
    print('%.1f' % value)
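With return_sequences=True the LSTM emits one 5-unit hidden state per time step instead of only the last one, and TimeDistributed(Dense(1)) applies the same single-output layer (5 weights + 1 bias = 6 parameters) to each of those steps. The size of the output layer is therefore independent of the sequence length, and the predictions, read from the (1, 5, 1) result array, should again approximate 0.0, 0.2, 0.4, 0.6, 0.8.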