I'm trying to implement an attention model based on this model, but I don't want my model to look at just one frame when deciding that frame's attention weight — I want it to score each frame with respect to the whole sequence. So what I'm doing is multiplying each frame by a sequence-summary vector, which is the output of an LSTM (return_sequences=False).
These are the modified functions:
def build(self, input_shape):
    """Create the scoring weight vector and the two internal LSTMs.

    input_shape is the (batch, timesteps, features) shape passed in by
    Keras; only the last entry (the feature size) is used here.
    """
    # Per-feature scoring weight, applied after combining seq and vec.
    self.W = self.add_weight((input_shape[-1],),
                             initializer=self.init,
                             name='{}_W'.format(self.name))
    # Default the internal LSTM width to the incoming feature size.
    if self.lstm_size is None:
        self.lstm_size = input_shape[-1]
    # Summary LSTM: emits one vector per whole input sequence.
    self.vec_lstm = LSTM(self.lstm_size, return_sequences=False)
    self.vec_lstm.build(input_shape)
    # Encoding LSTM: emits one vector per timestep.
    self.seq_lstm = LSTM(self.lstm_size, return_sequences=True)
    self.seq_lstm.build(input_shape)
    # Expose the sub-layers' parameters so the optimizer trains them too.
    params = [self.W]
    params += self.vec_lstm.trainable_weights
    params += self.seq_lstm.trainable_weights
    self.trainable_weights = params
    super(Attention2, self).build(input_shape)  # Be sure to call this somewhere!
def call(self, x, mask=None):
    """Return the attention-weighted sum of x over the time axis.

    Assumes x is (batch, timesteps, features) -- TODO confirm against
    the caller. Output shape is (batch, features).
    """
    vec = self.vec_lstm(x)  # (batch, lstm_size): whole-sequence summary
    seq = self.seq_lstm(x)  # (batch, timesteps, lstm_size): per-timestep encoding
    # Combine each timestep's encoding with the sequence summary by an
    # elementwise product. Inserting a time axis on vec and relying on
    # broadcasting avoids K.repeat, so this also works when the number
    # of timesteps is symbolic/None (variable-length sequences).
    eij = seq * K.expand_dims(vec, 1)
    # NOTE(review): self.W has length input_shape[-1]; this dot only
    # lines up when lstm_size equals the input feature size (the
    # default in build) -- confirm if lstm_size is ever set explicitly.
    eij = K.dot(eij, self.W)
    eij = K.tanh(eij)
    # Softmax over the time axis; epsilon guards against division by zero
    # (e.g. fully masked / degenerate rows).
    a = K.exp(eij)
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    a = K.expand_dims(a)
    # Weight each input frame by its attention score and sum over time.
    weighted_input = x * a
    attention = K.sum(weighted_input, axis=1)
    return attention
The naive (loop-based) code for combining the two tensors (seq and vec) would be:
# Reference implementation in plain numpy (clarity over speed):
# scale every timestep of each sequence by that sequence's summary vector.
eij = np.zeros((batch_size, sequence_length, frame_size))
for b in range(len(seq)):
    for t in range(len(seq[b])):
        eij[b, t] = seq[b][t] * vec[b]
I would appreciate help implementing this with the Keras backend.
Thank you!
This seems to provide the result I want:
# Combine the per-timestep encodings with the per-sequence summary.
vec = vec_lstm(x)  # (batch, lstm_size)
seq = seq_lstm(x)  # (batch, timesteps, lstm_size)
# Inserting a time axis on vec and letting broadcasting expand it is
# equivalent to K.repeat(vec, seq.shape[1]) * seq, but also works when
# the time dimension is symbolic/None (variable-length sequences),
# where seq.shape[1] would not be a usable repeat count.
eij = seq * K.expand_dims(vec, 1)