HAN (Hierarchical Attention Network)
# -*- coding: utf-8 -*-
# @Time : 2019/7/4 9:37
# @Author : hejipei
# @File : keras_sentiment_HAN.py
""" """
from keras import backend as K
from keras import initializers, regularizers, constraints
from keras.layers import Layer  # more portable across Keras 2.x than keras.engine.topology
class Attention(Layer):
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]

        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.

        Put it on top of an RNN layer (GRU/LSTM/SimpleRNN) with
        return_sequences=True. The feature dimension is inferred from the RNN's
        output shape; step_dim must be the number of timesteps (e.g. maxlen).
        Example:
            # 1
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention(maxlen))
            # next add a Dense layer (for classification/regression) or whatever...
            # 2
            hidden = LSTM(64, return_sequences=True)(words)
            sentence = Attention(maxlen)(hidden)
            # next add a Dense layer (for classification/regression) or whatever...
        """
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        assert len(input_shape) == 3
        # add_weight takes shape as a keyword argument in Keras 2.x
        self.W = self.add_weight(shape=(input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            self.b = self.add_weight(shape=(input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True
    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # e = K.dot(x, self.W), computed via reshape since W is a vector
        e = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                            K.reshape(self.W, (features_dim, 1))),
                      (-1, step_dim))
        if self.bias:
            e += self.b
        e = K.tanh(e)
        a = K.exp(e)
        # apply mask after the exp; weights will be re-normalized next
        if mask is not None:
            # cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())
        # in some cases, especially early in training, the sum may be almost zero
        # and produce NaNs; adding a small epsilon to the sum works around this
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        c = K.sum(a * x, axis=1)
        return c

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
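# A minimal smoke test for the Attention layer (illustrative sketch; the
# dimensions below are arbitrary, not from the original script):
from keras import Input, Model
from keras.layers import Embedding, LSTM

_maxlen, _vocab, _embed = 100, 5000, 50
_inp = Input(shape=(_maxlen,))
_x = Embedding(_vocab, _embed)(_inp)
_x = LSTM(64, return_sequences=True)(_x)  # (batch, _maxlen, 64)
_x = Attention(_maxlen)(_x)               # (batch, 64): weighted sum over steps
Model(_inp, _x).summary()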
from keras import Input, Model
from keras.layers import Embedding, Dense, Bidirectional, CuDNNLSTM, TimeDistributed
# from attention import Attention
class HAN(object):
    def __init__(self, maxlen_sentence, maxlen_word, max_features, embedding_dims,
                 class_num=1,
                 last_activation='sigmoid'):
        self.maxlen_sentence = maxlen_sentence
        self.maxlen_word = maxlen_word
        self.max_features = max_features
        self.embedding_dims = embedding_dims
        self.class_num = class_num
        self.last_activation = last_activation

    def get_model(self):
        # Word part
        input_word = Input(shape=(self.maxlen_word,))
        x_word = Embedding(self.max_features, self.embedding_dims, input_length=self.maxlen_word)(input_word)
        x_word = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x_word)  # LSTM or GRU
        x_word = Attention(self.maxlen_word)(x_word)
        model_word = Model(input_word, x_word)
        # Sentence part
        input_sentence = Input(shape=(self.maxlen_sentence, self.maxlen_word))
        x_sentence = TimeDistributed(model_word)(input_sentence)
        x_sentence = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x_sentence)  # LSTM or GRU
        x_sentence = Attention(self.maxlen_sentence)(x_sentence)
        output = Dense(self.class_num, activation=self.last_activation)(x_sentence)
        model = Model(inputs=input_sentence, outputs=output)
        return model
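# Shape walkthrough (derived from the layers above, with the defaults used below):
#   word level:     (batch*sent, maxlen_word) -> Embedding -> (..., maxlen_word, embedding_dims)
#                   -> BiLSTM(128) -> (..., maxlen_word, 256) -> Attention -> (..., 256)
#   sentence level: TimeDistributed(model_word): (batch, maxlen_sentence, maxlen_word)
#                   -> (batch, maxlen_sentence, 256) -> BiLSTM + Attention -> (batch, 256)
#   output:         Dense(class_num, sigmoid) -> (batch, 1)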
from keras.callbacks import EarlyStopping
from keras.datasets import imdb
from keras.preprocessing import sequence
# from han import HAN
max_features = 5000
maxlen_sentence = 16
maxlen_word = 25
batch_size = 32
embedding_dims = 50
epochs = 1
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x #sentence x #word)...')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen_sentence * maxlen_word)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen_sentence * maxlen_word)
x_train = x_train.reshape((len(x_train), maxlen_sentence, maxlen_word))
x_test = x_test.reshape((len(x_test), maxlen_sentence, maxlen_word))
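# IMDB reviews are flat token sequences, so each padded 400-token review is cut
# into maxlen_sentence (16) pseudo-sentences of maxlen_word (25) tokens to fit
# HAN's two-level input; real sentence boundaries would be better if available.
# Quick sanity check (illustrative):
assert x_train.shape[1:] == (maxlen_sentence, maxlen_word)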
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = HAN(maxlen_sentence, maxlen_word, max_features, embedding_dims).get_model()
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
print('Train...')
early_stopping = EarlyStopping(monitor='val_acc', patience=3, mode='max')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          callbacks=[early_stopping],
          validation_data=(x_test, y_test))
print('Test...')
result = model.predict(x_test)
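# The sigmoid output is a probability per review; a 0.5 threshold gives hard
# labels (sketch, not part of the original script):
import numpy as np
y_pred = (result > 0.5).astype('int32').ravel()
print('Test accuracy: %.4f' % np.mean(y_pred == y_test))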
RCNN (Recurrent Convolutional Neural Network)
# -*- coding: utf-8 -*-
# @Time : 2019/7/4 9:26
# @Author : hejipei
# @File : keras_sentiment_rcnn.py
""" """
from keras import Input, Model
from keras import backend as K
from keras.layers import Embedding, Dense, SimpleRNN, Lambda, Concatenate, Conv1D, GlobalMaxPooling1D
class RCNN(object):
    def __init__(self, maxlen, max_features, embedding_dims,
                 class_num=1,
                 last_activation='sigmoid'):
        self.maxlen = maxlen
        self.max_features = max_features
        self.embedding_dims = embedding_dims
        self.class_num = class_num
        self.last_activation = last_activation

    def get_model(self):
        input_current = Input((self.maxlen,))
        input_left = Input((self.maxlen,))
        input_right = Input((self.maxlen,))
        embedder = Embedding(self.max_features, self.embedding_dims, input_length=self.maxlen)
        embedding_current = embedder(input_current)
        embedding_left = embedder(input_left)
        embedding_right = embedder(input_right)
        x_left = SimpleRNN(128, return_sequences=True)(embedding_left)
        x_right = SimpleRNN(128, return_sequences=True, go_backwards=True)(embedding_right)
        # go_backwards returns the output sequence reversed in time, so flip it back
        x_right = Lambda(lambda x: K.reverse(x, axes=1))(x_right)
        x = Concatenate(axis=2)([x_left, embedding_current, x_right])
        x = Conv1D(64, kernel_size=1, activation='tanh')(x)
        x = GlobalMaxPooling1D()(x)
        output = Dense(self.class_num, activation=self.last_activation)(x)
        model = Model(inputs=[input_current, input_left, input_right], outputs=output)
        return model
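# Shape walkthrough (derived from the layers above): each SimpleRNN branch yields
# (batch, maxlen, 128) and the embedding (batch, maxlen, embedding_dims), so the
# concatenation is (batch, maxlen, 128 + embedding_dims + 128). The kernel_size=1
# Conv1D acts as the per-position tanh projection of the RCNN paper, and global
# max pooling keeps the strongest feature over time.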
import numpy as np
from keras.callbacks import EarlyStopping
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
epochs = 1
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)...')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Prepare input for model...')
x_train_current = x_train
x_train_left = np.hstack([np.expand_dims(x_train[:, 0], axis=1), x_train[:, 0:-1]])
x_train_right = np.hstack([x_train[:, 1:], np.expand_dims(x_train[:, -1], axis=1)])
x_test_current = x_test
x_test_left = np.hstack([np.expand_dims(x_test[:, 0], axis=1), x_test[:, 0:-1]])
x_test_right = np.hstack([x_test[:, 1:], np.expand_dims(x_test[:, -1], axis=1)])
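# Sanity check of the context construction on a toy row (illustrative): the left
# context is the sequence shifted right (first token repeated) and the right
# context is the sequence shifted left (last token repeated).
_toy = np.array([[1, 2, 3, 4]])
assert (np.hstack([_toy[:, :1], _toy[:, :-1]]) == [[1, 1, 2, 3]]).all()
assert (np.hstack([_toy[:, 1:], _toy[:, -1:]]) == [[2, 3, 4, 4]]).all()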
print('x_train_current shape:', x_train_current.shape)
print('x_train_left shape:', x_train_left.shape)
print('x_train_right shape:', x_train_right.shape)
print('x_test_current shape:', x_test_current.shape)
print('x_test_left shape:', x_test_left.shape)
print('x_test_right shape:', x_test_right.shape)
print('Build model...')
model = RCNN(maxlen, max_features, embedding_dims).get_model()
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
print('Train...')
early_stopping = EarlyStopping(monitor='val_acc', patience=3, mode='max')
model.fit([x_train_current, x_train_left, x_train_right], y_train,
          batch_size=batch_size,
          epochs=epochs,
          callbacks=[early_stopping],
          validation_data=([x_test_current, x_test_left, x_test_right], y_test))
print('Test...')
result = model.predict([x_test_current, x_test_left, x_test_right])
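# Keras can also report loss/accuracy directly (sketch, not in the original script):
loss, acc = model.evaluate([x_test_current, x_test_left, x_test_right], y_test,
                           batch_size=batch_size)
print('Test accuracy: %.4f' % acc)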