encoder_layers.py
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import LSTM
import tensorflow_addons as tfa


class TransformerBlock(layers.Layer):
    """Post-norm Transformer encoder block: multi-head self-attention
    followed by a position-wise feed-forward network, each wrapped in
    dropout and a residual connection + LayerNorm."""

    def __init__(self, d_model, num_heads, ff_dim, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=d_model)
        # Position-wise feed-forward network: expand to ff_dim, project back to d_model.
        self.ffn = keras.Sequential(
            [
                layers.Dense(ff_dim, activation="relu"),
                layers.Dense(d_model),
            ]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training=None):
        # Self-attention: the sequence attends to itself (query = value = inputs).
        attn_output = self.att(inputs, inputs)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(inputs + attn_output)  # residual connection
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)  # residual connection


class MLP(layers.Layer):
    """Two-layer MLP encoder: projects the feature axis to max_len,
    applies GELU, projects back to d_model, then applies dropout."""

    def __init__(self, max_len, d_model, dropout_rate, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mlp = keras.Sequential(
            [
                layers.Dense(units=max_len),
                tfa.layers.GELU(),  # layers.Activation("gelu") is the built-in equivalent in TF >= 2.4
                layers.Dense(units=d_model),
                layers.Dropout(rate=dropout_rate),
            ]
        )

    def call(self, inputs, training=None):
        return self.mlp(inputs, training=training)


def LSTMEncoder(d_model, activation="tanh", dropout=0.1, **kwargs):
    # return_sequences=True keeps the full per-timestep output, so this
    # encoder is shape-compatible with the other encoders.
    return LSTM(units=d_model, activation=activation, dropout=dropout, return_sequences=True)


def TransformerEncoder(d_model, num_heads, ff_dim, rate, **kwargs):
    return TransformerBlock(d_model, num_heads, ff_dim, rate)


def MLPEncoder(max_len, d_model, dropout_rate, *args, **kwargs):
    return MLP(max_len, d_model, dropout_rate, *args, **kwargs)
# Factory registry mapping encoder names to their constructors.
ENCODER_REGISTRY = {
    "lstm": LSTMEncoder,
    "transformer": TransformerEncoder,
    "mlp": MLPEncoder,
}
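

# A minimal usage sketch (illustrative, not part of the original module):
# builds each registered encoder and pushes a dummy (batch, max_len, d_model)
# tensor through it. The shapes and hyperparameters below are assumptions
# chosen for the demo, not project defaults.
if __name__ == "__main__":
    batch, max_len, d_model = 2, 16, 32
    dummy = tf.random.uniform((batch, max_len, d_model))
    encoders = {
        "lstm": ENCODER_REGISTRY["lstm"](d_model=d_model),
        "transformer": ENCODER_REGISTRY["transformer"](
            d_model=d_model, num_heads=4, ff_dim=64, rate=0.1
        ),
        "mlp": ENCODER_REGISTRY["mlp"](max_len=max_len, d_model=d_model, dropout_rate=0.1),
    }
    for name, encoder in encoders.items():
        # Every encoder maps (batch, max_len, d_model) -> (batch, max_len, d_model).
        print(name, encoder(dummy).shape)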