# transformer_encoder.py
import numpy as np
from encoder_block import EncoderBlock
from positional_encoding import positional_encoding
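

# For reference, a minimal sketch of the sinusoidal positional encoding that the
# imported `positional_encoding(max_seq_len, d_model)` is assumed to implement
# (the standard "Attention Is All You Need" formulation, returning an array of
# shape (max_seq_len, d_model)). The imported module may differ in detail; the
# helper name below is hypothetical and kept distinct so it does not shadow the
# import above.
def _sinusoidal_positional_encoding_reference(max_seq_len, d_model):
    positions = np.arange(max_seq_len)[:, np.newaxis]   # (max_seq_len, 1)
    dims = np.arange(d_model)[np.newaxis, :]             # (1, d_model)
    # Each pair of dimensions (2i, 2i+1) shares the frequency 1 / 10000^(2i / d_model).
    angle_rates = 1.0 / np.power(10000.0, (2 * (dims // 2)) / d_model)
    angles = positions * angle_rates                      # (max_seq_len, d_model)
    pe = np.zeros((max_seq_len, d_model))
    pe[:, 0::2] = np.sin(angles[:, 0::2])  # sine on even dimensions
    pe[:, 1::2] = np.cos(angles[:, 1::2])  # cosine on odd dimensions
    return pe
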
class TransformerEncoder:
    def __init__(self, num_layers, d_model, num_heads, d_ff, max_seq_len):
        """
        Args:
            num_layers: number of encoder blocks to stack
            d_model: input/output dimension
            num_heads: number of attention heads
            d_ff: feedforward hidden dimension
            max_seq_len: maximum length of input sequences
        """
        self.num_layers = num_layers
        self.d_model = d_model
        self.pos_encoding = positional_encoding(max_seq_len, d_model)  # (max_seq_len, d_model)
        self.encoder_blocks = [EncoderBlock(d_model, num_heads, d_ff) for _ in range(num_layers)]

    def __call__(self, x, mask=None):
        """
        Forward pass through the Transformer encoder.

        Args:
            x: input of shape (batch_size, seq_len, d_model)
            mask: optional attention mask passed to each encoder block

        Returns:
            Encoded output of shape (batch_size, seq_len, d_model)
        """
        batch_size, seq_len, _ = x.shape

        # Add positional encoding (broadcast over the batch dimension);
        # use out-of-place addition so the caller's array is not mutated.
        x = x + self.pos_encoding[:seq_len]

        # Pass through the stacked encoder blocks
        for block in self.encoder_blocks:
            x = block(x, mask)
        return x
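

# For reference, a minimal sketch of the behaviour this file assumes from the
# imported `EncoderBlock`: multi-head self-attention followed by a position-wise
# feed-forward network, each wrapped in a residual connection and layer
# normalization, and callable as `block(x, mask)` on (batch, seq_len, d_model)
# inputs. The class below is a hypothetical, self-contained reference (randomly
# initialized, no training); the real EncoderBlock may differ in its internals
# and mask convention.
class _EncoderBlockReference:
    def __init__(self, d_model, num_heads, d_ff):
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
        self.num_heads = num_heads
        self.d_head = d_model // num_heads
        # Randomly initialized projection matrices for Q, K, V, the attention
        # output, and the two feed-forward layers.
        self.w_q = np.random.randn(d_model, d_model) / np.sqrt(d_model)
        self.w_k = np.random.randn(d_model, d_model) / np.sqrt(d_model)
        self.w_v = np.random.randn(d_model, d_model) / np.sqrt(d_model)
        self.w_o = np.random.randn(d_model, d_model) / np.sqrt(d_model)
        self.w_1 = np.random.randn(d_model, d_ff) / np.sqrt(d_model)
        self.w_2 = np.random.randn(d_ff, d_model) / np.sqrt(d_ff)

    @staticmethod
    def _layer_norm(x, eps=1e-6):
        mean = x.mean(axis=-1, keepdims=True)
        std = x.std(axis=-1, keepdims=True)
        return (x - mean) / (std + eps)

    def _self_attention(self, x, mask):
        batch_size, seq_len, d_model = x.shape

        def split_heads(t):  # (batch, seq_len, d_model) -> (batch, heads, seq_len, d_head)
            return t.reshape(batch_size, seq_len, self.num_heads, self.d_head).transpose(0, 2, 1, 3)

        q, k, v = split_heads(x @ self.w_q), split_heads(x @ self.w_k), split_heads(x @ self.w_v)
        scores = q @ k.transpose(0, 1, 3, 2) / np.sqrt(self.d_head)  # (batch, heads, seq, seq)
        if mask is not None:
            scores = np.where(mask, scores, -1e9)  # assumed convention: True = attend
        weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
        weights = weights / weights.sum(axis=-1, keepdims=True)      # softmax over key positions
        out = (weights @ v).transpose(0, 2, 1, 3).reshape(batch_size, seq_len, d_model)
        return out @ self.w_o

    def __call__(self, x, mask=None):
        # Self-attention sub-layer with residual connection and layer norm.
        x = self._layer_norm(x + self._self_attention(x, mask))
        # Position-wise feed-forward sub-layer (ReLU), residual, layer norm.
        ffn = np.maximum(0.0, x @ self.w_1) @ self.w_2
        return self._layer_norm(x + ffn)
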

# Quick test of the Transformer encoder
if __name__ == "__main__":
    np.random.seed(0)

    batch_size = 2
    seq_len = 5
    d_model = 8
    num_heads = 2
    d_ff = 32
    num_layers = 2
    max_seq_len = 10

    x = np.random.randn(batch_size, seq_len, d_model)
    transformer_encoder = TransformerEncoder(num_layers, d_model, num_heads, d_ff, max_seq_len)
    output = transformer_encoder(x)
    print("Transformer Encoder Output shape:", output.shape)  # Should be (2, 5, 8)