tiny_LLM_model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from submodels.embedding import TokenEmbedding, PositionalEmbedding, SinusoidalPositionalEncoding
from submodels.transformer_decoder_block import TrandformerDecoderBlock
class TinyLLM(nn.Module):
"""
TinyLLM: a compact GPT-style decoder-only Transformer.
Args:
vocab_size: size of your vocabulary (e.g., char vocab)
block_size: max sequence length (context window)
n_layer: number of decoder blocks
n_head: number of attention heads per block
d_model: embedding dimension (must be divisible by n_head)
dropout: dropout prob (0.0–0.2 typical for small data)
tie_weights: tie LM head weights to token embedding (saves params)
Shapes:
forward(idx): idx (B, T) -> logits (B, T, vocab_size)
forward(idx, targets): also returns cross-entropy loss (scalar)
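
    Example (illustrative; shapes follow the conventions above):
        >>> model = TinyLLM(vocab_size=100)
        >>> idx = torch.randint(0, 100, (2, 16))   # (B, T) token IDs
        >>> logits, loss = model(idx, targets=idx)
        >>> logits.shape
        torch.Size([2, 16, 100])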
"""
def __init__(self,
vocab_size: int,
block_size: int = 256,
n_layer: int = 4,
n_head: int = 4,
d_model: int = 256,
dropout: float = 0.1,
tie_weights: bool = True):
super().__init__()
assert d_model % n_head == 0, "d_model must be divisible by n_head"
self.vocab_size = vocab_size
self.block_size = block_size
        # Token and positional embeddings
        self.token_emb = TokenEmbedding(vocab_size=vocab_size, d_model=d_model)
        self.position_emb = SinusoidalPositionalEncoding(d_model=d_model, max_len=block_size)
self.drop = nn.Dropout(dropout)
# Create transformer decoder blocks
self.blocks = nn.ModuleList()
for _ in range(n_layer):
block = TrandformerDecoderBlock(
d_model=d_model,
n_heads=n_head,
dropout=dropout
)
self.blocks.append(block)
# Final layers
self.layer_norm = nn.LayerNorm(d_model)
self.lm_head = nn.Linear(d_model, vocab_size)
        # Tie the weights (reuses one weight matrix instead of two)
if tie_weights:
self.lm_head.weight = self.token_emb.embedding.weight # weight tying
self.config = {
"vocab_size": vocab_size,
"block_size": block_size,
"n_layer": n_layer,
"n_head": n_head,
"d_model": d_model,
"dropout": dropout,
"tie_weights": tie_weights,
}
def forward(self, tokens: torch.Tensor, targets: torch.Tensor = None):
        # Check that the sequence length does not exceed the block size
B, T = tokens.shape
if T > self.block_size:
raise ValueError(f"Sequence length {T} > block_size {self.block_size}")
        # Embed tokens and add sinusoidal positional encodings
        x = self.token_emb(tokens) + self.position_emb(T)  # (B,T,D)
x = self.drop(x)
        # Transformer decoder blocks
        for decoder_block in self.blocks:
            x = decoder_block(x)
        # LM head (output layer)
x = self.layer_norm(x)
logits = self.lm_head(x) # (B,T,vocab)
        # Compute cross-entropy loss if targets were given (training mode)
loss = None
if targets is not None:
loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)),
targets.reshape(-1))
return logits, loss
@torch.no_grad()
def generate(self, tokens, max_new_tokens, temperature=1.0, top_k=None):
"""
Autoregressive sampling.
tokens: (B, T_start) token IDs
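
        Example (illustrative; continues from an already constructed model):
            >>> out = model.generate([1, 2, 3], max_new_tokens=5, top_k=10)
            >>> out.shape   # (1, 3 + 5): the prompt plus the new tokens
            torch.Size([1, 8])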
"""
self.eval()
device = next(self.parameters()).device
# Coerce to (B, T) LongTensor on the right device
if isinstance(tokens, list):
tokens = torch.tensor(tokens, dtype=torch.long, device=device).unsqueeze(0)
elif torch.is_tensor(tokens):
tokens = tokens.to(device)
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
else:
raise TypeError("tokens must be list[int] or torch.Tensor")
for _ in range(max_new_tokens):
            # Crop to at most block_size tokens, since the model can't attend further back
token_cond = tokens[:, -self.block_size:]
            # Forward pass to get logits of shape (B, T, vocab)
logits, _ = self(token_cond)
            # Apply temperature to control randomness (<1 sharpens, >1 flattens the distribution)
logits = logits[:, -1, :] / max(temperature, 1e-6)
            # If top_k is set, restrict sampling to the k most likely tokens (avoids sampling rare tokens)
if top_k is not None:
v, _ = torch.topk(logits, top_k)
logits[logits < v[:, [-1]]] = -float('inf')
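                # e.g. top_k=3 on logits [2.0, 1.0, 0.5, 0.1] keeps the first three
                # and masks 0.1 to -inf, so it gets zero probability after softmax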
            # Convert the logits into a probability distribution
probs = F.softmax(logits, dim=-1)
# Draw a token from the distribution
next_id = torch.multinomial(probs, num_samples=1) # (B,1)
# Append it to the sequence and continue
tokens = torch.cat([tokens, next_id], dim=1)
return tokens
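

if __name__ == "__main__":
    # Minimal smoke test (illustrative; assumes a small character-level vocab
    # and that the submodels package is importable).
    model = TinyLLM(vocab_size=65, block_size=64, n_layer=2, n_head=2, d_model=64)
    batch = torch.randint(0, 65, (2, 32))  # (B, T) random token IDs
    logits, loss = model(batch, targets=batch)
    print(logits.shape, loss.item())       # torch.Size([2, 32, 65]) and a scalar loss
    sample = model.generate(batch[:, :8], max_new_tokens=16, temperature=1.0, top_k=20)
    print(sample.shape)                    # torch.Size([2, 24]): 8 prompt + 16 new tokens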