tinygrad.py
import time
from tinygrad.tensor import Tensor
from tinygrad.nn import Linear
from tinygrad.nn.optim import SGD
from tinygrad.engine.jit import TinyJit
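
# Micro-benchmark: train a small 2-layer MLP (512 -> 256 -> 128 with sigmoid
# activations) using SGD, timing the jitted forward/backward/step loop.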
# Define Sequential and Sigmoid containers on top of tinygrad:
class Sequential:
    def __init__(self, *layers):
        self.layers = layers

    def __call__(self, x):
        for layer in self.layers:
            x = layer(x)
        return x

    def parameters(self):
        # Collect the trainable tensors from each sub-layer. If a layer defines
        # parameters(), use that; otherwise fall back to its .weight/.bias
        # attributes (tinygrad's Linear exposes those rather than parameters()).
        params = []
        for layer in self.layers:
            if hasattr(layer, "parameters"):
                params += list(layer.parameters())
            else:
                if hasattr(layer, "weight"):
                    params += [layer.weight]
                if getattr(layer, "bias", None) is not None:
                    params += [layer.bias]
        return params
class Sigmoid:
    def __call__(self, x):
        return x.sigmoid()

    def parameters(self):
        return []
# Build model
nn = Sequential(
    Linear(512, 256),
    Sigmoid(),
    Linear(256, 128),
    Sigmoid(),
)
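# Sanity check: print the trainable tensors that will be handed to the optimizer.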
print(nn.parameters())
inp = Tensor.randn(2, 512)
opt = SGD(nn.parameters(), lr=0.01)
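# SGD holds references to the parameter tensors: zero_grad() clears their
# gradients, backward() populates them, and step() updates the weights in place.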
def f():
    opt.zero_grad()
    out = nn(inp)
    loss = out.sum()  # placeholder loss; substitute a real loss as needed
    loss.backward()
    opt.step()
# Optionally JIT the function for maximum performance (compiles forward+backward+step)
@TinyJit
def f_jit():
    return f()
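# TinyJit runs the function eagerly on its first calls while capturing the
# kernels, then replays the captured graph on later calls, skipping most of
# the Python-side overhead.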
def benchmark(func, name=""):
    start = time.time()
    func()
    print(f"{name}: {time.time()-start:.4f}s")
def loop_many_jit(n):
    for _ in range(n):
        f_jit()
# Enable training mode (needed by layers such as dropout/batchnorm; harmless here).
Tensor.training = True
benchmark(lambda: loop_many_jit(100_000), name="JIT Tinygrad Forward/Backward/Step x100k")
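
# Added sketch (not part of the original benchmark): time the eager, un-jitted
# step for comparison, reusing the f() and benchmark() helpers defined above.
# The iteration count is an arbitrary choice, kept small since eager mode is slower.
def loop_many(n):
    for _ in range(n):
        f()

benchmark(lambda: loop_many(1_000), name="Eager Tinygrad Forward/Backward/Step x1k")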