2 changes: 1 addition & 1 deletion examples/autograd/basic_gradient.kg
@@ -1,6 +1,6 @@
:" Basic Gradient Computation with KlongPy"
:" Run with: kgpy basic_gradient.kg"
:" Or with torch: USE_TORCH=1 kgpy basic_gradient.kg"
:" Or with torch: kgpy --backend torch basic_gradient.kg"
:" The :> operator uses PyTorch autograd when available, else falls back to numeric"

.p("KlongPy Autograd Examples")
29 changes: 26 additions & 3 deletions examples/autograd/basic_gradient.py
@@ -4,13 +4,36 @@
This example demonstrates computing gradients of simple functions
using KlongPy's autograd capabilities with the PyTorch backend.

Run with: USE_TORCH=1 python basic_gradient.py
Run with: python basic_gradient.py --backend torch
"""
import argparse

from klongpy import KlongInterpreter
from klongpy.backends import list_backends
import numpy as np

# Create interpreter (uses torch backend when USE_TORCH=1)
klong = KlongInterpreter()
def parse_args():
    parser = argparse.ArgumentParser(
        description="KlongPy autograd example using configurable backends."
    )
    parser.add_argument(
        "--backend",
        choices=list_backends(),
        default=None,
        help="Array backend to use (default: numpy).",
    )
    parser.add_argument(
        "--device",
        default=None,
        help="Torch device override (cpu, cuda, mps). Only applies to torch backend.",
    )
    return parser.parse_args()


args = parse_args()

# Create interpreter (use --backend torch to enable PyTorch)
klong = KlongInterpreter(backend=args.backend, device=args.device)

print("KlongPy Autograd Examples")
print("=" * 50)
2 changes: 1 addition & 1 deletion examples/autograd/black_scholes_greeks.kg
@@ -4,7 +4,7 @@
:" Instead of deriving Greek formulas by hand, we compute them"
:" automatically by differentiating the price function!"
:" This works for ANY pricing model, not just Black-Scholes."
:" Run with: USE_TORCH=1 kgpy black_scholes_greeks.kg"
:" Run with: kgpy --backend torch black_scholes_greeks.kg"
:" ============================================================"

.p("Black-Scholes Greeks via Autograd")
2 changes: 1 addition & 1 deletion examples/autograd/black_scholes_live.kg
@@ -3,7 +3,7 @@
:" ============================================================"
:" Fetches real option data and verifies our model price"
:" matches the market price."
:" Run with: USE_TORCH=1 kgpy black_scholes_live.kg"
:" Run with: kgpy --backend torch black_scholes_live.kg"
:" ============================================================"

.p("Black-Scholes Greeks - LIVE Market Verification")
46 changes: 44 additions & 2 deletions examples/autograd/compile_demo.kg
@@ -8,7 +8,7 @@
:" On macOS, install Xcode Command Line Tools: xcode-select --install"
:" On Linux, install build-essential: apt-get install build-essential"
:" "
:" Run with: USE_TORCH=1 kgpy compile_demo.kg"
:" Run with: kgpy --backend torch compile_demo.kg"
:" ============================================================"

.p("Function Compilation Demo")
@@ -86,6 +86,48 @@ poly::{(x^4)-(3*x^3)+(2*x^2)-x+1}
.p(" cpoly::.compile(poly;2.0)")
.p("")

:" ============================================================"
:" Live compilation demo"
:" ============================================================"

.p("Live Demo - Compile and Run:")
.p("----------------------------")
.p("")

:" Show available modes and backends"
info::.cmodes()
.d(" modes: ");.p(info?"modes")
.d(" backends: ");.p(info?"backends")
.p("")

:" Choose backend/mode for the demo"
:" Use compilebackend::inductor for real compilation (requires C++ toolchain)"
compilebackend::"eager"
compilemode::"default"

.d(" demo backend: ");.p(compilebackend)
.d(" demo mode: ");.p(compilemode)
.p("")

:" Compile with selectable options"
compileopts:::{};compileopts,"mode",,compilemode;compileopts,"backend",,compilebackend
cf::.compilex(f1;3.0;compileopts)
cpoly::.compilex(poly;2.0;compileopts)

.d(" f1(5) = ");.p(f1(5.0))
.d(" cf(5) = ");.p(cf(5.0))
.d(" poly(3) = ");.p(poly(3.0))
.d(" cpoly(3) = ");.p(cpoly(3.0))
.p("")

:" Extended compilation options (mode is ignored by eager backend)"
fastcf::.compilex(f1;3.0;:{["mode" "max-autotune"] ["backend" "eager"]})
eagercf::.compilex(f1;3.0;:{["backend" "eager"]})

.d(" fastcf(7) = ");.p(fastcf(7.0))
.d(" eagercf(7)= ");.p(eagercf(7.0))
.p("")

:" ============================================================"
:" When to use compilation"
:" ============================================================"
@@ -107,7 +149,7 @@
.p(" - Works best with tensor operations")
.p("")
.p("Requirements:")
.p(" - PyTorch backend (USE_TORCH=1)")
.p(" - PyTorch backend (--backend torch)")
.p(" - C++ compiler for inductor backend (clang++ or g++)")
.p(" - Use eager backend to bypass C++ requirement")
.p(" - On macOS: xcode-select --install")
14 changes: 7 additions & 7 deletions examples/autograd/differentiable_physics.kg
@@ -10,7 +10,7 @@
:" compute the miss distance, and use autograd to find"
:" the gradient with respect to launch angle."
:" "
:" Run with: USE_TORCH=1 kgpy differentiable_physics.kg"
:" Run with: kgpy --backend torch differentiable_physics.kg"
:" ============================================================"

.p("Differentiable Physics: Projectile Targeting")
@@ -32,7 +32,7 @@ v0sq::625.0 :" v0^2 precomputed"
tgtX::50.0

:" Launch angle (this is what we optimize)"
angle::0.5 :" Initial guess: ~28 degrees"
angle::1.2 :" Initial guess: ~69 degrees (way off!)"

.d("Target x: ");.p(tgtX)
.d("Initial velocity: ");.p(v0)
@@ -60,8 +60,8 @@ loss::{(range(x)-tgtX)^2}
.p("(Gradients computed through physics equations!)")
.p("")

lr::0.0001
epochs::200
lr::0.000003
epochs::300

:" Training step - update angle using gradient"
:" Constrain angle to valid range (0.1 to 1.4 radians)"
@@ -70,9 +70,9 @@ step::{grad::loss:>angle;angle::(0.1|(angle-(lr*grad)))&1.4;0}
:" Progress printing"
printProg::{.d("Epoch ");.d(x);.d(": angle=");.d(angle*57.3);.d(" deg, range=");.d(range(angle));.d(", miss=");.p(sqrt(loss(angle)))}

:" Training loop"
trainStep::{step();:[0=(x!20);printProg(x);0]}
trainStep'!epochs
:" Training loop (run in batches of 30, print progress between batches)"
batch::{step'!30;printProg(x*30)}
batch'!10

.p("")
.p("Optimization complete!")
2 changes: 1 addition & 1 deletion examples/autograd/gradcheck_demo.kg
@@ -3,7 +3,7 @@
:" ============================================================"
:" This example demonstrates how to verify that your autograd"
:" gradients are mathematically correct using torch.autograd.gradcheck."
:" Run with: USE_TORCH=1 kgpy gradcheck_demo.kg"
:" Run with: kgpy --backend torch gradcheck_demo.kg"
:" ============================================================"

.p("Gradient Verification Demo")
2 changes: 1 addition & 1 deletion examples/autograd/gradient_descent.kg
@@ -7,7 +7,7 @@
:" 3. Linear regression fitting"
:" 4. Quadratic curve fitting"
:" Run with: kgpy gradient_descent.kg"
:" Or with torch: USE_TORCH=1 kgpy gradient_descent.kg"
:" Or with torch: kgpy --backend torch gradient_descent.kg"
:" ============================================================"

.p("Gradient Descent with KlongPy Autograd")
9 changes: 5 additions & 4 deletions examples/autograd/linear_regression.kg
@@ -1,6 +1,6 @@
:" Linear Regression with Gradient Descent using KlongPy"
:" Run with: kgpy linear_regression.kg"
:" Or with torch: USE_TORCH=1 kgpy linear_regression.kg"
:" Or with torch: kgpy --backend torch linear_regression.kg"
:" Uses the :> operator which falls back to numeric gradient without torch"

.p("Linear Regression with KlongPy Autograd")
@@ -53,10 +53,11 @@ epochs::30
printEpoch::{.d("Epoch ");.d(x);.d(": loss=");.d(lossW(w));.d(", w=");.d(w);.d(", b=");.p(b)}

:" Training step function"
step::{gw::lossW:>w;gb::lossB:>b;w::w-(lr*gw);b::b-(lr*gb);:[0=(x!5);printEpoch(x);0]}
step::{gw::lossW:>w;gb::lossB:>b;w::w-(lr*gw);b::b-(lr*gb);0}

:" Run training"
step'!epochs
:" Run training (batches of 5, print progress between batches)"
batch::{step'!5;printEpoch(x*5)}
batch'!6

.p("")
.p("Final parameters:")
80 changes: 50 additions & 30 deletions examples/autograd/linear_regression.py
@@ -4,13 +4,36 @@
This example demonstrates training a simple linear regression model
using KlongPy's autograd capabilities with the PyTorch backend.

Run with: USE_TORCH=1 python linear_regression.py
Run with: python linear_regression.py --backend torch
"""
import argparse

from klongpy import KlongInterpreter
from klongpy.backends import list_backends
import numpy as np

def parse_args():
    parser = argparse.ArgumentParser(
        description="Linear regression with KlongPy autograd and selectable backends."
    )
    parser.add_argument(
        "--backend",
        choices=list_backends(),
        default=None,
        help="Array backend to use (default: numpy).",
    )
    parser.add_argument(
        "--device",
        default=None,
        help="Torch device override (cpu, cuda, mps). Only applies to torch backend.",
    )
    return parser.parse_args()


args = parse_args()

# Create interpreter
klong = KlongInterpreter()
klong = KlongInterpreter(backend=args.backend, device=args.device)

print("Linear Regression with KlongPy Autograd")
print("=" * 50)
@@ -25,11 +25,48 @@

# Put data into klong context
klong['X'] = X
klong['y_true'] = y_true
klong['ytrue'] = y_true
klong['nsamples'] = float(n_samples)

# Initialize parameters
klong['w'] = np.array([0.0], dtype=np.float32) # weight
klong['b'] = np.array([0.0], dtype=np.float32) # bias
klong['w'] = 0.0 # weight
klong['b'] = 0.0 # bias

print("True parameters: w=2.0, b=3.0")
print("Initial parameters: w=0.0, b=0.0")
@@ -39,55 +63,51 @@
# Mean squared error loss
klong('''
predict::{(w*x)+b}
mse::{+/((predict(X))-y_true)^2}
grad_mse::∇mse
mse::{(+/((predict(X))-ytrue)^2)%nsamples}
''')

# Training parameters
learning_rate = 0.01
n_epochs = 100
n_epochs = 500

print(f"Training for {n_epochs} epochs with learning_rate={learning_rate}")
print("-" * 50)

for epoch in range(n_epochs):
    # Compute loss
    loss = float(klong('mse([w b])'))

    # Compute gradients with respect to [w, b]
    params = np.array([float(klong('*w')), float(klong('*b'))], dtype=np.float32)
    klong['params'] = params
    loss = float(klong('mse(0)'))

    # We need to compute gradient of loss w.r.t. parameters
    # For simplicity, compute numerical gradients
    # Compute numerical gradients
    cur_w = float(klong('w'))
    cur_b = float(klong('b'))
    eps = 1e-4

    # Gradient w.r.t. w
    klong['w'] = np.array([params[0] + eps], dtype=np.float32)
    loss_plus = float(klong('mse([w b])'))
    klong['w'] = np.array([params[0] - eps], dtype=np.float32)
    loss_minus = float(klong('mse([w b])'))
    klong['w'] = cur_w + eps
    loss_plus = float(klong('mse(0)'))
    klong['w'] = cur_w - eps
    loss_minus = float(klong('mse(0)'))
    grad_w = (loss_plus - loss_minus) / (2 * eps)

    # Gradient w.r.t. b
    klong['w'] = np.array([params[0]], dtype=np.float32)
    klong['b'] = np.array([params[1] + eps], dtype=np.float32)
    loss_plus = float(klong('mse([w b])'))
    klong['b'] = np.array([params[1] - eps], dtype=np.float32)
    loss_minus = float(klong('mse([w b])'))
    klong['w'] = cur_w
    klong['b'] = cur_b + eps
    loss_plus = float(klong('mse(0)'))
    klong['b'] = cur_b - eps
    loss_minus = float(klong('mse(0)'))
    grad_b = (loss_plus - loss_minus) / (2 * eps)

    # Update parameters
    new_w = params[0] - learning_rate * grad_w
    new_b = params[1] - learning_rate * grad_b
    new_w = cur_w - learning_rate * grad_w
    new_b = cur_b - learning_rate * grad_b

    klong['w'] = np.array([new_w], dtype=np.float32)
    klong['b'] = np.array([new_b], dtype=np.float32)
    klong['w'] = new_w
    klong['b'] = new_b

    if epoch % 10 == 0 or epoch == n_epochs - 1:
    if epoch % 50 == 0 or epoch == n_epochs - 1:
        print(f"Epoch {epoch:3d}: loss={loss:10.4f}, w={new_w:.4f}, b={new_b:.4f}")

print()
print("Final parameters:")
print(f" Learned: w={float(klong('*w')):.4f}, b={float(klong('*b')):.4f}")
print(f" Learned: w={float(klong('w')):.4f}, b={float(klong('b')):.4f}")
print(f" True: w=2.0000, b=3.0000")
2 changes: 1 addition & 1 deletion examples/autograd/neural_net.kg
@@ -5,7 +5,7 @@
:" - Single neuron learning AND gate"
:" - Function approximation with hidden layer"
:" Run with: kgpy neural_net.kg"
:" Or with torch: USE_TORCH=1 kgpy neural_net.kg"
:" Or with torch: kgpy --backend torch neural_net.kg"
:" ============================================================"

:" Import math functions from current backend (works with numpy and torch)"
6 changes: 3 additions & 3 deletions examples/autograd/numeric_vs_autograd.kg
@@ -3,10 +3,10 @@
:" ============================================================"
:" This example compares the two gradient operators:"
:" - ∇ (nabla): Numeric differentiation using finite differences"
:" - :> : Autograd (uses PyTorch when USE_TORCH=1, else numeric)"
:" - :> : Autograd (uses PyTorch when backend is torch (--backend torch), else numeric)"
:" Both operators work with any backend."
:" Run with: kgpy numeric_vs_autograd.kg"
:" Or with torch: USE_TORCH=1 kgpy numeric_vs_autograd.kg"
:" Or with torch: kgpy --backend torch numeric_vs_autograd.kg"
:" ============================================================"

.p("Comparing Gradient Operators")
@@ -113,6 +113,6 @@ autoGradP::poly10:>2
.p("")
.p(" :> (autograd):")
.p(" - Syntax: function:>point")
.p(" - Method: PyTorch autograd when USE_TORCH=1")
.p(" - Method: PyTorch autograd when backend is torch (--backend torch)")
.p(" - Falls back to numeric without torch")
.p(" - Exact gradients with torch, faster for complex functions")
2 changes: 1 addition & 1 deletion examples/autograd/optimizer_demo.kg
@@ -1,7 +1,7 @@
:" Optimizer Demo - Using Custom Optimizers in KlongPy"
:""
:" This example shows how to use the optimizer classes from optimizers.py"
:" Run with: USE_TORCH=1 kgpy optimizer_demo.kg"
:" Run with: kgpy --backend torch optimizer_demo.kg"

:" Import the optimizer class"
.pyf("./optimizers.py";"SGDOptimizer")