Basic Syntax
Hello World
// Basic variable declaration
let message = "Hello, Charl!"
let x = 42
let y = x * 2
// Function definition
fn greet(name: string) -> string {
    return "Hello, " + name + "!"
}
let greeting = greet("World")
print(greeting)
Functions and Control Flow
// Function with explicit types
fn add(a: int, b: int) -> int {
    return a + b
}
// Control flow
fn max(a: float, b: float) -> float {
    if a > b {
        return a
    } else {
        return b
    }
}
// Loops
fn sum_to_n(n: int) -> int {
    let total = 0
    let i = 1
    while i <= n {
        // Reassign (no `let`) so the outer variables accumulate across iterations
        total = total + i
        i = i + 1
    }
    return total
}
let result = sum_to_n(10) // 55
Tensor Operations
Creating and Manipulating Tensors
// Create tensors with explicit shape
let a = tensor([1.0, 2.0, 3.0, 4.0], [4])
let b = tensor([5.0, 6.0, 7.0, 8.0], [4])
// Initialize tensors
let zeros = tensor_zeros([4])
let ones = tensor_ones([4])
let random = tensor_randn([4])
// Basic operations
let sum = tensor_add(a, b)
let diff = tensor_sub(a, b)
let product = tensor_mul(a, b)
let quotient = tensor_div(a, b)
// Aggregate operations (return tensors)
let total = tensor_sum(a)
let average = tensor_mean(a)
// Extract scalar values for printing
print("Sum:", tensor_item(total))
print("Mean:", tensor_item(average))
Matrix Operations
// Create matrices
let matrix_a = tensor_randn([3, 4])
let matrix_b = tensor_randn([4, 2])
// Matrix multiplication
let result = tensor_matmul(matrix_a, matrix_b)
// Transpose
let transposed = tensor_transpose(matrix_a)
// Reshape
let reshaped = tensor_reshape(matrix_a, [2, 6])
Neural Networks
Building a Simple Neural Network Layer
// Initialize network parameters
let input_size = 4
let hidden_size = 8
// Create weights and biases
let weights = tensor_randn([input_size, hidden_size])
let bias = tensor_zeros([hidden_size])
// Create input
let input = tensor([1.0, 0.5, 0.8, 0.3], [4])
// Forward pass: Linear layer
let linear_output = nn_linear(input, weights, bias)
// Apply activation functions
let relu_output = nn_relu(linear_output)
let sigmoid_output = nn_sigmoid(linear_output)
let tanh_output = nn_tanh(linear_output)
print("After ReLU:", relu_output)
Two-Layer Network
// Initialize parameters
let w1 = tensor_randn([4, 8])
let b1 = tensor_zeros([8])
let w2 = tensor_randn([8, 1])
let b2 = tensor_zeros([1])
// Create input
let input = tensor([1.0, 0.5, 0.8, 0.3], [4])
// Layer 1: Input -> Hidden (ReLU)
let hidden = nn_linear(input, w1, b1)
let activated = nn_relu(hidden)
// Layer 2: Hidden -> Output (Sigmoid)
let output = nn_linear(activated, w2, b2)
let prediction = nn_sigmoid(output)
print("Prediction:", prediction)
Training & Optimization
Computing Loss
// Mean Squared Error
let predictions = tensor([0.8, 0.6, 0.9], [3])
let targets = tensor([1.0, 0.5, 1.0], [3])
let mse = nn_mse_loss(predictions, targets)
print("MSE Loss:", tensor_item(mse))
// Cross-Entropy Loss (for classification)
let logits = tensor([2.0, 1.0, 0.1], [3])
let labels = tensor([1.0, 0.0, 0.0], [3])
let ce_loss = nn_cross_entropy_loss(logits, labels)
print("Cross-Entropy Loss:", tensor_item(ce_loss))
Using Optimizers
// Training data
let X = tensor([0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], [4, 2])
let Y = tensor([0.0, 1.0, 1.0, 0.0], [4, 1])
// Initialize parameters with gradient tracking
// Shapes chosen so predictions come out as [4, 1], matching Y
let W1 = tensor_with_grad([0.5, -0.3], [2, 1])
let b1 = tensor_with_grad([0.1], [1])
// Create optimizer - SGD with learning rate 0.1
let optimizer = sgd_create(0.1)
// Or use Adam optimizer
// let optimizer = adam_create(0.01)
// Or use RMSProp optimizer
// let optimizer = rmsprop_create(0.01)
let epoch = 0
while epoch < 50 {
    // Forward pass
    let h1 = nn_linear(X, W1, b1)
    let pred = nn_sigmoid(h1)
    // Compute loss
    let loss = nn_mse_loss(pred, Y)
    // Backward pass - computes gradients automatically
    tensor_backward(loss)
    // Update parameters with optimizer
    let params = [W1, b1]
    let updated = sgd_step(optimizer, params)
    W1 = updated[0]
    b1 = updated[1]
    if epoch % 10 == 0 {
        print("Epoch " + str(epoch) + ": Loss = " + str(tensor_item(loss)))
    }
    epoch = epoch + 1
}
Automatic Differentiation
// Create tensor with gradient tracking enabled
let x = tensor_with_grad([2.0, 3.0], [2])
let y = tensor_with_grad([4.0, 5.0], [2])
// Perform operations (computation graph is built automatically)
let z = tensor_add(x, y)
let result = tensor_mul(z, 2.0)
// Apply activation
let output = nn_sigmoid(result)
// Compute loss
let target = tensor([0.8, 0.9], [2])
let loss = nn_mse_loss(output, target)
// Backward pass - computes all gradients automatically
tensor_backward(loss)
// Access gradients
let grad_x = tensor_grad(x)
let grad_y = tensor_grad(y)
print("Gradient of x:", grad_x)
print("Gradient of y:", grad_y)
// Extract scalar value from loss tensor
let loss_value = tensor_item(loss)
print("Loss value:", loss_value)
Complete Examples
Explore detailed, step-by-step tutorials with full working code:
Basic Optimization
Learn gradient descent by minimizing a quadratic function: f(x) = (x - 5)²
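The gist of that tutorial, as a minimal sketch built only from functions shown on this page (the tutorial's actual code may differ): treat x as a parameter with gradient tracking, form (x - 5)² from element-wise operations, and let SGD pull x toward the minimum at 5.
// Start x away from the minimum of f(x) = (x - 5)^2
let x = tensor_with_grad([0.0], [1])
let five = tensor([5.0], [1])
let optimizer = sgd_create(0.1)
let step = 0
while step < 100 {
    // Build the loss (x - 5)^2 from element-wise operations
    let diff = tensor_sub(x, five)
    let loss = tensor_mul(diff, diff)
    // Compute d(loss)/dx and take one SGD step
    tensor_backward(loss)
    let params = [x]
    let updated = sgd_step(optimizer, params)
    x = updated[0]
    step = step + 1
}
print("x after optimization:", tensor_item(x)) // should approach 5.0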
Neural Network Training
Build and train a complete 2-layer neural network with backpropagation
XOR Problem
Solve the classic non-linearly separable XOR problem with a neural network
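These last two tutorials overlap: XOR is not linearly separable, so a single linear layer cannot solve it, and training a 2-layer network with backpropagation is exactly how it is solved. Below is a rough sketch that combines the two-layer network and training-loop patterns shown earlier; it assumes sgd_step() accepts a four-element parameter list just as it accepts two above, and the actual tutorial code may differ.
// XOR inputs and targets
let X = tensor([0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], [4, 2])
let Y = tensor([0.0, 1.0, 1.0, 0.0], [4, 1])
// Trainable parameters: 2 inputs -> 2 hidden units -> 1 output
let W1 = tensor_with_grad([0.5, -0.4, 0.3, 0.6], [2, 2])
let b1 = tensor_with_grad([0.0, 0.0], [2])
let W2 = tensor_with_grad([0.7, -0.5], [2, 1])
let b2 = tensor_with_grad([0.0], [1])
let optimizer = sgd_create(0.5)
let epoch = 0
while epoch < 500 {
    // Forward pass: input -> hidden (ReLU) -> output (sigmoid)
    let hidden = nn_relu(nn_linear(X, W1, b1))
    let pred = nn_sigmoid(nn_linear(hidden, W2, b2))
    // Loss, gradients, and one optimizer step over all four parameters
    let loss = nn_mse_loss(pred, Y)
    tensor_backward(loss)
    let params = [W1, b1, W2, b2]
    let updated = sgd_step(optimizer, params)
    W1 = updated[0]
    b1 = updated[1]
    W2 = updated[2]
    b2 = updated[3]
    if epoch % 100 == 0 {
        print("Epoch " + str(epoch) + ": Loss = " + str(tensor_item(loss)))
    }
    epoch = epoch + 1
}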
Core Functions Available in v0.3.0
Charl includes these built-in functions for machine learning:
Tensor Creation & Operations
- tensor() - Create tensor from data
- tensor_zeros(), tensor_ones(), tensor_randn()
- tensor_add(), tensor_sub(), tensor_mul(), tensor_div()
- tensor_matmul() - Matrix multiplication
- tensor_sum(), tensor_mean()
- tensor_reshape(), tensor_transpose()
Automatic Differentiation
- tensor_with_grad() - Enable gradient tracking
- tensor_backward() - Compute gradients
- tensor_grad() - Access gradients
- tensor_item() - Extract scalar value
- tensor_zero_grad() - Reset gradients
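tensor_zero_grad() is the one function in this group not used elsewhere on this page. A hypothetical usage sketch, assuming it takes the tensor whose accumulated gradient should be cleared, is to call it on each parameter before the next backward pass so gradients from earlier iterations do not accumulate:
// Assumed signature: clears any gradient previously accumulated on the tensor
tensor_zero_grad(W1)
tensor_zero_grad(b1)
let pred = nn_sigmoid(nn_linear(X, W1, b1))
let loss = nn_mse_loss(pred, Y)
tensor_backward(loss)
print("Fresh gradient for W1:", tensor_grad(W1))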
Neural Network Layers
- nn_linear() - Linear/dense layer
- nn_relu() - ReLU activation
- nn_sigmoid() - Sigmoid activation
- nn_tanh() - Tanh activation
- nn_softmax() - Softmax activation
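nn_softmax() is not demonstrated above. Assuming it maps a tensor of logits to a probability distribution, as a softmax normally does, usage would look like this:
// Turn raw scores into probabilities that sum to 1
let logits = tensor([2.0, 1.0, 0.1], [3])
let probs = nn_softmax(logits)
print("Probabilities:", probs)
print("Total:", tensor_item(tensor_sum(probs))) // 1.0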
Loss & Optimization
- nn_mse_loss() - Mean Squared Error
- nn_cross_entropy_loss() - Cross-Entropy
- sgd_create(), sgd_step() - SGD optimizer
- adam_create(), adam_step() - Adam optimizer
- rmsprop_create(), rmsprop_step() - RMSProp
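adam_step() and rmsprop_step() are not shown in the training loop above, which uses SGD. Assuming they follow the same calling convention as sgd_step() (the commented-out adam_create/rmsprop_create lines in the loop suggest the optimizers are interchangeable), swapping Adam in would look like this:
// One training step with Adam instead of SGD (assumed to mirror sgd_step)
let optimizer = adam_create(0.01)
let pred = nn_sigmoid(nn_linear(X, W1, b1))
let loss = nn_mse_loss(pred, Y)
tensor_backward(loss)
let updated = adam_step(optimizer, [W1, b1])
W1 = updated[0]
b1 = updated[1]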