Initial commit

lluni 2023-01-15 23:18:58 +01:00
commit 961626616f
Signed by: lluni
GPG key ID: ACEEB468BC325D35
12 changed files with 569 additions and 0 deletions

100
src/functions/activation_functions.rs Normal file

@@ -0,0 +1,100 @@
use ndarray::Array1;
pub enum Type {
Identity,
Logistic,
Tanh,
Relu,
LeakyRelu
}
pub fn parse_type(t: Type) -> (fn(&Array1<f64>) -> Array1<f64>, fn(&Array1<f64>) -> Array1<f64>) {
match t {
Type::Identity => (identity, identity_prime),
Type::Logistic => (logistic, logistic_prime),
Type::Tanh => (tanh, tanh_prime),
Type::Relu => (relu, relu_prime),
Type::LeakyRelu => (leaky_relu, leaky_relu_prime)
}
}
pub fn identity(matrix: &Array1<f64>) -> Array1<f64> {
matrix.to_owned()
}
pub fn identity_prime(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
*x = 1.0;
}
result
}
fn sigmoid(x: f64) -> f64 {
1.0 / (1.0 + (-x).exp())
}
pub fn logistic(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
*x = sigmoid(*x);
}
result
}
pub fn logistic_prime(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
// derivative of the logistic function: sigmoid(x) * (1 - sigmoid(x))
*x = sigmoid(*x) * (1.0 - sigmoid(*x));
}
result
}
pub fn tanh(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
*x = (*x).tanh();
}
result
}
pub fn tanh_prime(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
*x = 1.0 - (*x).tanh().powi(2);
}
result
}
pub fn relu(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
*x = (*x).max(0.0);
}
result
}
pub fn relu_prime(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
*x = if (*x) <= 0.0 {0.0} else {1.0};
}
result
}
pub fn leaky_relu(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
*x = (*x).max(0.001 * (*x));
}
result
}
pub fn leaky_relu_prime(matrix: &Array1<f64>) -> Array1<f64> {
let mut result = matrix.clone();
for x in result.iter_mut() {
*x = if (*x) <= 0.0 {0.001} else {1.0};
}
result
}
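
Not part of the commit: a minimal usage sketch for the activation functions above. It assumes it is appended to this file, so `Type` and `parse_type` are already in scope; the input values are arbitrary.

// hypothetical example, not part of this commit
fn demo_activations() {
    use ndarray::arr1;

    let x = arr1(&[-2.0, -0.5, 0.0, 0.5, 2.0]);
    let (f, f_prime) = parse_type(Type::LeakyRelu);
    let y = f(&x);        // leaky_relu: negative entries scaled by 0.001
    let dy = f_prime(&x); // elementwise derivative: 0.001 for x <= 0, 1.0 otherwise
    println!("y = {y}, dy = {dy}");
}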

51
src/functions/loss_functions.rs Normal file

@@ -0,0 +1,51 @@
use ndarray::{Array1, ArrayView1};
pub enum Type {
MSE,
MAE
}
pub fn parse_type(t: Type) -> (fn(ArrayView1<f64>, ArrayView1<f64>) -> f64, fn(ArrayView1<f64>, ArrayView1<f64>) -> Array1<f64>) {
match t {
Type::MSE => (mse, mse_prime),
Type::MAE => (mae, mae_prime)
}
}
pub fn mse(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> f64 {
// mean of the squared element-wise differences
let diff = &y_true - &y_pred;
diff.mapv(|x| x * x).sum() / diff.len() as f64
}
pub fn mse_prime(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> Array1<f64> {
// (2/n) * (y_true - y_pred): the negative gradient of the MSE w.r.t. y_pred,
// which matches the additive parameter updates in the layers
let temp = &y_true - &y_pred;
temp / (y_true.len() as f64 / 2.0)
}
pub fn mae(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> f64 {
// mean of the absolute element-wise differences
let diff = &y_true - &y_pred;
diff.mapv(f64::abs).sum() / diff.len() as f64
}
pub fn mae_prime(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> Array1<f64> {
// sign of (y_true - y_pred): like mse_prime, this is the negative gradient
// w.r.t. y_pred, so the layers' additive updates move predictions towards the targets
let mut result = Array1::zeros(y_true.raw_dim());
for i in 0..result.len() {
if y_true.get(i).unwrap() < y_pred.get(i).unwrap() {
*result.get_mut(i).unwrap() = -1.0;
} else {
*result.get_mut(i).unwrap() = 1.0;
}
}
result
}
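
Not part of the commit: a small sketch of the loss API, again assuming it is appended to this file so that `Type` and `parse_type` are in scope. Note that `mse_prime` returns the negative gradient, which is why the layers later add rather than subtract the update.

// hypothetical example, not part of this commit
fn demo_losses() {
    use ndarray::arr1;

    let y_true = arr1(&[1.0, 0.0, 1.0]);
    let y_pred = arr1(&[0.9, 0.2, 0.7]);
    let (loss, loss_prime) = parse_type(Type::MSE);
    let err = loss(y_true.view(), y_pred.view());        // mean of squared differences
    let grad = loss_prime(y_true.view(), y_pred.view()); // (2/n) * (y_true - y_pred)
    println!("err = {err}, grad = {grad}");
}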

2
src/functions/mod.rs Normal file

@@ -0,0 +1,2 @@
pub mod activation_functions;
pub mod loss_functions;

40
src/layers/activation_layer.rs Normal file

@@ -0,0 +1,40 @@
use ndarray::{Array1, arr1, ArrayView1};
use crate::functions::activation_functions::*;
use super::Layer;
pub struct ActivationLayer {
input: Array1<f64>,
output: Array1<f64>,
activation: fn(&Array1<f64>) -> Array1<f64>,
activation_prime: fn(&Array1<f64>) -> Array1<f64>
}
impl ActivationLayer {
pub fn new(activation_fn: Type) -> Self {
let (activation, activation_prime) = parse_type(activation_fn);
ActivationLayer {
input: arr1(&[]),
output: arr1(&[]),
activation,
activation_prime
}
}
}
impl Layer for ActivationLayer {
fn forward_pass(&mut self, input: ArrayView1<f64>) -> Array1<f64> {
self.input = input.to_owned();
self.output = (self.activation)(&self.input);
self.output.clone()
}
fn backward_pass(&mut self, output_error: ArrayView1<f64>, _learning_rate: f64) -> Array1<f64> {
// element-wise product: activation'(input) * output_error; no parameters to update here
let mut temp = (self.activation_prime)(&self.input);
temp.zip_mut_with(&output_error, |x, y| *x *= y);
temp
}
}
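
Not part of the commit: a short sketch exercising this layer on its own, assuming it is appended to this file where `arr1`, `Type`, `Layer` and `ActivationLayer` are already in scope. The learning rate is ignored because the layer has no trainable parameters.

// hypothetical example, not part of this commit
fn demo_activation_layer() {
    let mut layer = ActivationLayer::new(Type::Tanh);

    let x = arr1(&[0.5, -1.0, 2.0]);
    let out = layer.forward_pass(x.view());               // tanh applied elementwise
    let upstream = arr1(&[0.1, 0.1, 0.1]);
    let back = layer.backward_pass(upstream.view(), 0.0); // tanh'(x) * upstream
    println!("out = {out}, back = {back}");
}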

104
src/layers/fc_layer.rs Normal file

@@ -0,0 +1,104 @@
extern crate ndarray;
use ndarray::{Array1, Array2, arr1, arr2, Array, ArrayView1, ShapeBuilder};
use ndarray_rand::RandomExt;
use ndarray_rand::rand_distr::{Normal, Uniform};
use super::Layer;
pub enum Initializer {
Zeros,
Ones,
Gaussian(f64, f64),
GaussianWFactor(f64, f64, f64),
Uniform(f64, f64)
}
impl Initializer {
pub fn init<Sh, D>(&self, shape: Sh) -> Array<f64, D>
where
Sh: ShapeBuilder<Dim = D>, D: ndarray::Dimension
{
match self {
Self::Zeros => Array::zeros(shape),
Self::Ones => Array::ones(shape),
Self::Gaussian(mean, stddev) => Array::random(shape, Normal::new(*mean, *stddev).unwrap()),
Self::GaussianWFactor(mean, stddev, factor)
=> Array::random(shape, Normal::new(*mean, *stddev).unwrap()) * *factor,
Self::Uniform(low, high) => Array::random(shape, Uniform::new(low, high))
}
}
}
pub struct FCLayer {
num_neurons: usize,
is_initialized: bool,
weight_initializer: Initializer,
bias_initializer: Initializer,
input: Array1<f64>,
output: Array1<f64>,
weights: Array2<f64>,
biases: Array1<f64>,
}
impl FCLayer {
pub fn new(num_neurons: usize, weight_initializer: Initializer, bias_initializer: Initializer) -> Self {
FCLayer {
num_neurons,
is_initialized: false,
weight_initializer,
bias_initializer,
input: arr1(&[]),
output: arr1(&[]),
weights: arr2(&[[]]),
biases: arr1(&[])
}
}
fn initialize(&mut self, input_size: usize) {
self.weights = self.weight_initializer.init((input_size, self.num_neurons));
self.biases = self.bias_initializer.init(self.num_neurons);
self.is_initialized = true;
}
}
impl Layer for FCLayer {
fn forward_pass(&mut self, input: ArrayView1<f64>) -> Array1<f64> {
if !self.is_initialized {
self.initialize(input.len());
}
self.input = input.to_owned();
self.output = self.input.dot(&self.weights) + &self.biases;
self.output.clone()
}
fn backward_pass(&mut self, output_error: ArrayView1<f64>, learning_rate: f64) -> Array1<f64> {
// error to propagate to the previous layer
let input_error = output_error.dot(&self.weights.t());
// outer product of the layer input (as a column) and the output error (as a row)
let delta_weights =
self.input.to_owned().into_shape((self.input.len(), 1usize)).unwrap()
.dot(&output_error.into_shape((1usize, output_error.len())).unwrap());
// `output_error` carries the negative gradient of the loss (see loss_functions),
// so the updates are additive
self.weights = &self.weights + learning_rate * &delta_weights;
self.biases = &self.biases + learning_rate * &output_error;
input_error
}
}
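
Not part of the commit: a brief sketch of the initializers and a single forward/backward step on a standalone FCLayer, assuming it is appended to this file where `arr1`, `FCLayer`, `Initializer` and the `Layer` trait are already in scope. Sizes, values and the learning rate are arbitrary; the weights are only allocated on the first forward pass.

// hypothetical example, not part of this commit
fn demo_fc_layer() {
    // 4 inputs -> 3 neurons; small Gaussian weights, zero biases
    let mut layer = FCLayer::new(
        3,
        Initializer::GaussianWFactor(0.0, 1.0, 0.1),
        Initializer::Zeros,
    );

    let x = arr1(&[0.2, -0.4, 0.1, 0.8]);
    let out = layer.forward_pass(x.view());                // x . W + b, length 3
    let upstream = arr1(&[0.05, -0.02, 0.1]);              // error from the next layer
    let back = layer.backward_pass(upstream.view(), 0.01); // updates W and b, returns
                                                           // the error for the previous layer
    println!("out = {out}, back = {back}");
}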

9
src/layers/mod.rs Normal file

@@ -0,0 +1,9 @@
use ndarray::{Array1, ArrayView1};
pub mod activation_layer;
pub mod fc_layer;
pub trait Layer {
fn forward_pass(&mut self, input: ArrayView1<f64>) -> Array1<f64>;
fn backward_pass(&mut self, output_error: ArrayView1<f64>, learning_rate: f64) -> Array1<f64>;
}

77
src/lib.rs Normal file

@@ -0,0 +1,77 @@
pub mod functions;
pub mod layers;
use functions::loss_functions::{self, parse_type};
use layers::*;
use ndarray::{Array1, ArrayView1};
pub struct Network {
layers: Vec<Box<dyn Layer>>,
loss: fn(ArrayView1<f64>, ArrayView1<f64>) -> f64,
loss_prime: fn(ArrayView1<f64>, ArrayView1<f64>) -> Array1<f64>
}
impl Network {
pub fn new(loss_fn: loss_functions::Type) -> Self {
let (loss, loss_prime) = parse_type(loss_fn);
Network {
layers: vec![],
loss,
loss_prime
}
}
pub fn add_layer(&mut self, layer: Box<dyn Layer>) {
self.layers.push(layer);
}
pub fn predict(&mut self, inputs: Vec<Array1<f64>>) -> Vec<Array1<f64>> {
assert!(inputs.len() > 0);
let mut result = vec![];
for input in inputs.iter() {
let mut output = input.to_owned();
for layer in &mut self.layers {
output = layer.forward_pass(output.view());
}
result.push(output.to_owned());
}
result
}
pub fn fit(&mut self, x_train: Vec<Array1<f64>>, y_train: Vec<Array1<f64>>, epochs: usize, learning_rate: f64, trivial_optimize: bool) {
assert!(x_train.len() > 0);
assert!(x_train.len() == y_train.len());
let num_samples = x_train.len();
for i in 0..epochs {
let mut err = 0.0;
for j in 0..num_samples {
// forward propagation
let mut output = x_train[j].to_owned();
for layer in self.layers.iter_mut() {
output = layer.forward_pass(output.view());
}
// compute loss
err += (self.loss)(y_train[j].view(), output.view());
// backward propagation
let mut error = (self.loss_prime)(y_train[j].view(), output.view());
for layer in self.layers.iter_mut().rev() {
if trivial_optimize {
error = layer.backward_pass(error.view(), learning_rate / (i+1) as f64);
} else {
error = layer.backward_pass(error.view(), learning_rate);
}
}
}
// calculate average error on all samples
err /= num_samples as f64;
println!("epoch {}/{} error={}", i+1, epochs, err);
}
}
}
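
Not part of the commit: a sketch of how the pieces fit together, written as a test module that could sit at the bottom of lib.rs. The XOR setup, layer sizes, epoch count and learning rate are illustrative choices, not taken from this commit.

// hypothetical example, not part of this commit
#[cfg(test)]
mod tests {
    use super::Network;
    use crate::functions::{activation_functions, loss_functions};
    use crate::layers::activation_layer::ActivationLayer;
    use crate::layers::fc_layer::{FCLayer, Initializer};
    use ndarray::arr1;

    #[test]
    fn xor_smoke_test() {
        // the classic XOR toy problem: 2 inputs -> 3 hidden tanh units -> 1 output
        let x_train = vec![
            arr1(&[0.0, 0.0]),
            arr1(&[0.0, 1.0]),
            arr1(&[1.0, 0.0]),
            arr1(&[1.0, 1.0]),
        ];
        let y_train = vec![arr1(&[0.0]), arr1(&[1.0]), arr1(&[1.0]), arr1(&[0.0])];

        let mut net = Network::new(loss_functions::Type::MSE);
        net.add_layer(Box::new(FCLayer::new(
            3,
            Initializer::Gaussian(0.0, 1.0),
            Initializer::Zeros,
        )));
        net.add_layer(Box::new(ActivationLayer::new(activation_functions::Type::Tanh)));
        net.add_layer(Box::new(FCLayer::new(
            1,
            Initializer::Gaussian(0.0, 1.0),
            Initializer::Zeros,
        )));
        net.add_layer(Box::new(ActivationLayer::new(activation_functions::Type::Tanh)));

        net.fit(x_train.clone(), y_train, 500, 0.1, false);

        let predictions = net.predict(x_train);
        assert_eq!(predictions.len(), 4);
    }
}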