diff --git a/src/functions/loss_functions.rs b/src/functions/loss_functions.rs
index a5d8a94..383ae21 100644
--- a/src/functions/loss_functions.rs
+++ b/src/functions/loss_functions.rs
@@ -1,4 +1,4 @@
-use ndarray::{Array1, ArrayView1};
+use ndarray::Array1;
 
 pub enum Type {
     MSE,
@@ -6,8 +6,8 @@ pub enum Type {
 }
 
 type LossFuncTuple = (
-    fn(ArrayView1<f64>, ArrayView1<f64>) -> f64,
-    fn(ArrayView1<f64>, ArrayView1<f64>) -> Array1<f64>,
+    fn(&Array1<f64>, &Array1<f64>) -> f64,
+    fn(&Array1<f64>, &Array1<f64>) -> Array1<f64>,
 );
 
 pub fn parse_type(t: Type) -> LossFuncTuple {
@@ -17,23 +17,23 @@ pub fn parse_type(t: Type) -> LossFuncTuple {
     }
 }
 
-pub fn mse(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> f64 {
-    let mut temp = &y_true - &y_pred;
+pub fn mse(y_true: &Array1<f64>, y_pred: &Array1<f64>) -> f64 {
+    let mut temp = y_true - y_pred;
     temp.mapv_inplace(|x| x * x);
     let mut sum = 0.0;
-    for i in 0..temp.len() {
-        sum += temp.get(i).unwrap();
+    for entry in temp.iter() {
+        sum += entry;
     }
     sum / temp.len() as f64
 }
 
-pub fn mse_prime(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> Array1<f64> {
-    let temp = &y_true - &y_pred;
+pub fn mse_prime(y_true: &Array1<f64>, y_pred: &Array1<f64>) -> Array1<f64> {
+    let temp = y_true - y_pred;
     temp / (y_true.len() as f64 / 2.0)
 }
 
-pub fn mae(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> f64 {
-    let temp = &y_true - &y_pred;
+pub fn mae(y_true: &Array1<f64>, y_pred: &Array1<f64>) -> f64 {
+    let temp = y_true - y_pred;
     let mut sum = 0.0;
     for i in 0..temp.len() {
         sum += temp.get(i).unwrap().abs();
@@ -41,7 +41,7 @@ pub fn mae(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> f64 {
     sum / temp.len() as f64
 }
 
-pub fn mae_prime(y_true: ArrayView1<f64>, y_pred: ArrayView1<f64>) -> Array1<f64> {
+pub fn mae_prime(y_true: &Array1<f64>, y_pred: &Array1<f64>) -> Array1<f64> {
     let mut result = Array1::zeros(y_true.raw_dim());
     for i in 0..result.len() {
         if y_true.get(i).unwrap() < y_pred.get(i).unwrap() {
diff --git a/src/layers/activation_layer.rs b/src/layers/activation_layer.rs
index 47a0c03..48e2502 100644
--- a/src/layers/activation_layer.rs
+++ b/src/layers/activation_layer.rs
@@ -1,4 +1,4 @@
-use ndarray::{arr1, Array1, ArrayView1};
+use ndarray::{arr1, Array1};
 
 use super::Layer;
 use crate::functions::activation_functions::*;
@@ -23,15 +23,15 @@ impl ActivationLayer {
 }
 
 impl Layer for ActivationLayer {
-    fn forward_pass(&mut self, input: ArrayView1<f64>) -> Array1<f64> {
-        self.input = input.to_owned();
+    fn forward_pass(&mut self, input: Array1<f64>) -> Array1<f64> {
+        self.input = input;
         // output isn't needed elsewhere
         // self.output = (self.activation)(&self.input);
         // self.output.clone()
         (self.activation)(&self.input)
     }
 
-    fn backward_pass(&mut self, output_error: ArrayView1<f64>, _learning_rate: f64) -> Array1<f64> {
+    fn backward_pass(&mut self, output_error: Array1<f64>, _learning_rate: f64) -> Array1<f64> {
         (self.activation_prime)(&self.input) * output_error
     }
 }
diff --git a/src/layers/fc_layer.rs b/src/layers/fc_layer.rs
index af0330d..8f4da68 100644
--- a/src/layers/fc_layer.rs
+++ b/src/layers/fc_layer.rs
@@ -1,6 +1,6 @@
 extern crate ndarray;
 
-use ndarray::{arr1, arr2, Array, Array1, Array2, ArrayView1, ShapeBuilder};
+use ndarray::{arr1, arr2, Array, Array1, Array2, ShapeBuilder};
 use ndarray_rand::rand_distr::{Normal, Uniform};
 use ndarray_rand::RandomExt;
 
@@ -71,30 +71,25 @@ impl FCLayer {
 }
 
 impl Layer for FCLayer {
-    fn forward_pass(&mut self, input: ArrayView1<f64>) -> Array1<f64> {
+    fn forward_pass(&mut self, input: Array1<f64>) -> Array1<f64> {
         if !self.is_initialized {
             self.initialize(input.len());
         }
-        self.input = input.to_owned();
+        self.input = input;
         // output isn't needed elsewhere
         // self.output = self.input.dot(&self.weights) + &self.biases;
         // self.output.clone()
         self.input.dot(&self.weights) + &self.biases
     }
 
-    fn backward_pass(&mut self, output_error: ArrayView1<f64>, learning_rate: f64) -> Array1<f64> {
+    fn backward_pass(&mut self, output_error: Array1<f64>, learning_rate: f64) -> Array1<f64> {
         let input_error = output_error.dot(&self.weights.t());
         let delta_weights = self
             .input
-            .to_owned()
-            .into_shape((self.input.len(), 1usize))
+            .to_shape((self.input.len(), 1usize))
             .unwrap()
-            .dot(
-                &output_error
-                    .into_shape((1usize, output_error.len()))
-                    .unwrap(),
-            );
+            .dot(&output_error.to_shape((1usize, output_error.len())).unwrap());
         self.weights = &self.weights + learning_rate * &delta_weights;
         self.biases = &self.biases + learning_rate * &output_error;
         input_error
diff --git a/src/layers/mod.rs b/src/layers/mod.rs
index 53c3eee..cbcfb28 100644
--- a/src/layers/mod.rs
+++ b/src/layers/mod.rs
@@ -1,9 +1,9 @@
-use ndarray::{Array1, ArrayView1};
+use ndarray::Array1;
 
 pub mod activation_layer;
 pub mod fc_layer;
 
 pub trait Layer {
-    fn forward_pass(&mut self, input: ArrayView1<f64>) -> Array1<f64>;
-    fn backward_pass(&mut self, output_error: ArrayView1<f64>, learning_rate: f64) -> Array1<f64>;
+    fn forward_pass(&mut self, input: Array1<f64>) -> Array1<f64>;
+    fn backward_pass(&mut self, output_error: Array1<f64>, learning_rate: f64) -> Array1<f64>;
 }
diff --git a/src/lib.rs b/src/lib.rs
index bbff9d5..2b3f2af 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -3,12 +3,12 @@ pub mod layers;
 
 use functions::loss_functions::{self, parse_type};
 use layers::*;
-use ndarray::{Array1, ArrayView1};
+use ndarray::Array1;
 
 pub struct Network {
     layers: Vec<Box<dyn Layer>>,
-    loss: fn(ArrayView1<f64>, ArrayView1<f64>) -> f64,
-    loss_prime: fn(ArrayView1<f64>, ArrayView1<f64>) -> Array1<f64>,
+    loss: fn(&Array1<f64>, &Array1<f64>) -> f64,
+    loss_prime: fn(&Array1<f64>, &Array1<f64>) -> Array1<f64>,
 }
 
 impl Network {
@@ -30,10 +30,9 @@ impl Network {
         let mut result = vec![];
 
         for input in inputs.iter() {
-            let mut output = Array1::default(inputs[0].raw_dim());
-            output.assign(input);
+            let mut output = input.to_owned();
             for layer in &mut self.layers {
-                output = layer.forward_pass(output.view());
+                output = layer.forward_pass(output);
             }
             result.push(output);
         }
@@ -57,22 +56,21 @@
             let mut err = 0.0;
             for j in 0..num_samples {
                 // forward propagation
-                let mut output = Array1::default(x_train[0].raw_dim());
-                output.assign(&x_train[j]);
+                let mut output = x_train[j].to_owned();
                 for layer in self.layers.iter_mut() {
-                    output = layer.forward_pass(output.view());
+                    output = layer.forward_pass(output);
                 }
 
                 // compute loss
-                err += (self.loss)(y_train[j].view(), output.view());
+                err += (self.loss)(&y_train[j], &output);
 
                 // backward propagation
-                let mut error = (self.loss_prime)(y_train[j].view(), output.view());
+                let mut error = (self.loss_prime)(&y_train[j], &output);
                 for layer in self.layers.iter_mut().rev() {
                     if trivial_optimize {
-                        error = layer.backward_pass(error.view(), learning_rate / (i + 1) as f64);
+                        error = layer.backward_pass(error, learning_rate / (i + 1) as f64);
                     } else {
-                        error = layer.backward_pass(error.view(), learning_rate);
+                        error = layer.backward_pass(error, learning_rate);
                     }
                 }
             }
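Note: the snippet below is an illustrative sketch of how calling code adapts to the new signatures, not part of the patch. It assumes the Array1<f64> element type used throughout the crate and the module paths shown above (crate::layers::Layer, crate::functions::loss_functions::mse); the Identity layer and the demo function are made-up examples, written as they might appear in a test inside the crate.

use ndarray::{arr1, Array1};

use crate::functions::loss_functions::mse;
use crate::layers::Layer;

// Hypothetical no-op layer (not in the repo): with this patch, inputs and
// errors are moved in as owned Array1<f64> values instead of ArrayView1.
struct Identity;

impl Layer for Identity {
    fn forward_pass(&mut self, input: Array1<f64>) -> Array1<f64> {
        input
    }

    fn backward_pass(&mut self, output_error: Array1<f64>, _learning_rate: f64) -> Array1<f64> {
        output_error
    }
}

fn demo() {
    let mut layer = Identity;
    // forward_pass now consumes the array; keep a clone if it is needed afterwards
    let y_pred = layer.forward_pass(arr1(&[1.1, 1.9, 3.2]));
    let y_true = arr1(&[1.0, 2.0, 3.0]);
    // loss functions now borrow Array1<f64> instead of taking views
    let err = mse(&y_true, &y_pred);
    assert!(err >= 0.0);
}

In short: layers take ownership of their input instead of copying it out of a view, and the loss functions borrow both arrays.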