Adding logistic regression & optimizing the gradient descent algorithm
prkbuilds committed Oct 24, 2024
1 parent 274ca13 commit 21850fe
Showing 4 changed files with 66 additions and 1 deletion.
1 change: 1 addition & 0 deletions DIRECTORY.md
@@ -156,6 +156,7 @@
* [Cholesky](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/cholesky.rs)
* [K Means](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/k_means.rs)
* [Linear Regression](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/linear_regression.rs)
* [Logistic Regression](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/logistic_regression.rs)
* Loss Function
* [Average Margin Ranking Loss](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/loss_function/average_margin_ranking_loss.rs)
* [Hinge Loss](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/loss_function/hinge_loss.rs)
62 changes: 62 additions & 0 deletions src/machine_learning/logistic_regression.rs
@@ -0,0 +1,62 @@
use super::optimization::gradient_descent;
use std::f64::consts::E;

/// Returns the weights after performing logistic regression on the input data points.
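///
/// `data_points` is a list of `(features, label)` pairs with labels expected to be 0.0 or 1.0;
/// `iterations` and `learning_rate` are forwarded to gradient descent.
/// Returns `None` when `data_points` is empty.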
pub fn logistic_regression(
    data_points: Vec<(Vec<f64>, f64)>,
    iterations: usize,
    learning_rate: f64,
) -> Option<Vec<f64>> {
    if data_points.is_empty() {
        return None;
    }

    let num_features = data_points[0].0.len();
    let mut params = vec![0.0; num_features];

    let derivative_fn = |params: &[f64]| derivative(params, &data_points);

    gradient_descent(derivative_fn, &mut params, learning_rate, iterations as i32);

    Some(params)
}

/// Computes the gradient of the logistic (log-loss) objective with respect to `params`:
/// the sum over all data points of `(sigmoid(params . features) - y) * features`.
fn derivative(params: &[f64], data_points: &[(Vec<f64>, f64)]) -> Vec<f64> {
    let num_features = params.len();
    let mut gradients = vec![0.0; num_features];

    for (features, y_i) in data_points {
        // Linear score z = params . features, mapped to a probability by the sigmoid 1 / (1 + e^(-z)).
        let z = params.iter().zip(features).map(|(p, x)| p * x).sum::<f64>();
        let prediction = 1.0 / (1.0 + E.powf(-z));

        for (i, x_i) in features.iter().enumerate() {
            gradients[i] += (prediction - y_i) * x_i;
        }
    }

    gradients
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_logistic_regression() {
        let data = vec![
            (vec![0.0, 0.0], 0.0),
            (vec![1.0, 1.0], 1.0),
            (vec![2.0, 2.0], 1.0),
        ];
        let result = logistic_regression(data, 10000, 0.1);
        assert!(result.is_some());
        let params = result.unwrap();
        // Both features are identical in every data point, so both weights converge to the same value.
        assert!((params[0] - 6.902976808251308).abs() < 1e-6);
        assert!((params[1] - 6.902976808251308).abs() < 1e-6);
    }

    #[test]
    fn test_empty_list_logistic_regression() {
        assert_eq!(logistic_regression(vec![], 10000, 0.1), None);
    }
}
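As a usage sketch (not part of the commit), the returned weights can be turned into a class probability by applying the same sigmoid used in `derivative`; the `predict` helper and `main` below are hypothetical illustrations and assume `logistic_regression` is in scope:

fn predict(params: &[f64], features: &[f64]) -> f64 {
    // Same model as in `derivative`: sigmoid of the dot product of weights and features.
    let z: f64 = params.iter().zip(features).map(|(p, x)| p * x).sum();
    1.0 / (1.0 + (-z).exp())
}

fn main() {
    let data = vec![
        (vec![0.0, 0.0], 0.0),
        (vec![1.0, 1.0], 1.0),
        (vec![2.0, 2.0], 1.0),
    ];
    let params = logistic_regression(data, 10000, 0.1).unwrap();
    // Probability that a point with features [1.5, 1.5] belongs to class 1.
    assert!(predict(&params, &[1.5, 1.5]) > 0.5);
}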
2 changes: 2 additions & 0 deletions src/machine_learning/mod.rs
@@ -1,12 +1,14 @@
mod cholesky;
mod k_means;
mod linear_regression;
mod logistic_regression;
mod loss_function;
mod optimization;

pub use self::cholesky::cholesky;
pub use self::k_means::k_means;
pub use self::linear_regression::linear_regression;
pub use self::logistic_regression::logistic_regression;
pub use self::loss_function::average_margin_ranking_loss;
pub use self::loss_function::hinge_loss;
pub use self::loss_function::huber_loss;
2 changes: 1 addition & 1 deletion src/machine_learning/optimization/gradient_descent.rs
@@ -23,7 +23,7 @@
/// A reference to the optimized parameter vector `x`.

pub fn gradient_descent(
-   derivative_fn: fn(&[f64]) -> Vec<f64>,
+   derivative_fn: impl Fn(&[f64]) -> Vec<f64>,
    x: &mut Vec<f64>,
    learning_rate: f64,
    num_iterations: i32,
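The signature change above is what lets `gradient_descent` accept the closure built in `logistic_regression`: that closure captures `data_points`, so it implements the `Fn` trait but cannot be coerced to a plain `fn` pointer. A minimal standalone sketch of the pattern follows; the `apply` helper and `main` are illustrative assumptions, not part of the repository:

fn apply(derivative_fn: impl Fn(&[f64]) -> Vec<f64>, params: &[f64]) -> Vec<f64> {
    // Mirrors the updated `gradient_descent` parameter: any closure or function
    // matching the signature is accepted, including capturing closures.
    derivative_fn(params)
}

fn main() {
    let data = vec![1.0, 2.0, 3.0];
    // Captures `data`, so it implements `Fn` but is not a plain `fn` pointer;
    // the old `fn(&[f64]) -> Vec<f64>` parameter type would have rejected it.
    let derivative_fn = |params: &[f64]| {
        params
            .iter()
            .zip(&data)
            .map(|(p, d)| p * d)
            .collect::<Vec<f64>>()
    };
    let grads = apply(derivative_fn, &[2.0, 2.0, 2.0]);
    assert_eq!(grads, vec![2.0, 4.0, 6.0]);
}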
