Improve names and documentation (#50)
* Link tutorial from top level doc
* Add top level 1d gd example and link to blog.
* Rename WdIterable to Weight and similarly other structs.
* Also make WriteToFile private for now.
daniel-vainsencher authored Jun 1, 2021
1 parent f442997 commit a3372da
Showing 9 changed files with 229 additions and 116 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "iterative_methods"
version = "0.2.0"
version = "0.2.1"
authors = ["Daniel Vainsencher <[email protected]>", "Daniel Fox <[email protected]>"]
edition = "2018"
description = "Iterative methods and associated utilities as StreamingIterators."
39 changes: 39 additions & 0 deletions examples/derivative_descent.rs
@@ -0,0 +1,39 @@
//! Example from top level crate documentation
use iterative_methods::derivative_descent::*;
use iterative_methods::*;
use streaming_iterator::*;

fn main() {
    // Problem: minimize the convex parabola f(x) = x^2 + x
    let function = |x| x * x + x;

    // An iterative solution by gradient descent
    let derivative = |x| 2.0 * x + 1.0;
    let step_size = 0.2;
    let x_0 = 2.0;

    // Au naturale:
    let mut x = x_0;
    for i in 0..10 {
        x -= step_size * derivative(x);
        println!("x_{} = {:.2}; f(x_{}) = {:.4}", i, x, i, x * x + x);
    }

    // Using replaceable components:
    let dd = DerivativeDescent::new(function, derivative, step_size, x_0);
    let dd = enumerate(dd);
    let mut dd = dd.take(10);
    while let Some(&Numbered {
        item: Some(ref curr),
        count,
    }) = dd.next()
    {
        println!(
            "x_{} = {:.2}; f(x_{}) = {:.4}",
            count,
            curr.x,
            count,
            curr.value()
        );
    }
}
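For readers following this example, here is a minimal follow-up sketch (not part of the commit, assuming only the DerivativeDescent API shown above): it drives the same solver for more steps and checks that the iterate settles at the analytic minimizer x = -0.5, where the derivative 2x + 1 vanishes.

use iterative_methods::derivative_descent::*;
use streaming_iterator::*;

fn main() {
    // Same problem and step size as the example above, starting from x_0 = 2.0.
    let mut dd = DerivativeDescent::new(|x| x * x + x, |x| 2.0 * x + 1.0, 0.2, 2.0);
    // Each advance applies x <- x - 0.2 * (2x + 1), a contraction toward x = -0.5.
    for _ in 0..100 {
        dd.advance();
    }
    let state = dd.get().expect("get always returns the current state");
    assert!((state.x + 0.5).abs() < 1e-6);
    println!("converged to x = {:.6}, f(x) = {:.6}", state.x, state.value());
}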
39 changes: 0 additions & 39 deletions examples/fibonacci.rs

This file was deleted.

4 changes: 2 additions & 2 deletions examples/weighting_samples.rs
@@ -4,11 +4,11 @@ use streaming_iterator::*;
use utils::*;

fn wd_iterable_counter_demo() {
println!("\n\n -----WdIterable Counter Demo----- \n\n");
println!("\n\n -----Weight Counter Demo----- \n\n");

let counter_stream = Counter::new();
let mut counter_stream_copy = counter_stream.clone();
let mut wd_iter = WdIterable {
let mut wd_iter = Weight {
it: counter_stream,
f: expose_w,
wd: Some(new_datum(0., 0.)),
3 changes: 2 additions & 1 deletion src/algorithms.rs
@@ -26,7 +26,8 @@ mod tests {

pub fn solve_approximately(p: LinearSystem) -> V {
let solution = ConjugateGradient::for_problem(&p).take(20);
last(solution.map(|s| s.x_k.clone())).expect("CGIterable should always return a solution.")
last(solution.map(|s| s.x_k.clone()))
.expect("ConjugateGradient should always return a solution.")
}

pub fn show_progress(p: LinearSystem) {
34 changes: 16 additions & 18 deletions src/conjugate_gradient.rs
@@ -1,24 +1,22 @@
//! Implementation of conjugate gradient
//! following [lecture notes](http://www.math.psu.edu/shen_w/524/CG_lecture.pdf)
//! by Shen. Thanks Shen!
//!
//! Pseudo code:
//! Set r_0 = A*x_0 - b and p_0 =-r_0, k=0
//! while r_k != 0:
//! alpha_k = ||r_k||^2 / ||p_k||^2_A
//! x_{k+1} = x_k + alpha_k*p_k
//! r_{k+1} = r_K + alpha_k*A*p_k
//! beta_k = ||r_{k+1}||^2 / ||r_k||^2
//! p_{k+1} = -r_k + beta_k * p_k
//! k += 1
use crate::utils::{LinearSystem, M, S, V};
use ndarray::ArrayBase;
use std::f64::{MIN_POSITIVE, NAN};
use streaming_iterator::*;
// Scalar, Vector and Matrix types:

/// Implementation of conjugate gradient
/// following
/// http://www.math.psu.edu/shen_w/524/CG_lecture.pdf.
/// Thanks Shen!
// Pseudo code:
// Set r_0 = A*x_0 - b and p_0 =-r_0, k=0
// while r_k != 0:
// alpha_k = ||r_k||^2 / ||p_k||^2_A
// x_{k+1} = x_k + alpha_k*p_k
// r_{k+1} = r_K + alpha_k*A*p_k
// beta_k = ||r_{k+1}||^2 / ||r_k||^2
// p_{k+1} = -r_k + beta_k * p_k
// k += 1

// A few notes:
//
@@ -38,7 +36,7 @@ use streaming_iterator::*;

/// Store the state of a conjugate gradient computation.
///
/// This implementation deviates from the psuedo code given in this
/// This implementation deviates from the pseudocode given in this
/// module slightly: the prelude to the loop is run in initialization,
/// and advance implements the loop. Since advance is always called
/// before get, the currently calculated quantities (suffixed by `_k`)
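As a reading aid for the pseudocode above, here is a minimal standalone sketch of the same loop written directly against ndarray (not part of this commit, and independent of the crate's ConjugateGradient iterator); the function name and the small test system are illustrative only, and the direction update uses r_{k+1}, the standard form of the recurrence.

use ndarray::{array, Array1, Array2};

// Plain conjugate gradient for a symmetric positive definite matrix `a`,
// following the pseudocode: r_0 = A*x_0 - b, p_0 = -r_0, then repeat the
// alpha/x/r/beta/p updates until the residual is small.
fn conjugate_gradient(
    a: &Array2<f64>,
    b: &Array1<f64>,
    x0: Array1<f64>,
    tol: f64,
    max_iter: usize,
) -> Array1<f64> {
    let mut x = x0;
    let mut r = a.dot(&x) - b; // r_0 = A*x_0 - b
    let mut p = -r.clone(); // p_0 = -r_0
    for _ in 0..max_iter {
        let r_norm_sq = r.dot(&r);
        if r_norm_sq.sqrt() < tol {
            break;
        }
        let ap = a.dot(&p);
        let alpha = r_norm_sq / p.dot(&ap); // alpha_k = ||r_k||^2 / ||p_k||^2_A
        x = &x + &(alpha * &p); // x_{k+1} = x_k + alpha_k*p_k
        let r_next = &r + &(alpha * &ap); // r_{k+1} = r_k + alpha_k*A*p_k
        let beta = r_next.dot(&r_next) / r_norm_sq; // beta_k = ||r_{k+1}||^2 / ||r_k||^2
        p = -&r_next + beta * &p; // p_{k+1} = -r_{k+1} + beta_k*p_k
        r = r_next;
    }
    x
}

fn main() {
    let a = array![[1.0, 0.5], [0.5, 1.0]];
    let b = array![0.0, 1.0];
    let x = conjugate_gradient(&a, &b, array![0.0, 0.0], 1e-10, 100);
    // The exact solution of this 2x2 system is x = [-2/3, 4/3].
    println!("x = [{:.4}, {:.4}]", x[0], x[1]);
}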
49 changes: 49 additions & 0 deletions src/derivative_descent.rs
@@ -0,0 +1,49 @@
//! Library code for example from crate top-level documentation
use super::*;

#[derive(Debug, Clone)]
pub struct DerivativeDescent<V, D>
where
    V: Fn(f64) -> f64,
    D: Fn(f64) -> f64,
{
    pub value: V,
    pub derivative: D,
    pub step_size: f64,
    pub x: f64,
}

impl<V, D> DerivativeDescent<V, D>
where
    V: Fn(f64) -> f64,
    D: Fn(f64) -> f64,
{
    pub fn new(value: V, derivative: D, step_size: f64, x_0: f64) -> DerivativeDescent<V, D> {
        DerivativeDescent {
            value,
            derivative,
            step_size,
            x: x_0,
        }
    }

    pub fn value(&self) -> f64 {
        (&self.value)(self.x)
    }
}

impl<V, D> StreamingIterator for DerivativeDescent<V, D>
where
    V: Fn(f64) -> f64,
    D: Fn(f64) -> f64,
{
    type Item = DerivativeDescent<V, D>;
    fn advance(&mut self) {
        self.x -= self.step_size * (self.derivative)(self.x);
    }

    fn get(&self) -> Option<&Self::Item> {
        Some(&self)
    }
}
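A possible unit-test sketch for the new struct (not part of this commit; the test name and tolerance are illustrative): one advance from x = 0.0 with step size 0.5 should land on x = -0.5, since f'(0) = 1 for f(x) = x^2 + x.

#[cfg(test)]
mod tests {
    use super::*;
    use streaming_iterator::*;

    #[test]
    fn single_step_moves_against_derivative() {
        let mut dd = DerivativeDescent::new(|x| x * x + x, |x| 2.0 * x + 1.0, 0.5, 0.0);
        // advance applies x <- x - step_size * f'(x) exactly once.
        dd.advance();
        let state = dd.get().expect("get always returns the current state");
        assert!((state.x + 0.5).abs() < 1e-12);
    }
}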