// mlx_rs/optimizers/adadelta.rs

use std::rc::Rc;

use crate::{
    array,
    ops::sqrt,
    utils::{get_mut_or_insert_with, Updatable},
    Array,
};
use mlx_internal_macros::{generate_builder, Buildable};

use crate::error::AdaDeltaBuildError;

use super::*;

generate_builder! {
    /// The AdaDelta optimizer with a learning rate
    ///
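    /// AdaDelta keeps two running averages per parameter, one of squared
    /// gradients (`v`) and one of squared updates (`u`), and performs the
    /// update implemented in `update_single` below:
    ///
    /// ```text
    /// v_{t+1} = rho * v_t + (1 - rho) * g_t^2
    /// d_t     = sqrt(u_t + eps) / sqrt(v_{t+1} + eps) * g_t
    /// u_{t+1} = rho * u_t + (1 - rho) * d_t^2
    /// w_{t+1} = w_t - lr * d_t
    /// ```
    ///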
    /// Please refer to the original paper for more details:
    ///
    /// [1]: Zeiler, M.D., 2012. ADADELTA: an adaptive learning rate method. arXiv preprint arXiv:1212.5701.
    #[derive(Debug, Clone, Buildable)]
    #[buildable(root = crate)]
    #[builder(
        build_with = build_adadelta,
        err = AdaDeltaBuildError,
        root = crate
    )]
    pub struct AdaDelta {
        /// The learning rate
        #[builder(ty_override = f32)]
        pub lr: Array,

        /// The coefficient used for computing a running average of squared gradients. Defaults to
        /// [`AdaDelta::DEFAULT_RHO`].
        #[builder(optional, ty_override = f32, default = AdaDelta::DEFAULT_RHO)]
        pub rho: Array,

        /// The epsilon added to the denominator to improve numerical stability. Defaults to
        /// [`AdaDelta::DEFAULT_EPS`].
        #[builder(optional, ty_override = f32, default = AdaDelta::DEFAULT_EPS)]
        pub eps: Array,

        /// Inner state
        #[builder(ignore)]
        pub state: State<(Array, Array)>,
    }
}
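
// Construction goes through the generated builder. A minimal usage sketch
// (hedged: the `builder()` entry point and the setter names below are assumed
// to be what `generate_builder!` emits; check the generated docs for the
// exact API in your version of the crate):
//
//     let mut optimizer = AdaDelta::builder()
//         .lr(0.1)    // required learning rate
//         .rho(0.9)   // optional, defaults to AdaDelta::DEFAULT_RHO
//         .eps(1e-6)  // optional, defaults to AdaDelta::DEFAULT_EPS
//         .build()?;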

/// Builds a new [`AdaDelta`] optimizer
fn build_adadelta(builder: AdaDeltaBuilder) -> Result<AdaDelta, AdaDeltaBuildError> {
    let rho = builder.rho;
    let eps = builder.eps;

    if rho < 0.0 {
        return Err(AdaDeltaBuildError::NegativeRho);
    }

    if eps < 0.0 {
        return Err(AdaDeltaBuildError::NegativeEps);
    }

    Ok(AdaDelta {
        lr: array!(builder.lr),
        rho: array!(rho),
        eps: array!(eps),
        state: State::new(),
    })
}

impl AdaDelta {
    /// Default value for `rho`
    pub const DEFAULT_RHO: f32 = 0.99;

    /// Default value for `eps`
    pub const DEFAULT_EPS: f32 = 1e-6;
}

impl Optimizer for AdaDelta {
    type State = State<(Array, Array)>;

    fn state(&self) -> &Self::State {
        &self.state
    }

    fn state_mut(&mut self) -> &mut Self::State {
        &mut self.state
    }

    fn update_single(
        &mut self,
        key: &Rc<str>,
        gradient: &Array,
        parameter: &mut Array,
    ) -> crate::error::Result<()> {
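        // Per-parameter state: `v` is the running average of squared
        // gradients, `u` the running average of squared updates; both start
        // at zero the first time a parameter key is seen.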
        let (v, u) = get_mut_or_insert_with(&mut self.state, key, || (array!(0.0), array!(0.0)));

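        // v_{t+1} = rho * v_t + (1 - rho) * g_t^2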
        let one_minus_rho = array!(1.0).subtract(&self.rho)?;
        let first_term = self.rho.multiply(&v)?;
        let second_term = one_minus_rho.multiply(gradient.square()?)?;
        let v_new = first_term.add(&second_term)?;

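        // d_t = (sqrt(u_t + eps) / sqrt(v_{t+1} + eps)) * g_t is the
        // RMS-rescaled step; then u_{t+1} = rho * u_t + (1 - rho) * d_t^2.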
        let num = sqrt(&u.add(&self.eps)?)?;
        let den = sqrt(&v_new.add(&self.eps)?)?;
        let d = num.divide(&den)?.multiply(gradient)?;
        let first_term = self.rho.multiply(&u)?;
        let second_term = one_minus_rho.multiply(d.square()?)?;
        let u_new = first_term.add(&second_term)?;

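        // w_{t+1} = w_t - lr * d_t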
        let param_new = parameter.subtract(self.lr.multiply(d)?)?;

        *parameter = param_new;

        *v = v_new;
        *u = u_new;

        Ok(())
    }
}

impl Updatable for AdaDelta {
    fn updatable_states(&self) -> impl IntoIterator<Item = &Array> {
        use itertools::Itertools;

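        // Sort by parameter key so the flattened state order is deterministic.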
        self.state
            .iter()
            .sorted_by(|a, b| a.0.cmp(b.0))
            .flat_map(|(_, (v, u))| vec![v, u])
    }

    fn updatable_states_mut(&mut self) -> impl IntoIterator<Item = &mut Array> {
        use itertools::Itertools;

        self.state
            .iter_mut()
            .sorted_by(|a, b| a.0.cmp(b.0))
            .flat_map(|(_, (v, u))| vec![v, u])
    }
}

impl_updatable_for_mut_optimizer!(AdaDelta);
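
// A hedged smoke test for the builder validation above. This sketch assumes
// the `AdaDelta::builder()` entry point described earlier and the
// `Array::item::<T>()` accessor; adjust to the crate's actual generated API.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn negative_rho_is_rejected() {
        // `build_adadelta` should reject rho < 0.
        let result = AdaDelta::builder().lr(0.1).rho(-1.0).build();
        assert!(matches!(result, Err(AdaDeltaBuildError::NegativeRho)));
    }

    #[test]
    fn defaults_are_applied() {
        let optimizer = AdaDelta::builder().lr(0.1).build().unwrap();
        assert_eq!(optimizer.rho.item::<f32>(), AdaDelta::DEFAULT_RHO);
        assert_eq!(optimizer.eps.item::<f32>(), AdaDelta::DEFAULT_EPS);
    }
}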