diff --git a/Cargo.toml b/Cargo.toml index a94e35f3..acbf5cf1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,9 +6,6 @@ version = "0.0.0" [features] macros = ["linalg_macros"] -[dependencies.assign] -git = "https://github.com/japaric/assign.rs" - [dependencies.blas] git = "https://github.com/japaric/blas.rs" diff --git a/src/lib.rs b/src/lib.rs index f362d2b5..2790275a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -76,7 +76,7 @@ //! A[1, :] += 1; //! //! // Rust -//! A.row_mut(1).add_assign(1) +//! A.row_mut(1) += 1; //! ``` //! //! Subtract sub-matrices @@ -86,7 +86,7 @@ //! A[1:3, 2:4] -= B[:2, 1:3] //! //! // Rust -//! A.slice_mut((1..3, 2..4)).sub_assign(B.slice((..2, 1..3))); +//! A.slice_mut((1..3, 2..4)) -= B.slice((..2, 1..3)); //! ``` //! //! - Index assignment @@ -219,7 +219,7 @@ //! z.set(y); //! //! // z = y - theta * X -//! z.sub_assign(theta * X); +//! z -= theta * X; //! //! .. //! @@ -236,6 +236,7 @@ #![deny(missing_docs)] #![deny(warnings)] #![feature(advanced_slice_patterns)] +#![feature(augmented_assignments)] #![feature(collections)] #![feature(core)] #![feature(filling_drop)] @@ -243,7 +244,6 @@ #![feature(unique)] #![feature(unsafe_no_drop_flag)] -extern crate assign; extern crate blas; extern crate cast; extern crate complex; diff --git a/src/ops/add/col.rs b/src/ops/add/col.rs index 0b9f8a18..7c25b168 100644 --- a/src/ops/add/col.rs +++ b/src/ops/add/col.rs @@ -1,6 +1,5 @@ use std::ops::Add; -use assign::AddAssign; use blas::{Axpy, Gemm, Gemv, Transpose}; use complex::Complex; use onezero::{One, Zero}; @@ -238,7 +237,7 @@ macro_rules! scalar { type Output = ColVec<$t>; fn add(self, mut rhs: ColVec<$t>) -> ColVec<$t> { - rhs.add_assign(self); + rhs += self; rhs } } @@ -247,7 +246,7 @@ macro_rules! scalar { type Output = ColVec<$t>; fn add(self, mut rhs: ColVec<$t>) -> ColVec<$t> { - rhs.add_assign(self); + rhs += self; rhs } } diff --git a/src/ops/add/mat.rs b/src/ops/add/mat.rs index f6492c18..687cb82d 100644 --- a/src/ops/add/mat.rs +++ b/src/ops/add/mat.rs @@ -1,6 +1,5 @@ use std::ops::Add; -use assign::AddAssign; use blas::{Axpy, Gemm, Transpose}; use complex::Complex; use onezero::{One, Zero}; @@ -185,7 +184,7 @@ macro_rules! scalar { type Output = Mat<$t>; fn add(self, mut rhs: Mat<$t>) -> Mat<$t> { - rhs.add_assign(self); + rhs += self; rhs } } @@ -194,7 +193,7 @@ macro_rules! scalar { type Output = Mat<$t>; fn add(self, mut rhs: Mat<$t>) -> Mat<$t> { - rhs.add_assign(self); + rhs += self; rhs } } @@ -203,7 +202,7 @@ macro_rules! scalar { type Output = Transposed>; fn add(self, mut rhs: Transposed>) -> Transposed> { - rhs.add_assign(self); + rhs += self; rhs } } @@ -212,7 +211,7 @@ macro_rules! scalar { type Output = Transposed>; fn add(self, mut rhs: Transposed>) -> Transposed> { - rhs.add_assign(self); + rhs += self; rhs } } diff --git a/src/ops/add/mod.rs b/src/ops/add/mod.rs index d4bb0aba..ec611e47 100644 --- a/src/ops/add/mod.rs +++ b/src/ops/add/mod.rs @@ -8,7 +8,7 @@ macro_rules! assign { type Output = $lhs; fn add(mut self, rhs: $rhs) -> $lhs { - self.add_assign(rhs); + self += rhs; self } } @@ -17,7 +17,7 @@ macro_rules! assign { type Output = $lhs; fn add(self, mut rhs: $lhs) -> $lhs { - rhs.add_assign(self); + rhs += self; rhs } } @@ -29,7 +29,7 @@ macro_rules! 
assign { type Output = $lhs; fn add(mut self, rhs: $rhs) -> $lhs { - self.add_assign(rhs); + self += rhs; self } } diff --git a/src/ops/add/row.rs b/src/ops/add/row.rs index 408ea1a4..458fc54e 100644 --- a/src/ops/add/row.rs +++ b/src/ops/add/row.rs @@ -1,6 +1,5 @@ use std::ops::Add; -use assign::AddAssign; use blas::{Axpy, Gemm, Gemv}; use complex::Complex; use onezero::{One, Zero}; @@ -93,7 +92,7 @@ macro_rules! scalar { type Output = RowVec<$t>; fn add(self, mut rhs: RowVec<$t>) -> RowVec<$t> { - rhs.add_assign(self); + rhs += self; rhs } } @@ -102,7 +101,7 @@ macro_rules! scalar { type Output = RowVec<$t>; fn add(self, mut rhs: RowVec<$t>) -> RowVec<$t> { - rhs.add_assign(self); + rhs += self; rhs } } diff --git a/src/ops/add_assign/col.rs b/src/ops/add_assign/col.rs index f549d702..4665d4ce 100644 --- a/src/ops/add_assign/col.rs +++ b/src/ops/add_assign/col.rs @@ -1,4 +1,5 @@ -use assign::AddAssign; +use std::ops::AddAssign; + use blas::{Axpy, Gemm, Gemv, Transpose}; use onezero::{One, Zero}; @@ -112,7 +113,7 @@ impl<'a, 'b, T> AddAssign>> for ColMut<'b, T> where T: Axpy { // Secondary implementations impl<'a, 'b, T> AddAssign> for ColMut<'b, T> where T: Axpy + One { fn add_assign(&mut self, rhs: Col) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -120,7 +121,7 @@ impl<'a, 'b, 'c, T> AddAssign, Col<'b, T>>> for ColMut<'c, T: Gemm + Gemv + One + Zero, { fn add_assign(&mut self, rhs: Product, Col>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -129,7 +130,7 @@ AddAssign>, Col<'b, T>>> for ColMut<'c, T> wher T: Gemv + One, { fn add_assign(&mut self, rhs: Product>, Col>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -137,7 +138,7 @@ impl<'a, 'b, 'c, T> AddAssign, Col<'b, T>>> for ColMut<'c, T: Gemv + One, { fn add_assign(&mut self, rhs: Product, Col>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -146,7 +147,7 @@ macro_rules! forward { $( impl<'a, 'b, 'c, T> AddAssign<$rhs> for $lhs where $(T: $bound),+ { fn add_assign(&mut self, rhs: $rhs) { - self.slice_mut(..).add_assign(rhs.slice(..)) + self.slice_mut(..) += rhs.slice(..) } } )+ @@ -161,7 +162,7 @@ forward!(ColMut<'a, T> { impl<'a, T> AddAssign for ColMut<'a, T> where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.add_assign(&rhs) + *self += &rhs } } @@ -181,12 +182,12 @@ forward!(ColVec { impl<'a, T> AddAssign<&'a T> for ColVec where T: Axpy + One { fn add_assign(&mut self, rhs: &T) { - self.slice_mut(..).add_assign(rhs) + self.slice_mut(..) += rhs } } impl AddAssign for ColVec where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.slice_mut(..).add_assign(&rhs) + self.slice_mut(..) 
+= &rhs } } diff --git a/src/ops/add_assign/diag.rs b/src/ops/add_assign/diag.rs index 582387ae..645af4cf 100644 --- a/src/ops/add_assign/diag.rs +++ b/src/ops/add_assign/diag.rs @@ -1,4 +1,5 @@ -use assign::AddAssign; +use std::ops::AddAssign; + use blas::Axpy; use onezero::One; @@ -26,6 +27,6 @@ impl<'a, 'b, T> AddAssign<&'a T> for DiagMut<'b, T> where T: Axpy + One { // "Forwarding" implementations impl<'a, T> AddAssign for DiagMut<'a, T> where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.add_assign(&rhs) + *self += &rhs } } diff --git a/src/ops/add_assign/mat.rs b/src/ops/add_assign/mat.rs index 2c65a332..b95a4adc 100644 --- a/src/ops/add_assign/mat.rs +++ b/src/ops/add_assign/mat.rs @@ -1,4 +1,5 @@ -use assign::AddAssign; +use std::ops::AddAssign; + use blas::{Axpy, Gemm, Transpose}; use onezero::{One, Zero}; @@ -123,7 +124,7 @@ impl<'a, 'b, T> AddAssign>> for SubMatMut<'b, T> where T: A // Secondary implementations impl<'a, 'b, T> AddAssign<&'a T> for Transposed> where T: Axpy + One { fn add_assign(&mut self, rhs: &T) { - self.0.add_assign(rhs) + self.0 += rhs } } @@ -131,7 +132,7 @@ impl<'a, 'b, T> AddAssign> for Transposed> where T: Gemm + One + Zero, { fn add_assign(&mut self, rhs: Chain) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -139,7 +140,7 @@ impl<'a, 'b, T> AddAssign>> for Transposed> T: Gemm + One + Zero, { fn add_assign(&mut self, rhs: Scaled>) { - self.0.add_assign(rhs.t()) + self.0 += rhs.t() } } @@ -147,13 +148,13 @@ impl<'a, 'b, T> AddAssign>>> for Transposed>>) { - self.0.add_assign(Scaled(rhs.0, (rhs.1).0)) + self.0 += Scaled(rhs.0, (rhs.1).0) } } impl<'a, 'b, T> AddAssign>> for Transposed> where T: Axpy { fn add_assign(&mut self, rhs: Scaled>) { - self.0.add_assign(rhs.t()) + self.0 += rhs.t() } } @@ -161,7 +162,7 @@ impl<'a, 'b, T> AddAssign>> for Transposed>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -169,25 +170,25 @@ impl<'a, 'b, T> AddAssign> for Transposed> where T: Axpy + One, { fn add_assign(&mut self, rhs: SubMat<'a, T>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } impl<'a, 'b, T> AddAssign> for SubMatMut<'b, T> where T: Gemm + One + Zero { fn add_assign(&mut self, rhs: Chain) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } impl<'a, 'b, T> AddAssign>> for SubMatMut<'b, T> where T: Axpy + One { fn add_assign(&mut self, rhs: Transposed>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } impl<'a, 'b, T> AddAssign> for SubMatMut<'b, T> where T: Axpy + One { fn add_assign(&mut self, rhs: SubMat) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -196,7 +197,7 @@ macro_rules! forward { $( impl<'a, 'b, 'c, T> AddAssign<$rhs> for $lhs where $(T: $bound),+ { fn add_assign(&mut self, rhs: $rhs) { - self.slice_mut(..).add_assign(rhs.slice(..)) + self.slice_mut(..) += rhs.slice(..) } } )+ @@ -219,13 +220,13 @@ forward!(Mat { impl<'a, T> AddAssign<&'a T> for Mat where T: Axpy + One { fn add_assign(&mut self, rhs: &T) { - self.slice_mut(..).add_assign(rhs) + self.slice_mut(..) += rhs } } impl AddAssign for Mat where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.slice_mut(..).add_assign(&rhs) + self.slice_mut(..) += &rhs } } @@ -245,13 +246,13 @@ forward!(Transposed> { impl<'a, T> AddAssign<&'a T> for Transposed> where T: Axpy + One { fn add_assign(&mut self, rhs: &T) { - self.slice_mut(..).add_assign(rhs) + self.slice_mut(..) 
+= rhs } } impl AddAssign for Transposed> where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.slice_mut(..).add_assign(&rhs) + self.slice_mut(..) += &rhs } } @@ -265,7 +266,7 @@ forward!(Transposed> { impl<'a, T> AddAssign for Transposed> where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.0.add_assign(&rhs) + self.0 += &rhs } } @@ -279,6 +280,6 @@ forward!(SubMatMut<'a, T> { impl<'a, T> AddAssign for SubMatMut<'a, T> where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.add_assign(&rhs) + *self += &rhs } } diff --git a/src/ops/add_assign/row.rs b/src/ops/add_assign/row.rs index 475408b0..46aad62d 100644 --- a/src/ops/add_assign/row.rs +++ b/src/ops/add_assign/row.rs @@ -1,4 +1,5 @@ -use assign::AddAssign; +use std::ops::AddAssign; + use blas::{Axpy, Gemm, Gemv}; use onezero::{One, Zero}; @@ -25,7 +26,7 @@ use {Chain, Product, Row, RowMut, RowVec, Scaled, Transposed, SubMat}; // `row <- row + row * mat === row^t <- row^t + mat^t * row^t` impl<'a, 'b, T> AddAssign<&'a T> for RowMut<'b, T> where T: Axpy + One { fn add_assign(&mut self, rhs: &T) { - self.slice_mut(..).t().add_assign(rhs) + self.slice_mut(..).t() += rhs } } @@ -33,7 +34,7 @@ impl<'a, 'b, T> AddAssign>> for RowMut<'b, T> where T: Axpy, { fn add_assign(&mut self, rhs: Scaled>) { - self.slice_mut(..).t().add_assign(rhs.t()) + self.slice_mut(..).t() += rhs.t() } } @@ -41,7 +42,7 @@ impl<'a, 'b, 'c, T> AddAssign, Chain<'b, T>>>> for Row T: Gemm + Gemv + One + Zero, { fn add_assign(&mut self, rhs: Scaled, Chain>>) { - self.slice_mut(..).t().add_assign(rhs.t()) + self.slice_mut(..).t() += rhs.t() } } @@ -50,7 +51,7 @@ AddAssign, Transposed>>>> for RowMut<'c, T: Gemv + One, { fn add_assign(&mut self, rhs: Scaled, Transposed>>>) { - self.slice_mut(..).t().add_assign(rhs.t()) + self.slice_mut(..).t() += rhs.t() } } @@ -58,7 +59,7 @@ impl<'a, 'b, 'c, T> AddAssign, SubMat<'b, T>>>> for Ro T: Gemv + One, { fn add_assign(&mut self, rhs: Scaled, SubMat>>) { - self.slice_mut(..).t().add_assign(rhs.t()) + self.slice_mut(..).t() += rhs.t() } } @@ -66,7 +67,7 @@ impl<'a, 'b, T> AddAssign> for RowMut<'b, T> where T: Axpy + One, { fn add_assign(&mut self, rhs: Row) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -74,7 +75,7 @@ impl<'a, 'b, 'c, T> AddAssign, Chain<'b, T>>> for RowMut<'c, T: Gemm + Gemv + One + Zero, { fn add_assign(&mut self, rhs: Product, Chain>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -83,7 +84,7 @@ AddAssign, Transposed>>> for RowMut<'c, T> wher T: Gemv + One, { fn add_assign(&mut self, rhs: Product, Transposed>>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -91,7 +92,7 @@ impl<'a, 'b, 'c, T> AddAssign, SubMat<'b, T>>> for RowMut<'c, T: Gemv + One, { fn add_assign(&mut self, rhs: Product, SubMat>) { - self.add_assign(Scaled(T::one(), rhs)) + *self += Scaled(T::one(), rhs) } } @@ -100,7 +101,7 @@ macro_rules! forward { $( impl<'a, 'b, 'c, T> AddAssign<$rhs> for $lhs where $(T: $bound),+ { fn add_assign(&mut self, rhs: $rhs) { - self.slice_mut(..).add_assign(rhs.slice(..)) + self.slice_mut(..) += rhs.slice(..) 
} } )+ @@ -115,7 +116,7 @@ forward!(RowMut<'a, T> { impl<'a, T> AddAssign for RowMut<'a, T> where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.add_assign(&rhs) + *self += &rhs } } @@ -135,12 +136,12 @@ forward!(RowVec { impl<'a, T> AddAssign<&'a T> for RowVec where T: Axpy + One { fn add_assign(&mut self, rhs: &T) { - self.slice_mut(..).add_assign(rhs) + self.slice_mut(..) += rhs } } impl AddAssign for RowVec where T: Axpy + One { fn add_assign(&mut self, rhs: T) { - self.slice_mut(..).add_assign(&rhs) + self.slice_mut(..) += &rhs } } diff --git a/src/ops/div_assign/col.rs b/src/ops/div_assign/col.rs index 92628e18..a325484c 100644 --- a/src/ops/div_assign/col.rs +++ b/src/ops/div_assign/col.rs @@ -1,6 +1,5 @@ -use std::ops::Div; +use std::ops::{Div, DivAssign}; -use assign::{DivAssign, MulAssign}; use blas::Scal; use onezero::One; @@ -13,7 +12,7 @@ impl<'a, T, A> DivAssign for ColMut<'a, T> where T: Scal, { fn div_assign(&mut self, alpha: A) { - self.mul_assign(A::one() / alpha) + *self *= A::one() / alpha } } @@ -23,6 +22,6 @@ impl DivAssign for ColVec where T: Scal, { fn div_assign(&mut self, alpha: A) { - self.slice_mut(..).div_assign(alpha) + self.slice_mut(..) /= alpha } } diff --git a/src/ops/div_assign/mat.rs b/src/ops/div_assign/mat.rs index 6cb74a09..c609f76e 100644 --- a/src/ops/div_assign/mat.rs +++ b/src/ops/div_assign/mat.rs @@ -1,4 +1,5 @@ -use assign::{DivAssign, MulAssign}; +use std::ops::DivAssign; + use complex::Complex; use onezero::One; @@ -13,28 +14,28 @@ macro_rules! scale { fn div_assign(&mut self, alpha: $rhs) { let _1: $rhs = One::one(); - self.mul_assign(_1 / alpha) + *self *= _1 / alpha } } // NOTE Secondary impl<'a> DivAssign<$rhs> for Transposed> { fn div_assign(&mut self, alpha: $rhs) { - self.0.div_assign(alpha) + self.0 /= alpha } } // NOTE Forward impl DivAssign<$rhs> for Transposed> { fn div_assign(&mut self, alpha: $rhs) { - self.slice_mut(..).div_assign(alpha) + self.slice_mut(..) /= alpha } } // NOTE Forward impl DivAssign<$rhs> for Mat<$lhs> { fn div_assign(&mut self, alpha: $rhs) { - self.slice_mut(..).div_assign(alpha) + self.slice_mut(..) /= alpha } } )+ diff --git a/src/ops/div_assign/row.rs b/src/ops/div_assign/row.rs index efecad19..4b9a96c6 100644 --- a/src/ops/div_assign/row.rs +++ b/src/ops/div_assign/row.rs @@ -1,6 +1,5 @@ -use std::ops::Div; +use std::ops::{Div, DivAssign}; -use assign::{DivAssign, MulAssign}; use blas::Scal; use onezero::One; @@ -13,7 +12,7 @@ impl<'a, T, A> DivAssign for RowMut<'a, T> where T: Scal, { fn div_assign(&mut self, alpha: A) { - self.mul_assign(A::one() / alpha) + *self *= A::one() / alpha } } @@ -23,6 +22,6 @@ impl DivAssign for RowVec where T: Scal, { fn div_assign(&mut self, alpha: A) { - self.slice_mut(..).div_assign(alpha) + self.slice_mut(..) /= alpha } } diff --git a/src/ops/mul_assign/col.rs b/src/ops/mul_assign/col.rs index 820e12c0..72e100bc 100644 --- a/src/ops/mul_assign/col.rs +++ b/src/ops/mul_assign/col.rs @@ -1,4 +1,5 @@ -use assign::MulAssign; +use std::ops::MulAssign; + use blas::Scal; use ops; @@ -18,6 +19,6 @@ impl<'a, T, A> MulAssign for ColMut<'a, T> where T: Scal { impl MulAssign for ColVec where T: Scal { fn mul_assign(&mut self, alpha: A) { - self.slice_mut(..).mul_assign(alpha) + self.slice_mut(..) 
*= alpha } } diff --git a/src/ops/mul_assign/mat.rs b/src/ops/mul_assign/mat.rs index 72e3de42..fe5257e4 100644 --- a/src/ops/mul_assign/mat.rs +++ b/src/ops/mul_assign/mat.rs @@ -1,6 +1,5 @@ -use std::ops::Mul; +use std::ops::{Mul, MulAssign}; -use assign::MulAssign; use complex::Complex; use Forward; @@ -44,7 +43,7 @@ impl<'a, 'b, A, B> MulAssign>> for Transposed, { fn mul_assign(&mut self, rhs: Transposed>) { - self.0.mul_assign(rhs.0) + self.0 *= rhs.0 } } @@ -54,7 +53,7 @@ impl<'a, 'b, A, B> MulAssign> for Transposed> whe B: Copy + Mul, { fn mul_assign(&mut self, rhs: SubMat<'a, A>) { - self.0.mul_assign(rhs.t()) + self.0 *= rhs.t() } } @@ -65,7 +64,7 @@ macro_rules! forward { A: Copy + Mul, B: Copy, { fn mul_assign(&mut self, rhs: $rhs) { - self.slice_mut(..).mul_assign(rhs.slice(..)) + self.slice_mut(..) *= rhs.slice(..) } } )+ @@ -125,21 +124,21 @@ macro_rules! scale { // NOTE Secondary impl<'a> MulAssign<$rhs> for Transposed> { fn mul_assign(&mut self, alpha: $rhs) { - self.0.mul_assign(alpha) + self.0 *= alpha } } // NOTE Forward impl MulAssign<$rhs> for Transposed> { fn mul_assign(&mut self, alpha: $rhs) { - self.slice_mut(..).mul_assign(alpha) + self.slice_mut(..) *= alpha } } // NOTE Forward impl MulAssign<$rhs> for Mat<$lhs> { fn mul_assign(&mut self, alpha: $rhs) { - self.slice_mut(..).mul_assign(alpha) + self.slice_mut(..) *= alpha } } )+ diff --git a/src/ops/mul_assign/row.rs b/src/ops/mul_assign/row.rs index a71f231d..cf7da690 100644 --- a/src/ops/mul_assign/row.rs +++ b/src/ops/mul_assign/row.rs @@ -1,4 +1,5 @@ -use assign::MulAssign; +use std::ops::MulAssign; + use blas::Scal; use ops; @@ -18,6 +19,6 @@ impl<'a, T, A> MulAssign for RowMut<'a, T> where T: Scal { impl MulAssign for RowVec where T: Scal { fn mul_assign(&mut self, alpha: A) { - self.slice_mut(..).mul_assign(alpha) + self.slice_mut(..) *= alpha } } diff --git a/src/ops/sub/col.rs b/src/ops/sub/col.rs index 883e23f9..b470ab2c 100644 --- a/src/ops/sub/col.rs +++ b/src/ops/sub/col.rs @@ -1,6 +1,5 @@ use std::ops::{Neg, Sub}; -use assign::SubAssign; use blas::{Axpy, Gemm, Gemv, Transpose}; use onezero::{One, Zero}; diff --git a/src/ops/sub/mat.rs b/src/ops/sub/mat.rs index 1d5e417d..f1bb8528 100644 --- a/src/ops/sub/mat.rs +++ b/src/ops/sub/mat.rs @@ -1,6 +1,5 @@ use std::ops::{Neg, Sub}; -use assign::SubAssign; use blas::{Axpy, Gemm, Transpose}; use onezero::{One, Zero}; diff --git a/src/ops/sub/mod.rs b/src/ops/sub/mod.rs index cb59b6d4..b188842f 100644 --- a/src/ops/sub/mod.rs +++ b/src/ops/sub/mod.rs @@ -8,7 +8,7 @@ macro_rules! 
assign { type Output = $lhs; fn sub(mut self, rhs: $rhs) -> $lhs { - self.sub_assign(rhs); + self -= rhs; self } } diff --git a/src/ops/sub/row.rs b/src/ops/sub/row.rs index 95ffb810..03c8f66c 100644 --- a/src/ops/sub/row.rs +++ b/src/ops/sub/row.rs @@ -1,6 +1,5 @@ use std::ops::{Neg, Sub}; -use assign::SubAssign; use blas::{Axpy, Gemm, Gemv}; use onezero::{One, Zero}; diff --git a/src/ops/sub_assign/col.rs b/src/ops/sub_assign/col.rs index 25d37d63..2840eb26 100644 --- a/src/ops/sub_assign/col.rs +++ b/src/ops/sub_assign/col.rs @@ -1,6 +1,5 @@ -use std::ops::Neg; +use std::ops::{Neg, SubAssign}; -use assign::SubAssign; use blas::{Axpy, Gemm, Gemv, Transpose}; use onezero::{One, Zero}; @@ -120,7 +119,7 @@ impl<'a, 'b, T> SubAssign>> for ColMut<'b, T> where T: Axpy + // Secondary implementations impl<'a, 'b, T> SubAssign> for ColMut<'b, T> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: Col) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -128,7 +127,7 @@ impl<'a, 'b, 'c, T> SubAssign, Col<'b, T>>> for ColMut<'c, T: Gemm + Gemv + Neg + One + Zero, { fn sub_assign(&mut self, rhs: Product, Col>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -137,7 +136,7 @@ SubAssign>, Col<'b, T>>> for ColMut<'c, T> wher T: Gemv + Neg + One, { fn sub_assign(&mut self, rhs: Product>, Col>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -145,7 +144,7 @@ impl<'a, 'b, 'c, T> SubAssign, Col<'b, T>>> for ColMut<'c, T: Gemv + Neg + One, { fn sub_assign(&mut self, rhs: Product, Col>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -154,7 +153,7 @@ macro_rules! forward { $( impl<'a, 'b, 'c, T> SubAssign<$rhs> for $lhs where T: Neg, $(T: $bound),+ { fn sub_assign(&mut self, rhs: $rhs) { - self.slice_mut(..).sub_assign(rhs.slice(..)) + self.slice_mut(..) -= rhs.slice(..) } } )+ @@ -169,7 +168,7 @@ forward!(ColMut<'a, T> { impl<'a, T> SubAssign for ColMut<'a, T> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.sub_assign(&rhs) + *self -= &rhs } } @@ -189,12 +188,12 @@ forward!(ColVec { impl<'a, T> SubAssign<&'a T> for ColVec where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: &T) { - self.slice_mut(..).sub_assign(rhs) + self.slice_mut(..) -= rhs } } impl SubAssign for ColVec where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.slice_mut(..).sub_assign(&rhs) + self.slice_mut(..) 
-= &rhs } } diff --git a/src/ops/sub_assign/diag.rs b/src/ops/sub_assign/diag.rs index 3293e351..16f92a39 100644 --- a/src/ops/sub_assign/diag.rs +++ b/src/ops/sub_assign/diag.rs @@ -1,6 +1,5 @@ -use std::ops::Neg; +use std::ops::{Neg, SubAssign}; -use assign::SubAssign; use blas::Axpy; use onezero::One; @@ -28,6 +27,6 @@ impl<'a, 'b, T> SubAssign<&'a T> for DiagMut<'b, T> where T: Axpy + Neg SubAssign for DiagMut<'a, T> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.sub_assign(&rhs) + *self -= &rhs } } diff --git a/src/ops/sub_assign/mat.rs b/src/ops/sub_assign/mat.rs index 6d044915..474cc1a0 100644 --- a/src/ops/sub_assign/mat.rs +++ b/src/ops/sub_assign/mat.rs @@ -1,6 +1,5 @@ -use std::ops::Neg; +use std::ops::{Neg, SubAssign}; -use assign::SubAssign; use blas::{Axpy, Gemm, Transpose}; use onezero::{One, Zero}; @@ -135,7 +134,7 @@ impl<'a, 'b, T> SubAssign<&'a T> for Transposed> where T: Axpy + Neg + One, { fn sub_assign(&mut self, rhs: &T) { - self.0.sub_assign(rhs) + self.0 -= rhs } } @@ -143,7 +142,7 @@ impl<'a, 'b, T> SubAssign> for Transposed> where T: Gemm + Neg + One + Zero, { fn sub_assign(&mut self, rhs: Chain) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -151,7 +150,7 @@ impl<'a, 'b, T> SubAssign>> for Transposed> T: Gemm + Neg + One + Zero, { fn sub_assign(&mut self, rhs: Scaled>) { - self.0.sub_assign(rhs.t()) + self.0 -= rhs.t() } } @@ -159,7 +158,7 @@ impl<'a, 'b, T> SubAssign>>> for Transposed, { fn sub_assign(&mut self, rhs: Scaled>>) { - self.0.sub_assign(Scaled(rhs.0, (rhs.1).0)) + self.0 -= Scaled(rhs.0, (rhs.1).0) } } @@ -167,7 +166,7 @@ impl<'a, 'b, T> SubAssign>> for Transposed T: Axpy + Neg, { fn sub_assign(&mut self, rhs: Scaled>) { - self.0.sub_assign(rhs.t()) + self.0 -= rhs.t() } } @@ -175,7 +174,7 @@ impl<'a, 'b, T> SubAssign>> for Transposed + One, { fn sub_assign(&mut self, rhs: Transposed>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -183,7 +182,7 @@ impl<'a, 'b, T> SubAssign> for Transposed> where T: Axpy + Neg + One, { fn sub_assign(&mut self, rhs: SubMat<'a, T>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -191,7 +190,7 @@ impl<'a, 'b, T> SubAssign> for SubMatMut<'b, T> where T: Gemm + Neg + One + Zero, { fn sub_assign(&mut self, rhs: Chain) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -199,13 +198,13 @@ impl<'a, 'b, T> SubAssign>> for SubMatMut<'b, T> where T: Axpy + Neg + One, { fn sub_assign(&mut self, rhs: Transposed>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } impl<'a, 'b, T> SubAssign> for SubMatMut<'b, T> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: SubMat) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -214,7 +213,7 @@ macro_rules! forward { $( impl<'a, 'b, 'c, T> SubAssign<$rhs> for $lhs where T: Neg, $(T: $bound),+ { fn sub_assign(&mut self, rhs: $rhs) { - self.slice_mut(..).sub_assign(rhs.slice(..)) + self.slice_mut(..) -= rhs.slice(..) } } )+ @@ -237,13 +236,13 @@ forward!(Mat { impl<'a, T> SubAssign<&'a T> for Mat where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: &T) { - self.slice_mut(..).sub_assign(rhs) + self.slice_mut(..) -= rhs } } impl SubAssign for Mat where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.slice_mut(..).sub_assign(&rhs) + self.slice_mut(..) 
-= &rhs } } @@ -263,13 +262,13 @@ forward!(Transposed> { impl<'a, T> SubAssign<&'a T> for Transposed> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: &T) { - self.slice_mut(..).sub_assign(rhs) + self.slice_mut(..) -= rhs } } impl SubAssign for Transposed> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.slice_mut(..).sub_assign(&rhs) + self.slice_mut(..) -= &rhs } } @@ -283,7 +282,7 @@ forward!(Transposed> { impl<'a, T> SubAssign for Transposed> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.0.sub_assign(&rhs) + self.0 -= &rhs } } @@ -297,6 +296,6 @@ forward!(SubMatMut<'a, T> { impl<'a, T> SubAssign for SubMatMut<'a, T> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.sub_assign(&rhs) + *self -= &rhs } } diff --git a/src/ops/sub_assign/row.rs b/src/ops/sub_assign/row.rs index 08eec6c2..dc57a233 100644 --- a/src/ops/sub_assign/row.rs +++ b/src/ops/sub_assign/row.rs @@ -1,6 +1,5 @@ -use std::ops::Neg; +use std::ops::{Neg, SubAssign}; -use assign::SubAssign; use blas::{Axpy, Gemm, Gemv}; use onezero::{One, Zero}; @@ -27,7 +26,7 @@ use {Chain, Product, Row, RowMut, RowVec, Scaled, Transposed, SubMat}; // `row <- row + row * mat === row^t <- row^t + mat^t * row^t` impl<'a, 'b, T> SubAssign<&'a T> for RowMut<'b, T> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: &T) { - self.slice_mut(..).t().sub_assign(rhs) + self.slice_mut(..).t() -= rhs } } @@ -35,7 +34,7 @@ impl<'a, 'b, T> SubAssign>> for RowMut<'b, T> where T: Axpy + Neg, { fn sub_assign(&mut self, rhs: Scaled>) { - self.slice_mut(..).t().sub_assign(rhs.t()) + self.slice_mut(..).t() -= rhs.t() } } @@ -43,7 +42,7 @@ impl<'a, 'b, 'c, T> SubAssign, Chain<'b, T>>>> for Row T: Gemm + Gemv + Neg + One + Zero, { fn sub_assign(&mut self, rhs: Scaled, Chain>>) { - self.slice_mut(..).t().sub_assign(rhs.t()) + self.slice_mut(..).t() -= rhs.t() } } @@ -52,7 +51,7 @@ SubAssign, Transposed>>>> for RowMut<'c, T: Gemv + Neg + One, { fn sub_assign(&mut self, rhs: Scaled, Transposed>>>) { - self.slice_mut(..).t().sub_assign(rhs.t()) + self.slice_mut(..).t() -= rhs.t() } } @@ -60,7 +59,7 @@ impl<'a, 'b, 'c, T> SubAssign, SubMat<'b, T>>>> for Ro T: Gemv + Neg + One, { fn sub_assign(&mut self, rhs: Scaled, SubMat>>) { - self.slice_mut(..).t().sub_assign(rhs.t()) + self.slice_mut(..).t() -= rhs.t() } } @@ -68,7 +67,7 @@ impl<'a, 'b, T> SubAssign> for RowMut<'b, T> where T: Axpy + Neg + One, { fn sub_assign(&mut self, rhs: Row) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -76,7 +75,7 @@ impl<'a, 'b, 'c, T> SubAssign, Chain<'b, T>>> for RowMut<'c, T: Gemm + Gemv + Neg + One + Zero, { fn sub_assign(&mut self, rhs: Product, Chain>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -85,7 +84,7 @@ SubAssign, Transposed>>> for RowMut<'c, T> wher T: Gemv + Neg + One, { fn sub_assign(&mut self, rhs: Product, Transposed>>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -93,7 +92,7 @@ impl<'a, 'b, 'c, T> SubAssign, SubMat<'b, T>>> for RowMut<'c, T: Gemv + Neg + One, { fn sub_assign(&mut self, rhs: Product, SubMat>) { - self.sub_assign(Scaled(T::one(), rhs)) + *self -= Scaled(T::one(), rhs) } } @@ -102,7 +101,7 @@ macro_rules! forward { $( impl<'a, 'b, 'c, T> SubAssign<$rhs> for $lhs where T: Neg, $(T: $bound),+ { fn sub_assign(&mut self, rhs: $rhs) { - self.slice_mut(..).sub_assign(rhs.slice(..)) + self.slice_mut(..) -= rhs.slice(..) 
} } )+ @@ -117,7 +116,7 @@ forward!(RowMut<'a, T> { impl<'a, T> SubAssign for RowMut<'a, T> where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.sub_assign(&rhs) + *self -= &rhs } } @@ -137,12 +136,12 @@ forward!(RowVec { impl<'a, T> SubAssign<&'a T> for RowVec where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: &T) { - self.slice_mut(..).sub_assign(rhs) + self.slice_mut(..) -= rhs } } impl SubAssign for RowVec where T: Axpy + Neg + One { fn sub_assign(&mut self, rhs: T) { - self.slice_mut(..).sub_assign(&rhs) + self.slice_mut(..) -= &rhs } } diff --git a/src/prelude.rs b/src/prelude.rs index 6b76e62c..11d66c66 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -6,36 +6,31 @@ pub use ColVec; pub use Mat; pub use RowVec; -pub use assign::AddAssign as __linalg_0; -pub use assign::DivAssign as __linalg_1; -pub use assign::MulAssign as __linalg_2; -pub use assign::SubAssign as __linalg_3; - -pub use traits::Eval as __linalg_4; -pub use traits::HSplit as __linalg_5; -pub use traits::HSplitMut as __linalg_6; -pub use traits::Iter as __linalg_7; -pub use traits::IterMut as __linalg_8; -pub use traits::Matrix as __linalg_9; -pub use traits::MatrixCol as __linalg_10; -pub use traits::MatrixColMut as __linalg_11; -pub use traits::MatrixCols as __linalg_12; -pub use traits::MatrixColsMut as __linalg_13; -pub use traits::MatrixDiag as __linalg_14; -pub use traits::MatrixDiagMut as __linalg_15; -pub use traits::MatrixHStripes as __linalg_16; -pub use traits::MatrixHStripesMut as __linalg_17; -pub use traits::MatrixInverse as __linalg_18; -pub use traits::MatrixRow as __linalg_19; -pub use traits::MatrixRowMut as __linalg_20; -pub use traits::MatrixRows as __linalg_21; -pub use traits::MatrixRowsMut as __linalg_22; -pub use traits::MatrixVStripes as __linalg_23; -pub use traits::MatrixVStripesMut as __linalg_24; -pub use traits::Norm as __linalg_25; -pub use traits::Set as __linalg_26; -pub use traits::Slice as __linalg_27; -pub use traits::SliceMut as __linalg_28; -pub use traits::Transpose as __linalg_29; -pub use traits::VSplit as __linalg_30; -pub use traits::VSplitMut as __linalg_31; +pub use traits::Eval as __linalg_0; +pub use traits::HSplit as __linalg_1; +pub use traits::HSplitMut as __linalg_2; +pub use traits::Iter as __linalg_3; +pub use traits::IterMut as __linalg_4; +pub use traits::Matrix as __linalg_5; +pub use traits::MatrixCol as __linalg_6; +pub use traits::MatrixColMut as __linalg_7; +pub use traits::MatrixCols as __linalg_8; +pub use traits::MatrixColsMut as __linalg_9; +pub use traits::MatrixDiag as __linalg_10; +pub use traits::MatrixDiagMut as __linalg_11; +pub use traits::MatrixHStripes as __linalg_12; +pub use traits::MatrixHStripesMut as __linalg_13; +pub use traits::MatrixInverse as __linalg_14; +pub use traits::MatrixRow as __linalg_15; +pub use traits::MatrixRowMut as __linalg_16; +pub use traits::MatrixRows as __linalg_17; +pub use traits::MatrixRowsMut as __linalg_18; +pub use traits::MatrixVStripes as __linalg_19; +pub use traits::MatrixVStripesMut as __linalg_20; +pub use traits::Norm as __linalg_21; +pub use traits::Set as __linalg_22; +pub use traits::Slice as __linalg_23; +pub use traits::SliceMut as __linalg_24; +pub use traits::Transpose as __linalg_25; +pub use traits::VSplit as __linalg_26; +pub use traits::VSplitMut as __linalg_27; diff --git a/tests/mul_assign_mat.rs b/tests/mul_assign_mat.rs index a7607418..22a77359 100644 --- a/tests/mul_assign_mat.rs +++ b/tests/mul_assign_mat.rs @@ -8,6 +8,7 @@ //! //! 
for any valid `i`, `j` +#![feature(augmented_assignments)] #![feature(custom_attribute)] #![feature(plugin)] #![plugin(quickcheck_macros)] @@ -47,7 +48,7 @@ mod nn { let b = ::setup::rand::mat::<$ty>((srow + nrows, scol + ncols)); let b = b.slice((srow.., scol..)); - a.mul_assign(b); + a *= b; test_approx_eq! { a[(row, col)], @@ -87,7 +88,7 @@ mod nt { let b = ::setup::rand::mat::<$ty>((srow + ncols, scol + nrows)); let b = b.slice((srow.., scol..)).t(); - a.mul_assign(b); + a *= b; test_approx_eq! { a[(row, col)], @@ -127,7 +128,7 @@ mod tn { let b = ::setup::rand::mat::<$ty>((srow + nrows, scol + ncols)); let b = b.slice((srow.., scol..)); - a.mul_assign(b); + a *= b; test_approx_eq! { a[(row, col)], @@ -167,7 +168,7 @@ mod tt { let b = ::setup::rand::mat::<$ty>((srow + ncols, scol + nrows)); let b = b.slice((srow.., scol..)).t(); - a.mul_assign(b); + a *= b; test_approx_eq! { a[(row, col)],
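
---

Note (not part of the patch): the change above drops the `assign` dependency and `extern crate assign;`, switches every `AddAssign`/`SubAssign`/`MulAssign`/`DivAssign` impl over to the `std::ops` traits, rewrites method-call sites such as `x.add_assign(y)` into operator form `x += y`, and gates the operator sugar behind `#![feature(augmented_assignments)]` (the prelude's `__linalg_N` re-export numbering shrinks by four because the `assign` trait re-exports disappear). On current Rust the `std::ops` assignment traits and the `+=`/`-=`/`*=`/`/=` operators are stable, so no feature gate is needed. The sketch below is only an illustration of the pattern the diff applies, using a hypothetical `Vec`-backed `Col` type rather than the crate's real BLAS-backed `ColVec`/`ColMut` types:

```rust
use std::ops::AddAssign;

/// Hypothetical stand-in for a column vector; the crate's real `ColVec<T>`
/// dispatches to BLAS (`Axpy`, `Scal`, ...) instead of looping over a `Vec`.
struct Col(Vec<f64>);

// Scalar case: `col += 1.0` adds the scalar to every element.
impl AddAssign<f64> for Col {
    fn add_assign(&mut self, rhs: f64) {
        for x in &mut self.0 {
            *x += rhs;
        }
    }
}

// Element-wise case: `col += &other` adds two columns of equal length.
impl<'a> AddAssign<&'a Col> for Col {
    fn add_assign(&mut self, rhs: &Col) {
        assert_eq!(self.0.len(), rhs.0.len(), "dimension mismatch");
        for (x, y) in self.0.iter_mut().zip(&rhs.0) {
            *x += *y;
        }
    }
}

fn main() {
    let mut a = Col(vec![1.0, 2.0, 3.0]);
    let b = Col(vec![10.0, 20.0, 30.0]);

    // With the std trait implemented, the compiler desugars `+=` into
    // `AddAssign::add_assign(&mut a, ...)` -- the same rewrite this diff
    // performs at every call site in the crate.
    a += 1.0;
    a += &b;

    assert_eq!(a.0, vec![12.0, 23.0, 34.0]);
}
```

The same shape applies to the other operators in the patch: implement `SubAssign`/`MulAssign`/`DivAssign` from `std::ops` and let `-=`, `*=`, `/=` replace the former explicit `sub_assign`/`mul_assign`/`div_assign` calls.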