BDCSVD.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
5 // research report written by Ming Gu and Stanley C. Eisenstat.
6 // The code variable names correspond to the names used in their
7 // report.
8 //
9 // Copyright (C) 2013 Gauthier Brun <[email protected]>
10 // Copyright (C) 2013 Nicolas Carre <[email protected]>
11 // Copyright (C) 2013 Jean Ceccato <[email protected]>
12 // Copyright (C) 2013 Pierre Zoppitelli <[email protected]>
13 //
14 // Source Code Form is subject to the terms of the Mozilla
15 // Public License v. 2.0. If a copy of the MPL was not distributed
16 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
17 
18 #ifndef EIGEN_BDCSVD_H
19 #define EIGEN_BDCSVD_H
20 
21 #define EPSILON 0.0000000000000001
22 
23 #define ALGOSWAP 32
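
// Note: EPSILON is the deflation threshold used in deflation() below, and ALGOSWAP is the
// block size under which divide() falls back to JacobiSVD (it can be changed with setSwitchSize()).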
24 
25 namespace Eigen {
// class Bidiagonal Divide and Conquer SVD
37 template<typename _MatrixType>
38 class BDCSVD : public SVDBase<_MatrixType>
39 {
40  typedef SVDBase<_MatrixType> Base;
41 
42 public:
43  using Base::rows;
44  using Base::cols;
45 
46  typedef _MatrixType MatrixType;
47  typedef typename MatrixType::Scalar Scalar;
48  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
49  typedef typename MatrixType::Index Index;
50  enum {
51  RowsAtCompileTime = MatrixType::RowsAtCompileTime,
52  ColsAtCompileTime = MatrixType::ColsAtCompileTime,
53  DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
54  MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
55  MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
56  MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
57  MatrixOptions = MatrixType::Options
58  };
59 
60  typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
61  MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
62  MatrixUType;
63  typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
64  MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
65  MatrixVType;
66  typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
67  typedef typename internal::plain_row_type<MatrixType>::type RowType;
68  typedef typename internal::plain_col_type<MatrixType>::type ColType;
69  typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX;
70  typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixXr;
71  typedef Matrix<RealScalar, Dynamic, 1> VectorType;
72 
78  BDCSVD()
79  : SVDBase<_MatrixType>::SVDBase(),
80  algoswap(ALGOSWAP)
81  {}
82 
83 
90  BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
91  : SVDBase<_MatrixType>::SVDBase(),
92  algoswap(ALGOSWAP)
93  {
94  allocate(rows, cols, computationOptions);
95  }
96 
107  BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
108  : SVDBase<_MatrixType>::SVDBase(),
109  algoswap(ALGOSWAP)
110  {
111  compute(matrix, computationOptions);
112  }
113 
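  // Typical usage, for example with a dynamic double matrix A:
  //   BDCSVD<MatrixXd> svd(A, ComputeFullU | ComputeFullV);
  //   MatrixXd U = svd.matrixU();             // left singular vectors
  //   MatrixXd V = svd.matrixV();             // right singular vectors
  //   VectorXd sigma = svd.singularValues();  // singular values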
114  ~BDCSVD()
115  {
116  }
127  SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
128 
135  SVDBase<MatrixType>& compute(const MatrixType& matrix)
136  {
137  return compute(matrix, this->m_computationOptions);
138  }
139 
140  void setSwitchSize(int s)
141  {
142  eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be at least 4");
143  algoswap = s;
144  }
145 
146 
156  template<typename Rhs>
157  inline const internal::solve_retval<BDCSVD, Rhs>
158  solve(const MatrixBase<Rhs>& b) const
159  {
160  eigen_assert(this->m_isInitialized && "BDCSVD is not initialized.");
161  eigen_assert(this->computeU() && this->computeV() &&
162  "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
163  return internal::solve_retval<BDCSVD, Rhs>(*this, b.derived());
164  }
165 
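  // For example, to get a least-squares solution of A x = b (both unitaries must have been
  // requested at construction; thin ones suffice):
  //   BDCSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
  //   VectorXd x = svd.solve(b);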
166 
167  const MatrixUType& matrixU() const
168  {
169  eigen_assert(this->m_isInitialized && "SVD is not initialized.");
170  if (isTranspose){
171  eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?");
172  return this->m_matrixV;
173  }
174  else
175  {
176  eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
177  return this->m_matrixU;
178  }
179 
180  }
181 
182 
183  const MatrixVType& matrixV() const
184  {
185  eigen_assert(this->m_isInitialized && "SVD is not initialized.");
186  if (isTranspose){
187  eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?");
188  return this->m_matrixU;
189  }
190  else
191  {
192  eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
193  return this->m_matrixV;
194  }
195  }
196 
197 private:
198  void allocate(Index rows, Index cols, unsigned int computationOptions);
199  void divide (Index firstCol, Index lastCol, Index firstRowW,
200  Index firstColW, Index shift);
201  void deflation43(Index firstCol, Index shift, Index i, Index size);
202  void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
203  void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
204  void copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX houseHolderV);
205 
206 protected:
207  MatrixXr m_naiveU, m_naiveV;
208  MatrixXr m_computed;
209  Index nRec;
210  int algoswap;
211  bool isTranspose, compU, compV;
212 
213 }; //end class BDCSVD
214 
215 
216 // Method to allocate and initialize matrices and attributes
217 template<typename MatrixType>
218 void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
219 {
220  isTranspose = (cols > rows);
221  if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
222  m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize );
223  if (isTranspose){
224  compU = this->computeU();
225  compV = this->computeV();
226  }
227  else
228  {
229  compV = this->computeU();
230  compU = this->computeV();
231  }
232  if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 );
233  else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 );
234 
235  if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize);
236 
237 
238  //should be changed for a cleaner implementation
239  if (isTranspose){
240  bool aux;
241  if (this->computeU()||this->computeV()){
242  aux = this->m_computeFullU;
243  this->m_computeFullU = this->m_computeFullV;
244  this->m_computeFullV = aux;
245  aux = this->m_computeThinU;
246  this->m_computeThinU = this->m_computeThinV;
247  this->m_computeThinV = aux;
248  }
249  }
250 }// end allocate
251 
252 // Method which computes the BDCSVD for integer matrices
253 template<>
254 SVDBase<Matrix<int, Dynamic, Dynamic> >&
255 BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
256  allocate(matrix.rows(), matrix.cols(), computationOptions);
257  this->m_nonzeroSingularValues = 0;
258  m_computed = Matrix<int, Dynamic, Dynamic>::Zero(rows(), cols());
259  for (int i=0; i<this->m_diagSize; i++) {
260  this->m_singularValues.coeffRef(i) = 0;
261  }
262  if (this->m_computeFullU) this->m_matrixU = Matrix<int, Dynamic, Dynamic>::Zero(rows(), rows());
263  if (this->m_computeFullV) this->m_matrixV = Matrix<int, Dynamic, Dynamic>::Zero(cols(), cols());
264  this->m_isInitialized = true;
265  return *this;
266 }
267 
268 
269 // Method which computes the BDCSVD
270 template<typename MatrixType>
271 SVDBase<MatrixType>&
272 BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
273 {
274  allocate(matrix.rows(), matrix.cols(), computationOptions);
275  using std::abs;
276 
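  // Outline: the input is first reduced to bidiagonal form (step 1), the SVD of that bidiagonal
  // matrix is computed by the recursive divide() (step 2), and the singular values and vectors
  // are then copied back and re-expressed through the Householder transformations (step 3).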
277  //**** step 1 Bidiagonalization
278  MatrixType copy;
279  if (isTranspose) copy = matrix.adjoint();
280  else copy = matrix;
281 
282  internal::UpperBidiagonalization<MatrixX > bid(copy);
283 
284  //**** step 2 Divide
285  // this is ugly and has to be redone (care of complex cast)
286  MatrixXr temp;
287  temp = bid.bidiagonal().toDenseMatrix().transpose();
288  m_computed.setZero();
289  for (int i=0; i<this->m_diagSize - 1; i++) {
290  m_computed(i, i) = temp(i, i);
291  m_computed(i + 1, i) = temp(i + 1, i);
292  }
293  m_computed(this->m_diagSize - 1, this->m_diagSize - 1) = temp(this->m_diagSize - 1, this->m_diagSize - 1);
294  divide(0, this->m_diagSize - 1, 0, 0, 0);
295 
296  //**** step 3 copy
297  for (int i=0; i<this->m_diagSize; i++) {
298  RealScalar a = abs(m_computed.coeff(i, i));
299  this->m_singularValues.coeffRef(i) = a;
300  if (a == 0){
301  this->m_nonzeroSingularValues = i;
302  break;
303  }
304  else if (i == this->m_diagSize - 1)
305  {
306  this->m_nonzeroSingularValues = i + 1;
307  break;
308  }
309  }
310  copyUV(m_naiveV, m_naiveU, bid.householderU(), bid.householderV());
311  this->m_isInitialized = true;
312  return *this;
313 }// end compute
314 
315 
316 template<typename MatrixType>
317 void BDCSVD<MatrixType>::copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV){
318  if (this->computeU()){
319  MatrixX temp = MatrixX::Zero(naiveU.rows(), naiveU.cols());
320  temp.real() = naiveU;
321  if (this->m_computeThinU){
322  this->m_matrixU = MatrixX::Identity(householderU.cols(), this->m_nonzeroSingularValues );
323  this->m_matrixU.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues) =
324  temp.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues);
325  this->m_matrixU = householderU * this->m_matrixU ;
326  }
327  else
328  {
329  this->m_matrixU = MatrixX::Identity(householderU.cols(), householderU.cols());
330  this->m_matrixU.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
331  this->m_matrixU = householderU * this->m_matrixU ;
332  }
333  }
334  if (this->computeV()){
335  MatrixX temp = MatrixX::Zero(naiveV.rows(), naiveV.cols());
336  temp.real() = naiveV;
337  if (this->m_computeThinV){
338  this->m_matrixV = MatrixX::Identity(householderV.cols(),this->m_nonzeroSingularValues );
339  this->m_matrixV.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues) =
340  temp.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues);
341  this->m_matrixV = householderV * this->m_matrixV ;
342  }
343  else
344  {
345  this->m_matrixV = MatrixX::Identity(householderV.cols(), householderV.cols());
346  this->m_matrixV.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
347  this->m_matrixV = householderV * this->m_matrixV;
348  }
349  }
350 }
351 
352 // The divide algorithm works "in place": we are always working on subsets of the same matrix. The divide method takes as arguments the
353 // location of the submatrix we are currently working on.
354 
355 //@param firstCol : The Index of the first column of the submatrix of m_computed and of m_naiveU;
356 //@param lastCol : The Index of the last column of the submatrix of m_computed and of m_naiveU;
357 // lastCol + 1 - firstCol is the size of the submatrix.
358 //@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
359 //@param firstColW : Same as firstRowW, but for the column.
360 //@param shift : Each time one takes the left submatrix, one must add 1 to the shift, because we want the last column of the U submatrix
361 // to become the first column (*coeff) and all the other columns to shift one place to the right. There are more details in the reference paper.
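//
// In outline: for a block of size n, divide() splits at k = n/2, recursively computes the SVD of
// the two halves (right half first, see the comment inside), then glues them back together through
// the scalars r0, c0 and s0 computed below, which build the first column of the combined problem.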
362 template<typename MatrixType>
363 void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
364  Index firstColW, Index shift)
365 {
366  // requires nbRows = nbCols + 1;
367  using std::pow;
368  using std::sqrt;
369  using std::abs;
370  const Index n = lastCol - firstCol + 1;
371  const Index k = n/2;
372  RealScalar alphaK;
373  RealScalar betaK;
374  RealScalar r0;
375  RealScalar lambda, phi, c0, s0;
376  MatrixXr l, f;
377  // For blocks smaller than algoswap we fall back to JacobiSVD, which is
378  // more efficient for small matrices.
379  if (n < algoswap){
380  JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n),
381  ComputeFullU | (ComputeFullV * compV)) ;
382  if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU();
383  else
384  {
385  m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0);
386  m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n);
387  }
388  if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV();
389  m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
390  for (int i=0; i<n; i++)
391  {
392  m_computed(firstCol + shift + i, firstCol + shift +i) = b.singularValues().coeffRef(i);
393  }
394  return;
395  }
396  // We use the divide and conquer algorithm
397  alphaK = m_computed(firstCol + k, firstCol + k);
398  betaK = m_computed(firstCol + k + 1, firstCol + k);
399  // The divides must be done in this order to get correct results: divide() changes the data inside the submatrices,
400  // and the divide of the right submatrix reads one column of the left submatrix. That is why we need to treat the
401  // right submatrix before the left one.
402  divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
403  divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
404  if (compU)
405  {
406  lambda = m_naiveU(firstCol + k, firstCol + k);
407  phi = m_naiveU(firstCol + k + 1, lastCol + 1);
408  }
409  else
410  {
411  lambda = m_naiveU(1, firstCol + k);
412  phi = m_naiveU(0, lastCol + 1);
413  }
414  r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda))
415  + abs(betaK * phi) * abs(betaK * phi));
416  if (compU)
417  {
418  l = m_naiveU.row(firstCol + k).segment(firstCol, k);
419  f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
420  }
421  else
422  {
423  l = m_naiveU.row(1).segment(firstCol, k);
424  f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
425  }
426  if (compV) m_naiveV(firstRowW+k, firstColW) = 1;
427  if (r0 == 0)
428  {
429  c0 = 1;
430  s0 = 0;
431  }
432  else
433  {
434  c0 = alphaK * lambda / r0;
435  s0 = betaK * phi / r0;
436  }
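  // In both cases c0^2 + s0^2 = 1, so the block below applies a Givens rotation that mixes the
  // columns q1 and q2 of m_naiveU into the new first and last columns of the merged block.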
437  if (compU)
438  {
439  MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
440  // we shift Q1 to the right
441  for (Index i = firstCol + k - 1; i >= firstCol; i--)
442  {
443  m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1);
444  }
445  // first column = q1 * c0
446  m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0);
447  // last column = q1 * - s0
448  m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0));
449  // first column = q2 * s0
450  m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) <<
451  m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0;
452  // q2 *= c0
453  m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
454  }
455  else
456  {
457  RealScalar q1 = (m_naiveU(0, firstCol + k));
458  // we shift Q1 to the right
459  for (Index i = firstCol + k - 1; i >= firstCol; i--)
460  {
461  m_naiveU(0, i + 1) = m_naiveU(0, i);
462  }
463  // first column = q1 * c0
464  m_naiveU(0, firstCol) = (q1 * c0);
465  // last column = q1 * - s0
466  m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
467  // first column = q2 * s0
468  m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
469  // q2 *= c0
470  m_naiveU(1, lastCol + 1) *= c0;
471  m_naiveU.row(1).segment(firstCol + 1, k).setZero();
472  m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
473  }
474  m_computed(firstCol + shift, firstCol + shift) = r0;
475  m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real();
476  m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real();
477 
478 
479  // the line below does the deflation of the matrix for the third part of the algorithm
480  // the third part of the algorithm is a fast SVD of the matrix m_computed, which works thanks to the deflation;
481  // since that fast SVD is not implemented yet, a JacobiSVD is used on the deflated matrix instead (see below)
482 
483  deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
484 
485  // Third part of the algorithm: since the real third part of the algorithm is not implemented, we use a JacobiSVD
486  JacobiSVD<MatrixXr> res= JacobiSVD<MatrixXr>(m_computed.block(firstCol + shift, firstCol +shift, n + 1, n),
487  ComputeFullU | (ComputeFullV * compV)) ;
488  if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= res.matrixU();
489  else m_naiveU.block(0, firstCol, 2, n + 1) *= res.matrixU();
490 
491  if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= res.matrixV();
492  m_computed.block(firstCol + shift, firstCol + shift, n, n) << MatrixXr::Zero(n, n);
493  for (int i=0; i<n; i++)
494  m_computed(firstCol + shift + i, firstCol + shift +i) = res.singularValues().coeffRef(i);
495  // end of the third part
496 
497 
498 }// end divide
499 
500 
501 // pages 12-13
502 // i >= 1, di almost null and zi non null.
503 // We apply a rotation on the left of M to zero out zi.
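// With c = M(1,1), s = M(i,1) and r = sqrt(c^2 + s^2), applying the rotation
// [ c/r  s/r ; -s/r  c/r ] on the left to rows 1 and i sends (c, s) to (r, 0):
// the leading entry becomes r and zi becomes 0, which is what the body below writes into m_computed.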
504 template <typename MatrixType>
505 void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size){
506  using std::abs;
507  using std::sqrt;
508  using std::pow;
509  RealScalar c = m_computed(firstCol + shift, firstCol + shift);
510  RealScalar s = m_computed(i, firstCol + shift);
511  RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
512  if (r == 0){
513  m_computed(i, i)=0;
514  return;
515  }
516  c/=r;
517  s/=r;
518  m_computed(firstCol + shift, firstCol + shift) = r;
519  m_computed(i, firstCol + shift) = 0;
520  m_computed(i, i) = 0;
521  if (compU){
522  m_naiveU.col(firstCol).segment(firstCol,size) =
523  c * m_naiveU.col(firstCol).segment(firstCol, size) -
524  s * m_naiveU.col(i).segment(firstCol, size) ;
525 
526  m_naiveU.col(i).segment(firstCol, size) =
527  (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) +
528  (s/c) * m_naiveU.col(firstCol).segment(firstCol,size);
529  }
530 }// end deflation 43
531 
532 
533 // page 13
534 // i,j >= 1, i != j and |di - dj| < epsilon * norm2(M)
535 // We apply two rotations to have zj = 0;
536 template <typename MatrixType>
537 void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){
538  using std::abs;
539  using std::sqrt;
540  using std::conj;
541  using std::pow;
542  RealScalar c = m_computed(firstColm, firstColm + j - 1);
543  RealScalar s = m_computed(firstColm, firstColm + i - 1);
544  RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
545  if (r==0){
546  m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
547  return;
548  }
549  c/=r;
550  s/=r;
551  m_computed(firstColm + i, firstColm) = r;
552  m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
553  m_computed(firstColm + j, firstColm) = 0;
554  if (compU){
555  m_naiveU.col(firstColu + i).segment(firstColu, size) =
556  c * m_naiveU.col(firstColu + i).segment(firstColu, size) -
557  s * m_naiveU.col(firstColu + j).segment(firstColu, size) ;
558 
559  m_naiveU.col(firstColu + j).segment(firstColu, size) =
560  (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) +
561  (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size);
562  }
563  if (compV){
564  m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) =
565  c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) +
566  s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ;
567 
568  m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) =
569  (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) -
570  (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1);
571  }
572 }// end deflation 44
573 
574 
575 
576 template <typename MatrixType>
577 void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){
578  //condition 4.1
579  RealScalar EPS = EPSILON * (std::max<RealScalar>(m_computed(firstCol + shift + 1, firstCol + shift + 1), m_computed(firstCol + k, firstCol + k)));
580  const Index length = lastCol + 1 - firstCol;
581  if (m_computed(firstCol + shift, firstCol + shift) < EPS){
582  m_computed(firstCol + shift, firstCol + shift) = EPS;
583  }
584  //condition 4.2
585  for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){
586  if (std::abs(m_computed(i, firstCol + shift)) < EPS){
587  m_computed(i, firstCol + shift) = 0;
588  }
589  }
590 
591  //condition 4.3
592  for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){
593  if (m_computed(i, i) < EPS){
594  deflation43(firstCol, shift, i, length);
595  }
596  }
597 
598  //condition 4.4
599 
600  Index i=firstCol + shift + 1, j=firstCol + shift + k + 1;
601  // we store the final place of each line
602  Index *permutation = new Index[length];
603 
604  for (Index p =1; p < length; p++) {
605  if (i> firstCol + shift + k){
606  permutation[p] = j;
607  j++;
608  } else if (j> lastCol + shift)
609  {
610  permutation[p] = i;
611  i++;
612  }
613  else
614  {
615  if (m_computed(i, i) < m_computed(j, j)){
616  permutation[p] = j;
617  j++;
618  }
619  else
620  {
621  permutation[p] = i;
622  i++;
623  }
624  }
625  }
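  // The loop above merges the two halves of the diagonal, at each step picking the larger of the
  // two current candidates, so that `permutation` records the order in which the entries should
  // be rearranged (largest first).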
626  //we do the permutation
627  RealScalar aux;
628  // we store the current index of each col
629  // and the column of each index
630  Index *realInd = new Index[length];
631  Index *realCol = new Index[length];
632  for (int pos = 0; pos< length; pos++){
633  realCol[pos] = pos + firstCol + shift;
634  realInd[pos] = pos;
635  }
636  const Index Zero = firstCol + shift;
637  VectorType temp;
638  for (int i = 1; i < length - 1; i++){
639  const Index I = i + Zero;
640  const Index realI = realInd[i];
641  const Index j = permutation[length - i] - Zero;
642  const Index J = realCol[j];
643 
644  //diag displace
645  aux = m_computed(I, I);
646  m_computed(I, I) = m_computed(J, J);
647  m_computed(J, J) = aux;
648 
649  //firstrow displace
650  aux = m_computed(I, Zero);
651  m_computed(I, Zero) = m_computed(J, Zero);
652  m_computed(J, Zero) = aux;
653 
654  // change columns
655  if (compU) {
656  temp = m_naiveU.col(I - shift).segment(firstCol, length + 1);
657  m_naiveU.col(I - shift).segment(firstCol, length + 1) <<
658  m_naiveU.col(J - shift).segment(firstCol, length + 1);
659  m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp;
660  }
661  else
662  {
663  temp = m_naiveU.col(I - shift).segment(0, 2);
664  m_naiveU.col(I - shift).segment(0, 2) <<
665  m_naiveU.col(J - shift).segment(0, 2);
666  m_naiveU.col(J - shift).segment(0, 2) << temp;
667  }
668  if (compV) {
669  const Index CWI = I + firstColW - Zero;
670  const Index CWJ = J + firstColW - Zero;
671  temp = m_naiveV.col(CWI).segment(firstRowW, length);
672  m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length);
673  m_naiveV.col(CWJ).segment(firstRowW, length) << temp;
674  }
675 
676  //update real pos
677  realCol[realI] = J;
678  realCol[j] = I;
679  realInd[J - Zero] = realI;
680  realInd[I - Zero] = j;
681  }
682  for (Index i = firstCol + shift + 1; i<lastCol + shift;i++){
683  if ((m_computed(i + 1, i + 1) - m_computed(i, i)) < EPS){
684  deflation44(firstCol ,
685  firstCol + shift,
686  firstRowW,
687  firstColW,
688  i - Zero,
689  i + 1 - Zero,
690  length);
691  }
692  }
693  delete [] permutation;
694  delete [] realInd;
695  delete [] realCol;
696 
697 }//end deflation
698 
699 
700 namespace internal{
701 
702 template<typename _MatrixType, typename Rhs>
703 struct solve_retval<BDCSVD<_MatrixType>, Rhs>
704  : solve_retval_base<BDCSVD<_MatrixType>, Rhs>
705 {
706  typedef BDCSVD<_MatrixType> BDCSVDType;
707  EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs)
708 
709  template<typename Dest> void evalTo(Dest& dst) const
710  {
711  eigen_assert(rhs().rows() == dec().rows());
712  // A = U S V^*
713  // So A^{ - 1} = V S^{ - 1} U^*
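  // Singular values equal to zero are "inverted" to zero below, so this effectively applies the
  // Moore-Penrose pseudo-inverse and returns a least-squares solution when A is not invertible.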
714  Index diagSize = (std::min)(dec().rows(), dec().cols());
715  typename BDCSVDType::SingularValuesType invertedSingVals(diagSize);
716  Index nonzeroSingVals = dec().nonzeroSingularValues();
717  invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
718  invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
719 
720  dst = dec().matrixV().leftCols(diagSize)
721  * invertedSingVals.asDiagonal()
722  * dec().matrixU().leftCols(diagSize).adjoint()
723  * rhs();
724  return;
725  }
726 };
727 
728 } //end namespace internal
729 
737 /*
738 template<typename Derived>
739 BDCSVD<typename MatrixBase<Derived>::PlainObject>
740 MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
741 {
742  return BDCSVD<PlainObject>(*this, computationOptions);
743 }
744 */
745 
746 } // end namespace Eigen
747 
748 #endif