Eigen 3.2.7
SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
41 namespace internal {
42 template<typename _Scalar, int _Options, typename _Index>
43 struct traits<SparseMatrix<_Scalar, _Options, _Index> >
44 {
45  typedef _Scalar Scalar;
46  typedef _Index Index;
47  typedef Sparse StorageKind;
48  typedef MatrixXpr XprKind;
49  enum {
50  RowsAtCompileTime = Dynamic,
51  ColsAtCompileTime = Dynamic,
52  MaxRowsAtCompileTime = Dynamic,
53  MaxColsAtCompileTime = Dynamic,
54  Flags = _Options | NestByRefBit | LvalueBit,
55  CoeffReadCost = NumTraits<Scalar>::ReadCost,
56  SupportedAccessPatterns = InnerRandomAccessPattern
57  };
58 };
59 
60 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
61 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
62 {
63  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
64  typedef typename nested<MatrixType>::type MatrixTypeNested;
65  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
66 
67  typedef _Scalar Scalar;
68  typedef Dense StorageKind;
69  typedef _Index Index;
70  typedef MatrixXpr XprKind;
71 
72  enum {
73  RowsAtCompileTime = Dynamic,
74  ColsAtCompileTime = 1,
75  MaxRowsAtCompileTime = Dynamic,
76  MaxColsAtCompileTime = 1,
77  Flags = 0,
78  CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
79  };
80 };
81 
82 } // end namespace internal
83 
84 template<typename _Scalar, int _Options, typename _Index>
85 class SparseMatrix
86  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
87 {
88  public:
89  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
90  EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
91  EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
92 
93  typedef MappedSparseMatrix<Scalar,Flags> Map;
94  using Base::IsRowMajor;
95  typedef internal::CompressedStorage<Scalar,Index> Storage;
96  enum {
97  Options = _Options
98  };
99 
100  protected:
101 
103 
104  Index m_outerSize;
105  Index m_innerSize;
106  Index* m_outerIndex;
107  Index* m_innerNonZeros; // optional, if null then the data is compressed
108  Storage m_data;
109 
110  Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
111  const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
112 
113  public:
114 
116  inline bool isCompressed() const { return m_innerNonZeros==0; }
117 
119  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
121  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
122 
124  inline Index innerSize() const { return m_innerSize; }
126  inline Index outerSize() const { return m_outerSize; }
127 
131  inline const Scalar* valuePtr() const { return &m_data.value(0); }
135  inline Scalar* valuePtr() { return &m_data.value(0); }
136 
140  inline const Index* innerIndexPtr() const { return &m_data.index(0); }
144  inline Index* innerIndexPtr() { return &m_data.index(0); }
145 
149  inline const Index* outerIndexPtr() const { return m_outerIndex; }
153  inline Index* outerIndexPtr() { return m_outerIndex; }
154 
158  inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
162  inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
163 
165  inline Storage& data() { return m_data; }
167  inline const Storage& data() const { return m_data; }
168 
171  inline Scalar coeff(Index row, Index col) const
172  {
173  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
174 
175  const Index outer = IsRowMajor ? row : col;
176  const Index inner = IsRowMajor ? col : row;
177  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
178  return m_data.atInRange(m_outerIndex[outer], end, inner);
179  }
180 
189  inline Scalar& coeffRef(Index row, Index col)
190  {
191  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
192 
193  const Index outer = IsRowMajor ? row : col;
194  const Index inner = IsRowMajor ? col : row;
195 
196  Index start = m_outerIndex[outer];
197  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
198  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
199  if(end<=start)
200  return insert(row,col);
201  const Index p = m_data.searchLowerIndex(start,end-1,inner);
202  if((p<end) && (m_data.index(p)==inner))
203  return m_data.value(p);
204  else
205  return insert(row,col);
206  }
207 
220  Scalar& insert(Index row, Index col)
221  {
222  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
223 
224  if(isCompressed())
225  {
226  reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
227  }
228  return insertUncompressed(row,col);
229  }
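 /* Random-insertion sketch for insert()/coeffRef() (not part of this header;
    the function name and sizes are illustrative, assuming Eigen 3.2.x).
    Reserving room per inner vector up front avoids repeated reallocations
    while insert() fills the matrix in uncompressed mode.

      #include <Eigen/Sparse>

      Eigen::SparseMatrix<double> buildExample(int rows, int cols)
      {
        Eigen::SparseMatrix<double> A(rows, cols);
        A.reserve(Eigen::VectorXi::Constant(cols, 4)); // expect ~4 nonzeros per column
        for(int j = 0; j < cols; ++j)
          A.insert(j % rows, j) = 1.0;                 // each (i,j) inserted at most once
        A.makeCompressed();                            // optional: back to compressed mode
        return A;
      }
 */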
230 
231  public:
232 
233  class InnerIterator;
234  class ReverseInnerIterator;
235 
237  inline void setZero()
238  {
239  m_data.clear();
240  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
241  if(m_innerNonZeros)
242  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
243  }
244 
246  inline Index nonZeros() const
247  {
248  if(m_innerNonZeros)
249  return innerNonZeros().sum();
250  return static_cast<Index>(m_data.size());
251  }
252 
256  inline void reserve(Index reserveSize)
257  {
258  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
259  m_data.reserve(reserveSize);
260  }
261 
262  #ifdef EIGEN_PARSED_BY_DOXYGEN
263 
266  template<class SizesType>
267  inline void reserve(const SizesType& reserveSizes);
268  #else
269  template<class SizesType>
270  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
271  {
272  EIGEN_UNUSED_VARIABLE(enableif);
273  reserveInnerVectors(reserveSizes);
274  }
275  template<class SizesType>
276  inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
277  #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
278  typename
279  #endif
280  SizesType::Scalar())
281  {
282  EIGEN_UNUSED_VARIABLE(enableif);
283  reserveInnerVectors(reserveSizes);
284  }
285  #endif // EIGEN_PARSED_BY_DOXYGEN
286  protected:
287  template<class SizesType>
288  inline void reserveInnerVectors(const SizesType& reserveSizes)
289  {
290  if(isCompressed())
291  {
292  std::size_t totalReserveSize = 0;
293  // turn the matrix into non-compressed mode
294  m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
295  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
296 
297  // temporarily use m_innerNonZeros to hold the new starting points.
298  Index* newOuterIndex = m_innerNonZeros;
299 
300  Index count = 0;
301  for(Index j=0; j<m_outerSize; ++j)
302  {
303  newOuterIndex[j] = count;
304  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
305  totalReserveSize += reserveSizes[j];
306  }
307  m_data.reserve(totalReserveSize);
308  Index previousOuterIndex = m_outerIndex[m_outerSize];
309  for(Index j=m_outerSize-1; j>=0; --j)
310  {
311  Index innerNNZ = previousOuterIndex - m_outerIndex[j];
312  for(Index i=innerNNZ-1; i>=0; --i)
313  {
314  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
315  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
316  }
317  previousOuterIndex = m_outerIndex[j];
318  m_outerIndex[j] = newOuterIndex[j];
319  m_innerNonZeros[j] = innerNNZ;
320  }
321  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
322 
323  m_data.resize(m_outerIndex[m_outerSize]);
324  }
325  else
326  {
327  Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
328  if (!newOuterIndex) internal::throw_std_bad_alloc();
329 
330  Index count = 0;
331  for(Index j=0; j<m_outerSize; ++j)
332  {
333  newOuterIndex[j] = count;
334  Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
335  Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
336  count += toReserve + m_innerNonZeros[j];
337  }
338  newOuterIndex[m_outerSize] = count;
339 
340  m_data.resize(count);
341  for(Index j=m_outerSize-1; j>=0; --j)
342  {
343  Index offset = newOuterIndex[j] - m_outerIndex[j];
344  if(offset>0)
345  {
346  Index innerNNZ = m_innerNonZeros[j];
347  for(Index i=innerNNZ-1; i>=0; --i)
348  {
349  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
350  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
351  }
352  }
353  }
354 
355  std::swap(m_outerIndex, newOuterIndex);
356  std::free(newOuterIndex);
357  }
358 
359  }
360  public:
361 
362  //--- low level purely coherent filling ---
363 
374  inline Scalar& insertBack(Index row, Index col)
375  {
376  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
377  }
378 
381  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
382  {
383  eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
384  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
385  Index p = m_outerIndex[outer+1];
386  ++m_outerIndex[outer+1];
387  m_data.append(0, inner);
388  return m_data.value(p);
389  }
390 
393  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
394  {
395  Index p = m_outerIndex[outer+1];
396  ++m_outerIndex[outer+1];
397  m_data.append(0, inner);
398  return m_data.value(p);
399  }
400 
403  inline void startVec(Index outer)
404  {
405  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
406  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
407  m_outerIndex[outer+1] = m_outerIndex[outer];
408  }
409 
413  inline void finalize()
414  {
415  if(isCompressed())
416  {
417  Index size = static_cast<Index>(m_data.size());
418  Index i = m_outerSize;
419  // find the last filled column
420  while (i>=0 && m_outerIndex[i]==0)
421  --i;
422  ++i;
423  while (i<=m_outerSize)
424  {
425  m_outerIndex[i] = size;
426  ++i;
427  }
428  }
429  }
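 /* Ordered low-level filling sketch for startVec()/insertBack()/finalize()
    (not part of this header; `A` is illustrative, assuming Eigen 3.2.x).
    Outer vectors must be started in increasing order and inner indices must
    be strictly increasing within each vector.

      Eigen::SparseMatrix<double> A(3,3);  // column-major: outer index = column
      A.reserve(4);
      A.startVec(0);
      A.insertBack(0,0) = 1.0;
      A.insertBack(2,0) = 2.0;             // row indices increasing within column 0
      A.startVec(1);
      A.insertBack(1,1) = 3.0;
      A.startVec(2);
      A.insertBack(1,2) = 4.0;
      A.finalize();                        // fill the trailing outer index entries
 */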
430 
431  //---
432 
433  template<typename InputIterators>
434  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
435 
436  void sumupDuplicates();
437 
438  //---
439 
442  Scalar& insertByOuterInner(Index j, Index i)
443  {
444  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
445  }
446 
449  void makeCompressed()
450  {
451  if(isCompressed())
452  return;
453 
454  Index oldStart = m_outerIndex[1];
455  m_outerIndex[1] = m_innerNonZeros[0];
456  for(Index j=1; j<m_outerSize; ++j)
457  {
458  Index nextOldStart = m_outerIndex[j+1];
459  Index offset = oldStart - m_outerIndex[j];
460  if(offset>0)
461  {
462  for(Index k=0; k<m_innerNonZeros[j]; ++k)
463  {
464  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
465  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
466  }
467  }
468  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
469  oldStart = nextOldStart;
470  }
471  std::free(m_innerNonZeros);
472  m_innerNonZeros = 0;
473  m_data.resize(m_outerIndex[m_outerSize]);
474  m_data.squeeze();
475  }
476 
478  void uncompress()
479  {
480  if(m_innerNonZeros != 0)
481  return;
482  m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
483  for (Index i = 0; i < m_outerSize; i++)
484  {
485  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
486  }
487  }
488 
490  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
491  {
492  prune(default_prunning_func(reference,epsilon));
493  }
494 
502  template<typename KeepFunc>
503  void prune(const KeepFunc& keep = KeepFunc())
504  {
505  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
506  // TODO also implement a unit test
507  makeCompressed();
508 
509  Index k = 0;
510  for(Index j=0; j<m_outerSize; ++j)
511  {
512  Index previousStart = m_outerIndex[j];
513  m_outerIndex[j] = k;
514  Index end = m_outerIndex[j+1];
515  for(Index i=previousStart; i<end; ++i)
516  {
517  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
518  {
519  m_data.value(k) = m_data.value(i);
520  m_data.index(k) = m_data.index(i);
521  ++k;
522  }
523  }
524  }
525  m_outerIndex[m_outerSize] = k;
526  m_data.resize(k,0);
527  }
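 /* Pruning sketch (not part of this header; `A` and the functor are
    illustrative, assuming Eigen 3.2.x). The scalar overload drops entries that
    are much smaller than `reference`; the functor overload keeps exactly the
    entries for which keep(row, col, value) returns true.

      #include <cmath>

      struct KeepAboveTol
      {
        bool operator()(int, int, double value) const
        { return std::abs(value) > 1e-12; }
      };

      A.prune(1.0);              // scalar form
      A.prune(KeepAboveTol());   // functor form
 */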
528 
532  void conservativeResize(Index rows, Index cols)
533  {
534  // No change
535  if (this->rows() == rows && this->cols() == cols) return;
536 
537  // If one dimension is null, then there is nothing to be preserved
538  if(rows==0 || cols==0) return resize(rows,cols);
539 
540  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
541  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
542  Index newInnerSize = IsRowMajor ? cols : rows;
543 
544  // Deals with inner non zeros
545  if (m_innerNonZeros)
546  {
547  // Resize m_innerNonZeros
548  Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
549  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
550  m_innerNonZeros = newInnerNonZeros;
551 
552  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
553  m_innerNonZeros[i] = 0;
554  }
555  else if (innerChange < 0)
556  {
557  // Inner size decreased: allocate a new m_innerNonZeros
558  m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
559  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
560  for(Index i = 0; i < m_outerSize; i++)
561  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
562  }
563 
564  // Change the m_innerNonZeros in case of a decrease of inner size
565  if (m_innerNonZeros && innerChange < 0)
566  {
567  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
568  {
569  Index &n = m_innerNonZeros[i];
570  Index start = m_outerIndex[i];
571  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
572  }
573  }
574 
575  m_innerSize = newInnerSize;
576 
577  // Re-allocate outer index structure if necessary
578  if (outerChange == 0)
579  return;
580 
581  Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
582  if (!newOuterIndex) internal::throw_std_bad_alloc();
583  m_outerIndex = newOuterIndex;
584  if (outerChange > 0)
585  {
586  Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
587  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
588  m_outerIndex[i] = last;
589  }
590  m_outerSize += outerChange;
591  }
592 
596  void resize(Index rows, Index cols)
597  {
598  const Index outerSize = IsRowMajor ? rows : cols;
599  m_innerSize = IsRowMajor ? cols : rows;
600  m_data.clear();
601  if (m_outerSize != outerSize || m_outerSize==0)
602  {
603  std::free(m_outerIndex);
604  m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
605  if (!m_outerIndex) internal::throw_std_bad_alloc();
606 
607  m_outerSize = outerSize;
608  }
609  if(m_innerNonZeros)
610  {
611  std::free(m_innerNonZeros);
612  m_innerNonZeros = 0;
613  }
614  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
615  }
616 
619  void resizeNonZeros(Index size)
620  {
621  // TODO remove this function
622  m_data.resize(size);
623  }
624 
626  const Diagonal<const SparseMatrix> diagonal() const { return *this; }
627 
629  inline SparseMatrix()
630  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
631  {
632  check_template_parameters();
633  resize(0, 0);
634  }
635 
637  inline SparseMatrix(Index rows, Index cols)
638  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
639  {
640  check_template_parameters();
641  resize(rows, cols);
642  }
643 
645  template<typename OtherDerived>
646  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
647  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
648  {
649  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
650  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
651  check_template_parameters();
652  *this = other.derived();
653  }
654 
656  template<typename OtherDerived, unsigned int UpLo>
657  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
658  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
659  {
660  check_template_parameters();
661  *this = other;
662  }
663 
665  inline SparseMatrix(const SparseMatrix& other)
666  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
667  {
668  check_template_parameters();
669  *this = other.derived();
670  }
671 
673  template<typename OtherDerived>
674  SparseMatrix(const ReturnByValue<OtherDerived>& other)
675  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
676  {
677  check_template_parameters();
678  initAssignment(other);
679  other.evalTo(*this);
680  }
681 
684  inline void swap(SparseMatrix& other)
685  {
686  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
687  std::swap(m_outerIndex, other.m_outerIndex);
688  std::swap(m_innerSize, other.m_innerSize);
689  std::swap(m_outerSize, other.m_outerSize);
690  std::swap(m_innerNonZeros, other.m_innerNonZeros);
691  m_data.swap(other.m_data);
692  }
693 
696  inline void setIdentity()
697  {
698  eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
699  this->m_data.resize(rows());
700  Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
701  Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
702  Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
703  std::free(m_innerNonZeros);
704  m_innerNonZeros = 0;
705  }
706  inline SparseMatrix& operator=(const SparseMatrix& other)
707  {
708  if (other.isRValue())
709  {
710  swap(other.const_cast_derived());
711  }
712  else if(this!=&other)
713  {
714  initAssignment(other);
715  if(other.isCompressed())
716  {
717  memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
718  m_data = other.m_data;
719  }
720  else
721  {
722  Base::operator=(other);
723  }
724  }
725  return *this;
726  }
727 
728  #ifndef EIGEN_PARSED_BY_DOXYGEN
729  template<typename Lhs, typename Rhs>
730  inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
731  { return Base::operator=(product); }
732 
733  template<typename OtherDerived>
734  inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
735  {
736  initAssignment(other);
737  return Base::operator=(other.derived());
738  }
739 
740  template<typename OtherDerived>
741  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
742  { return Base::operator=(other.derived()); }
743  #endif
744 
745  template<typename OtherDerived>
746  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
747 
748  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
749  {
750  EIGEN_DBG_SPARSE(
751  s << "Nonzero entries:\n";
752  if(m.isCompressed())
753  for (Index i=0; i<m.nonZeros(); ++i)
754  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
755  else
756  for (Index i=0; i<m.outerSize(); ++i)
757  {
758  Index p = m.m_outerIndex[i];
759  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
760  Index k=p;
761  for (; k<pe; ++k)
762  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
763  for (; k<m.m_outerIndex[i+1]; ++k)
764  s << "(_,_) ";
765  }
766  s << std::endl;
767  s << std::endl;
768  s << "Outer pointers:\n";
769  for (Index i=0; i<m.outerSize(); ++i)
770  s << m.m_outerIndex[i] << " ";
771  s << " $" << std::endl;
772  if(!m.isCompressed())
773  {
774  s << "Inner non zeros:\n";
775  for (Index i=0; i<m.outerSize(); ++i)
776  s << m.m_innerNonZeros[i] << " ";
777  s << " $" << std::endl;
778  }
779  s << std::endl;
780  );
781  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
782  return s;
783  }
784 
786  inline ~SparseMatrix()
787  {
788  std::free(m_outerIndex);
789  std::free(m_innerNonZeros);
790  }
791 
792 #ifndef EIGEN_PARSED_BY_DOXYGEN
793 
794  Scalar sum() const;
795 #endif
796 
797 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
798 # include EIGEN_SPARSEMATRIX_PLUGIN
799 # endif
800 
801 protected:
802 
803  template<typename Other>
804  void initAssignment(const Other& other)
805  {
806  resize(other.rows(), other.cols());
807  if(m_innerNonZeros)
808  {
809  std::free(m_innerNonZeros);
810  m_innerNonZeros = 0;
811  }
812  }
813 
816  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
817 
820  class SingletonVector
821  {
822  Index m_index;
823  Index m_value;
824  public:
825  typedef Index value_type;
826  SingletonVector(Index i, Index v)
827  : m_index(i), m_value(v)
828  {}
829 
830  Index operator[](Index i) const { return i==m_index ? m_value : 0; }
831  };
832 
835  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
836 
837 public:
840  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
841  {
842  const Index outer = IsRowMajor ? row : col;
843  const Index inner = IsRowMajor ? col : row;
844 
845  eigen_assert(!isCompressed());
846  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
847 
848  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
849  m_data.index(p) = inner;
850  return (m_data.value(p) = 0);
851  }
852 
853 private:
854  static void check_template_parameters()
855  {
856  EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
857  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
858  }
859 
860  struct default_prunning_func {
861  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
862  inline bool operator() (const Index&, const Index&, const Scalar& value) const
863  {
864  return !internal::isMuchSmallerThan(value, reference, epsilon);
865  }
866  Scalar reference;
867  RealScalar epsilon;
868  };
869 };
870 
871 template<typename Scalar, int _Options, typename _Index>
872 class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
873 {
874  public:
875  InnerIterator(const SparseMatrix& mat, Index outer)
876  : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
877  {
878  if(mat.isCompressed())
879  m_end = mat.m_outerIndex[outer+1];
880  else
881  m_end = m_id + mat.m_innerNonZeros[outer];
882  }
883 
884  inline InnerIterator& operator++() { m_id++; return *this; }
885 
886  inline const Scalar& value() const { return m_values[m_id]; }
887  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
888 
889  inline Index index() const { return m_indices[m_id]; }
890  inline Index outer() const { return m_outer; }
891  inline Index row() const { return IsRowMajor ? m_outer : index(); }
892  inline Index col() const { return IsRowMajor ? index() : m_outer; }
893 
894  inline operator bool() const { return (m_id < m_end); }
895 
896  protected:
897  const Scalar* m_values;
898  const Index* m_indices;
899  const Index m_outer;
900  Index m_id;
901  Index m_end;
902 };
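 /* Traversal sketch (not part of this header; `A` is illustrative, assuming
    Eigen 3.2.x). InnerIterator visits the stored entries of one inner vector:
    a column for column-major storage, a row for row-major storage.

      for(int k = 0; k < A.outerSize(); ++k)
        for(Eigen::SparseMatrix<double>::InnerIterator it(A, k); it; ++it)
          std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
 */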
903 
904 template<typename Scalar, int _Options, typename _Index>
905 class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
906 {
907  public:
908  ReverseInnerIterator(const SparseMatrix& mat, Index outer)
909  : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
910  {
911  if(mat.isCompressed())
912  m_id = mat.m_outerIndex[outer+1];
913  else
914  m_id = m_start + mat.m_innerNonZeros[outer];
915  }
916 
917  inline ReverseInnerIterator& operator--() { --m_id; return *this; }
918 
919  inline const Scalar& value() const { return m_values[m_id-1]; }
920  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
921 
922  inline Index index() const { return m_indices[m_id-1]; }
923  inline Index outer() const { return m_outer; }
924  inline Index row() const { return IsRowMajor ? m_outer : index(); }
925  inline Index col() const { return IsRowMajor ? index() : m_outer; }
926 
927  inline operator bool() const { return (m_id > m_start); }
928 
929  protected:
930  const Scalar* m_values;
931  const Index* m_indices;
932  const Index m_outer;
933  Index m_id;
934  const Index m_start;
935 };
936 
937 namespace internal {
938 
939 template<typename InputIterator, typename SparseMatrixType>
940 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
941 {
942  EIGEN_UNUSED_VARIABLE(Options);
943  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
944  typedef typename SparseMatrixType::Scalar Scalar;
945  typedef typename SparseMatrixType::Index Index;
946  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
947 
948  if(begin!=end)
949  {
950  // pass 1: count the nnz per inner-vector
951  Matrix<Index,Dynamic,1> wi(trMat.outerSize());
952  wi.setZero();
953  for(InputIterator it(begin); it!=end; ++it)
954  {
955  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
956  wi(IsRowMajor ? it->col() : it->row())++;
957  }
958 
959  // pass 2: insert all the elements into trMat
960  trMat.reserve(wi);
961  for(InputIterator it(begin); it!=end; ++it)
962  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
963 
964  // pass 3:
965  trMat.sumupDuplicates();
966  }
967 
968  // pass 4: transposed copy -> implicit sorting
969  mat = trMat;
970 }
971 
972 }
973 
974 
1012 template<typename Scalar, int _Options, typename _Index>
1013 template<typename InputIterators>
1014 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1015 {
1016  internal::set_from_triplets(begin, end, *this);
1017 }
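 /* Triplet-filling sketch (not part of this header; `rows`, `cols` and the
    triplet values are illustrative, assuming Eigen 3.2.x). Duplicated (i,j)
    pairs are summed by sumupDuplicates(), and the result is compressed.

      #include <Eigen/Sparse>
      #include <vector>

      std::vector<Eigen::Triplet<double> > triplets;
      triplets.push_back(Eigen::Triplet<double>(0, 0, 1.0));
      triplets.push_back(Eigen::Triplet<double>(2, 1, 3.0));
      triplets.push_back(Eigen::Triplet<double>(2, 1, 0.5));  // duplicate of (2,1): summed to 3.5

      Eigen::SparseMatrix<double> A(rows, cols);
      A.setFromTriplets(triplets.begin(), triplets.end());
 */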
1018 
1020 template<typename Scalar, int _Options, typename _Index>
1021 void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
1022 {
1023  eigen_assert(!isCompressed());
1024  // TODO, in practice we should be able to use m_innerNonZeros for that task
1025  Matrix<Index,Dynamic,1> wi(innerSize());
1026  wi.fill(-1);
1027  Index count = 0;
1028  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
1029  for(Index j=0; j<outerSize(); ++j)
1030  {
1031  Index start = count;
1032  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1033  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1034  {
1035  Index i = m_data.index(k);
1036  if(wi(i)>=start)
1037  {
1038  // we have already met this entry => accumulate it
1039  m_data.value(wi(i)) += m_data.value(k);
1040  }
1041  else
1042  {
1043  m_data.value(count) = m_data.value(k);
1044  m_data.index(count) = m_data.index(k);
1045  wi(i) = count;
1046  ++count;
1047  }
1048  }
1049  m_outerIndex[j] = start;
1050  }
1051  m_outerIndex[m_outerSize] = count;
1052 
1053  // turn the matrix into compressed form
1054  std::free(m_innerNonZeros);
1055  m_innerNonZeros = 0;
1056  m_data.resize(m_outerIndex[m_outerSize]);
1057 }
1058 
1059 template<typename Scalar, int _Options, typename _Index>
1060 template<typename OtherDerived>
1061 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
1062 {
1063  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1064  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1065 
1066  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
1067  if (needToTranspose)
1068  {
1069  // two passes algorithm:
1070  // 1 - compute the number of coeffs per dest inner vector
1071  // 2 - do the actual copy/eval
1072  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1073  typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
1074  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1075  OtherCopy otherCopy(other.derived());
1076 
1077  SparseMatrix dest(other.rows(),other.cols());
1078  Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
1079 
1080  // pass 1
1081  // FIXME the above copy could be merged with that pass
1082  for (Index j=0; j<otherCopy.outerSize(); ++j)
1083  for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
1084  ++dest.m_outerIndex[it.index()];
1085 
1086  // prefix sum
1087  Index count = 0;
1088  Matrix<Index,Dynamic,1> positions(dest.outerSize());
1089  for (Index j=0; j<dest.outerSize(); ++j)
1090  {
1091  Index tmp = dest.m_outerIndex[j];
1092  dest.m_outerIndex[j] = count;
1093  positions[j] = count;
1094  count += tmp;
1095  }
1096  dest.m_outerIndex[dest.outerSize()] = count;
1097  // alloc
1098  dest.m_data.resize(count);
1099  // pass 2
1100  for (Index j=0; j<otherCopy.outerSize(); ++j)
1101  {
1102  for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
1103  {
1104  Index pos = positions[it.index()]++;
1105  dest.m_data.index(pos) = j;
1106  dest.m_data.value(pos) = it.value();
1107  }
1108  }
1109  this->swap(dest);
1110  return *this;
1111  }
1112  else
1113  {
1114  if(other.isRValue())
1115  initAssignment(other.derived());
1116  // there is no special optimization
1117  return Base::operator=(other.derived());
1118  }
1119 }
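 /* Assignment sketch (not part of this header; `A` and its filling are
    illustrative, assuming Eigen 3.2.x). When source and destination storage
    orders differ, the two-pass transposed copy above is used, which also
    leaves the destination sorted and in compressed mode.

      Eigen::SparseMatrix<double, Eigen::ColMajor> A(100, 100);
      // ... fill A ...
      Eigen::SparseMatrix<double, Eigen::RowMajor> B = A;  // transposed two-pass copy
      Eigen::SparseMatrix<double, Eigen::ColMajor> C = B;  // and back
 */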
1120 
1121 template<typename _Scalar, int _Options, typename _Index>
1122 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
1123 {
1124  eigen_assert(!isCompressed());
1125 
1126  const Index outer = IsRowMajor ? row : col;
1127  const Index inner = IsRowMajor ? col : row;
1128 
1129  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1130  Index innerNNZ = m_innerNonZeros[outer];
1131  if(innerNNZ>=room)
1132  {
1133  // this inner vector is full, we need to reallocate the whole buffer :(
1134  reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
1135  }
1136 
1137  Index startId = m_outerIndex[outer];
1138  Index p = startId + m_innerNonZeros[outer];
1139  while ( (p > startId) && (m_data.index(p-1) > inner) )
1140  {
1141  m_data.index(p) = m_data.index(p-1);
1142  m_data.value(p) = m_data.value(p-1);
1143  --p;
1144  }
1145  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1146 
1147  m_innerNonZeros[outer]++;
1148 
1149  m_data.index(p) = inner;
1150  return (m_data.value(p) = 0);
1151 }
1152 
1153 template<typename _Scalar, int _Options, typename _Index>
1154 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
1155 {
1156  eigen_assert(isCompressed());
1157 
1158  const Index outer = IsRowMajor ? row : col;
1159  const Index inner = IsRowMajor ? col : row;
1160 
1161  Index previousOuter = outer;
1162  if (m_outerIndex[outer+1]==0)
1163  {
1164  // we start a new inner vector
1165  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1166  {
1167  m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
1168  --previousOuter;
1169  }
1170  m_outerIndex[outer+1] = m_outerIndex[outer];
1171  }
1172 
1173  // here we have to handle the tricky case where the outerIndex array
1174  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1175  // the 2nd inner vector...
1176  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1177  && (size_t(m_outerIndex[outer+1]) == m_data.size());
1178 
1179  size_t startId = m_outerIndex[outer];
1180  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
1181  size_t p = m_outerIndex[outer+1];
1182  ++m_outerIndex[outer+1];
1183 
1184  double reallocRatio = 1;
1185  if (m_data.allocatedSize()<=m_data.size())
1186  {
1187  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1188  if (m_data.size()==0)
1189  {
1190  m_data.reserve(32);
1191  }
1192  else
1193  {
1194  // we need to reallocate the data, to reduce multiple reallocations
1195  // we use a smart resize algorithm based on the current filling ratio
1196  // in addition, we use double to avoid integers overflows
1197  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1198  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1199  // furthermore we bound the realloc ratio to:
1200  // 1) reduce multiple minor reallocs when the matrix is almost filled
1201  // 2) avoid allocating too much memory when the matrix is almost empty
1202  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1203  }
1204  }
1205  m_data.resize(m_data.size()+1,reallocRatio);
1206 
1207  if (!isLastVec)
1208  {
1209  if (previousOuter==-1)
1210  {
1211  // oops wrong guess.
1212  // let's correct the outer offsets
1213  for (Index k=0; k<=(outer+1); ++k)
1214  m_outerIndex[k] = 0;
1215  Index k=outer+1;
1216  while(m_outerIndex[k]==0)
1217  m_outerIndex[k++] = 1;
1218  while (k<=m_outerSize && m_outerIndex[k]!=0)
1219  m_outerIndex[k++]++;
1220  p = 0;
1221  --k;
1222  k = m_outerIndex[k]-1;
1223  while (k>0)
1224  {
1225  m_data.index(k) = m_data.index(k-1);
1226  m_data.value(k) = m_data.value(k-1);
1227  k--;
1228  }
1229  }
1230  else
1231  {
1232  // we are not inserting into the last inner vec
1233  // update outer indices:
1234  Index j = outer+2;
1235  while (j<=m_outerSize && m_outerIndex[j]!=0)
1236  m_outerIndex[j++]++;
1237  --j;
1238  // shift data of last vecs:
1239  Index k = m_outerIndex[j]-1;
1240  while (k>=Index(p))
1241  {
1242  m_data.index(k) = m_data.index(k-1);
1243  m_data.value(k) = m_data.value(k-1);
1244  k--;
1245  }
1246  }
1247  }
1248 
1249  while ( (p > startId) && (m_data.index(p-1) > inner) )
1250  {
1251  m_data.index(p) = m_data.index(p-1);
1252  m_data.value(p) = m_data.value(p-1);
1253  --p;
1254  }
1255 
1256  m_data.index(p) = inner;
1257  return (m_data.value(p) = 0);
1258 }
1259 
1260 } // end namespace Eigen
1261 
1262 #endif // EIGEN_SPARSEMATRIX_H