*** empty log message ***

jiri 2010-01-07 16:10:12 +00:00
parent bd843de786
commit 5f5f0343a6
7 changed files with 517 additions and 27 deletions


@@ -18,7 +18,7 @@
//
//for autotools
//
#include "config.h"
//#include "config.h" //this would force the user of the library to have config.h
////////////////////////////////////////////////////////////////////////////
//LA traits classes and generally needed includes
@@ -219,6 +219,8 @@ static void copy(complex<C> *dest, complex<C> *src, unsigned int n) {memcpy(dest
static void clear(complex<C> *dest, unsigned int n) {memset(dest,0,n*sizeof(complex<C>));}
static void copyonwrite(complex<C> &x) {};
static void clearme(complex<C> &x) {x=0;};
static inline complex<C> conjugate(complex<C> &x) {return complex<C>(x.real(),-x.imag());};
static inline C realpart(complex<C> &x) {return x.real();}
};
//non-complex scalars
@@ -241,6 +243,8 @@ static void copy(C *dest, C *src, unsigned int n) {memcpy(dest,src,n*sizeof(C));
static void clear(C *dest, unsigned int n) {memset(dest,0,n*sizeof(C));}
static void copyonwrite(C &x) {};
static void clearme(C &x) {x=0;};
static inline C conjugate(C&x) {return x;};
static inline C realpart(C &x) {return x;}
};
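The conjugate()/realpart() helpers added above give generic code one spelling that works for both real and complex element types (for a real C they reduce to the identity). A minimal sketch of the intended use; the helper name conjugated_dot is hypothetical, and the local copy is needed because the traits take a non-const reference:

//illustrative sketch only, not part of this commit
template <typename T>
T conjugated_dot(T *x, T *y, unsigned int n)
{
T s = 0;
for(unsigned int i=0; i<n; ++i) {T xi = x[i]; s += LA_traits<T>::conjugate(xi) * y[i];}
return s;
}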
@@ -260,10 +264,10 @@ static inline bool bigger(const C &x, const C &y) {return x>y;} \
static inline bool smaller(const C &x, const C &y) {return x<y;} \
static inline normtype norm (const X<C> &x) {return x.norm();} \
static inline void axpy (X<C>&s, const X<C> &x, const C c) {s.axpy(c,x);} \
static void put(int fd, const C &x, bool dimensions=1, bool transp=0) {x.put(fd,dimensions,transp);} \
static void get(int fd, C &x, bool dimensions=1, bool transp=0) {x.get(fd,dimensions,transp);} \
static void multiput(unsigned int n,int fd, const C *x, bool dimensions=1) {for(unsigned int i=0; i<n; ++i) x[i].put(fd,dimensions);} \
static void multiget(unsigned int n,int fd, C *x, bool dimensions=1) {for(unsigned int i=0; i<n; ++i) x[i].get(fd,dimensions);} \
static void put(int fd, const X<C> &x, bool dimensions=1, bool transp=0) {x.put(fd,dimensions,transp);} \
static void get(int fd, X<C> &x, bool dimensions=1, bool transp=0) {x.get(fd,dimensions,transp);} \
static void multiput(unsigned int n,int fd, const X<C> *x, bool dimensions=1) {for(unsigned int i=0; i<n; ++i) x[i].put(fd,dimensions);} \
static void multiget(unsigned int n,int fd, X<C> *x, bool dimensions=1) {for(unsigned int i=0; i<n; ++i) x[i].get(fd,dimensions);} \
static void copy(C *dest, C *src, unsigned int n) {for(unsigned int i=0; i<n; ++i) dest[i]=src[i];} \
static void clear(C *dest, unsigned int n) {for(unsigned int i=0; i<n; ++i) dest[i].clear();}\
static void copyonwrite(X<C> &x) {x.copyonwrite();}\
@@ -292,10 +296,10 @@ static inline bool bigger(const C &x, const C &y) {return x>y;} \
static inline bool smaller(const C &x, const C &y) {return x<y;} \
static inline normtype norm (const X<C> &x) {return x.norm();} \
static inline void axpy (X<C>&s, const X<C> &x, const C c) {s.axpy(c,x);} \
static void put(int fd, const C &x, bool dimensions=1, bool transp=0) {x.put(fd,dimensions);} \
static void get(int fd, C &x, bool dimensions=1, bool transp=0) {x.get(fd,dimensions);} \
static void multiput(unsigned int n,int fd, const C *x, bool dimensions=1) {for(unsigned int i=0; i<n; ++i) x[i].put(fd,dimensions);} \
static void multiget(unsigned int n,int fd, C *x, bool dimensions=1) {for(unsigned int i=0; i<n; ++i) x[i].get(fd,dimensions);} \
static void put(int fd, const X<C> &x, bool dimensions=1, bool transp=0) {x.put(fd,dimensions);} \
static void get(int fd, X<C> &x, bool dimensions=1, bool transp=0) {x.get(fd,dimensions);} \
static void multiput(unsigned int n,int fd, const X<C> *x, bool dimensions=1) {for(unsigned int i=0; i<n; ++i) x[i].put(fd,dimensions);} \
static void multiget(unsigned int n,int fd, X<C> *x, bool dimensions=1) {for(unsigned int i=0; i<n; ++i) x[i].get(fd,dimensions);} \
static void copy(C *dest, C *src, unsigned int n) {for(unsigned int i=0; i<n; ++i) dest[i]=src[i];} \
static void clear(C *dest, unsigned int n) {for(unsigned int i=0; i<n; ++i) dest[i].clear();} \
static void copyonwrite(X<C> &x) {x.copyonwrite();} \
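The corrected X<C> signatures above matter for nested element types, where the stored element is itself an NRVec/NRMat/NRSMat. A hypothetical sketch (ordinary POSIX file descriptor handling, illustrative only):

//illustrative only: LA_traits<NRMat<double> >::put forwards to NRMat<double>::put,
//which is what e.g. the commented-out SparseSMat<NRMat<double> >::put instantiation
//further below would rely on
#include <fcntl.h>
#include <unistd.h>
void write_block(const NRMat<double> &block, const char *name)
{
int fd = open(name, O_WRONLY|O_CREAT|O_TRUNC, 0600);
LA_traits<NRMat<double> >::put(fd, block, true, false);
close(fd);
}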

mat.h

@@ -128,7 +128,9 @@ public:
const T trace() const;
//members concerning sparse matrix
const SparseSMat<T> operator*(const SparseSMat<T> &rhs) const;
explicit NRMat(const SparseMat<T> &rhs); // dense from sparse
explicit NRMat(const SparseSMat<T> &rhs); // dense from sparse
NRMat & operator+=(const SparseMat<T> &rhs);
NRMat & operator-=(const SparseMat<T> &rhs);
void gemm(const T &beta, const SparseMat<T> &a, const char transa, const NRMat &b, const char transb, const T &alpha);//this = alpha*op( A )*op( B ) + beta*this
@@ -146,6 +148,7 @@ public:
#include "vec.h"
#include "smat.h"
#include "sparsemat.h"
#include "sparsesmat.h"
namespace LA {
// ctors


@@ -949,6 +949,50 @@ return r;
}
//Cholesky interface
extern "C" void FORNAME(dpotrf)(const char *UPLO, const int *N, double *A, const int *LDA, int *INFO);
extern "C" void FORNAME(zpotrf)(const char *UPLO, const int *N, complex<double> *A, const int *LDA, int *INFO);
void cholesky(NRMat<double> &a, bool upper)
{
if(a.nrows()!=a.ncols()) laerror("matrix must be square in Cholesky");
int lda=a.ncols();
int n=a.nrows();
char uplo=upper?'U':'L';
int info;
a.copyonwrite();
FORNAME(dpotrf)(&uplo, &n, a, &lda, &info);
if(info) {std::cerr << "Lapack error "<<info<<std::endl; laerror("error in Cholesky");}
//zero the other triangle and switch to C array order
if(upper)
for(int i=0; i<n; ++i) for(int j=0; j<i; ++j) {a(j,i)=a(i,j); a(i,j)=0.;}
else
for(int i=0; i<n; ++i) for(int j=0; j<i; ++j) {a(i,j)=a(j,i); a(j,i)=0.;}
}
void cholesky(NRMat<complex<double> > &a, bool upper)
{
if(a.nrows()!=a.ncols()) laerror("matrix must be square in Cholesky");
int lda=a.ncols();
int n=a.nrows();
char uplo=upper?'U':'L';
int info;
a.copyonwrite();
a.transposeme();//switch to Fortran order
FORNAME(zpotrf)(&uplo, &n, a, &lda, &info);
if(info) {std::cerr << "Lapack error "<<info<<std::endl; laerror("error in Cholesky");}
//zero the other triangle and switch to C array order
if(upper)
for(int i=0; i<n; ++i) for(int j=0; j<i; ++j) {a(j,i)=a(i,j); a(i,j)=0.;}
else
for(int i=0; i<n; ++i) for(int j=0; j<i; ++j) {a(i,j)=a(j,i); a(j,i)=0.;}
}
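A minimal usage sketch for these wrappers (illustrative only, mirroring the tests added in t.cc below): build a symmetric positive definite matrix, factor it in place, and check the reconstruction.

//illustrative only
void cholesky_demo()
{
int n = 5;
NRMat<double> a(n, n);
a.randomize(1);
NRMat<double> m = a.transpose() * a; //symmetric positive (semi)definite
NRMat<double> l(m);
cholesky(l, 0); //in place, lower triangular factor
std::cout << "Error = " << (l * l.transpose() - m).norm() << std::endl;
}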
#ifdef obsolete
void gendiagonalize(NRMat<double> &a, NRVec<double> &w, NRMat<double> b, int n)


@@ -135,6 +135,10 @@ extern const NRMat< complex<double> > realmatrix (const NRMat<double>&);
extern const NRMat< complex<double> > imagmatrix (const NRMat<double>&);
extern const NRMat< complex<double> > complexmatrix (const NRMat<double>&, const NRMat<double>&);
//Cholesky decomposition
extern void cholesky(NRMat<double> &a, bool upper=1);
extern void cholesky(NRMat<complex<double> > &a, bool upper=1);
//inverse by means of linear solve, preserving rhs intact
template<typename T>
const NRMat<T> inverse(NRMat<T> a, T *det=0)


@@ -27,6 +27,45 @@
namespace LA {
//dense times sparse (not necessarily symmetric)
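//the product is accumulated as a sum of rank-1 updates: for each k, the k-th column of *this
//is combined (via otimes) with the k-th stored row of rhs and scattered into the result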
template <typename T>
const SparseSMat<T> NRMat<T>::operator*(const SparseSMat<T> &rhs) const
{
SparseSMat<T> r(nn,rhs.ncols());
if(mm!=rhs.nrows()) laerror("incompatible sizes in NRMat*SparseSMat");
for(SPMatindex k=0; k<nn; ++k) //summation loop
{
std::map<SPMatindex,T> * kl = rhs.line(k);
if(kl)
{
//gather the data
typename std::map<SPMatindex,T>::iterator p;
int i,j;
NRVec<T> kline(kl->size());
NRVec<SPMatindex> klineind(kl->size());
for(p=kl->begin(), i=0; p!=kl->end(); ++p,++i)
{
klineind[i] = p->first;
kline[i] = p->second;
}
NRVec<T> kcol = column(k);
//multiply
NRMat<T> prod=kcol.otimes(kline,false,1.);
//scatter the results
for(i=0; i<prod.nrows(); ++i) for(j=0; j<prod.ncols(); ++j)
r.add(i,klineind[j],prod(i,j),false);
}
}
r.simplify();
return r;
}
//matrix is assumed symmetric, no transposition, but conjugation may be applied
template <typename T>
void SparseSMat<T>::gemm(const T beta, const SparseSMat &a, const char transa, const SparseSMat &b, const char transb, const T alpha)
{
@@ -46,16 +85,14 @@ for(SPMatindex k=0; k<nn; ++k) //summation loop
//gather the data
typename std::map<SPMatindex,T>::iterator p;
int i,j;
for(p=a.v[k]->begin(), i=0; p!=a.v[k]->end(); ++p,++i)
{
ai[i] = p->first;
av[i] = p->second;
}
for(p=b.v[k]->begin(), i=0; p!=b.v[k]->end(); ++p,++i)
{
bi[i] = p->first;
bv[i] = p->second;
}
if(tolower(transa)=='c')
for(p=a.v[k]->begin(), i=0; p!=a.v[k]->end(); ++p,++i) { ai[i] = p->first; av[i] = LA_traits<T>::conjugate(p->second); }
else
for(p=a.v[k]->begin(), i=0; p!=a.v[k]->end(); ++p,++i) { ai[i] = p->first; av[i] = p->second; }
if(tolower(transb)=='c')
for(p=b.v[k]->begin(), i=0; p!=b.v[k]->end(); ++p,++i) { bi[i] = p->first; bv[i] = LA_traits<T>::conjugate(p->second); }
else
for(p=b.v[k]->begin(), i=0; p!=b.v[k]->end(); ++p,++i) { bi[i] = p->first; bv[i] = p->second; }
//make multiply via blas
NRMat<T> prod=av.otimes(bv,false,alpha);
@@ -96,8 +133,13 @@ copyonwrite();
for(SPMatindex i=0; i<nn; ++i) if(x.v[i])
{
if(!v[i]) v[i] = new std::map<SPMatindex,T>;
typename std::map<SPMatindex,T>::iterator p;
for(p=x.v[i]->begin(); p!=x.v[i]->end(); ++p) (*v[i])[p->first] = p->second * alpha;
typename std::map<SPMatindex,T>::iterator p,q;
for(p=x.v[i]->begin(); p!=x.v[i]->end(); ++p)
{
q=v[i]->find(p->first);
if(q!=v[i]->end()) q->second += p->second * alpha;
else (*v[i])[p->first] = p->second * alpha;
}
}
simplify();
}
@@ -165,22 +207,101 @@ for(SPMatindex i=0; i<nn; ++i)
return *this;
}
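//Frobenius norm of (*this - scalar*1); the scalar shift is applied to every diagonal position,
//including diagonal elements and whole lines that are not explicitly stored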
template <class T>
typename LA_traits<T>::normtype SparseSMat<T>::norm(const T scalar) const
{
typename LA_traits<T>::normtype sum=0;
for(SPMatindex i=0; i<nn; ++i)
if(v[i]) //line present
{
typename std::map<SPMatindex,T>::iterator p;
bool diagonal_present=false;
for(p=v[i]->begin(); p!=v[i]->end(); ++p) //loop over all existing elements
{
if(i==p->first) {diagonal_present=true; sum += LA_traits<T>::sqrabs(p->second - scalar);}
else sum += LA_traits<T>::sqrabs(p->second);
}
if(!diagonal_present) sum += LA_traits<T>::sqrabs(scalar); //there was zero on the diagonal
}
else sum += LA_traits<T>::sqrabs(scalar); //missing whole line, subtracted diagonal element contributes
return std::sqrt(sum);
}
//get diagonal, do not construct a new object, but store in existing one
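//if divide==true the input vector r is instead divided element-wise by the (nonzero) diagonal
//elements (useful e.g. for preconditioning) and NULL is returned; otherwise r receives the
//diagonal and a pointer to its data is returned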
template <class T>
const T* SparseSMat<T>::diagonalof(NRVec<T> &r, const bool divide, bool cache) const
{
if(nn!=r.size()) laerror("incompatible vector size in diagonalof()");
NRVec<T> *rr;
r.copyonwrite();
if(divide) {rr=new NRVec<T>(nn); *rr=(T)0;}
else {r=(T)0; rr=&r;}
for(SPMatindex i=0; i<nn; ++i)
if(v[i])
{
typename std::map<SPMatindex,T>::iterator p;
p= v[i]->find(i);
if(p!=v[i]->end()) (*rr)[i] += p->second;
}
if(divide)
{
for(unsigned int i=0; i<nn; ++i) if((*rr)[i]!=0.) r[i]/=(*rr)[i];
delete rr;
}
return divide?NULL:&r[0];
}
template <class T>
void SparseSMat<T>::get(int fd, bool dimen, bool transp) {
errno=0;
SPMatindex dim[2];
if(dimen) {
if(2*sizeof(SPMatindex)!=read(fd,&dim,2*sizeof(SPMatindex))) laerror("read() error in SparseSMat::get ");
if(dim[0]!=dim[1]) laerror("SparseSMat must be square (nonsquare read in ::get)");
resize(dim[0]);
}
else copyonwrite();
do {
if(2*sizeof(SPMatindex)!=read(fd,&dim,2*sizeof(SPMatindex))) laerror("read() error 2 in SparseSMat::get");
if(dim[0]==(SPMatindex) -1 || dim[1]==(SPMatindex) -1) break;
typename LA_traits_io<T>::IOtype tmp;
LA_traits<T>::get(fd,tmp,dimen,transp); // generic path, works also when the element type is itself a non-scalar class
if(transp) add(dim[0],dim[1],tmp,false); else add(dim[1],dim[0],tmp,false);
}
while(1);
}
template <class T>
void SparseSMat<T>::put(int fd, bool dimen, bool transp) const {
errno=0;
if(dimen) {
if(sizeof(SPMatindex)!=write(fd,&nn,sizeof(SPMatindex))) laerror("cannot write() 1 in SparseSMat::put");
if(sizeof(SPMatindex)!=write(fd,&nn,sizeof(SPMatindex))) laerror("cannot write() 2 in SparseSMat::put");
}
typename SparseSMat<T>::iterator p(*this);
for(; p.notend(); ++p) {
if(sizeof(SPMatindex)!=write(fd,&(p->row),sizeof(SPMatindex))) laerror("cannot write() 3 in SparseSMat::put");
if(sizeof(SPMatindex)!=write(fd,&(p->col),sizeof(SPMatindex))) laerror("cannot write() 4 in SparseSMat::put");
typename LA_traits_io<T>::IOtype tmp = p->elem;
LA_traits<T>::put(fd,tmp,dimen,transp); // generic path, works also when the element type is itself a non-scalar class
}
SPMatindex sentinel[2];
sentinel[0] = sentinel[1] = (SPMatindex) -1;
if(2*sizeof(SPMatindex) != write(fd,&sentinel,2*sizeof(SPMatindex))) laerror("cannot write() 5 in SparseSMat::put");
}
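A small round-trip sketch of the stream format used by put()/get() above (illustrative only): put() writes the two dimensions, then one (row, column, element) record per stored entry, and finally a (-1,-1) sentinel on which get() stops reading.

//illustrative only, not part of this commit
#include <fcntl.h>
#include <unistd.h>
void roundtrip(const SparseSMat<double> &a, SparseSMat<double> &b, const char *name)
{
int fd = open(name, O_RDWR|O_CREAT|O_TRUNC, 0600);
a.put(fd, true, false); //dimensions, records, sentinel
lseek(fd, 0, SEEK_SET);
b.get(fd, true, false); //resizes b from the stored dimensions and adds the records
close(fd);
}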
@@ -195,6 +316,9 @@ template SparseSMat<T> & SparseSMat<T>::operator=(const T &a); \
template SparseSMat<T> & SparseSMat<T>::operator+=(const T &a); \
template SparseSMat<T> & SparseSMat<T>::operator-=(const T &a); \
template LA_traits<T>::normtype SparseSMat<T>::norm(const T scalar) const; \
template const T* SparseSMat<T>::diagonalof(NRVec<T> &r, const bool divide, bool cache) const; \
template void SparseSMat<T>::get(int fd, bool dimen, bool transp); \
template void SparseSMat<T>::put(int fd, bool dimen, bool transp) const; \
INSTANTIZE(double)
@@ -205,4 +329,10 @@ INSTANTIZE(complex<double>)
template class SparseSMat<double>;
template class SparseSMat<complex<double> >;
/*activate this if needed
template void SparseSMat<NRMat<double> >::put(int fd, bool dimen, bool transp) const;
template void SparseSMat<NRMat<double> >::get(int fd, bool dimen, bool transp);
*/
}//namespace


@@ -31,10 +31,13 @@
#include "vec.h"
#include "mat.h"
#include "smat.h"
#include "qsort.h"
#include <map>
#include <list>
#define CHOLESKYEPSILON 1e-16
namespace LA {
//symmetric sparse matrix class with a representation for efficient exponentiation
@@ -55,9 +58,11 @@ public:
SparseSMat(const SparseSMat &rhs);
explicit SparseSMat(const SparseMat<T> &rhs);
explicit SparseSMat(const NRSMat<T> &rhs);
explicit SparseSMat(const NRMat<T> &rhs);
SparseSMat & operator=(const SparseSMat &rhs);
void copyonwrite();
void resize(const SPMatindex n);
std::map<SPMatindex,T> *line(SPMatindex n) const {return v[n];};
void clear() {resize(nn);}
unsigned long long simplify();
~SparseSMat();
@@ -74,15 +79,20 @@ public:
void axpy(const T alpha, const SparseSMat &x, const bool transp=0); // this+= a*x
inline SparseSMat & operator+=(const SparseSMat &rhs) {axpy((T)1,rhs); return *this;};
inline SparseSMat & operator-=(const SparseSMat &rhs) {axpy((T)-1,rhs); return *this;};
const T* diagonalof(NRVec<T> &, const bool divide=0, bool cache=false) const; //get diagonal
void gemv(const T beta, NRVec<T> &r, const char trans, const T alpha, const NRVec<T> &x) const;
inline const NRVec<T> operator*(const NRVec<T> &rhs) const {NRVec<T> result(nn); this->gemv((T)0,result,'n',(T)1,rhs); return result;};
typename LA_traits<T>::normtype norm(const T scalar=(T)0) const;
inline const SparseSMat operator*(const SparseSMat &rhs) const {SparseSMat<T> r(nn); r.gemm(0,*this,'n',rhs,'n',1); return r;}; //!!!NOT A GENERAL ROUTINE, JUST FOR THE CASES WHEN THE RESULT STAYS SYMMETRIC
void gemm(const T beta, const SparseSMat &a, const char transa, const SparseSMat &b, const char transb, const T alpha); //this := alpha*op( A )*op( B ) + beta*this !!!NOT A GENERAL ROUTINE, JUST FOR THE CASES WHEN THE RESULT STAYS SYMMETRIC
inline void add(const SPMatindex n, const SPMatindex m, const T elem, const bool both=true);
inline unsigned long long length() {return simplify();};
void transposeme() const {};
void get(int fd, bool dimen, bool transp);
void put(int fd, bool dimen, bool transp) const;
int nrows() const {return nn;}
int ncols() const {return nn;}
SparseSMat<T> cholesky(void) const;
class iterator {//not efficient, just for output to ostreams
private:
@@ -145,6 +155,7 @@ public:
iterator end() const {return iterator(NULL);}
};
template <typename T>
SparseSMat<T>::SparseSMat(const SPMatindex n)
:nn(n),
@@ -165,6 +176,19 @@ int i,j;
for(i=0; i<nn; ++i) for(j=0; j<=i; ++j) if(std::abs(rhs(i,j))>SPARSEEPSILON) (*this).add(i,j,rhs(i,j),true);
}
template <typename T>
SparseSMat<T>::SparseSMat(const NRMat<T> &rhs)
:nn(rhs.nrows()),
count(new int(1))
{
if(rhs.nrows()!=rhs.ncols()) laerror("non-square matrix in SparseSMat constructor from NRMat");
v= new std::map<SPMatindex,T> * [nn];
memset(v,0,nn*sizeof(std::map<SPMatindex,T> *));
int i,j;
for(i=0; i<nn; ++i) for(j=0; j<nn; ++j) if(std::abs(rhs(i,j))>SPARSEEPSILON) (*this).add(i,j,rhs(i,j),false);
}
template <typename T>
SparseSMat<T>::SparseSMat(const SparseSMat &rhs)
{
@@ -189,6 +213,27 @@ for(; p.notend(); ++p) if(p->row <= p->col) (*this)(p->row,p->col)=p->elem;
#undef nn2
//construct dense from sparse
template <typename T>
NRMat<T>::NRMat(const SparseSMat<T> &rhs) :
nn(rhs.nrows()),
mm(rhs.ncols()),
count(new int(1))
{
#ifdef MATPTR
v = new T*[nn];
v[0] = new T[mm*nn];
for (int i=1; i<nn; i++) v[i] = v[i-1] + mm;
#else
v = new T[mm*nn];
#endif
memset(&(*this)(0,0),0,mm*nn*sizeof(T));
typename SparseSMat<T>::iterator p(rhs);
for(; p.notend(); ++p) (*this)(p->row,p->col)= p->elem;
}
template <typename T>
SparseSMat<T>::~SparseSMat()
{
@@ -360,5 +405,133 @@ std::istream& operator>>(std::istream &s, SparseSMat<T> &x)
}
//Cholesky decomposition, pivoted, positive semidefinite, not in place
//it is NOT checked that the input matrix is symmetric/hermitian
//result.transpose(true)*result reproduces the original matrix
//due to the pivoting, the result is upper triangular only before the back-permutation
//
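//outline: (1) sort the diagonal to obtain the pivot permutation, (2) copy-permute the upper
//triangle accordingly, (3) right-looking Cholesky on the permuted matrix, skipping pivots
//below CHOLESKYEPSILON and dropping fill-in below SPARSEEPSILON, (4) map the column indices
//of the factor back through the pivot permutation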
template <typename T>
SparseSMat<T> SparseSMat<T>::cholesky(void) const
{
//we need real values for sorting; if T is already real this just makes one unnecessary copy of a vector
NRVec<typename LA_traits<T>::normtype> diagreal(nn);
{
NRVec<T> diag(nn); diagonalof(diag);
for(SPMatindex i=0; i<nn; ++i) diagreal[i]=LA_traits<T>::realpart(diag[i]);
}
NRVec<int> pivot(nn);
for(int i=0; i<nn; ++i) pivot[i]=i;
//pivot by sorting
//!note this is not a fully correct approach, since the pivoting should be done during the Cholesky process itself
//as it is, some elements may vanish during the factorization while others remain nonzero later
diagreal.sort(1,0,nn-1,pivot);
//prepare inverse permutation
NRVec<int> invpivot(nn);
for(int i=0; i<nn; ++i) invpivot[pivot[i]]=i;
//std::cout <<"sorted diagonal\n"<<diagreal;
//std::cout <<"pivot\n"<<pivot;
//copy-permute upper triangle
SparseSMat<T> r;
r.nn=nn;
r.count = new int(1);
r.v = new std::map<SPMatindex,T> * [nn];
for(SPMatindex i=0; i<nn; ++i)
if(v[pivot[i]])
{
r.v[i]= new typename std::map<SPMatindex,T>;
typename std::map<SPMatindex,T>::iterator p;
for(p=v[pivot[i]]->begin(); p!=v[pivot[i]]->end(); ++p)
{
if(invpivot[p->first] >= i)
{
(*r.v[i])[invpivot[p->first]] = p->second;
}
}
}
else
r.v[i]= NULL;
//std::cout <<"Permuted upper triangle matrix\n"<<r;
//SparseSMat<T> r0(r);r.copyonwrite();
//perform complex, positive semidefinite Cholesky with thresholding by SPARSEEPSILON
SPMatindex i,j,k;
int rank=0;
for(k=0; k<nn; ++k)
if(r.v[k])
{
typename std::map<SPMatindex,T>::iterator p;
p= r.v[k]->find(k);
if(p==r.v[k]->end()) continue; //must not break due to the a priori pivoting
if(LA_traits<T>::realpart(p->second) < CHOLESKYEPSILON) continue; //must not break due to the a priori pivoting
++rank;
typename LA_traits<T>::normtype tmp = std::sqrt(LA_traits<T>::realpart(p->second));
p->second = tmp;
NRVec<T> linek(0.,nn);
for(p=r.v[k]->begin(); p!=r.v[k]->end(); ++p)
{
if(p->first > k) p->second /= tmp;
linek[p->first] = p->second;
}
for(j=k+1; j<nn; ++j)
if(r.v[j])
{
T akj = LA_traits<T>::conjugate(linek[j]);
NRVec<int> linek_used(0,nn);
if(std::abs(akj)>SPARSEEPSILON)
{
for(p=r.v[j]->begin(); p!=r.v[j]->end(); ++p)
{
p->second -= akj * linek[p->first];
linek_used[p->first]=1;
}
//subtract also elements nonzero in line k but non-existent in line j
for(i=j; i<nn; ++i)
if(!linek_used[i] && std::abs(linek[i]) > SPARSEEPSILON)
{
(*r.v[j])[i] = -akj * linek[i];
}
}
}
}
/*
SparseSMat<T> br(nn);
br.gemm(0,r,'c',r,'n',1);
//cancel low triangle from br
for(k=0; k<nn; ++k)
if(br.v[k])
{
typename std::map<SPMatindex,T>::iterator p;
for(p=br.v[k]->begin(); p!=br.v[k]->end(); ++p)
if(p->first <k) p->second=0.;
}
std::cout << "Error before permute = " <<(br-r0).norm()<<std::endl;
*/
//permute the result back;
for(k=0; k<nn; ++k)
if(r.v[k])
{
typename std::map<SPMatindex,T>::iterator p;
typename std::map<SPMatindex,T> *vnew = new typename std::map<SPMatindex,T>;
for(p=r.v[k]->begin(); p!=r.v[k]->end(); ++p)
{
if(std::abs(p->second) > SPARSEEPSILON) (*vnew)[pivot[p->first]] = p->second;
}
delete r.v[k];
r.v[k]=vnew;
}
return r;
}
}//namespace
#endif //_SPARSESMAT_H_

t.cc

@@ -1424,7 +1424,7 @@ NRMat<double> r2(rx);
cout <<"Error = "<<(r2-rd).norm()<<endl;
}
if(1)
if(0)
{
SparseSMat<complex<double> > h;
cin>>h;
@@ -1442,5 +1442,137 @@ cout <<"errorx = "<<(r2-NRSMat<complex<double> >(r)).norm()<<endl;
}
}
if(0)
{
int n;
cin >>n;
NRMat<double> a(n,n);
a.randomize(1);
for(int i=0; i<n; ++i) for(int j=0; j<i; ++j) {a(i,j)=0.;}
cout <<a;
NRMat<double> bb=a.transpose()*a;
NRMat<double> cc(bb);
NRMat<double> b(bb);
NRMat<double> c(cc);
cholesky(b,0);
cout <<b;
cout << "Error = "<<(b*b.transpose()-bb).norm()<<endl;
cholesky(c,1);
cout <<c;
cout << "Error = "<<(c.transpose()*c-cc).norm()<<endl;
}
if(0)
{
int n;
cin >>n;
NRMat<complex<double> > a(n,n);
a.randomize(1);
for(int i=0; i<n; ++i) for(int j=0; j<i; ++j) {a(i,j)=0.;}
for(int i=0; i<n; ++i) {a(i,i).imag()=0.; if(a(i,i).real()<0) a(i,i).real() *= -1;}
cout <<a;
NRMat<complex<double> > bb=a.transpose(true)*a;
NRMat<complex<double> > cc(bb);
NRMat<complex<double> > b(bb);
NRMat<complex<double> > c(cc);
cholesky(b,0);
cout <<b;
cout << "Error = "<<(b*b.transpose(true)-bb).norm()<<endl;
cholesky(c,1);
cout <<c;
cout << "Error = "<<(c.transpose(true)*c-cc).norm()<<endl;
}
if(0)
{
int n;
cin >>n;
NRMat<double> bb(0.,n,n);
int nn=0;
for(int i=0; i<n; ++i) for(int j=i; j<n; ++j) {if((double)random()/RAND_MAX>0.995 || i==j) {++nn; bb(i,j)=bb(j,i)=(double)random()/RAND_MAX*(i==j?10.:.5/(i+j)*(random()>RAND_MAX/2?1:-1));}}
bb=bb*bb.transpose();
//cout <<bb;
nn=0;
for(int i=0; i<n; ++i) for(int j=i; j<n; ++j) if(abs(bb(i,j))>1e-16 || abs(bb(j,i))>1e-16 ) ++nn;
cout <<"Original filling = "<<nn*2./n/(n+1)<<endl;
NRMat<double> b(bb);
cholesky(b,0);
//cout <<b;
cout << "Error = "<<(b*b.transpose()-bb).norm()<<endl;
nn=0;
for(int i=0; i<n; ++i) for(int j=i; j<n; ++j) if(abs(b(i,j))>1e-16 || abs(b(j,i))>1e-16 ) ++nn;
cout <<"Cholesky factor filling = "<<nn*2./n/(n+1)<<endl;
}
if(0)
{
int n;
cin >>n;
NRMat<double> a(n,n);
a.randomize(1);
for(int i=0; i<n; ++i) for(int j=0; j<i; ++j) {a(i,j)=0.;}
cout <<a;
NRMat<double> bb=a.transpose()*a;
SparseSMat<double> cc(bb);
NRMat<double> b(bb);
cholesky(b,0);
cout <<b;
SparseSMat<double> c;
c= cc.cholesky();
cout <<c;
}
if(0)
{
int n;
cin >>n;
NRMat<complex<double> > a(n,n);
a.randomize(1);
for(int i=0; i<n; ++i) for(int j=0; j<i; ++j) {a(i,j)=0.;}
for(int i=0; i<n; ++i) {a(i,i).imag() = 0.; if(a(i,i).real()<0) a(i,i).real() *= -10; else a(i,i).real() *= 10.;}
if(n<100)cout <<a;
NRMat<complex<double> > bb=a.transpose(true)*a;
SparseSMat<complex<double> > cc(bb);
if(n<100)cout <<"Input matrix\n"<<bb;
NRMat<complex<double> > b(bb);
//cholesky(b,0);
//if(n<100)cout <<"Dense Cholesky result\n"<<b;
SparseSMat<complex<double> > c;
c= cc.cholesky();
NRMat<complex<double> > cx(c);
if(n<100)cout <<"Sparse pivoted Cholesky result \n"<<cx;
if(n<100)cout <<"result of reconstruction\n"<<cx.transpose(true)*cx<<endl;
cout <<"Error = "<<(cx.transpose(true)*cx -bb).norm()<<endl;
}
if(1)
{
int n;
cin >>n;
SparseSMat<double> bh(n);
for(int i=0; i<=n/400; ++i) for(int j=i; j<n; ++j) {if((double)random()/RAND_MAX>0.995 || i==j)
{bh.add(i,j,(double)random()/RAND_MAX*(i==j?10.:(random()>RAND_MAX/2?1:-1)),false);}}
if(n<1000) cout <<"Random matrix\n"<<bh;
SparseSMat<double> bb(n);
bb.gemm(0.,bh,'c',bh,'n',1.);
if(n<1000) cout <<"Input matrix\n"<<bb;
cout <<"Original filling = "<<bb.simplify()<<endl;
SparseSMat<double> b = bb.cholesky();
if(n<1000) cout <<"Cholesky result\n"<<b;
SparseSMat<double> br(n);
br.gemm(0.,b,'c',b,'n',1.);
if(n<1000) cout <<"Result of reconstruction\n"<<br;
if(n<1000) cout <<"Difference\n"<<br-bb;
cout << "Error = "<<(br-bb).norm()<<endl;
cout <<"Cholesky factor filling = "<<b.simplify()<<endl;
}
}