#include <iostream>
#include <cmath>
#include <cstring>
#include <cctype>
#include <complex>
#include "sparsemat.h"

using namespace std;

//////////////////////////////////////////////////////////////////////////////
//// forced instantiation in the corresponding object file
template class SparseMat<double>;
template class SparseMat< complex<double> >;

#ifdef _GLIBCPP_NO_TEMPLATE_EXPORT
# define export
#endif

export template <class T>
ostream& operator<<(ostream &s, const SparseMat<T> &x)
{
SPMatindex n,m;
n=x.nrows();
m=x.ncols();
s << (int)n << ' ' << (int)m << '\n';
matel<T> *list=x.getlist();
while(list)
	{
	s << (int)list->row << ' ' << (int)list->col << ' ' << list->elem << '\n';
	list=list->next;
	}
s << "-1 -1\n";
return s;
}

export template <class T>
istream& operator>>(istream &s, SparseMat<T> &x)
{
int i,j;
int n,m;
matel<T> *l=NULL;
s >> n >> m;
x.resize(n,m);
s >> i >> j;
while(i>=0 && j>=0)
	{
	matel<T> *ll = l;
	l = new matel<T>;
	l->next = ll;
	l->row=i;
	l->col=j;
	s >> l->elem;
	s >> i >> j;
	}
x.setlist(l);
return s;
}
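// Note on the stream format implemented by the two operators above (illustrative
// example only): a header "nrows ncols", then one "row col value" triplet per
// stored element, terminated by the sentinel "-1 -1".  A 2x2 matrix with
// a(0,0)=1.5 and a(1,0)=-2 would therefore read
//
//   2 2
//   0 0 1.5
//   1 0 -2
//   -1 -1
//
// The triplet order follows the internal linked list and is not guaranteed.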
//helpers to be used from different functions
export template <class T>
void SparseMat<T>::unsort()
{
if(symmetric) colsorted=NULL;
if(colsorted) delete[] colsorted;
if(rowsorted) delete[] rowsorted;
colsorted=rowsorted=NULL;
nonzero=0;
}

export template <class T>
void SparseMat<T>::deletelist()
{
if(colsorted||rowsorted) unsort(); //prevent obsolete pointers
if(*count >1) laerror("trying to delete shared list");
matel<T> *l=list;
while(l)
	{
	matel<T> *ltmp=l;
	l=l->next;
	delete ltmp;
	}
list=NULL;
delete count;
count=NULL;
}

//no checks, not to be public
export template <class T>
void SparseMat<T>::copylist(const matel<T> *l)
{
list=NULL;
while(l)
	{
	add(l->row,l->col,l->elem);
	l=l->next;
	}
}

export template <class T>
void SparseMat<T>::copyonwrite()
{
if(!count) laerror("probably an assignment to undefined sparse matrix");
if(*count > 1)
	{
	(*count)--;
	count = new int;
	*count=1;
	if(!list) laerror("empty list with count>1");
	unsort();
	copylist(list);
	}
}

//global for sort !!! is not thread-safe
static void *globsorted;

//global functions cannot be partially specialized in templates, we have to make it a member function
//!!! gencmp's and genswap are critical for performance, make sure that the compiler really inlines them

//general comparison: column index first, then row index (used for type==1)
template <class T, int type>
struct gencmp {
	inline static SPMatindexdiff EXEC(register const SPMatindex i, register const SPMatindex j)
	{
	register SPMatindexdiff k;
	register matel<T> *ii,*jj;
	ii=((matel<T> **)globsorted)[i];
	jj=((matel<T> **)globsorted)[j];
	if (k=ii->col-jj->col) return k; else return ii->row-jj->row;
	}
};

//specialization for type==0: row index first, then column index
template <class T>
struct gencmp<T,0> {
	inline static SPMatindexdiff EXEC(register const SPMatindex i, register const SPMatindex j)
	{
	register SPMatindexdiff k;
	register matel<T> *ii,*jj;
	ii=((matel<T> **)globsorted)[i];
	jj=((matel<T> **)globsorted)[j];
	if (k=ii->row-jj->row) return k; else return ii->col-jj->col;
	}
};

template <class T>
inline void genswap(const SPMatindex i, const SPMatindex j)
{
SWAP(((matel<T> **)globsorted)[i],((matel<T> **)globsorted)[j]);
}

template <class T, int type>
void genqsort(SPMatindex l, SPMatindex r) /*safer version for worst case*/
{
register SPMatindex i,j,piv;

/* other method for small arrays recommended in NUMREC is not used here;
   it does not give so large a gain for moderate arrays and complicates things,
   but would be worth trying (cf. profile) */

if(r<=l) return; /*1 element*/
if(gencmp<T,type>::EXEC(r,l)<0) genswap<T>(l,r);
if(r-l==1) return; /*2 elements and preparation for median*/
piv= (l+r)/2; /*pivoting by median of 3 - safer */
if(gencmp<T,type>::EXEC(piv,l)<0) genswap<T>(l,piv); /*and change the pivot element implicitly*/
if(gencmp<T,type>::EXEC(r,piv)<0) genswap<T>(r,piv); /*and change the pivot element implicitly*/
if(r-l==2) return; /*in the case of 3 elements we are finished too */

/*general case, l-th and r-th already processed*/
i=l+1; j=r-1;
do	{
	/*important sharp inequality - stops at sentinel element for efficiency*/
	/*this is inefficient if all keys are equal - unnecessary n log n swaps are done,
	  but we assume that it is atypical input*/
	while(gencmp<T,type>::EXEC(i++,piv)<0);
	i--;
	while(gencmp<T,type>::EXEC(j--,piv)>0);
	j++;
	if(i<j)
		{
		genswap<T>(i,j);
		if(i==piv) piv=j; else if(j==piv) piv=i;
		}
	if(i<=j) {i++; j--;}
	} while(i<=j);

if(j-l < r-i) /*because of the stack in bad case process first the shorter subarray*/
	{if(l<j) genqsort<T,type>(l,j); if(i<r) genqsort<T,type>(i,r);}
else
	{if(i<r) genqsort<T,type>(i,r); if(l<j) genqsort<T,type>(l,j);}
}

export template <class T>
unsigned int SparseMat<T>::length() const
{
if(nonzero) return nonzero;
unsigned int n=0;
matel<T> *l=list;
while(l)
	{
	++n;
	l=l->next;
	}
const_cast<SparseMat<T> *>(this)->nonzero=n;
return n;
}

export template <class T>
unsigned int SparseMat<T>::sort(int type) const //must be const since used from operator* which must be const to be compatible with other stuff, dirty casts here
{
if(type==0&&rowsorted || type==1&&colsorted) return nonzero;
if(!list) return ((SparseMat<T> *)this)->nonzero=0;

if(type!=2) const_cast<SparseMat<T> *>(this)->setunsymmetric();
else type=0; //symmetric and sorted not supported simultaneously, type 2 is special just for simplify

//create array from list, reallocate as necessary
unsigned int size=3*MAX(nn,mm); //initial guess for the number of nonzero elements
matel<T> **sorted= new matel<T>* [size];
((SparseMat<T> *)this)->nonzero=0;
matel<T> *l = list;
while(l)
	{
	sorted[(((SparseMat<T> *)this)->nonzero)++]=l;
	if(nonzero >= size) //reallocate
		{
		size*=2;
		matel<T> **newsorted= new matel<T>* [size];
		memcpy(newsorted,sorted,size/2*sizeof(matel<T>*));
		delete[] sorted;
		sorted=newsorted;
		}
	l= l->next;
	}

//now sort the array of pointers according to type
globsorted =sorted;
if(type==0) {genqsort<T,0>(0,nonzero-1); ((SparseMat<T> *)this)->rowsorted=sorted;} //type handled at compile time for more efficiency
else        {genqsort<T,1>(0,nonzero-1); ((SparseMat<T> *)this)->colsorted=sorted;} //should better be const cast
//cout <<"sort: nonzero ="<<nonzero<<endl;
return nonzero;
}
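// Illustrative note on the helpers above: sort(0) fills rowsorted[] with pointers
// ordered by (row,col), sort(1) fills colsorted[] ordered by (col,row); duplicate
// coordinates may still be present until simplify() merges them.  For elements at
// positions (1,0), (0,2), (0,1) the two orderings would be
//
//   rowsorted: (0,1) (0,2) (1,0)
//   colsorted: (1,0) (0,1) (0,2)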
export template <class T>
void SparseMat<T>::simplify()
{
unsigned int n;
if(!list) return;
copyonwrite();
if(symmetric)
	{
	unsort();
	matel<T> *p;
	p=list;
	while(p)
		{
		if(p->row>p->col) SWAP(p->row,p->col); //get into one triangle, not OK for complex hermitean
		p=p->next;
		}
	n=sort(2); //sort and further handle like a triangle matrix
	}
else n=sort(0); //sorts according to row,column

//sum up duplicate entries into the first occurrence
unsigned int i,j;
SPMatindex r,c;
j=0;
r=rowsorted[j]->row;
c=rowsorted[j]->col;
for(i=1; i<n; i++)
	{
	if(r==rowsorted[i]->row && c==rowsorted[i]->col)
		{rowsorted[j]->elem +=rowsorted[i]->elem; delete rowsorted[i]; rowsorted[i]=NULL;}
	else
		{
		j=i;
		r=rowsorted[j]->row;
		c=rowsorted[j]->col;
		}
	}

//check if summed to zero
for(i=0; i<n; i++) if(rowsorted[i] &&
#ifdef SPARSEEPSILON
	abs(rowsorted[i]->elem)<SPARSEEPSILON
#else
	!rowsorted[i]->elem
#endif
	) {delete rowsorted[i]; rowsorted[i]=NULL;}

//restore connectivity
int nonz=0;
matel<T> *p,*first,*prev;
first=NULL;
prev=NULL;
for(i=0; i<n; i++) if(rowsorted[i])
	{
	nonz++;
	p=rowsorted[i];
	if(!prev) first=p; else prev->next=p;
	p->next=NULL;
	prev=p;
	}
list=first;
nonzero=nonz;
unsort(); //since there were NULLs introduced, rowsorted is not dense
}

export template <class T>
void SparseMat<T>::resize(const SPMatindex n, const SPMatindex m)
{
if(n<=0 || m<=0) laerror("illegal matrix dimension");
unsort();
if(count)
	{
	if(*count > 1) {(*count)--; count=NULL; list=NULL;} //detach from previously shared data
	else if(*count==1) deletelist();
	}
nn=n;
mm=m;
count=new int(1); //empty but defined matrix
list=NULL;
symmetric=0;
colsorted=rowsorted=NULL;
}

export template <class T>
void SparseMat<T>::addsafe(const SPMatindex n, const SPMatindex m, const T elem)
{
#ifdef debug
if(n<0||n>=nn||m<0||m>=mm) laerror("SparseMat out of range");
#endif
#ifdef SPARSEEPSILON
if(abs(elem)<SPARSEEPSILON) return;
#endif
add(n,m,elem);
}

//assignment operator
export template <class T>
SparseMat<T> & SparseMat<T>::operator=(const SparseMat<T> &rhs)
{
if (this != &rhs)
	{
	unsort();
	if(count) if(--(*count) ==0) {deletelist(); delete count;} //old stuff obsolete
	list=rhs.list;
	nn=rhs.nn;
	mm=rhs.mm;
	if(list) count=rhs.count; else count= new int(0); //make the matrix defined, but empty and not shared, count will be incremented below
	symmetric=rhs.symmetric;
	if(count) (*count)++;
	}
return *this;
}

export template <class T>
SparseMat<T> & SparseMat<T>::join(SparseMat<T> &rhs)
{
if(symmetric!=rhs.symmetric||nn!=rhs.nn||mm!=rhs.mm) laerror("incompatible matrices in join()");
if(*rhs.count!=1) laerror("shared rhs in join()");
if(!count) {count=new int; *count=1; list=NULL;}
else copyonwrite();
matel<T> **last=&list;
while(*last) last= &((*last)->next);
*last=rhs.list;
rhs.list=NULL;
return *this;
}

export template <class T>
SparseMat<T> & SparseMat<T>::addtriangle(const SparseMat<T> &rhs, const bool lower, const char sign)
{
if(nn!=rhs.nn||mm!=rhs.mm) laerror("incompatible dimensions for +=");
if(!count) {count=new int; *count=1; list=NULL;}
else copyonwrite();
register matel<T> *l=rhs.list;
while(l)
	{
	if(rhs.symmetric || lower && l->row <=l->col || !lower && l->row >=l->col)
#ifdef SPARSEEPSILON
		if(abs(l->elem)>SPARSEEPSILON)
#endif
			add( l->row,l->col,sign=='+'?l->elem: -l->elem);
	l=l->next;
	}
return *this;
}

export template <class T>
SparseMat<T> & SparseMat<T>::operator+=(const SparseMat<T> &rhs)
{
if(symmetric&&!rhs.symmetric) laerror("cannot add general to symmetric sparse");
if(nn!=rhs.nn||mm!=rhs.mm) laerror("incompatible dimensions for +=");
if(!count) {count=new int; *count=1; list=NULL;}
else copyonwrite();
bool symmetrize= !symmetric && rhs.symmetric;
register matel<T> *l=rhs.list;
if(symmetrize)
	while(l)
		{
#ifdef SPARSEEPSILON
		if(abs(l->elem)>SPARSEEPSILON)
#endif
			{add( l->row,l->col,l->elem); if( l->row!=l->col) add( l->col,l->row,l->elem);}
		l=l->next;
		}
else
	while(l)
		{
#ifdef SPARSEEPSILON
		if(abs(l->elem)>SPARSEEPSILON)
#endif
			add( l->row,l->col,l->elem);
		l=l->next;
		}
return *this;
}

export template <class T>
SparseMat<T> & SparseMat<T>::operator-=(const SparseMat<T> &rhs)
{
if(symmetric&&!rhs.symmetric) laerror("cannot add general to symmetric sparse");
if(nn!=rhs.nn||mm!=rhs.mm) laerror("incompatible dimensions for -=");
if(!count) {count=new int; *count=1; list=NULL;}
else copyonwrite();
bool symmetrize= !symmetric && rhs.symmetric;
register matel<T> *l=rhs.list;
if(symmetrize)
	while(l)
		{
#ifdef SPARSEEPSILON
		if(abs(l->elem)>SPARSEEPSILON)
#endif
			{add( l->row,l->col, -l->elem); if( l->row!=l->col) add( l->col,l->row, -l->elem);}
		l=l->next;
		}
else
	while(l)
		{
#ifdef SPARSEEPSILON
		if(abs(l->elem)>SPARSEEPSILON)
#endif
			add( l->row,l->col, -l->elem);
		l=l->next;
		}
return *this;
}
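// Usage sketch (illustrative only, assumes the public add()/simplify() interface
// declared in sparsemat.h): add() and the operators above merely append list
// nodes, so repeated indices accumulate as separate terms until simplify()
// merges them.
#if 0
	SparseMat<double> a(3,3);
	a.add(0,0,1.0);
	a.add(0,0,2.0);   // same position stored as a second node
	a.simplify();     // merged: a single element with value 3.0 remains
#endif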
//constructor from a dense matrix
export template <class T>
SparseMat<T>::SparseMat(const NRMat<T> &rhs)
{
nn=rhs.nrows();
mm=rhs.ncols();
count=new int;
*count=1;
list=NULL;
symmetric=0;
colsorted=rowsorted=NULL;
SPMatindex i,j;
for(i=0;i<nn;i++) for(j=0; j<mm;j++)
	{
	register T t=rhs(i,j);
#ifdef SPARSEEPSILON
	if(abs(t)>SPARSEEPSILON)
#else
	if(t)
#endif
		add(i,j,t);
	}
}

//constructor dense matrix from sparse
export template <class T>
NRMat<T>::NRMat(const SparseMat<T> &rhs)
{
nn=rhs.nrows();
mm=rhs.ncols();
count=new int(1);
T *p;
#ifdef MATPTR
v= new T*[nn];
p=v[0] = new T[mm*nn];
for (int i=1; i< nn; i++) v[i] = v[i-1] + mm;
#else
p= v = new T[mm*nn];
#endif
memset(p,0,nn*mm*sizeof(T));
matel<T> *l=rhs.getlist();
bool sym=rhs.issymmetric();
while(l)
	{
#ifdef MATPTR
	v[l->row][l->col] +=l->elem;
	if(sym && l->row!=l->col) v[l->col][l->row] +=l->elem;
#else
	v[l->row*mm+l->col] +=l->elem;
	if(sym && l->row!=l->col) v[l->col*mm+l->row] +=l->elem;
#endif
	l=l->next;
	}
}

//constructor dense symmetric packed matrix from sparse
#define nn2 (nn*(nn+1)/2)
export template <class T>
NRSMat<T>::NRSMat(const SparseMat<T> &rhs)
{
if(!rhs.issymmetric()||rhs.nrows()!=rhs.ncols()) laerror("sparse matrix is not symmetric");
nn=rhs.nrows();
count=new int(1);
v=new T[nn2];
memset(v,0,nn2*sizeof(T));
matel<T> *l=rhs.getlist();
while(l)
	{
	(*this)(l->row,l->col)=l->elem;
	l=l->next;
	}
}
#undef nn2

//constructor dense vector from sparse
export template <class T>
NRVec<T>::NRVec(const SparseMat<T> &rhs)
{
if(rhs.nrows()>1 && rhs.ncols()>1) laerror("cannot construct a vector from a sparse matrix with more than one row/column");
nn=rhs.nrows()>1?rhs.nrows():rhs.ncols();
v=new T[nn];
memset(v,0,nn*sizeof(T));
count=new int(1);
matel<T> *l=rhs.getlist();
if(rhs.nrows()>1)
	while(l)
		{
		v[l->row]+=l->elem;
		l=l->next;
		}
else
	while(l)
		{
		v[l->col]+=l->elem;
		l=l->next;
		}
}

//assignment of a scalar matrix
export template <class T>
SparseMat<T> & SparseMat<T>::operator=(const T a)
{
if(!count ||nn<=0||mm<=0) laerror("assignment of scalar to undefined sparse matrix");
if(nn!=mm) laerror("assignment of scalar to non-square sparse matrix");
resize(nn,mm); //clear
#ifdef SPARSEEPSILON
if(abs(a)<SPARSEEPSILON) return *this;
#endif
SPMatindex i;
for(i=0;i<nn;i++) add(i,i,a);
return *this;
}

export template <class T>
SparseMat<T> & SparseMat<T>::operator+=(const T a)
{
if(!count ||nn<=0||mm<=0) laerror("assignment of scalar to undefined sparse matrix");
if(nn!=mm) laerror("assignment of scalar to non-square sparse matrix");
if(a==(T)0) return *this;
SPMatindex i;
for(i=0;i<nn;i++) add(i,i,a);
return *this;
}

export template <class T>
SparseMat<T> & SparseMat<T>::operator-=(const T a)
{
if(!count ||nn<=0||mm<=0) laerror("assignment of scalar to undefined sparse matrix");
if(nn!=mm) laerror("assignment of scalar to non-square sparse matrix");
if(a==(T)0) return *this;
SPMatindex i;
for(i=0;i<nn;i++) add(i,i,-a);
return *this;
}

//constructor from a dense symmetric matrix
export template <class T>
SparseMat<T>::SparseMat(const NRSMat<T> &rhs)
{
nn=rhs.nrows();
mm=rhs.ncols();
count=new int;
*count=1;
list=NULL;
symmetric=1;
colsorted=rowsorted=NULL;
SPMatindex i,j;
for(i=0;i<nn;i++) for(j=0; j<=i;j++)
	{
	register T t;
	if(
#ifdef SPARSEEPSILON
	abs(t=rhs(i,j))>SPARSEEPSILON
#else
	t=rhs(i,j)
#endif
	) add(i,j,t);
	}
}

export template <class T>
void SparseMat<T>::transposeme()
{
if(!count) laerror("transposeme on undefined lhs");
if(symmetric||!list) return;
copyonwrite(); //also unsort
register matel<T> *l=list;
while(l)
	{
	SWAP(l->row,l->col);
	l=l->next;
	}
SWAP(nn,mm);
}

export template <class T>
void SparseMat<T>::setunsymmetric()
{
if(!symmetric) return;
unsort();
symmetric=0;
if(!count) return;
copyonwrite();
matel<T> *l=list;
while(l) //include the mirror picture of elements into the list
	{
	if(
#ifdef SPARSEEPSILON
	abs(l->elem)>SPARSEEPSILON &&
#endif
	l->row!=l->col) add(l->col,l->row,l->elem); //not OK for complex-hermitean
	l=l->next;
	}
}

export template <class T>
SparseMat<T> & SparseMat<T>::operator*=(const T a)
{
if(!count) laerror("operator*= on undefined lhs");
if(!list||a==(T)1) return *this;
if(a==(T)0) resize(nn,mm);
else copyonwrite();
register matel<T> *l=list;
while(l)
	{
	l->elem*=a;
	l=l->next;
	}
return *this;
}

const double SparseMat<double>::dot(const NRMat<double> &rhs) const
{
double r=0;
matel<double> *l=list;
while(l)
	{
	r+= l->elem*rhs[l->row][l->col];
	if(symmetric&&l->row!=l->col) r+=l->elem*rhs[l->col][l->row];
	l=l->next;
	}
return r;
}

template <class T>
void NRVec<T>::gemv(const T beta, const SparseMat<T> &a, const char trans, const T alpha, const NRVec<T> &x)
{
if((trans=='n'?a.ncols():a.nrows())!= (SPMatindex)x.size()) laerror("incompatible sizes in gemv");
copyonwrite();
if(beta!=(T)0) (*this) *= beta;
else memset(v,0,nn*sizeof(T));
bool transp = tolower(trans)!='n'; //not OK for complex
matel<T> *l=a.getlist();
if(alpha==(T)0 || !l) return;
T *vec=x.v;
if(alpha==(T)1)
	{
	if(a.issymmetric())
		{
		while(l)
			{
			v[l->row]+= l->elem*vec[l->col];
			if(l->row!=l->col) v[l->col]+= l->elem*vec[l->row];
			l=l->next;
			}
		}
	else
		{
		if(transp)
			while(l)
				{
				v[l->col]+= l->elem*vec[l->row];
				l=l->next;
				}
		else
			while(l)
				{
				v[l->row]+= l->elem*vec[l->col];
				l=l->next;
				}
		}
	}
else
	{
	if(a.issymmetric())
		{
		while(l)
			{
			v[l->row]+= alpha*l->elem*vec[l->col];
			if(l->row!=l->col) v[l->col]+= alpha*l->elem*vec[l->row];
			l=l->next;
			}
		}
	else
		{
		if(transp)
			while(l)
				{
				v[l->col]+= alpha*l->elem*vec[l->row];
				l=l->next;
				}
		else
			while(l)
				{
				v[l->row]+= alpha*l->elem*vec[l->col];
				l=l->next;
				}
		}
	}
}
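// Illustrative summary of the gemv contract implemented above:
// y.gemv(beta, a, trans, alpha, x) computes y = beta*y + alpha*op(a)*x, where
// op(a) is a for trans=='n' and the transpose otherwise; symmetric matrices are
// expanded on the fly.  Hypothetical usage:
#if 0
	SparseMat<double> a(4,3);
	NRVec<double> x(3), y(4);
	// ... fill a and x ...
	y.gemv(0.0, a, 'n', 1.0, x);   // y = a*x
#endif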
//multiplication with a dense vector from both sides
template <class T>
const NRVec<T> SparseMat<T>::multiplyvector(const NRVec<T> &vec, const bool transp) const
{
if(transp && nn!=(SPMatindex)vec.size() || !transp && mm!=(SPMatindex)vec.size()) laerror("incompatible sizes in sparsemat*vector");
NRVec<T> result(transp?mm:nn);
result.gemv((T)0, *this, transp?'t':'n', (T)1., vec);
return result;
}

template <class T>
const NRVec<T> NRVec<T>::operator*(const SparseMat<T> &mat) const
{
if(mat.nrows()!= (SPMatindex)size()) laerror("incompatible sizes in vector*sparsemat");
NRVec<T> result((T)0,mat.ncols());
matel<T> *l=mat.getlist();
bool symmetric=mat.issymmetric();
while(l)
	{
	result.v[l->col]+= l->elem*v[l->row];
	if(symmetric&&l->row!=l->col) result.v[l->row]+= l->elem*v[l->col];
	l=l->next;
	}
return result;
}

template <class T>
const T SparseMat<T>::trace() const
{
matel<T> *l=list;
T sum(0);
while(l)
	{
	if(l->row==l->col) sum+= l->elem;
	l=l->next;
	}
return sum;
}

//not OK for complex hermitean
template <class T>
const T SparseMat<T>::norm(const T scalar) const
{
if(!list) return T(0);
const_cast<SparseMat<T> *>(this)->simplify();
matel<T> *l=list;
T sum(0);
if(scalar!=(T)0)
	{
	if(symmetric)
		while(l)
			{
			T hlp=l->elem;
			bool b=l->row==l->col;
			if(b) hlp-=scalar;
			T tmp=hlp*hlp;
			sum+= tmp;
			if(!b) sum+=tmp;
			l=l->next;
			}
	else
		while(l)
			{
			T hlp=l->elem;
			if(l->row==l->col) hlp-=scalar;
			sum+= hlp*hlp;
			l=l->next;
			}
	}
else
	{
	if(symmetric)
		while(l)
			{
			T tmp=l->elem*l->elem;
			sum+= tmp;
			if(l->row!=l->col) sum+=tmp;
			l=l->next;
			}
	else
		while(l)
			{
			sum+= l->elem*l->elem;
			l=l->next;
			}
	}
return sqrt(sum); //not OK for int, would need traits technique
}

template <class T>
void SparseMat<T>::axpy(const T alpha, const SparseMat<T> &x, const bool transp)
{
if(!transp && (nn!=x.nn||mm!=x.mm) || transp && (mm!=x.nn||nn!=x.mm)) laerror("incompatible dimensions for axpy");
if(!count) {count=new int; *count=1; list=NULL;}
else copyonwrite();
if(alpha==(T)0||x.list==NULL) return;
if(!transp||x.symmetric)
	{
	if(alpha==(T)1) {*this +=x; return;}
	if(alpha==(T)-1) {*this -=x; return;}
	}
if(symmetric!=x.symmetric) laerror("general axpy not supported for different symmetry types");
//now it does not matter whether both are general or both symmetric (transposition will not matter)
register matel<T> *l=x.list;
if(transp)
	while(l)
		{
		register T t=alpha*l->elem;
#ifdef SPARSEEPSILON
		if(abs(t)>SPARSEEPSILON)
#endif
			add( l->col,l->row,t);
		l=l->next;
		}
else
	while(l)
		{
		register T t=alpha*l->elem;
#ifdef SPARSEEPSILON
		if(abs(t)>SPARSEEPSILON)
#endif
			add( l->row,l->col,t);
		l=l->next;
		}
}

template <class T>
const T SparseMat<T>::dot(const SparseMat<T> &rhs) const //complex conj. not implemented yet
{
if(nn!=rhs.nn || mm!=rhs.mm) laerror("dot of incompatible sparse matrices");
if(symmetric||rhs.symmetric) laerror("dot of symmetric sparse matrices not implemented");

T result=0;
if(list && rhs.list) //both nonzero
	{
	unsigned int na=sort(0);
	unsigned int nb=rhs.sort(0);

	//now merge the sorted lists
	register unsigned int i,j;
	register SPMatindex ra,ca;
	j=0;
	for(i=0; i<na; i++)
		{
		ra=rowsorted[i]->row;
		ca=rowsorted[i]->col;
		while(j<nb && (rhs.rowsorted[j]->row) <ra) j++; /*skip in rhs by row*/
		while(j<nb && rhs.rowsorted[j]->row==ra && (rhs.rowsorted[j]->col) <ca) j++; /*skip in rhs by column*/
		if(j==nb) break;
		if(rhs.rowsorted[j]->row==ra && rhs.rowsorted[j]->col==ca) /*matching element found*/
			{
			register T tmp=rowsorted[i]->elem;
			register unsigned int k;
			/*j remembers the position, k forwards in the rhs.rowsorted to find all combinations*/
			k=j;
			do	{
				result += tmp*rhs.rowsorted[k]->elem;
				k++;
				} while(k<nb && (rhs.rowsorted[k]->row == ra) && (rhs.rowsorted[k]->col == ca));
			}
		/*else skip in left operand*/
		}
	}
return result;
}
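// Note on the merge above (illustrative): both operands are brought to the same
// row-major ordering by sort(0), so the dot product is evaluated in a single pass
// with two cursors i and j instead of searching rhs for every element of *this;
// the result is the sum of a(i,j)*b(i,j) over coinciding nonzero positions.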
template <class T>
const SparseMat<T> SparseMat<T>::operator*(const SparseMat<T> &rhs) const
{
if(mm!=rhs.nn) laerror("product of incompatible sparse matrices");
if(symmetric||rhs.symmetric) laerror("product of symmetric sparse matrices not implemented");

SparseMat<T> result(nn,rhs.mm);
if(list && rhs.list) //both nonzero
	{
	unsigned int na=sort(1);
	unsigned int nb=rhs.sort(0);

	//now merge the sorted lists
	register unsigned int i,j;
	register SPMatindex rb=0,ca;
	j=0;
	for(i=0; i<na; i++)
		{
		ca=colsorted[i]->col;
		while(j<nb && (rb=rhs.rowsorted[j]->row) <ca) j++; /*skip in rhs*/
		if(j==nb) break;
		if(rb==ca) /*matching contraction index found*/
			{
			register T tmp=colsorted[i]->elem;
			register unsigned int k;
			/*j remembers the position, k forwards in the rhs.rowsorted to find all combinations*/
			k=j;
			do	{
				result.add(colsorted[i]->row,rhs.rowsorted[k]->col,tmp*rhs.rowsorted[k]->elem);
				k++;
				} while(k<nb && ((rhs.rowsorted[k]->row) == ca));
			}
		/*else skip in left operand*/
		}
	result.simplify(); //otherwise the number of terms tends to grow exponentially
	}
return result;
}

template <class T>
void SparseMat<T>::gemm(const T beta, const SparseMat<T> &a, const char transa, const SparseMat<T> &b, const char transb, const T alpha)
{
SPMatindex l(transa=='n'?a.nn:a.mm);
SPMatindex k(transa=='n'?a.mm:a.nn);
SPMatindex kk(transb=='n'?b.nn:b.mm);
SPMatindex ll(transb=='n'?b.mm:b.nn);

if(a.symmetric||b.symmetric) laerror("symmetric sparse matrices not supported in gemm");
if(beta==(T)0) resize(l,ll); //empty matrix
else *this *= beta; //takes care about beta=1
if(l!=nn|| ll!=mm||k!=kk) laerror("incompatible sparse matrices in gemm");
if(alpha==(T)0 || !a.list ||!b.list) return;
copyonwrite();

//regular case, specialize for transpositions
matel<T> **ma,**mb;
unsigned int na,nb;
bool tra= transa!='n';
bool trb= transb!='n';
if(!tra) {na=a.sort(1); ma=a.colsorted;}
else     {na=a.sort(0); ma=a.rowsorted;}
if(!trb) {nb=b.sort(0); mb=b.rowsorted;}
else     {nb=b.sort(1); mb=b.colsorted;}

//now merge the sorted lists
register unsigned int i,j;
register SPMatindex rb=0,ca,row;
j=0;
for(i=0; i<na; i++)
	{
	ca= tra?ma[i]->row:ma[i]->col;
	row=tra?ma[i]->col:ma[i]->row;
	while(j<nb && (rb=trb?mb[j]->col:mb[j]->row) <ca) j++; /*skip in b*/
	if(j==nb) break;
	if(rb==ca) /*matching contraction index found*/
		{
		register T tmp=ma[i]->elem;
		register unsigned int k;
		/*j remembers the position, k forwards in the mb to find all combinations*/
		k=j;
		do	{
			register SPMatindex col;
			col=trb?mb[k]->row:mb[k]->col;
			if(!symmetric||row<=col) add(row,col,tmp*mb[k]->elem);
			k++;
			} while(k<nb && ((trb?mb[k]->col:mb[k]->row) == ca));
		}
	/*else skip in ma*/
	}
simplify();
}
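// Illustrative summary of the gemm contract implemented above:
// c.gemm(beta, a, transa, b, transb, alpha) computes c = beta*c + alpha*op(a)*op(b),
// with op(x) = x or its transpose according to the trans flags; both operands are
// sorted on the contraction index so the product is formed in one merge pass.
// Hypothetical usage:
#if 0
	SparseMat<double> a(4,3), b(3,5), c(4,5);
	// ... fill a and b via add() ...
	c.gemm(0.0, a, 'n', b, 'n', 1.0);   // c = a*b
#endif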
#ifdef _GLIBCPP_NO_TEMPLATE_EXPORT
#define INSTANTIZE(T) \
template ostream& operator<<(ostream &s, const SparseMat<T> &x); \
template istream& operator>>(istream &s, SparseMat<T> &x); \
template void SparseMat<T>::copyonwrite(); \
template void SparseMat<T>::resize(const SPMatindex n, const SPMatindex m); \
template void SparseMat<T>::unsort(); \
template unsigned int SparseMat<T>::sort(int type) const; \
template unsigned int SparseMat<T>::length() const; \
template void SparseMat<T>::deletelist(); \
template void SparseMat<T>::simplify(); \
template void SparseMat<T>::copylist(const matel<T> *l); \
template void SparseMat<T>::add(const SPMatindex n, const SPMatindex m, const T elem); \
template SparseMat<T> & SparseMat<T>::operator=(const SparseMat<T> &rhs); \
template SparseMat<T> & SparseMat<T>::operator+=(const SparseMat<T> &rhs); \
template SparseMat<T> & SparseMat<T>::operator-=(const SparseMat<T> &rhs); \
template SparseMat<T>::SparseMat(const NRMat<T> &rhs); \
template SparseMat<T>::SparseMat(const NRSMat<T> &rhs); \
template void SparseMat<T>::transposeme(); \
template SparseMat<T> & SparseMat<T>::operator*=(const T a); \
template void SparseMat<T>::setunsymmetric(); \
template SparseMat<T> & SparseMat<T>::operator=(const T a); \
template SparseMat<T> & SparseMat<T>::operator+=(const T a); \
template SparseMat<T> & SparseMat<T>::operator-=(const T a); \
template NRMat<T>::NRMat(const SparseMat<T> &rhs); \
template NRSMat<T>::NRSMat(const SparseMat<T> &rhs); \
template NRVec<T>::NRVec(const SparseMat<T> &rhs); \
template const NRVec<T> SparseMat<T>::operator*(const NRVec<T> &vec) const; \
template const NRVec<T> NRVec<T>::operator*(const SparseMat<T> &mat) const; \
template SparseMat<T> & SparseMat<T>::join(SparseMat<T> &rhs); \
template const T SparseMat<T>::trace() const; \
template const T SparseMat<T>::norm(const T scalar) const; \
template void SparseMat<T>::axpy(const T alpha, const SparseMat<T> &x, const bool transp); \
template const SparseMat<T> SparseMat<T>::operator*(const SparseMat<T> &rhs) const; \
template const T SparseMat<T>::dot(const SparseMat<T> &rhs) const; \
template void SparseMat<T>::gemm(const T beta, const SparseMat<T> &a, const char transa, const SparseMat<T> &b, const char transb, const T alpha); \
template void NRVec<T>::gemv(const T beta, const SparseMat<T> &a, const char trans, const T alpha, const NRVec<T> &x);

INSTANTIZE(double)

// some functions are not OK for hermitean!
INSTANTIZE(complex<double>)

#endif
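// Overall usage sketch (illustrative only; relies on the public interface
// declared in sparsemat.h):
#if 0
	SparseMat<double> a(4,4);
	a.add(0,1, 2.5);
	a.add(3,2,-1.0);
	a.simplify();
	NRMat<double> dense(a);   // expand to a dense matrix
	cout << a;                // text form: "4 4", then triplets, then "-1 -1"
#endif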