Compare commits: 4500752146...master (4 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 26ed939901 | |
| | 671b924c8c | |
| | 1965f4f653 | |
| | 7e90168443 | |
@@ -39,6 +39,7 @@ namespace LA {
 //@@@small matrix gdiagonalize - shift complex roots up (option to gdiagonalize?)
 //@@@test gdiagonalize whether it sorts the roots and what for complex ones
 //@@@implement left eigenvectors for nonsymmetric case
+//@@@implement start from larger Krylov space than a single vector - useful for multiroot solutions


 //Davidson algorithm: J. Comp. Phys. 17:817 (1975)
103 lanczos.h
@@ -24,6 +24,10 @@
 #include "nonclass.h"
 #include "auxstorage.h"

+//TODO:
+//@@@implement restart when Krylov space is exceeded
+//@@@implement inital guess of more than 1 vector (also in davidson) and block version of the methods
+
 namespace LA {

 //Lanczos diagonalization of hermitean matrix
@@ -36,7 +40,7 @@ namespace LA {
 template <typename T, typename Matrix>
 extern void lanczos(const Matrix &bigmat, NRVec<T> &eivals, NRVec<T> *eivecs, const char *eivecsfile,
 int nroots=1, const bool verbose=0, const double eps=1e-6,
-const bool incore=1, int maxit=100, const int maxkrylov = 500,
+const bool incore=1, int maxit=100, const int maxkrylov = 1000,
 void (*initguess)(NRVec<T> &)=NULL, const typename LA_traits<T>::normtype *target=NULL)
 {
 if(!bigmat.issymmetric()) laerror("lanczos only for hermitean matrices");
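For orientation, the test driver in t.cc further down in this comparison calls the routine as `lanczos(a,r,eivecs,NULL,m,1,1e-6,1,200,300);`. Read against the declaration above, the positional arguments map as follows; this is an annotated sketch based on the declaration, not code from the repository:

```cpp
// Positional reading of the t.cc call against the declaration above
lanczos(a,        // bigmat     - symmetric packed matrix, must pass issymmetric()
        r,        // eivals     - NRVec<double> of length >= nroots, receives eigenvalues
        eivecs,   // eivecs     - array of NRVec<double>, one per root (may be NULL)
        NULL,     // eivecsfile - no on-disk eigenvector file
        m,        // nroots
        1,        // verbose
        1e-6,     // eps        - convergence threshold on eigenvalue change
        1,        // incore     - keep Krylov vectors in memory
        200,      // maxit
        300);     // maxkrylov  - overrides the default (raised to 1000 in this commit)
```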
@@ -46,19 +50,16 @@ if ( n!= (int)bigmat.ncols()) laerror("non-square matrix in lanczos");
 if(eivals.size()<nroots) laerror("too small eivals dimension in lanczos");

 NRVec<T> vec1(n),vec2(n);
-NRVec<typename LA_traits<T>::normtype> r(maxkrylov);
 NRVec<T> *v0,*v1;
 AuxStorage<T> *s0,*s1;

 if(incore)
 {
 v0 = new NRVec<T>[maxkrylov];
-v1 = new NRVec<T>[maxkrylov];
 }
 else
 {
 s0 = new AuxStorage<T>;
-s1 = new AuxStorage<T>;
 }

 int i,j;
@@ -67,8 +68,6 @@ if(nroots>=maxkrylov) nroots =maxkrylov-1;
 int nroot=0;
 int oldnroot;

-//@@@@@@@@@@
-#if 0

 //default guess based on lowest diagonal element of the matrix
 if(initguess) initguess(vec1);
@@ -81,36 +80,96 @@ else
 vec1[j]=1;
 }

+NRVec<typename LA_traits<T>::normtype> alpha(maxkrylov),beta(maxkrylov-1);
+
 //initial step
 vec1.normalize();
-bigmat.gemv(0,vec2,'n',1,vec1); //avoid bigmat.operator*(vec), since that needs to allocate another n-sized vector
 if(incore) v0[0]=vec1; else s0->put(vec1,0);
-if(incore) v1[0]=vec2; else s1->put(vec2,0);
+bigmat.gemv(0,vec2,'n',1,vec1); //avoid bigmat.operator*(vec), since that needs to allocate another n-sized vector
+{
+T tmp=vec2*vec1;
+alpha[0]= LA_traits<T>::realpart(tmp);
+if(LA_traits<T>::imagpart(tmp)>1e-6) laerror("matrix probably not hermitian in lanczos");
+}
+vec2.axpy(-alpha[0],vec1);

+NRVec<typename LA_traits<T>::normtype> r(1); r[0]=alpha[0];
+NRVec<typename LA_traits<T>::normtype> rold;
+NRMat<typename LA_traits<T>::normtype> smallV;
+int krylovsize=1;
 //iterative Lanczos
 int it=0;
-for(k=2; k<=maxkrylov;++k)
+for(j=1; j<maxkrylov;++j)
 {
-diagonalize3(smallV,r,1,1,krylovsize+1,&smallSwork,1); //for symmetric matrix they have already been sorted to ascending order in lapack
+++it;
+if(it>maxit) {std::cout <<"too many interations in lanczos\n"; flag=1; goto finished;}
+++krylovsize;
+//extend the Krylov space (last w vector expected in vec2, last v vector in vec1)
+beta[j-1] = vec2.norm();
+if(beta[j-1] > 0)
+{
+vec2 /= beta[j-1];
+}
+else
+{
+laerror("zero norm in lanczos");
+//could generate an arbitrary vector and orthonormalize it
+}
+if(incore) v0[j]=vec2; else s0->put(vec2,j);
+vec1 *= -beta[j-1];
+bigmat.gemv(1,vec1,'n',1,vec2);
+{
+T tmp=vec1*vec2;
+alpha[j]= LA_traits<T>::realpart(tmp);
+if(LA_traits<T>::imagpart(tmp)>1e-6) laerror("matrix probably not hermitian in lanczos");
+}
+vec1.axpy(-alpha[j],vec2);
+vec1.swap(vec2); //move new w vector to vec2, new v vector to vec1

+//diagonalize the tridiagonal
+smallV.resize(j+1,j+1);
+rold=r;
+r=alpha.subvector(0,j);
+NRVec<typename LA_traits<T>::normtype> rr=beta.subvector(0,j-1);
+diagonalize3(r,rr,&smallV);
 if(target) //resort eigenpairs by distance from the target
 {
-NRVec<typename LA_traits<T>::normtype> key(krylovsize+1);
+NRVec<typename LA_traits<T>::normtype> key(j+1);
-for(int i=0; i<=krylovsize; ++i) key[i] = abs(r[i] - *target);
+for(int i=0; i<=j; ++i) key[i] = abs(r[i] - *target);
-NRPerm<int> p(krylovsize+1);
+NRPerm<int> p(j+1);
 key.sort(0,p);

-NRVec<typename LA_traits<T>::normtype> rp(maxkrylov);
+NRVec<typename LA_traits<T>::normtype> rp(j+1);
-NRMat<T> smallVp(maxkrylov,maxkrylov);
+NRMat<typename LA_traits<T>::normtype> smallVp(j+1,j+1);
-for(int i=0; i<=krylovsize; ++i)
+for(int i=0; i<=j; ++i)
 {
 rp[i]= r[p[i+1]-1];
-for(int j=0; j<=krylovsize; ++j) smallVp(j,i) = smallV(j,p[i+1]-1);
+for(int k=0; k<=j; ++k) smallVp(k,i) = smallV(k,p[i+1]-1);
 }
 r = rp;
 smallV = smallVp;
 }

+if(verbose)
+{
+for(int iroot=0; iroot<std::min(krylovsize,nroots); ++iroot)
+{
+std::cout <<"Lanczos: iter="<<it <<" dim="<<krylovsize<<" root="<<iroot<<" eigenvalue="<<r[iroot]<<"\n";
+}
+std::cout.flush();
+}
+//convergence test when we have enough roots even in rold
+if(krylovsize>nroots)
+{
+bool conv=true;
+for(int iroot=0; iroot<nroots; ++iroot)
+if(abs(r[iroot]-rold[iroot])>eps) conv=false;
+if(conv)
+{
+flag=0;
+goto converged;
+}
+}
 }
 flag=1;
 goto finished;
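For readers following the loop: it is the standard three-term Lanczos recurrence, with `alpha` and `beta` accumulating the diagonal and subdiagonal of the projected tridiagonal matrix that `diagonalize3` solves at every step. In conventional notation (a summary of what the code above does, with `vec1`/`vec2` playing the roles of v and w; not part of the commit):

$$\beta_{j-1}=\lVert w_{j-1}\rVert,\qquad v_j=w_{j-1}/\beta_{j-1},\qquad w_j=A\,v_j-\beta_{j-1}\,v_{j-1},$$
$$\alpha_j=\operatorname{Re}\!\left(v_j^{\dagger}w_j\right),\qquad w_j\leftarrow w_j-\alpha_j\,v_j .$$

Each iteration diagonalizes the (j+1)×(j+1) tridiagonal with diagonal α₀…α_j and off-diagonal β₀…β_{j-1}; its eigenvalues (the Ritz values in `r`) are compared against the previous iteration's `rold` for convergence, optionally after resorting by distance from `*target`.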
@@ -125,7 +184,7 @@ for(nroot=0; nroot<nroots; ++nroot)
 if(eivecs)
 {
 vec1=0;
-for(j=0; j<=krylovsize; ++j )
+for(j=0; j<krylovsize; ++j )
 {
 if(!incore) s0->get(vec2,j);
 vec1.axpy(smallV(j,nroot),incore?v0[j]:vec2);
@@ -141,12 +200,10 @@ for(nroot=0; nroot<nroots; ++nroot)

 if(eivecsfile) delete ev;

-//@@@@
-#endif

 finished:
-if(incore) {delete[] v0; delete[] v1;}
+if(incore) {delete[] v0;}
-else {delete s0; delete s1;}
+else {delete s0;}

 if(flag) laerror("no convergence in lanczos");
 }
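As a self-contained illustration of the same recurrence, the following standalone program (plain C++, independent of the LA types, so it compiles and runs on its own) builds the Lanczos tridiagonal for a small random dense symmetric matrix. It is a didactic sketch, not code from this repository; diagonalization of the tridiagonal and reorthogonalization are omitted.

```cpp
#include <cmath>
#include <iostream>
#include <random>
#include <vector>

int main()
{
const int n = 6, m = 4;                                   // matrix size, Krylov dimension
std::vector<double> A(n*n);
std::mt19937 gen(42);
std::uniform_real_distribution<double> dist(-1,1);
for(int i=0; i<n; ++i) for(int j=0; j<=i; ++j) A[i*n+j] = A[j*n+i] = dist(gen);

std::vector<double> v(n), vold(n,0.0), w(n), alpha(m), beta(m-1);
for(int i=0; i<n; ++i) v[i] = 1.0/std::sqrt((double)n);   // normalized start vector

for(int j=0; j<m; ++j)
        {
        for(int i=0; i<n; ++i)                            // w = A v
                {
                w[i] = 0;
                for(int k=0; k<n; ++k) w[i] += A[i*n+k]*v[k];
                }
        if(j>0) for(int i=0; i<n; ++i) w[i] -= beta[j-1]*vold[i];  // w -= beta_{j-1} v_{j-1}
        alpha[j] = 0;
        for(int i=0; i<n; ++i) alpha[j] += v[i]*w[i];     // alpha_j = v_j . w
        for(int i=0; i<n; ++i) w[i] -= alpha[j]*v[i];     // w -= alpha_j v_j
        if(j+1<m)
                {
                double nrm = 0;
                for(int i=0; i<n; ++i) nrm += w[i]*w[i];
                beta[j] = std::sqrt(nrm);
                if(beta[j]==0) break;                     // invariant subspace found
                vold = v;
                for(int i=0; i<n; ++i) v[i] = w[i]/beta[j];
                }
        }
for(int j=0; j<m; ++j) std::cout << "alpha["<<j<<"] = "<<alpha[j]<<"\n";
for(int j=0; j+1<m; ++j) std::cout << "beta["<<j<<"]  = "<<beta[j]<<"\n";
return 0;
}
```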
104 t.cc
@@ -4502,9 +4502,43 @@ cout <<"Error = "<<(xt-y).norm()<<endl;

 if(0)
 {
-//n must not be too small
 int n,m;
-cin>>n >>m;
+bool which;
+cin>>n >>m >>which;
+NRSMat<double> a(n);
+NRVec<double> rr(n);
+
+for(int i=0;i<n;++i) for(int j=0;j<=i;++j)
+{
+a(i,j)= RANDDOUBLE();
+if(i==j) a(i,i)+= .5*(i-n*.5);
+}
+
+NRSMat<double> aa;
+NRMat<double> vv(n,n);
+aa=a; diagonalize(aa,rr,&vv);
+NRVec<double> r(m);
+NRVec<double> *eivecs = new NRVec<double>[m];
+cout <<"Exact energies " <<rr<<endl;
+
+if(which) lanczos(a,r,eivecs,NULL,m,1,1e-6,1,200,300);
+else davidson(a,r,eivecs,NULL,m,1,1e-6,1,200,300);
+cout <<"Iterative energies " <<r;
+cout <<"Eigenvectors compare:\n";
+for(int i=0; i<m; ++i)
+{
+cout <<eivecs[i];
+for(int j=0; j<n;++j) cout <<vv[j][i]<<" ";
+cout <<endl;
+}
+
+}
+
+if(0)
+{
+int n,m;
+bool which;
+cin>>n >>m>>which ;
 NRSMat<complex<double> > a(n);
 a.randomize(.1);

@@ -4522,9 +4556,10 @@ cout <<"Exact energies "<<rr;

 NRVec<complex<double> > r(m);
 NRVec<complex<double> > *eivecs = new NRVec<complex<double> >[m];
-davidson(a,r,eivecs,NULL,m,true,1e-5,true,10*n,n);
+if(which) lanczos(a,r,eivecs,NULL,m,true,1e-5,true,10*n,n);
+else davidson(a,r,eivecs,NULL,m,true,1e-5,true,10*n,n);

-cout <<"Davidson energies " <<r;
+cout <<"Davidson/Lanczos energies " <<r;
 cout <<"Exact energies "<<rr;

 cout <<"Eigenvectors compare:\n";
@@ -4539,27 +4574,52 @@ for(int i=0; i<m; ++i)

 if(1)
 {
-int n,m;
+int r,n,sym;
-cin>>n >>m;
+cin>>r>>n>>sym;
-NRSMat<double> a(n);
+NRVec<INDEXGROUP> shape(3);
-NRVec<double> rr(n);
+shape[0].number=2;
+shape[0].symmetry=0;
+shape[0].range=n+1;
+shape[0].offset=0;

-for(int i=0;i<n;++i) for(int j=0;j<=i;++j)
+shape[1].number=r;
-{
+shape[1].symmetry= sym;
-a(i,j)= RANDDOUBLE();
+shape[1].range=n;
-if(i==j) a(i,i)+= .5*(i-n*.5);
+shape[1].offset=0;
-}
+
+shape[2].number=2;
+shape[2].symmetry=0;
+shape[2].range=n+2;
+shape[2].offset=0;
+
+
+Tensor<double> x(shape); x.randomize(1.);
+x.defaultnames();
+cout <<"x= "<<x.shape << " "<<x.names<<endl;
+
+NRVec<INDEXGROUP> yshape(2);
+yshape[0].number=1;
+yshape[0].symmetry=0;
+yshape[0].range=n;
+yshape[0].offset=0;
+
+yshape[1].number=1;
+yshape[1].symmetry= 0;
+yshape[1].range=n+3;
+yshape[1].offset=0;
+
+Tensor<double> y(yshape); y.randomize(1.);
+
+INDEX posit(1,0);
+//Tensor<double> z=x.unwind_index(1,1);
+//Tensor<double> z=x.contraction(INDEX(1,1),y,INDEX(0,0),1,false,false);
+Tensor<double> z=x.linear_transform(1,y);
+
+cout <<z.shape;
+
+//check

-NRSMat<double> aa;
-NRMat<double> vv(n,n);
-aa=a; diagonalize(aa,rr,&vv);
-NRVec<double> r(m);
-NRVec<double> *eivecs = new NRVec<double>[m];
-double target= 0;
-cout <<"Exact energies " <<rr<<endl;

-davidson(a,r,eivecs,NULL,m,1,1e-6,1,200,300,(void (*)(LA::NRVec<double>&))NULL,&target);
-cout <<"Davidson energies " <<r;
 }

 }//main
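The "Eigenvectors compare" loops above print each iterative eigenvector next to the corresponding column of the dense solution; eigenvectors are only defined up to an overall sign (or phase in the complex case), so an automated check has to compare them sign-insensitively. A minimal standalone helper for the real case might look like this (an illustration, not part of the commits):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Maximum componentwise deviation between two real vectors,
// allowing for an overall sign flip (eigenvectors are defined only up to +-1).
double eigvec_distance(const std::vector<double> &a, const std::vector<double> &b)
{
if(a.size()!=b.size()) return -1;                 // signal a caller's error
double dplus=0, dminus=0;
for(std::size_t i=0; i<a.size(); ++i)
        {
        dplus  = std::max(dplus,  std::fabs(a[i]-b[i]));
        dminus = std::max(dminus, std::fabs(a[i]+b[i]));
        }
return std::min(dplus,dminus);                    // best of the two sign choices
}
```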
66 tensor.cc
@@ -713,6 +713,9 @@ return q;
 template<typename T>
 Tensor<T> Tensor<T>::permute_index_groups(const NRPerm<int> &p) const
 {
+if(p.size()!=shape.size()) laerror("permutation size mismatch in permute_index_groups");
+if(p.is_identity()) return *this;
+
 //std::cout <<"permute_index_groups permutation = "<<p<<std::endl;
 NRVec<INDEXGROUP> newshape=shape.permuted(p,true);
 //std::cout <<"permute_index_groups newshape = "<<newshape<<std::endl;
@@ -836,6 +839,7 @@ if(group==0 && index==0 && shape[0].symmetry==0) //formal split of 1 index witho
 }
 if(shape[group].number==1) return unwind_index_group(group); //single index in the group

+
 //general case - recalculate the shape and allocate the new tensor
 NRVec<INDEXGROUP> newshape(shape.size()+1);
 newshape[0].number=1;
@@ -856,7 +860,10 @@ for(int i=0; i<shape.size(); ++i)
 --newshape[i+1].number;
 flatindex += index;
 }
-else flatindex += shape[i].number;
+else
+{
+if(i<group) flatindex += shape[i].number;
+}
 }


@@ -1840,6 +1847,10 @@ for(int g=shape.size()-1; g>=0; --g)
 Tensor<T> mat(x[g],true); //flat tensor from a matrix
 for(int i=shape[g].number-1; i>=0; --i) //indices in the group in reverse order
 {
+#ifdef LA_TENSOR_INDEXPOSITION
+//what should we do with index position?
+//either set upperindex of mat appropriately, or request ignoring that in contractions?
+#endif
 r= tmp.contraction(indexposition(rank()-1,tmp.shape),mat,INDEX(0,0),(T)1,false,false); //always the last index
 if(i>0)
 {
@@ -1870,6 +1881,59 @@ return r;
 }


+template<typename T>
+Tensor<T> Tensor<T>::linear_transform(const int g, const Tensor<T> &x) const
+{
+if(g<0||g>=shape.size()) laerror("wrong index group number in linear_transform");
+if(x.rank()!=2 || x.shape.size()!=2 || x.shape[0].number!=1||x.shape[1].number!=1) laerror("wrong tensor shape for linear_transform");
+if(x.shape[0].range!=shape[g].range) laerror("index range mismatch in linear_transform");
+
+Tensor<T> tmp(*this);
+Tensor<T> r;
+
+
+//contract all indices in the reverse order
+int gnow=g;
+for(int i=shape[g].number-1; i>=0; --i) //indices in the group in reverse order
+{
+r= tmp.contraction(INDEX(gnow,i),x,INDEX(0,0),(T)1,false,false);
+++gnow; //new group number in r, one index was added left
+if(i>0)
+{
+tmp=r;
+r.deallocate();
+}
+}
+//the group's indices are now individual ones leftmost in tensor r, restore group size and symmetry
+if(shape[g].number>1)
+{
+INDEXLIST il(shape[g].number);
+for(int i=0; i<shape[g].number; ++i)
+{
+il[i].group=i;
+il[i].index=0;
+}
+r = r.merge_indices(il,shape[g].symmetry);
+}
+
+//permute the group (now 0) to its original position
+if(g!=0)
+{
+NRPerm<int> p(r.shape.size());
+for(int i=0; i<g; ++i) p[i+1] = (i+1)+1;
+p[g+1]=1;
+for(int i=g+1; i<r.shape.size();++i) p[i+1] = i+1;
+//std::cout<<"perm "<<p;
+r=r.permute_index_groups(p);
+}
+
+//preserve names
+r.names=names;
+
+return r;
+}
+
+

 template<typename T>
 int Tensor<T>::findflatindex(const INDEXNAME nam) const
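Conceptually, the new `linear_transform(g,x)` applies the rank-2 tensor `x` to every index of group `g` (a mode-wise tensor-matrix product) and then restores the group's symmetry packing and original position via `merge_indices` and `permute_index_groups`. On a plain dense, unpacked rank-3 array the underlying operation reduces to the sketch below; the contraction convention shown (which matrix index is summed over) is illustrative and may differ from the library's `contraction`:

```cpp
#include <vector>

// Dense mode-1 product: R[i][a][k] = sum_j T[i][j][k] * X[j][a]
// T is d0 x d1 x d2 stored row-major in a flat vector, X is d1 x d1.
std::vector<double> mode1_transform(const std::vector<double> &T,
                                    const std::vector<double> &X,
                                    int d0, int d1, int d2)
{
std::vector<double> R(T.size(), 0.0);
for(int i=0; i<d0; ++i)
        for(int a=0; a<d1; ++a)
                for(int j=0; j<d1; ++j)
                        {
                        double xja = X[j*d1+a];
                        for(int k=0; k<d2; ++k)
                                R[(i*d1+a)*d2+k] += T[(i*d1+j)*d2+k] * xja;
                        }
return R;
}
```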
2 tensor.h
@@ -421,6 +421,8 @@ public:
 NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=false); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
 Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=false) const; //rebuild the original tensor from Tucker
 Tensor linear_transform(const NRVec<NRMat<T> > &x) const; //linear transform by a different matrix per each index group, preserving group symmetries
+Tensor linear_transform(const int g, const Tensor<T> &x) const; //linear transform a single group, preserve its position and symmetry, x must be rank-2 flat tensor (this is useful for raising/lowering indices)
+Tensor linear_transform(const int g, const NRMat<T> &x) const {Tensor<T> mat(x,true); return linear_transform(g,mat);}; //linear transform a single group, preserve its position and symmetry
 };


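Usage mirrors the call exercised in t.cc above: group 1 of `x` is transformed by the rank-2 flat tensor `y`, or by a matrix via the inline `NRMat` overload. A hedged sketch reusing the names `x` and `n` from that test; the identity-matrix check is an assumption about the contraction convention, not something asserted by the commits:

```cpp
Tensor<double> z = x.linear_transform(1, y);        // as in the t.cc test above

// NRMat overload declared above; the matrix dimension must match shape[1].range of x
NRMat<double> ymat(n, n);
for(int i=0; i<n; ++i) for(int j=0; j<n; ++j) ymat(i,j) = (i==j) ? 1. : 0.;  // identity matrix
Tensor<double> z2 = x.linear_transform(1, ymat);    // with the identity this should reproduce x
```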