simplified (inverse)Tucker for non-inverting index order

This commit is contained in:
2025-11-20 16:00:53 +01:00
parent 54642a71cc
commit a342032b58
3 changed files with 55 additions and 22 deletions

31
t.cc
View File

@@ -3733,6 +3733,7 @@ INDEXGROUP shape;
{ {
shape.number=r; shape.number=r;
shape.symmetry= 1; shape.symmetry= 1;
//shape.symmetry= -1;
shape.range=n; shape.range=n;
shape.offset=0; shape.offset=0;
} }
@@ -4392,7 +4393,7 @@ cout <<"Error = "<<(z-zzz).norm()<<endl;
} }
if(1) if(0)
{ {
//for profiling and timing //for profiling and timing
int nn; int nn;
@@ -4423,5 +4424,33 @@ cout <<z.norm()<<endl;
} }
if(1)
{
//tucker test order
int r,n;
bool inv;
cin>>r>>n>>inv;
NRVec<INDEXGROUP> shape(r);
for(int i=0; i<r; ++i)
{
shape[i].number=1;
shape[i].symmetry=0;
shape[i].range=n+i;
shape[i].offset=0;
}
Tensor<double> x(shape);
x.randomize(1.);
cout<<x;
Tensor<double> x0(x);
x0.copyonwrite();
NRVec<NRMat<double> > dec=x.Tucker(1e-12,inv);
cout<<"Tucker\n"<<x<<endl;
cout<<dec;
Tensor<double> y = x.inverseTucker(dec,inv);
cout <<"invTucker\n"<<y;
cout <<"Error = "<<(x0-y).norm()<<endl;
}
}//main }//main

View File

@@ -1722,14 +1722,15 @@ if(r==1) //create an analogous output for the trivial case
} }
//loop over all indices; relies on the fact that unwinding does not change order of remaining indices //loop over all indices; relies on the fact that unwinding does not change order of remaining indices
//for inverseorder=false, to avoid inverting order by permute_index_groups we repeatedly unwind the LAST index, and all indices rotate at this position with the first one in the last iteration due to the unwind!
for(int i=0; i<r; ++i) for(int i=0; i<r; ++i)
{ {
INDEX I=indexposition(i,shape); INDEX I= inverseorder? indexposition(i,shape) : indexposition(r-1,shape);
NRMat<T> um; NRMat<T> um;
NRVec<INDEXGROUP> ushape; NRVec<INDEXGROUP> ushape;
{ {
Tensor<T> uu=unwind_index(I); Tensor<T> uu=unwind_index(I);
ushape=uu.shape; //ushape.copyonwrite(); should not be needed ushape=uu.shape;
um=uu.matrix(); um=uu.matrix();
} }
int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values
@@ -1755,9 +1756,10 @@ for(int i=0; i<r; ++i)
umnew=u.submatrix(0,umnr-1,0,preserve-1); umnew=u.submatrix(0,umnr-1,0,preserve-1);
} }
else umnew=u; else umnew=u;
ret[(inverseorder? r-i-1 : i)]=vt.transpose(true); ret[r-i-1]=vt.transpose(true);
umnew.diagmultr(w); umnew.diagmultr(w);
//rebuild tensor of the preserved shape from matrix //rebuild tensor of the preserved shape from matrix
ushape.copyonwrite();
ushape[0].range=preserve; ushape[0].range=preserve;
{ {
NRVec<T> newdata(umnew); NRVec<T> newdata(umnew);
@@ -1766,17 +1768,10 @@ for(int i=0; i<r; ++i)
} }
} }
if(!is_flat()) laerror("this should not happen"); if(!is_flat()) laerror("this should not happen");
if(!inverseorder)
{
NRPerm<int> p(r);
for(int i=1; i<=r; ++i) p[r-i+1]=i;
*this = permute_index_groups(p);
}
return ret; return ret;
} }
template<typename T> template<typename T>
Tensor<T> Tensor<T>::inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder) const Tensor<T> Tensor<T>::inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder) const
{ {
@@ -1784,9 +1779,11 @@ if(rank()!=x.size()) laerror("input of inverseTucker does not match rank");
Tensor<T> tmp(*this); Tensor<T> tmp(*this);
Tensor<T> r; Tensor<T> r;
if(!is_flat()) laerror("inverseTucker only for flat tensors as produced by Tucker"); if(!is_flat()) laerror("inverseTucker only for flat tensors as produced by Tucker");
if(inverseorder)
{
for(int i=0; i<rank(); ++i) for(int i=0; i<rank(); ++i)
{ {
Tensor<T> mat(x[i],true); Tensor<T> mat(x[i],true); //flat tensor from a matrix
r= tmp.contraction(i,0,mat,0,0,(T)1,false,false); r= tmp.contraction(i,0,mat,0,0,(T)1,false,false);
if(i<rank()-1) if(i<rank()-1)
{ {
@@ -1794,14 +1791,21 @@ for(int i=0; i<rank(); ++i)
r.deallocate(); r.deallocate();
} }
} }
if(!inverseorder) }
else //not inverseorder
{
for(int i=rank()-1; i>=0; --i)
{ {
NRPerm<int> p(r.rank()); Tensor<T> mat(x[i],true); //flat tensor from a matrix
for(int i=1; i<=r.rank(); ++i) p[r.rank()-i+1]=i; r= tmp.contraction(rank()-1,0,mat,0,0,(T)1,false,false); //the current index will be the last after previous contractions
return r.permute_index_groups(p); if(i>0)
{
tmp=r;
r.deallocate();
}
} }
else }
return r; return r;
} }
@@ -2116,7 +2120,7 @@ NRVec<NRVec_from1<int> > antigroups;
parse_antisymmetrizer(antisymmetrizer,antigroups,antinames); parse_antisymmetrizer(antisymmetrizer,antigroups,antinames);
//check the names make sense and fill in the possibly missing ones as separate group //check the names make sense and fill in the possibly missing ones as separate group
if(antinames.size()>tmpnames.size()) laerror("too many indices in the antisymmetrizet"); if(antinames.size()>tmpnames.size()) laerror("too many indices in the antisymmetrizer");
bitvector isexplicit(tmpnames.size()); bitvector isexplicit(tmpnames.size());
isexplicit.clear(); isexplicit.clear();
for(int i=0; i<antinames.size(); ++i) for(int i=0; i<antinames.size(); ++i)

View File

@@ -248,7 +248,7 @@ public:
explicit Tensor(const NRVec<T> &x); explicit Tensor(const NRVec<T> &x);
explicit Tensor(const NRMat<T> &x, bool flat=false); explicit Tensor(const NRMat<T> &x, bool flat=false);
explicit Tensor(const NRSMat<T> &x); explicit Tensor(const NRSMat<T> &x);
NRMat<T> matrix() const {return NRMat<T>(data,data.size()/groupsizes[0],groupsizes[0],0);}; //reinterpret as matrix with column index being the tensor's leftmost index group (typically the unwound single index) NRMat<T> matrix() const {return NRMat<T>(data,data.size()/groupsizes[0],groupsizes[0],0);}; //reinterpret as matrix with column index being the tensor's leftmost index group (typically the unwound single index) //@@@generalize to column index being n leftmost tensor index groups
bool is_named() const {if(names.size()==0) return false; if(names.size()!=myrank) laerror("bad number of index names"); return true;}; bool is_named() const {if(names.size()==0) return false; if(names.size()!=myrank) laerror("bad number of index names"); return true;};
bool is_uniquely_named() const; //no repeated names bool is_uniquely_named() const; //no repeated names
@@ -399,8 +399,8 @@ public:
Tensor merge_indices(const INDEXLIST &il, int symmetry=0) const; //opposite to flatten (merging with optional symmetrization/antisymmetrization and compression) Tensor merge_indices(const INDEXLIST &il, int symmetry=0) const; //opposite to flatten (merging with optional symmetrization/antisymmetrization and compression)
Tensor merge_indices(const NRVec<INDEXNAME> &nl, int symmetry=0) const {return merge_indices(findindexlist(nl),symmetry);}; Tensor merge_indices(const NRVec<INDEXNAME> &nl, int symmetry=0) const {return merge_indices(findindexlist(nl),symmetry);};
NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=false); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=false) const; //rebuild the original tensor from Tucker
}; };