simplified (inverse)Tucker for non-inverting index order
This commit is contained in:
31
t.cc
31
t.cc
@@ -3733,6 +3733,7 @@ INDEXGROUP shape;
|
||||
{
|
||||
shape.number=r;
|
||||
shape.symmetry= 1;
|
||||
//shape.symmetry= -1;
|
||||
shape.range=n;
|
||||
shape.offset=0;
|
||||
}
|
||||
@@ -4392,7 +4393,7 @@ cout <<"Error = "<<(z-zzz).norm()<<endl;
|
||||
|
||||
}
|
||||
|
||||
if(1)
|
||||
if(0)
|
||||
{
|
||||
//for profiling and timing
|
||||
int nn;
|
||||
@@ -4423,5 +4424,33 @@ cout <<z.norm()<<endl;
|
||||
|
||||
}
|
||||
|
||||
if(1)
|
||||
{
|
||||
//tucker test order
|
||||
int r,n;
|
||||
bool inv;
|
||||
cin>>r>>n>>inv;
|
||||
NRVec<INDEXGROUP> shape(r);
|
||||
for(int i=0; i<r; ++i)
|
||||
{
|
||||
shape[i].number=1;
|
||||
shape[i].symmetry=0;
|
||||
shape[i].range=n+i;
|
||||
shape[i].offset=0;
|
||||
}
|
||||
Tensor<double> x(shape);
|
||||
x.randomize(1.);
|
||||
cout<<x;
|
||||
Tensor<double> x0(x);
|
||||
x0.copyonwrite();
|
||||
NRVec<NRMat<double> > dec=x.Tucker(1e-12,inv);
|
||||
cout<<"Tucker\n"<<x<<endl;
|
||||
cout<<dec;
|
||||
|
||||
Tensor<double> y = x.inverseTucker(dec,inv);
|
||||
cout <<"invTucker\n"<<y;
|
||||
cout <<"Error = "<<(x0-y).norm()<<endl;
|
||||
}
|
||||
|
||||
|
||||
}//main
|
||||
|
||||
40
tensor.cc
40
tensor.cc
@@ -1722,14 +1722,15 @@ if(r==1) //create an analogous output for the trivial case
|
||||
}
|
||||
|
||||
//loop over all indices; relies on the fact that unwinding does not change order of remaining indices
|
||||
//for inverseorder=false, to avoid inverting order by permute_index_groups we repeatedly unwind the LAST index, and all indices rotate at this position with the first one in the last iteration due to the unwind!
|
||||
for(int i=0; i<r; ++i)
|
||||
{
|
||||
INDEX I=indexposition(i,shape);
|
||||
INDEX I= inverseorder? indexposition(i,shape) : indexposition(r-1,shape);
|
||||
NRMat<T> um;
|
||||
NRVec<INDEXGROUP> ushape;
|
||||
{
|
||||
Tensor<T> uu=unwind_index(I);
|
||||
ushape=uu.shape; //ushape.copyonwrite(); should not be needed
|
||||
ushape=uu.shape;
|
||||
um=uu.matrix();
|
||||
}
|
||||
int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values
|
||||
@@ -1755,9 +1756,10 @@ for(int i=0; i<r; ++i)
|
||||
umnew=u.submatrix(0,umnr-1,0,preserve-1);
|
||||
}
|
||||
else umnew=u;
|
||||
ret[(inverseorder? r-i-1 : i)]=vt.transpose(true);
|
||||
ret[r-i-1]=vt.transpose(true);
|
||||
umnew.diagmultr(w);
|
||||
//rebuild tensor of the preserved shape from matrix
|
||||
ushape.copyonwrite();
|
||||
ushape[0].range=preserve;
|
||||
{
|
||||
NRVec<T> newdata(umnew);
|
||||
@@ -1766,17 +1768,10 @@ for(int i=0; i<r; ++i)
|
||||
}
|
||||
}
|
||||
if(!is_flat()) laerror("this should not happen");
|
||||
if(!inverseorder)
|
||||
{
|
||||
NRPerm<int> p(r);
|
||||
for(int i=1; i<=r; ++i) p[r-i+1]=i;
|
||||
*this = permute_index_groups(p);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
template<typename T>
|
||||
Tensor<T> Tensor<T>::inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder) const
|
||||
{
|
||||
@@ -1784,9 +1779,11 @@ if(rank()!=x.size()) laerror("input of inverseTucker does not match rank");
|
||||
Tensor<T> tmp(*this);
|
||||
Tensor<T> r;
|
||||
if(!is_flat()) laerror("inverseTucker only for flat tensors as produced by Tucker");
|
||||
if(inverseorder)
|
||||
{
|
||||
for(int i=0; i<rank(); ++i)
|
||||
{
|
||||
Tensor<T> mat(x[i],true);
|
||||
Tensor<T> mat(x[i],true); //flat tensor from a matrix
|
||||
r= tmp.contraction(i,0,mat,0,0,(T)1,false,false);
|
||||
if(i<rank()-1)
|
||||
{
|
||||
@@ -1794,14 +1791,21 @@ for(int i=0; i<rank(); ++i)
|
||||
r.deallocate();
|
||||
}
|
||||
}
|
||||
if(!inverseorder)
|
||||
}
|
||||
else //not inverseorder
|
||||
{
|
||||
for(int i=rank()-1; i>=0; --i)
|
||||
{
|
||||
NRPerm<int> p(r.rank());
|
||||
for(int i=1; i<=r.rank(); ++i) p[r.rank()-i+1]=i;
|
||||
return r.permute_index_groups(p);
|
||||
Tensor<T> mat(x[i],true); //flat tensor from a matrix
|
||||
r= tmp.contraction(rank()-1,0,mat,0,0,(T)1,false,false); //the current index will be the last after previous contractions
|
||||
if(i>0)
|
||||
{
|
||||
tmp=r;
|
||||
r.deallocate();
|
||||
}
|
||||
else
|
||||
return r;
|
||||
}
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
@@ -2116,7 +2120,7 @@ NRVec<NRVec_from1<int> > antigroups;
|
||||
parse_antisymmetrizer(antisymmetrizer,antigroups,antinames);
|
||||
|
||||
//check the names make sense and fill in the possibly missing ones as separate group
|
||||
if(antinames.size()>tmpnames.size()) laerror("too many indices in the antisymmetrizet");
|
||||
if(antinames.size()>tmpnames.size()) laerror("too many indices in the antisymmetrizer");
|
||||
bitvector isexplicit(tmpnames.size());
|
||||
isexplicit.clear();
|
||||
for(int i=0; i<antinames.size(); ++i)
|
||||
|
||||
6
tensor.h
6
tensor.h
@@ -248,7 +248,7 @@ public:
|
||||
explicit Tensor(const NRVec<T> &x);
|
||||
explicit Tensor(const NRMat<T> &x, bool flat=false);
|
||||
explicit Tensor(const NRSMat<T> &x);
|
||||
NRMat<T> matrix() const {return NRMat<T>(data,data.size()/groupsizes[0],groupsizes[0],0);}; //reinterpret as matrix with column index being the tensor's leftmost index group (typically the unwound single index)
|
||||
NRMat<T> matrix() const {return NRMat<T>(data,data.size()/groupsizes[0],groupsizes[0],0);}; //reinterpret as matrix with column index being the tensor's leftmost index group (typically the unwound single index) //@@@generalize to column index being n leftmost tensor index groups
|
||||
|
||||
bool is_named() const {if(names.size()==0) return false; if(names.size()!=myrank) laerror("bad number of index names"); return true;};
|
||||
bool is_uniquely_named() const; //no repeated names
|
||||
@@ -399,8 +399,8 @@ public:
|
||||
Tensor merge_indices(const INDEXLIST &il, int symmetry=0) const; //opposite to flatten (merging with optional symmetrization/antisymmetrization and compression)
|
||||
Tensor merge_indices(const NRVec<INDEXNAME> &nl, int symmetry=0) const {return merge_indices(findindexlist(nl),symmetry);};
|
||||
|
||||
NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
|
||||
Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker
|
||||
NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=false); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
|
||||
Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=false) const; //rebuild the original tensor from Tucker
|
||||
};
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user