small improvements in tensor/tucker

Jiri Pittner 2025-10-23 17:29:09 +02:00
parent 4bd2761cc5
commit 346ce5bc3a
3 changed files with 34 additions and 8 deletions

t.cc

@@ -3683,7 +3683,7 @@ cin>>r>>n;
 INDEXGROUP shape;
     {
     shape.number=r;
-    shape.symmetry= -1;
+    shape.symmetry= 1;
     shape.range=n;
     shape.offset=0;
     }

(second changed file; name not shown)

@@ -802,10 +802,30 @@ template<typename T>
 Tensor<T> Tensor<T>::flatten(int group) const
 {
 if(group>=shape.size()) laerror("too high group number in flatten");
-if(is_flat()) return *this;
+if(is_flat())
+    {
+    if(has_symmetry()) //get rid of formal symmetry
+        {
+        Tensor<T> r(*this);
+        r.shape.copyonwrite();
+        for(int g=0; g<r.shape.size(); ++g) r.shape[g].symmetry=0;
+        return r;
+        }
+    else
+        return *this;
+    }
 if(group>=0) //single group
     {
-    if(shape[group].number==1) return *this;
+    if(shape[group].number==1)
+        {
+        if(shape[group].symmetry==0) return *this;
+        else
+            {
+            Tensor<T> r(*this);
+            r.shape[group].symmetry=0;
+            return r;
+            }
+        }
     if(shape[group].symmetry==0)
         {
         Tensor<T> r(*this);
@@ -816,7 +836,11 @@ if(group>=0) //single group
 if(group<0 && !is_compressed())
     {
     Tensor<T> r(*this);
-    for(int g=0; g<shape.size(); ++g) if(shape[g].number>1) r.split_index_group(g);
+    for(int g=0; g<shape.size(); ++g)
+        {
+        if(shape[g].number>1) r.split_index_group(g);
+        }
+    for(int g=0; g<r.shape.size(); ++g) r.shape[g].symmetry=0;
     return r;
     }
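
The net effect of the two flatten() hunks above is a stronger post-condition: the returned tensor not only has every index group split into single indices, it also carries no leftover symmetry flags, including the purely formal ones on groups that were already trivial. A minimal sketch of that shape-level bookkeeping, using a hypothetical simplified IndexGroup with only the two fields involved (not the library's NRVec<indexgroup>, and ignoring the accompanying expansion of the packed data):

#include <cstdio>
#include <vector>

// Hypothetical stand-in for the library's index-group descriptor;
// only the two fields that the flatten() logic touches are modeled here.
struct IndexGroup { int number; int symmetry; };

// Post-condition sketched by the patch: after flattening, every group holds a
// single index and no symmetry flag survives, not even a formal one on an
// already trivial group. (The real code of course also expands the packed
// data, which this shape-only sketch ignores.)
static void flatten_shape(std::vector<IndexGroup> &shape)
{
    std::vector<IndexGroup> flat;
    for (size_t g = 0; g < shape.size(); ++g)
        for (int k = 0; k < shape[g].number; ++k)
            flat.push_back(IndexGroup{1, 0}); // split the group, drop its symmetry
    shape = flat;
}

int main()
{
    // a 3-index symmetric group plus a trivial group still carrying a nonzero symmetry flag
    std::vector<IndexGroup> shape = { {3, 1}, {1, 1} };
    flatten_shape(shape);
    for (size_t g = 0; g < shape.size(); ++g)
        std::printf("group %zu: number=%d symmetry=%d\n", g, shape[g].number, shape[g].symmetry);
    // prints four groups, all with number=1 and symmetry=0
    return 0;
}
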
@@ -1285,7 +1309,7 @@ return r;
 //NOTE: Tucker of rank=2 is inherently inefficient - result is a diagonal tensor stored in full and 2 calls to SVD
-//we could avoid the second SVD, but the wasteful storage and erconstruction would remain
+//we could avoid the second SVD, but the wasteful storage and reconstruction would remain
 //
 template<typename T>
 NRVec<NRMat<T> > Tensor<T>::Tucker(typename LA_traits<T>::normtype thr, bool inverseorder)
@@ -1314,9 +1338,9 @@ for(int i=0; i<r; ++i)
 NRMat<T> um;
 NRVec<indexgroup> ushape;
 {
-    Tensor<T> u=unwind_index(I);
-    ushape=u.shape; ushape.copyonwrite();
-    um=u.matrix();
+    Tensor<T> uu=unwind_index(I);
+    ushape=uu.shape; //ushape.copyonwrite(); should not be needed
+    um=uu.matrix();
 }
 int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values
 NRMat<T> u(um.nrows(),mini),vt(mini,um.ncols());
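
For context on the NOTE and on this per-index SVD step, written in standard Tucker/HOSVD notation (generic background, not quoted from the library): each index k of a rank-d tensor gets a factor matrix U^{(k)} from a compact SVD of the tensor unwound along that index, presumably truncated at the threshold thr, and the core G is what remains after those factors are projected out:

T_{i_1 i_2 \cdots i_d} \;=\; \sum_{j_1,\dots,j_d} G_{j_1 \cdots j_d}\,
        U^{(1)}_{i_1 j_1}\, U^{(2)}_{i_2 j_2} \cdots U^{(d)}_{i_d j_d},
\qquad
T_{(k)} = U^{(k)} \Sigma^{(k)} V^{(k)\dagger} \ \text{(compact SVD of the unwinding along index } k\text{)}.

For d = 2 the core reduces to the diagonal matrix of singular values, so the decomposition is an ordinary SVD whose diagonal core is nonetheless stored as a full tensor; that, plus the second SVD call, is the inefficiency the NOTE points out.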

(third changed file; name not shown)

@@ -47,6 +47,7 @@
 //@@@ will not be particularly efficient
 //
 //@@@conversions to/from fourindex, optional negative range for beta spin handling
+//@@@use the fact that fourindex_dense is inherited from Mat/SMat and construct the tensor from the (unsymmetrized) NRMat sharing data, then just rewrite the shape
 //
 //@@@?general permutation of individual indices - check the indices in sym groups remain adjacent, calculate result's shape, loop over the result and permute using unwind_callback
 //
@@ -173,6 +174,7 @@ public:
 bool is_flat() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1) return false; return true;};
 bool is_compressed() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1&&shape[i].symmetry!=0) return true; return false;};
+bool has_symmetry() const {for(int i=0; i<shape.size(); ++i) if(shape[i].symmetry!=0) return true; return false;};
 void clear() {data.clear();};
 int rank() const {return myrank;};
 int calcrank(); //is computed from shape
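
The new has_symmetry() is deliberately weaker than the existing is_compressed(): a nonzero symmetry flag on a single-index group does not reduce storage, so is_compressed() ignores it, while has_symmetry() still reports it; that is exactly the formal-symmetry case the flatten() change above strips. A small self-contained illustration of the difference, again with a hypothetical stand-in type rather than the library's indexgroup:

#include <cassert>
#include <vector>

// Hypothetical stand-in holding just the two fields the predicates test.
struct Group { int number; int symmetry; };

// Same loops as the class predicates, restated over the stand-in type.
static bool is_compressed(const std::vector<Group> &s)
{
    for (size_t i = 0; i < s.size(); ++i)
        if (s[i].number > 1 && s[i].symmetry != 0) return true;
    return false;
}

static bool has_symmetry(const std::vector<Group> &s)
{
    for (size_t i = 0; i < s.size(); ++i)
        if (s[i].symmetry != 0) return true;
    return false;
}

int main()
{
    std::vector<Group> formal = { {1, 1} }; // one index flagged symmetric: formal only
    std::vector<Group> packed = { {3, 1} }; // three symmetric indices: genuinely compressed
    assert(has_symmetry(formal) && !is_compressed(formal));
    assert(has_symmetry(packed) && is_compressed(packed));
    return 0;
}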