From cd09d93c27e736f2957e4532b70c34a53008a09b Mon Sep 17 00:00:00 2001
From: Jiri Pittner
Date: Thu, 23 Oct 2025 15:29:48 +0200
Subject: [PATCH] Tucker tested on compressed tensors, flattening implemented

---
 t.cc      | 59 +++++++++++++++++++++++++++++++++++-
 tensor.cc | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 tensor.h  | 11 +++++--
 3 files changed, 151 insertions(+), 10 deletions(-)

diff --git a/t.cc b/t.cc
index fa8e767..0a53081 100644
--- a/t.cc
+++ b/t.cc
@@ -3619,7 +3619,7 @@ cout << "Error "<<(u*sdiag*vt-abak).norm()<<endl;
[...]Tensor<double> y = x.inverseTucker(dec,inv);
+cout <<"invTucker\n"<<[...]
[...]Tensor<double> y = x.inverseTucker(dec,inv);
+cout <<"invTucker\n"<<[...]
[...]Tensor<double> x1=x0.flatten();
+cout <<"Error = "<<(x1-y).norm()<<endl;
[...]
diff --git a/tensor.cc b/tensor.cc
[...]
[...]template<typename T>
 void Tensor<T>::grouploopover(void (*callback)(const GROUPINDEX &, T *))
 {
 GROUPINDEX I(shape.size());
-T *pp=&data[0];
+T *pp= &data[0];
 loopovergroups(*this,shape.size()-1,&pp,I,callback);
 }
@@ -649,7 +649,7 @@ for(int i=0; i<[...]
[...])(J); //rhs operator() generates the redundant elements for the unwinded lhs tensor
+}
+//
+
+
+template<typename T>
+Tensor<T> Tensor<T>::flatten(int group) const
+{
+if(group>=shape.size()) laerror("too high group number in flatten");
+if(is_flat()) return *this;
+if(group>=0) //single group
+	{
+	if(shape[group].number==1) return *this;
+	if(shape[group].symmetry==0)
+		{
+		Tensor r(*this);
+		r.split_index_group(group);
+		return r;
+		}
+	}
+if(group<0 && !is_compressed())
+	{
+	Tensor r(*this);
+	for(int g=0; g<shape.size(); ++g) if(shape[g].number>1) r.split_index_group(g);
+	return r;
+	}
+
+//general case
+int newsize;
+if(group<0) newsize=rank();
+else newsize=shape.size()+shape[group].number-1;
+
+//build new shape
+NRVec<INDEXGROUP> newshape(newsize);
+int gg=0;
+for(int g=0; g<[...]number>1) //flatten this group
+	{
+	for(int i=0; i<[...]
[...]
[...]NRVec<INDEXGROUP> newshape(shape.size()+shape[group].number-1);
 int gg=0;
@@ -1133,16 +1206,19 @@ for(int i=0; i<[...]
[...]NRMat<T> umnew;
+	//std::cout <<"TEST "<<[...]
[...]
[...]template class Tensor<double>;
 template class Tensor<std::complex<double> >;
 template std::ostream & operator<<(std::ostream &s, const Tensor<double> &x);
diff --git a/tensor.h b/tensor.h
index 87c8dd0..83becb9 100644
--- a/tensor.h
+++ b/tensor.h
@@ -40,14 +40,16 @@
 //TODO:
 //@@@!!!!!! - implement index names and contractions, unwinding etc. by named index list
+//@@@index names flat or in groups ?
 //
 //@@@contraction inside one tensor - compute resulting shape, loopover the shape, create index into the original tensor + loop over the contr. index, do the summation, store result
 //@@@ will need to store vector of INDEX to the original tensor for the result's flatindex
 //@@@ will not be particularly efficient
 //
-//@@@conversions to/from fourindex
+//@@@conversions to/from fourindex, optional negative range for beta spin handling
+//@@@ optional distinguish covariant and contravariant check in contraction
 //
-//@@@!!!!!!!!!!!const loopover and grouploopover
+//maybe const loopover and grouploopover to avoid problems with shallowly copied tensors
 //
 //@@@?general permutation of individual indices - check the indices in sym groups remain adjacent, calculate result's shape, loopover the result and permute using unwind_callback
 //
@@ -162,6 +164,7 @@ public:
 	NRMat<T> matrix() const {return NRMat<T>(data,data.size()/groupsizes[0],groupsizes[0],0);}; //reinterpret as matrix with column index being the tensor's leftmost index group (typically the unwound single index)
 	bool is_flat() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1) return false; return true;};
+	bool is_compressed() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1&&shape[i].symmetry!=0) return true; return false;};
 	void clear() {data.clear();};
 	int rank() const {return myrank;};
 	int calcrank(); //is computed from shape
@@ -240,9 +243,11 @@ public:
 	// Note that *this tensor can be e.g. antisymmetric while rhs is not and is being antisymmetrized by the PermutationAlgebra
 	// The efficiency is not optimal, even when avoiding the outer product, the calculation is done indexing element by element
 	// More efficient would be applying permutation algebra symbolically and efficiently computing term by term
-	void split_index_group(int group); //formal split of a non-symmetric index group WITHOUT the need for data reorganization
+
+	void split_index_group(int group); //formal in-place split of a non-symmetric index group WITHOUT the need for data reorganization
 	void merge_adjacent_index_groups(int groupfrom, int groupto); //formal merge of non-symmetric index groups WITHOUT the need for data reorganization
 	Tensor merge_index_groups(const NRVec<int> &groups) const;
+	Tensor flatten(int group= -1) const; //split and uncompress a given group or all of them
 	NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
 	Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker
 };
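
A minimal usage sketch of the interface this patch exercises, mirroring the x0/x1 test pattern in t.cc. It is not part of the patch: the INDEXGROUP field names (number, symmetry, offset, range), the public data vector, and the concrete dimensions are assumptions based on the conventions visible in tensor.h.

#include "tensor.h"
using namespace LA;
using namespace std;

int main(void)
{
//rank-4 tensor: one antisymmetric index pair plus one plain index pair -> compressed storage
NRVec<INDEXGROUP> shape(2);
shape[0].number=2; shape[0].symmetry= -1; shape[0].offset=0; shape[0].range=5; //assumed field names (offset may be fixed to 0 in some builds)
shape[1].number=2; shape[1].symmetry=0; shape[1].offset=0; shape[1].range=5;
Tensor<double> x(shape);
for(int i=0; i<x.data.size(); ++i) x.data[i]=(i%7)-3.; //arbitrary fill; assumes the data vector is public as elsewhere in LA

Tensor<double> x0(x); //keep the original; Tucker() replaces *this by the flattened core tensor
NRVec<NRMat<double> > dec=x.Tucker(1e-12); //HOSVD-Tucker decomposition
Tensor<double> y=x.inverseTucker(dec); //rebuild the tensor from core and factor matrices

cout <<"is_compressed = "<<x0.is_compressed()<<endl; //true: the antisymmetric pair is stored compressed
Tensor<double> x1=x0.flatten(); //uncompress and split all index groups to a flat rank-4 shape
cout <<"Error = "<<(x1-y).norm()<<endl; //y is flat, so compare against the flattened original
return 0;
}

The flatten() call is what makes the comparison well defined: inverseTucker() returns a flat tensor, so the compressed original must be expanded to the same shape before taking the norm of the difference.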