diff --git a/t.cc b/t.cc
index 986eefb..9d0a973 100644
--- a/t.cc
+++ b/t.cc
@@ -3256,6 +3256,8 @@ for(int i=0; i<4; ++i)
   }
 Tensor<double> t(s);
 t.randomize(1.);
+INDEXNAME list[4]={"i","j","k","l"};
+t.names=list;
 cout <<t;
 NRPerm<int> p({3,1,4,2});
 Tensor<double> tt=t.permute_index_groups(p);
@@ -3269,6 +3271,26 @@ for(int i=0; i<
+NRVec<INDEXGROUP> s(4);
+for(int i=0; i<4; ++i)
+ {
+ s[i].number=2;
+ s[i].symmetry=0;
+ s[i].offset=0;
+ s[i].range=n;
+ }
+Tensor<double> t(s);
+t.randomize(1.);
+INDEXNAME list[8]={"i1","i2","j1","j2","k1","k2","l1","l2"};
+t.names=list;
+NRPerm<int> p({3,1,4,2});
+Tensor<double> tt=t.permute_index_groups(p);
+cout <<tt;
 Tensor<double> e(g);
 e.randomize(1.);
+INDEXNAME list[4]={"i","j","k","l"};
+e.names=list;
 Tensor<double> eu = e.unwind_index(0,1);
 Tensor<double> eu2 = e.unwind_index(0,2);
 Tensor<double> eu3 = e.unwind_index(0,3);
@@ -3349,6 +3373,9 @@ ag.range=n;
 Tensor<double> a(ag);
 a.randomize(1.);
+INDEXNAME alist[4]={"i","j","k","l"};
+a.names=alist;
+
 INDEXGROUP bg;
 bg.number=3;
@@ -3358,14 +3385,24 @@ bg.range=n;
 Tensor<double> b(bg);
 b.randomize(1.);
+INDEXNAME blist[3]={"a","i","b"};
+b.names=blist;
+
 INDEXLIST il1(1);
 il1[0]={0,0};
 INDEXLIST il2(1);
 il2[0]={0,1};
+INDEXNAME clist[1]={"i"};
+NRVec<INDEXNAME> cl(clist);
 Tensor<double> cc = a.contractions(il1,b,il2);
-//Tensor cc = a.contraction(0,0,b,0,1);
+Tensor<double> ff = a.contractions(b,cl);
+Tensor<double> dd = a.contraction(0,0,b,0,1);
+Tensor<double> ee = a.contraction(b,"i");
 cout <<cc;
-  if(abs(c(m,l,k,j,i)-cc(m,l,k,j,i))>1e-13) laerror("internal error in contraction");
+  if(abs(c(m,l,k,j,i)-cc(m,l,k,j,i))>1e-13) laerror("internal error in contractions");
+  if(abs(c(m,l,k,j,i)-dd(m,l,k,j,i))>1e-13) laerror("internal error in contraction");
+  if(abs(c(m,l,k,j,i)-ee(m,l,k,j,i))>1e-13) laerror("internal error in named contraction");
+  if(abs(c(m,l,k,j,i)-ff(m,l,k,j,i))>1e-13) laerror("internal error in named contractions");
 }
 //cout <
 Tensor<double> e(g);
 e.randomize(1.);
+INDEXNAME list[4]={"i","j","k","l"};
+e.names=list;
+
+
 INDEXLIST il(2);
 il[0]= {0,1};
 il[1]= {0,3};
@@ -3893,9 +3937,9 @@ INDEXGROUP shape;
   }
 Tensor<double> t(shape);
 t.clear();
-INDEXNAME list[3]={"i1","i2","i3"};
-t.names=list;
-//t.names= {"i1","i2","i3"}; does not compile
+//INDEXNAME list[3]={"i1","i2","i3"}; t.names=list;
+//t.names= NRVec<INDEXNAME>({"i1","i2","i3"}); //does not compile
+t.defaultnames();
 for(int i=0; i<
diff --git a/tensor.cc b/tensor.cc
--- a/tensor.cc
+++ b/tensor.cc
 ->data[target] = *v;
 }
+//permutation of individual indices from permutation of index groups
+NRPerm<int> group2flat_perm(const NRVec<indexgroup> &shape, const NRPerm<int> &p)
+{
+int rank=0;
+for(int i=0; i<
+NRPerm<int> q(rank);
+int ii=1;
+for(int i=0; i<
 template<typename T>
 Tensor<T> Tensor<T>::permute_index_groups(const NRPerm<int> &p) const
@@ -668,6 +687,11 @@ Tensor<T> Tensor<T>::permute_index_groups(const NRPerm<int> &p) const
 NRVec<indexgroup> newshape=shape.permuted(p,true);
 //std::cout <<"permute_index_groups newshape = "<<newshape<<std::endl;
 Tensor<T> r(newshape);
+if(is_named())
+ {
+ NRPerm<int> q=group2flat_perm(shape,p);
+ r.names = names.permuted(q,true);
+ }
 //prepare statics for the callback
 help_p = &p;
@@ -773,6 +797,7 @@ for(int i=0; i<
 (nn,mm,kk,&data[0],&u.data[0], &rhsu.data[0],alpha,beta,conjugate);
 }
+
 template<typename T>
 void Tensor<T>::addcontractions(const Tensor &rhs1, const INDEXLIST &il1, const Tensor &rhs2, const INDEXLIST &il2, T alpha, T beta, bool doresize, bool conjugate1, bool conjugate2)
 {
@@ -1125,15 +1178,29 @@ int ii=0;
 for(int i=il1.size(); i<
+NRVec<INDEXNAME> newnames;
+if(u.is_named() && rhsu.is_named())
+ {
+ for(int i=0; i<
-Tensor<T> r = permute_index_groups(p);
-r.merge_adjacent_index_groups(0,groups.size()-1);
+Tensor<T> r = permute_index_groups(p); //takes care of names permutation too
+r.merge_adjacent_index_groups(0,groups.size()-1); //flat names invariant
 return r;
 }
@@ -1437,7 +1504,7 @@ int Tensor<T>::findflatindex(const INDEXNAME nam) const
 if(!is_named())
 laerror("tensor indices were not named");
 for(int i=0; i<
+template<typename T>
+NRVec<INDEX> Tensor<T>::findindexlist(const NRVec<INDEXNAME> &names) const
+{
+int n=names.size();
+NRVec<INDEX> ind(n);
+for(int i=0; i<
diff --git a/tensor.h b/tensor.h
--- a/tensor.h
+++ b/tensor.h
 > //leftmost index is least significant (changing fastest) in the storage order
@@ -39,9 +40,6 @@
 #include "miscfunc.h"
 
 //TODO:
-//@@@!!!!!! - implement index names and their treatment in all operations which permute indices
-//@@@implement contractions, unwinding etc. by named index list
-//
 //@@@contraction inside one tensor - compute resulting shape, loopover the shape, create index into the original tensor + loop over the contr. index, do the summation, store result
 //@@@ will need to store vector of INDEX to the original tensor for the result's flatindex
 //@@@ will not be particularly efficient
@@ -49,7 +47,7 @@
 //maybe optional negative range for beta spin handling in some cases of fourindex-tensor conversions
 //
 //@@@?general permutation of individual indices - check the indices in sym groups remain adjacent, calculate result's shape, loopover the result and permute using unwind_callback
-//
+//@@@? apply_permutation_algebra if result should be symmetric/antisymmetric in such a way to compute only the nonredundant part
 //
@@ -90,6 +88,11 @@ typedef int LA_largeindex;
 #define N_INDEXNAME 8
 struct INDEXNAME {
 char name[N_INDEXNAME];
+ INDEXNAME() {};
+ INDEXNAME(const char *n) {strncpy(name,n,N_INDEXNAME);};
+ INDEXNAME & operator=(const INDEXNAME &rhs) {strncpy(name,rhs.name,N_INDEXNAME); return *this;};
+ bool operator==(const INDEXNAME &rhs) const {return 0==strncmp(name,rhs.name,N_INDEXNAME);};
+ bool operator!=(const INDEXNAME &rhs) const {return !(rhs == *this);};
 };
 
 template<> class LA_traits<INDEXNAME> {
@@ -184,7 +187,7 @@ public:
 //constructors
 Tensor() : myrank(0) {};
 Tensor(const NRVec<indexgroup> &s) : shape(s) { data.resize(calcsize()); calcrank();}; //general tensor
- Tensor(const NRVec &s, const NRVec &newnames) : shape(s), names(newnames) { data.resize(calcsize()); calcrank(); if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}; //general tensor
+ Tensor(const NRVec &s, const NRVec &newnames) : shape(s), names(newnames) { data.resize(calcsize()); calcrank(); if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}; //general tensor
 Tensor(const NRVec<indexgroup> &s, const NRVec<T> &mydata) : shape(s) { LA_largeindex dsize=calcsize(); calcrank(); if(mydata.size()!=dsize) laerror("inconsistent data size with shape"); data=mydata;}
 Tensor(const NRVec<indexgroup> &s, const NRVec<T> &mydata, const NRVec<INDEXNAME> &newnames) : shape(s), names(newnames) { LA_largeindex dsize=calcsize(); calcrank(); if(mydata.size()!=dsize) laerror("inconsistent data size with shape"); data=mydata; if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}
 Tensor(const indexgroup &g) {shape.resize(1); shape[0]=g; data.resize(calcsize()); calcrank();}; //tensor with a single index group
@@ -204,6 +207,7 @@ public:
 bool is_compressed() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1&&shape[i].symmetry!=0) return true; return false;};
 bool has_symmetry() const {for(int i=0; i<
+ NRVec<INDEX> findindexlist(const NRVec<INDEXNAME> &names) const;
 inline Tensor& operator+=(const Tensor &rhs)
 {
@@ -268,16 +273,27 @@ public:
 void grouploopover(void (*callback)(const GROUPINDEX &, T *)); //loop over all elements disregarding the internal structure of index groups
 void constgrouploopover(void (*callback)(const GROUPINDEX &, const T *)) const; //loop over all elements disregarding the internal structure of index groups
 
-//@@@@@@@@@implement names treatment in the following
 Tensor permute_index_groups(const NRPerm<int> &p) const; //rearrange the tensor storage permuting index groups as a whole
+
 Tensor unwind_index(int group, int index) const; //separate an index from a group and expand it to full range as the least significant one (the leftmost one)
 Tensor unwind_index(const INDEX &I) const {return unwind_index(I.group,I.index);};
+ Tensor unwind_index(const INDEXNAME &N) const {return unwind_index(findindex(N));};
+
 Tensor unwind_indices(const INDEXLIST &il) const; //the same for a list of indices
+ Tensor unwind_indices(const NRVec<INDEXNAME> &names) const {return unwind_indices(findindexlist(names));};
+
 void addcontraction(const Tensor &rhs1, int group, int index, const Tensor &rhs2, int rhsgroup, int rhsindex, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate=false); //rhs1 will have more significant non-contracted indices in the result than rhs2
+
 inline void addcontraction(const Tensor &rhs1, const INDEX &I1, const Tensor &rhs2, const INDEX &I2, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate=false) {addcontraction(rhs1, I1.group, I1.index, rhs2, I2.group, I2.index, alpha, beta, doresize, conjugate, conjugate);};
+ inline void addcontraction(const Tensor &rhs1, const Tensor &rhs2, const INDEXNAME &iname, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate=false) {addcontraction(rhs1, rhs1.findindex(iname), rhs2, rhs2.findindex(iname), alpha, beta, doresize, conjugate, conjugate);};
+
 inline Tensor contraction(int group, int index, const Tensor &rhs, int rhsgroup, int rhsindex, T alpha=1, bool conjugate1=false, bool conjugate=false) const {Tensor r; r.addcontraction(*this,group,index,rhs,rhsgroup,rhsindex,alpha,0,true, conjugate1, conjugate); return r; };
+
 inline Tensor contraction(const INDEX &I, const Tensor &rhs, const INDEX &RHSI, T alpha=1, bool conjugate1=false, bool conjugate=false) const {return contraction(I.group,I.index, rhs, RHSI.group, RHSI.index,alpha, conjugate1, conjugate);};
+ inline Tensor contraction(const Tensor &rhs, const INDEXNAME &iname, T alpha=1, bool conjugate1=false, bool conjugate=false) const {return contraction(findindex(iname),rhs,rhs.findindex(iname),alpha, conjugate1, conjugate);};
 void addcontractions(const Tensor &rhs1, const INDEXLIST &il1, const Tensor &rhs2, const INDEXLIST &il2, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate2=false);
+ inline void addcontractions(const Tensor &rhs1, const Tensor &rhs2, const NRVec<INDEXNAME> &names, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate2=false) {addcontractions(rhs1, rhs1.findindexlist(names), rhs2, rhs2.findindexlist(names), alpha, beta, doresize, conjugate1,conjugate2);};
 inline Tensor contractions( const INDEXLIST &il1, const Tensor &rhs2, const INDEXLIST &il2, T alpha=1, bool conjugate1=false, bool conjugate2=false) const {Tensor r; r.addcontractions(*this,il1,rhs2,il2,alpha,0,true,conjugate1, conjugate2); return r; };
+ inline Tensor contractions(const Tensor &rhs2, const NRVec<INDEXNAME> names, T alpha=1, bool conjugate1=false, bool conjugate2=false) const {return contractions(findindexlist(names),rhs2,rhs2.findindexlist(names),alpha,conjugate1,conjugate2); };
 void apply_permutation_algebra(const Tensor &rhs, const PermutationAlgebra<int,T> &pa, bool inverse=false, T alpha=1, T beta=0); //general (not optimally efficient) symmetrizers, antisymmetrizers etc. acting on the flattened index list:
 void apply_permutation_algebra(const NRVec<Tensor> &rhsvec, const PermutationAlgebra<int,T> &pa, bool inverse=false, T alpha=1, T beta=0); //avoids explicit outer product but not vectorized, rather inefficient
@@ -289,8 +305,10 @@ public:
 void split_index_group(int group); //formal in-place split of a non-symmetric index group WITHOUT the need for data reorganization or names rearrangement
 void merge_adjacent_index_groups(int groupfrom, int groupto); //formal merge of non-symmetric index groups WITHOUT the need for data reorganization or names rearrangement
- Tensor merge_index_groups(const NRVec<int> &groups) const;
- Tensor flatten(int group= -1) const; //split and uncompress a given group or all of them
+
+ Tensor merge_index_groups(const NRVec<int> &groups) const;
+ Tensor flatten(int group= -1) const; //split and uncompress a given group or all of them, leaving flat index order the same
+
 NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
 Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker
 };
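
Usage sketch (not part of the patch): a minimal, hypothetical snippet in the style of the t.cc tests above, showing how the named-index contraction API introduced by this diff could be called. The shapes, the index names "p"/"i"/"q", the main() wrapper, and the include/namespace boilerplate are illustrative assumptions, not code taken from the repository.

//sketch only: contract two 2-index tensors over the shared index named "i",
//using the INDEXNAME-based overloads declared in tensor.h above
#include <iostream>
#include "tensor.h"
using namespace std;
using namespace LA;

int main(void)
{
int n=3;
INDEXGROUP g;
g.number=2;    //two indices in this group
g.symmetry=0;  //no permutational symmetry
g.offset=0;
g.range=n;

Tensor<double> a(g); a.randomize(1.);
Tensor<double> b(g); b.randomize(1.);

INDEXNAME anames[2]={"p","i"};  //leftmost name is the least significant index, as in t.cc
INDEXNAME bnames[2]={"i","q"};
a.names=anames;
b.names=bnames;

//named contraction over "i"; resolved internally via findindex to the positional overload
Tensor<double> c = a.contraction(b,"i");
cout <<c;
return 0;
}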