tensor: implemented index names

Jiri Pittner 2025-11-05 16:09:29 +01:00
parent 7941b7a7e2
commit 5e4c4dddea
3 changed files with 156 additions and 19 deletions

54
t.cc
View File

@@ -3256,6 +3256,8 @@ for(int i=0; i<4; ++i)
}
Tensor<double> t(s);
t.randomize(1.);
INDEXNAME list[4]={"i","j","k","l"};
t.names=list;
cout <<t;
NRPerm<int> p({3,1,4,2});
Tensor<double> tt=t.permute_index_groups(p);
@@ -3269,6 +3271,26 @@ for(int i=0; i<n; ++i)
}
}
if(0)
{
int n=3;
NRVec<INDEXGROUP> s(4);
for(int i=0; i<4; ++i)
{
s[i].number=2;
s[i].symmetry=0;
s[i].offset=0;
s[i].range=n;
}
Tensor<double> t(s);
t.randomize(1.);
INDEXNAME list[8]={"i1","i2","j1","j2","k1","k2","l1","l2"};
t.names=list;
NRPerm<int> p({3,1,4,2});
Tensor<double> tt=t.permute_index_groups(p);
cout <<tt;
}
if(0)
{
@@ -3283,6 +3305,8 @@ g.range=n;
Tensor<double> e(g);
e.randomize(1.);
INDEXNAME list[4]={"i","j","k","l"};
e.names=list;
Tensor<double> eu = e.unwind_index(0,1);
Tensor<double> eu2 = e.unwind_index(0,2);
Tensor<double> eu3 = e.unwind_index(0,3);
@@ -3349,6 +3373,9 @@ ag.range=n;
Tensor<double> a(ag);
a.randomize(1.);
INDEXNAME alist[4]={"i","j","k","l"};
a.names=alist;
INDEXGROUP bg;
bg.number=3;
@@ -3358,14 +3385,24 @@ bg.range=n;
Tensor<double> b(bg);
b.randomize(1.);
INDEXNAME blist[3]={"a","i","b"};
b.names=blist;
INDEXLIST il1(1);
il1[0]={0,0};
INDEXLIST il2(1);
il2[0]={0,1};
INDEXNAME clist[1]={"i"};
NRVec<INDEXNAME> cl(clist);
Tensor<double> cc = a.contractions(il1,b,il2);
//Tensor<double> cc = a.contraction(0,0,b,0,1);
Tensor<double> ff = a.contractions(b,cl);
Tensor<double> dd = a.contraction(0,0,b,0,1);
Tensor<double> ee = a.contraction(b,"i");
cout <<cc;
cout <<dd;
cout <<ee;
cout <<ff;
INDEXGROUP cga;
cga.number=3;
@@ -3392,7 +3429,10 @@ for(int i=0; i<n; ++i)
{
for(int p=0; p<n; ++p)
c.lhs(m,l,k,j,i) += a(p,i,j,k) * b(m,p,l);
-if(abs(c(m,l,k,j,i)-cc(m,l,k,j,i))>1e-13) laerror("internal error in contraction");
+if(abs(c(m,l,k,j,i)-cc(m,l,k,j,i))>1e-13) laerror("internal error in contractions");
+if(abs(c(m,l,k,j,i)-dd(m,l,k,j,i))>1e-13) laerror("internal error in contraction");
+if(abs(c(m,l,k,j,i)-ee(m,l,k,j,i))>1e-13) laerror("internal error in named contraction");
+if(abs(c(m,l,k,j,i)-ff(m,l,k,j,i))>1e-13) laerror("internal error in named contractions");
}
//cout <<c;
@@ -3411,6 +3451,10 @@ g.range=n;
Tensor<double> e(g);
e.randomize(1.);
INDEXNAME list[4]={"i","j","k","l"};
e.names=list;
INDEXLIST il(2);
il[0]= {0,1};
il[1]= {0,3};
@@ -3893,9 +3937,9 @@ INDEXGROUP shape;
}
Tensor<double> t(shape);
t.clear();
-INDEXNAME list[3]={"i1","i2","i3"};
-t.names=list;
-//t.names= {"i1","i2","i3"}; does not compile
+//INDEXNAME list[3]={"i1","i2","i3"}; t.names=list;
+//t.names= NRVec<INDEXNAME>({"i1","i2","i3"}); //does not compile
+t.defaultnames();
for(int i=0; i<n; ++i)
for(int j=0; j<n; ++j)
for(int k=0; k<n; ++k)
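A minimal sketch of the workflow this enables, assuming the rank-3 tensor t from the hunk above:

t.defaultnames(); //t.names becomes {"i000","i001","i002"}
Tensor<double> tu = t.unwind_index("i001"); //names then drive the name-based API declared in the header below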

View File

@@ -72,6 +72,7 @@ for(int i=0; i<shape.size(); ++i)
break;
}
}
if(s==0) laerror("empty tensor - perhaps antisymmetric in dim<rank");
return s;
}
@@ -660,6 +661,24 @@ help_t<T>->data[target] = *v;
}
//permutation of individual indices from permutation of index groups
NRPerm<int> group2flat_perm(const NRVec<indexgroup> &shape, const NRPerm<int> &p)
{
int rank=0;
for(int i=0; i<shape.size(); ++i) rank+=shape[i].number;
NRPerm<int> q(rank);
int ii=1;
for(int i=0; i<shape.size(); ++i)
{
int selgroup=p[i+1]-1;
for(int j=0; j<shape[selgroup].number; ++j)
{
q[ii++] = 1+flatposition(selgroup,j,shape);
}
}
return q;
}
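A worked example of this mapping, assuming flatposition(group,index,shape) returns the flat position of the given index (group offset plus index, consistent with its use here): for groups of sizes (2,3) and group permutation p=(2,1), the flat permutation is q=(3,4,5,1,2). A standalone 0-based sketch of the same logic:

#include <vector>
//illustrative 0-based re-statement of group2flat_perm; the real code
//uses 1-based NRPerm and the library's flatposition()
std::vector<int> group2flat(const std::vector<int> &groupsizes, const std::vector<int> &p)
{
std::vector<int> offset(groupsizes.size()+1,0); //flat offset of each original group
for(size_t g=0; g<groupsizes.size(); ++g) offset[g+1]=offset[g]+groupsizes[g];
std::vector<int> q;
for(size_t i=0; i<p.size(); ++i) //new group slot i takes old group p[i]
for(int j=0; j<groupsizes[p[i]]; ++j)
q.push_back(offset[p[i]]+j); //group members stay adjacent and in order
return q;
}
//e.g. groupsizes={2,3}, p={1,0} yields q={2,3,4,0,1}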
template<typename T>
Tensor<T> Tensor<T>::permute_index_groups(const NRPerm<int> &p) const
@@ -668,6 +687,11 @@ Tensor<T> Tensor<T>::permute_index_groups(const NRPerm<int> &p) const
NRVec<indexgroup> newshape=shape.permuted(p,true);
//std::cout <<"permute_index_groups newshape = "<<newshape<<std::endl;
Tensor<T> r(newshape);
if(is_named())
{
NRPerm<int> q=group2flat_perm(shape,p);
r.names = names.permuted(q,true);
}
//prepare statics for the callback
help_p = &p;
@@ -773,6 +797,7 @@ for(int i=0; i<shape.size(); ++i)
else flatindex += shape[i].number;
}
//std::cout <<"unwind new shape = "<<newshape<<std::endl;
Tensor<T> r(newshape);
@@ -790,12 +815,18 @@ for(int i=2; i<=rank(); ++i)
}
if(!indexperm.is_valid())
{
std::cout << "indexperm = "<<indexperm<<std::endl;
//std::cout << "indexperm = "<<indexperm<<std::endl;
laerror("internal error 3 in unwind_index");
}
//std::cout <<"unwind permutation = "<<indexperm<<std::endl;
if(is_named())
{
r.names = names.permuted(indexperm,true);
//std::cout <<"unwind new names = "<<r.names<<std::endl;
}
//loop recursively and do the unwinding
help_tt<T> = this;
help_p = &indexperm;
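With this in place the names follow the unwound index, so the name-based wrappers added to the header below can be used interchangeably with positional ones; a minimal sketch mirroring the t.cc test (shape g as set up there):

Tensor<double> e(g);
e.randomize(1.);
INDEXNAME list[4]={"i","j","k","l"};
e.names=list;
Tensor<double> eu = e.unwind_index(0,1); //by group/index position
Tensor<double> ev = e.unwind_index("j"); //the same index by name, via findindex()
//in both results the unwound index is the leftmost (least significant) one
//and names is permuted along with it by the code above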
@@ -892,10 +923,12 @@ for(int g=0; g<shape.size(); ++g)
}
}
std::cout <<"Flatten new shape = "<<newshape<<std::endl;
//std::cout <<"Flatten new shape = "<<newshape<<std::endl;
//decompress the tensor data
Tensor<T> r(newshape);
r.names=names;
help_tt<T> = this;
r.loopover(flatten_callback);
return r;
@@ -908,7 +941,7 @@ template<typename T>
Tensor<T> Tensor<T>::unwind_indices(const INDEXLIST &il) const
{
if(il.size()==0) return *this;
-if(il.size()==1) return unwind_index(il[0].group,il[0].index);
+if(il.size()==1) return unwind_index(il[0]);
for(int i=0; i<il.size(); ++i)
{
@@ -1017,6 +1050,12 @@ if(!indexperm.is_valid())
laerror("internal error 3 in unwind_indices");
}
if(is_named())
{
r.names = names.permuted(indexperm,true);
//std::cout <<"unwind new names = "<<r.names<<std::endl;
}
//loop recursively and do the unwinding
help_tt<T> = this;
help_p = &indexperm;
@@ -1078,15 +1117,28 @@ int ii=0;
for(int i=1; i<rhsu.shape.size(); ++i) newshape[ii++] = rhsu.shape[i];
for(int i=1; i<u.shape.size(); ++i) newshape[ii++] = u.shape[i]; //this tensor will have more significant indices than the rhs one
NRVec<INDEXNAME> newnames;
if(u.is_named() && rhsu.is_named())
{
if(u.names[0]!=rhsu.names[0]) laerror("contraction index name mismatch in addcontraction");
newnames.resize(u.names.size()+rhsu.names.size()-2);
int ii=0;
for(int i=1; i<rhsu.names.size(); ++i) newnames[ii++] = rhsu.names[i];
for(int i=1; i<u.names.size(); ++i) newnames[ii++] = u.names[i];
}
if(doresize)
{
if(beta!= (T)0) laerror("resize in addcontraction requires beta=0");
resize(newshape);
names=newnames;
}
else
{
if(shape!=newshape) laerror("tensor shape mismatch in addcontraction");
if(is_named() && names!=newnames) laerror("remaining index names mismatch in addcontraction");
}
int nn,mm,kk;
kk=u.groupsizes[0];
if(kk!=rhsu.groupsizes[0]) laerror("internal error in addcontraction");
@@ -1097,6 +1149,7 @@ auxmatmult<T>(nn,mm,kk,&data[0],&u.data[0], &rhsu.data[0],alpha,beta,conjugate);
}
template<typename T>
void Tensor<T>::addcontractions(const Tensor &rhs1, const INDEXLIST &il1, const Tensor &rhs2, const INDEXLIST &il2, T alpha, T beta, bool doresize, bool conjugate1, bool conjugate2)
{
@@ -1125,15 +1178,29 @@ int ii=0;
for(int i=il1.size(); i<rhsu.shape.size(); ++i) newshape[ii++] = rhsu.shape[i];
for(int i=il1.size(); i<u.shape.size(); ++i) newshape[ii++] = u.shape[i]; //this tensor will have more significant indices than the rhs one
NRVec<INDEXNAME> newnames;
if(u.is_named() && rhsu.is_named())
{
for(int i=0; i<il1.size(); ++i) if(u.names[i]!=rhsu.names[i]) laerror("contraction index name mismatch in addcontractions");
newnames.resize(u.names.size()+rhsu.names.size()-2*il1.size());
int ii=0;
for(int i=il1.size(); i<rhsu.names.size(); ++i) newnames[ii++] = rhsu.names[i];
for(int i=il1.size(); i<u.names.size(); ++i) newnames[ii++] = u.names[i];
}
if(doresize)
{
if(beta!= (T)0) laerror("resize in addcontractions requires beta=0");
resize(newshape);
names=newnames;
}
else
{
if(shape!=newshape) laerror("tensor shape mismatch in addcontractions");
if(is_named() && names!=newnames) laerror("remaining index names mismatch in addcontractions");
}
int nn,mm,kk;
kk=1;
int kk2=1;
@@ -1318,8 +1385,8 @@ for(int g=0; g<shape.size(); ++g)
p[gg++] = 1+g;
if(gg!=shape.size() || !p.is_valid()) laerror("internal error in merge_index_groups");
-Tensor<T> r = permute_index_groups(p);
-r.merge_adjacent_index_groups(0,groups.size()-1);
+Tensor<T> r = permute_index_groups(p); //takes care of names permutation too
+r.merge_adjacent_index_groups(0,groups.size()-1); //flat names invariant
return r;
}
@@ -1437,7 +1504,7 @@ int Tensor<T>::findflatindex(const INDEXNAME nam) const
if(!is_named()) laerror("tensor indices were not named");
for(int i=0; i<names.size(); ++i)
{
-if(!strncmp(nam.name,names[i].name,N_INDEXNAME)) return i;
+if(nam==names[i]) return i;
}
return -1;
}
@@ -1450,6 +1517,14 @@ if(n<0) laerror("index with this name was not found");
return indexposition(n,shape);
}
template<typename T>
NRVec<INDEX> Tensor<T>::findindexlist(const NRVec<INDEXNAME> &names) const
{
int n=names.size();
NRVec<INDEX> ind(n);
for(int i=0; i<n; ++i) ind[i] = findindex(names[i]);
return ind;
}
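A short usage sketch of the name lookups (tensor a named {"i","j","k","l"} as in the tests; nlist is illustrative):

int f = a.findflatindex("k"); //flat position of the index named "k", -1 if no such name
INDEX I = a.findindex("k"); //the same as a {group,index} pair, laerror if absent
INDEXNAME nlist[2]={"j","l"};
NRVec<INDEXNAME> nl(nlist);
NRVec<INDEX> il = a.findindexlist(nl); //positions of several named indices at once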

View File

@@ -19,6 +19,7 @@
//a simple tensor class with arbitrary symmetry of index subgroups
//stored in an efficient way
//indices can optionally have names and be handled by name
//each index group has a specific symmetry (nosym,sym,antisym)
//additional symmetry between index groups (like in 2-electron integrals) is not supported directly, you would need to nest the class to Tensor<Tensor<T> >
//leftmost index is least significant (changing fastest) in the storage order
@@ -39,9 +40,6 @@
#include "miscfunc.h"
//TODO:
-//@@@!!!!!! - implement index names and their treatment in all operations which permute indices
-//@@@implement contractions, unwinding etc. by named index list
//
//@@@contraction inside one tensor - compute resulting shape, loopover the shape, create index into the original tensor + loop over the contr. index, do the summation, store result
//@@@ will need to store vector of INDEX to the original tensor for the result's flatindex
//@@@ will not be particularly efficient
@@ -49,7 +47,7 @@
//maybe optional negative range for beta spin handling in some cases of fourindex-tensor conversions
//
//@@@?general permutation of individual indices - check the indices in sym groups remain adjacent, calculate result's shape, loopover the result and permute using unwind_callback
//
//@@@? apply_permutation_algebra if result should be symmetric/antisymmetric in such a way to compute only the nonredundant part
//
@@ -90,6 +88,11 @@ typedef int LA_largeindex;
#define N_INDEXNAME 8
struct INDEXNAME {
char name[N_INDEXNAME];
INDEXNAME() {};
INDEXNAME(const char *n) {strncpy(name,n,N_INDEXNAME);};
INDEXNAME & operator=(const INDEXNAME &rhs) {strncpy(name,rhs.name,N_INDEXNAME); return *this;};
bool operator==(const INDEXNAME &rhs) const {return 0==strncmp(name,rhs.name,N_INDEXNAME);};
bool operator!=(const INDEXNAME &rhs) const {return !(rhs == *this);};
};
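Note the fixed-width semantics: a name is stored in char[N_INDEXNAME] with no guaranteed terminating NUL (strncpy), and both comparison operators use strncmp over the full width, so names are effectively truncated to N_INDEXNAME characters. For example (illustrative values):

INDEXNAME a("occupied_1"), b("occupied_2");
bool same = (a==b); //true: both store only the first 8 characters, "occupied"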
template<>
class LA_traits<INDEXNAME> {
@@ -204,6 +207,7 @@ public:
bool is_compressed() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1&&shape[i].symmetry!=0) return true; return false;};
bool has_symmetry() const {for(int i=0; i<shape.size(); ++i) if(shape[i].symmetry!=0) return true; return false;};
void clear() {data.clear();};
void defaultnames() {names.resize(rank()); for(int i=0; i<rank(); ++i) sprintf(names[i].name,"i%03d",i);}
int rank() const {return myrank;};
int calcrank(); //is computed from shape
LA_largeindex calcsize(); //set redundant data and return total size
@@ -235,6 +239,7 @@ public:
//find index by name
int findflatindex(const INDEXNAME nam) const;
INDEX findindex(const INDEXNAME nam) const;
NRVec<INDEX> findindexlist(const NRVec<INDEXNAME> &names) const;
inline Tensor& operator+=(const Tensor &rhs)
{
@@ -268,16 +273,27 @@ public:
void grouploopover(void (*callback)(const GROUPINDEX &, T *)); //loop over all elements disregarding the internal structure of index groups
void constgrouploopover(void (*callback)(const GROUPINDEX &, const T *)) const; //loop over all elements disregarding the internal structure of index groups
-//@@@@@@@@@implement names treatment in the following
Tensor permute_index_groups(const NRPerm<int> &p) const; //rearrange the tensor storage permuting index groups as a whole
Tensor unwind_index(int group, int index) const; //separate an index from a group and expand it to full range as the least significant one (the leftmost one)
Tensor unwind_index(const INDEX &I) const {return unwind_index(I.group,I.index);};
Tensor unwind_index(const INDEXNAME &N) const {return unwind_index(findindex(N));};
Tensor unwind_indices(const INDEXLIST &il) const; //the same for a list of indices
Tensor unwind_indices(const NRVec<INDEXNAME> &names) const {return unwind_indices(findindexlist(names));};
void addcontraction(const Tensor &rhs1, int group, int index, const Tensor &rhs2, int rhsgroup, int rhsindex, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate=false); //rhs1 will have more significant non-contracted indices in the result than rhs2
inline void addcontraction(const Tensor &rhs1, const INDEX &I1, const Tensor &rhs2, const INDEX &I2, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate=false) {addcontraction(rhs1, I1.group, I1.index, rhs2, I2.group, I2.index, alpha, beta, doresize, conjugate1, conjugate);};
inline void addcontraction(const Tensor &rhs1, const Tensor &rhs2, const INDEXNAME &iname, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate=false) {addcontraction(rhs1, rhs1.findindex(iname), rhs2, rhs2.findindex(iname), alpha, beta, doresize, conjugate1, conjugate);};
inline Tensor contraction(int group, int index, const Tensor &rhs, int rhsgroup, int rhsindex, T alpha=1, bool conjugate1=false, bool conjugate=false) const {Tensor<T> r; r.addcontraction(*this,group,index,rhs,rhsgroup,rhsindex,alpha,0,true, conjugate1, conjugate); return r; };
inline Tensor contraction(const INDEX &I, const Tensor &rhs, const INDEX &RHSI, T alpha=1, bool conjugate1=false, bool conjugate=false) const {return contraction(I.group,I.index, rhs, RHSI.group, RHSI.index,alpha, conjugate1, conjugate);};
inline Tensor contraction(const Tensor &rhs, const INDEXNAME &iname, T alpha=1, bool conjugate1=false, bool conjugate=false) const {return contraction(findindex(iname),rhs,rhs.findindex(iname),alpha, conjugate1, conjugate);};
void addcontractions(const Tensor &rhs1, const INDEXLIST &il1, const Tensor &rhs2, const INDEXLIST &il2, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate2=false);
inline void addcontractions(const Tensor &rhs1, const Tensor &rhs2, const NRVec<INDEXNAME> &names, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate2=false) {addcontractions(rhs1, rhs1.findindexlist(names), rhs2, rhs2.findindexlist(names), alpha, beta, doresize, conjugate1,conjugate2);};
inline Tensor contractions( const INDEXLIST &il1, const Tensor &rhs2, const INDEXLIST &il2, T alpha=1, bool conjugate1=false, bool conjugate2=false) const {Tensor<T> r; r.addcontractions(*this,il1,rhs2,il2,alpha,0,true,conjugate1, conjugate2); return r; };
inline Tensor contractions(const Tensor &rhs2, const NRVec<INDEXNAME> names, T alpha=1, bool conjugate1=false, bool conjugate2=false) const {return contractions(findindexlist(names),rhs2,rhs2.findindexlist(names),alpha,conjugate1,conjugate2); };
void apply_permutation_algebra(const Tensor &rhs, const PermutationAlgebra<int,T> &pa, bool inverse=false, T alpha=1, T beta=0); //general (not optimally efficient) symmetrizers, antisymmetrizers etc. acting on the flattened index list:
void apply_permutation_algebra(const NRVec<Tensor> &rhsvec, const PermutationAlgebra<int,T> &pa, bool inverse=false, T alpha=1, T beta=0); //avoids explicit outer product but not vectorized, rather inefficient
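Putting the name-based contraction API together, a sketch mirroring the t.cc test above (a.names={"i","j","k","l"}, b.names={"a","i","b"}): after unwinding, the contracted index sits first in both operands and addcontraction(s) assembles the result's names from the remaining rhs2 names followed by the remaining rhs1 names (rhs1's indices being more significant):

Tensor<double> ee = a.contraction(b,"i"); //one index selected by name
INDEXNAME clist[1]={"i"};
NRVec<INDEXNAME> cl(clist);
Tensor<double> ff = a.contractions(b,cl); //a list of contracted names
//both results carry names {"a","b","j","k","l"}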
@@ -289,8 +305,10 @@ public:
void split_index_group(int group); //formal in-place split of a non-symmetric index group WITHOUT the need for data reorganization or names rearrangement
void merge_adjacent_index_groups(int groupfrom, int groupto); //formal merge of non-symmetric index groups WITHOUT the need for data reorganization or names rearrangement
Tensor merge_index_groups(const NRVec<int> &groups) const;
-Tensor flatten(int group= -1) const; //split and uncompress a given group or all of them
+Tensor flatten(int group= -1) const; //split and uncompress a given group or all of them, leaving flat index order the same
NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker
};