tensor: implemented merge_indices
t.cc | 64
@@ -3974,7 +3974,7 @@ cout <<t.dot(u)<<endl;
 }
 
 
-if(1)
+if(0)
 {
 //check full contractions
 int r,n;
@@ -4001,4 +4001,66 @@ cout <<x.dot(y) <<" "<< xf.dot(yf)<< " "<<z<<" "<<zf<<endl;
 }
 
 
+if(0)
+{
+//check symmetrizer/antisymmetrizer with no remaining indices
+int r,n,sym;
+cin>>r>>n>>sym;
+INDEXGROUP shape;
+	{
+	shape.number=r;
+	shape.symmetry= sym;
+	shape.range=n;
+	shape.offset=0;
+	}
+Tensor<double> x(shape); x.randomize(1.);
+//cout <<x;
+Tensor<double> xf=x.flatten();
+
+INDEXLIST il(r);
+for(int i=0; i<r; ++i) il[i]= {i,0};
+Tensor<double> xx = xf.merge_indices(il,sym);
+//cout <<xx;
+cout <<"Error = "<<(xx-x).norm()<<endl;
+
+}
+
+if(1)
+{
+//check symmetrizer/antisymmetrizer in general case
+int r,n,sym;
+cin>>r>>n>>sym;
+NRVec<INDEXGROUP> shape(3);
+shape[0].number=2;
+shape[0].symmetry=0;
+shape[0].range=n+1;
+shape[0].offset=0;
+
+shape[1].number=r;
+shape[1].symmetry= sym;
+shape[1].range=n;
+shape[1].offset=0;
+
+shape[2].number=2;
+shape[2].symmetry=0;
+shape[2].range=n+2;
+shape[2].offset=0;
+
+
+Tensor<double> x(shape); x.randomize(1.);
+cout <<"x= "<<x.shape;
+Tensor<double> xf=x.flatten(1);
+cout <<"xf= "<<xf.shape;
+Tensor<double> xxx=x.unwind_index_group(1);
+cout <<"xxx= "<<xxx.shape<<endl;
+
+INDEXLIST il(r);
+for(int i=0; i<r; ++i) il[i]= {1+i,0};
+Tensor<double> xx = xf.merge_indices(il,sym);
+cout <<"xx = "<<xx.shape;
+cout <<"Error = "<<(xx-xxx).norm()<<endl;
+
+}
+
+
 }//main
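A minimal round-trip sketch of what these tests exercise, with fixed parameters in place of the cin input; it uses only the Tensor/INDEXGROUP/INDEXLIST API already visible in this diff and is not part of the commit:

INDEXGROUP g;
g.number=3; g.symmetry=1; g.range=4; g.offset=0;	//compressed symmetric group of 3 indices
Tensor<double> x(g); x.randomize(1.);
Tensor<double> xf=x.flatten();	//three independent groups, symmetry no longer stored
INDEXLIST il(3);
for(int i=0; i<3; ++i) il[i]= {i,0};	//index 0 of each flat group 0,1,2
Tensor<double> xx=xf.merge_indices(il,1);	//symmetrize and compress back
cout <<"Error = "<<(xx-x).norm()<<endl;	//should vanish up to roundoff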
tensor.cc | 126
@@ -964,6 +964,7 @@ for(int i=0; i<il.size(); ++i)
 	{
 	if(il[i].group<0||il[i].group>=shape.size()) laerror("wrong group number in unwind_indices");
 	if(il[i].index<0||il[i].index>=shape[il[i].group].number) laerror("wrong index number in unwind_indices");
+	for(int j=0; j<i; ++j) if(il[i]==il[j]) laerror("repeated index in the list");
 	}
 
 //all indices are solo in their groups - permute groups
@@ -1242,6 +1243,7 @@ for(int i=0; i<il1.size(); ++i)
 #ifdef LA_TENSOR_INDEXPOSITION
 	if(rhs1.shape[il1[i].group].upperindex == rhs2.shape[il2[i].group].upperindex) laerror("can contract only upper with lower index");
 #endif
+	for(int j=0; j<i; ++j) if(il1[i]==il1[j]||il2[i]==il2[j]) laerror("repeated index in the list");
 	}
 
 const Tensor<T> u = conjugate1? (rhs1.unwind_indices(il1)).conjugateme() : rhs1.unwind_indices(il1);
@@ -1650,6 +1652,130 @@ return ind;
 }
 
 
+template<typename T>
+Tensor<T> Tensor<T>::merge_indices(const INDEXLIST &il, int sym) const
+{
+if(il.size()==0) laerror("empty index list for merge_indices");
+if(il.size()==1) return unwind_index(il[0]); //result should be index group of size 1
+
+bool samegroup=true;
+bool isordered=true;
+for(int i=0; i<il.size(); ++i)
+	{
+	if(il[i].group<0||il[i].group>=shape.size()) laerror("wrong group number in merge_indices");
+	if(il[i].index<0||il[i].index>=shape[il[i].group].number) laerror("wrong index number in merge_indices");
+	for(int j=0; j<i; ++j) if(il[i]==il[j]) laerror("repeated index in the list");
+#ifdef LA_TENSOR_INDEXPOSITION
+	if(shape[il[0].group].upperindex != shape[il[i].group].upperindex) laerror("can merge only within lower or upper indices separately");
+#endif
+	if(shape[il[0].group].range != shape[il[i].group].range)
+		{
+		std::cout << "indices "<<il[0]<<" and "<<il[i]<< " have ranges "<<shape[il[0].group].range<< " and "<< shape[il[i].group].range <<" respectively\n";
+		laerror("incompatible range in merge_indices");
+		}
+	if(shape[il[0].group].offset != shape[il[i].group].offset) laerror("incompatible offset in merge_indices");
+	if(il[0].group != il[i].group) samegroup=false;
+	if(il[i].index!=i) isordered=false;
+	}
+
+if(samegroup && isordered && il.size()==shape[il[0].group].number) return unwind_index_group(il[0].group);
+
+
+//calculate new shape and flat index permutation
+NRVec<indexgroup> workshape(shape);
+workshape.copyonwrite();
+NRPerm<int> basicperm(rank());
+
+bitvector was_in_list(rank());
+was_in_list.clear();
+for(int i=0; i<il.size(); ++i)
+	{
+	int fp=flatposition(il[i],shape);
+	was_in_list.set(fp);
+	basicperm[i+1] = 1+fp;
+	if( --workshape[il[i].group].number <0) laerror("inconsistent index list with index group size");
+	}
+int newshapesize=1; //newly created group
+for(int i=0; i<workshape.size(); ++i) if(workshape[i].number>0) ++newshapesize; //this group survived index removal
+
+NRVec<indexgroup> newshape(newshapesize);
+newshape[0].number=il.size();
+newshape[0].symmetry=sym;
+newshape[0].offset=shape[il[0].group].offset;
+newshape[0].range=shape[il[0].group].range;
+#ifdef LA_TENSOR_INDEXPOSITION
+newshape[0].upperindex=shape[il[0].group].upperindex;
+#endif
+int ii=1;
+for(int i=0; i<workshape.size(); ++i)
+	if(workshape[i].number>0)
+		newshape[ii++] = workshape[i];
+int jj=1+il.size();
+for(int i=0; i<rank(); ++i)
+	if(!was_in_list[i])
+		basicperm[jj++] = 1+i;
+if(!basicperm.is_valid()) laerror("internal error in merge_indices");
+
+//std::cout <<"newshape = "<<newshape<<std::endl;
+//std::cout <<"basicperm = "<<basicperm<<std::endl;
+
+
+//prepare permutation algebra
+PermutationAlgebra<int,T> pa;
+if(sym==0)
+	{
+	pa.resize(1);
+	pa[0].weight=1;
+	pa[0].perm=basicperm;
+	}
+else
+	{
+	PermutationAlgebra<int,int> sa = sym>0 ? symmetrizer<int>(il.size()) : antisymmetrizer<int>(il.size());
+	//std::cout <<"SA = "<<sa<<std::endl;
+	pa.resize(sa.size());
+	for(int i=0; i<sa.size(); ++i)
+		{
+		pa[i].weight = (T) sa[i].weight;
+		pa[i].perm.resize(rank());
+		for(int j=1; j<=il.size(); ++j) pa[i].perm[j] = basicperm[sa[i].perm[j]];
+		for(int j=il.size()+1; j<=rank(); ++j) pa[i].perm[j] = basicperm[j];
+		}
+	}
+
+//std::cout <<"Use PA = "<<pa<<std::endl;
+
+Tensor<T> r(newshape);
+r.apply_permutation_algebra(*this,pa,false,(T)1/(T)pa.size(),0);
+return r;
+}
+
+template<typename T>
+void Tensor<T>::canonicalize_shape()
+{
+const indexgroup *sh = &(* const_cast<const NRVec<indexgroup> *>(&shape))[0];
+for(int i=0; i<shape.size(); ++i)
+	{
+	if(sh[i].number==1 && sh[i].symmetry!=0) {shape.copyonwrite(); shape[i].symmetry=0;}
+	if(sh[i].symmetry>1) {shape.copyonwrite(); shape[i].symmetry=1;}
+	if(sh[i].symmetry<-1) {shape.copyonwrite(); shape[i].symmetry= -1;}
+	}
+}
+
+
+std::ostream & operator<<(std::ostream &s, const INDEX &x)
+{
+s<<x.group<<" "<<x.index;
+return s;
+}
+
+std::istream & operator>>(std::istream &s, INDEX &x)
+{
+s>>x.group>>x.index;
+return s;
+}
+
+
 template class Tensor<double>;
 template class Tensor<std::complex<double> >;
 
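A note on the normalization above: for k merged indices, symmetrizer<int>(k) or antisymmetrizer<int>(k) generates all k! permutations with weights ±1, and apply_permutation_algebra is called with the prefactor (T)1/(T)pa.size() = 1/k!, so merge_indices computes the (anti)symmetrized average. In LaTeX notation, with J standing for the unmerged indices and \varepsilon_P equal to 1 when symmetrizing and to \operatorname{sgn} P when antisymmetrizing,

r_{i_1\cdots i_k,\,J} \;=\; \frac{1}{k!}\sum_{P\in S_k}\varepsilon_P\, x_{i_{P(1)}\cdots i_{P(k)},\,J}

For sym==0 the algebra holds a single unit-weight permutation and the prefactor is 1, so the call is a pure index reordering. The averaging convention is why input that is already (anti)symmetric in the merged indices is reproduced exactly, which the new t.cc tests assert via (xx-x).norm() and (xx-xxx).norm().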
tensor.h | 31
@@ -41,15 +41,11 @@
 //TODO:
 //@@@contraction inside one tensor - compute resulting shape, loop over the shape, create index into the original tensor + loop over the contr. index, do the summation, store result
-//@@@ will need to store vector of INDEX to the original tensor for the result's flatindex
-//@@@ will not be particularly efficient
-//
-//maybe optional negative range for beta spin handling in some cases of fourindex-tensor conversions
-//
+//@@@ will need to store vector of INDEX to the original tensor for the result's flatindex, will not be particularly efficient
+//@@@?maybe optional negative range for beta spin handling in some cases of fourindex-tensor conversions
 //@@@?general permutation of individual indices - check the indices in sym groups remain adjacent, calculate result's shape, loop over the result and permute using unwind_callback
 //@@@? apply_permutation_algebra if result should be symmetric/antisymmetric in such a way to compute only the nonredundant part
-//@@@symmetrization and antisymmetrization of an index group - how to do it more efficiently than via PermutationAlgebra?
-//
+//
 //@@@ is that needed? we can flatten the relevant groups and permute index groups alternatively - maybe implement on high level this way for convenience
 
 
 //do not distinguish covariant/contravariant indices
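The first @@@ item spells out an algorithm in enough detail to sketch. A rough illustration of the intended loop structure follows; every helper named here (resulting_shape, indices_of, insert_contracted) is hypothetical and exists in neither this commit nor the current API:

//hypothetical sketch of the contraction-inside-one-tensor TODO, contracting indices a,b of *this
Tensor<T> r(resulting_shape(shape,a,b));	//compute resulting shape (helper does not exist yet)
for(LA_largeindex k=0; k<r.size(); ++k)	//loop over the result's shape
	{
	NRVec<INDEX> src = r.indices_of(k);	//vector of INDEX into the original tensor
	T s=0;
	for(int c=0; c<shape[a.group].range; ++c)	//loop over the contracted index
		s += (*this)(insert_contracted(src,a,b,c));	//do the summation
	r.data[k] = s;	//store the result
	}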
@@ -160,9 +156,14 @@ struct INDEX
 {
 int group;
 int index;
+bool operator==(const INDEX &rhs) const {return group==rhs.group && index==rhs.index;};
 };
 typedef NRVec<INDEX> INDEXLIST; //collection of several indices
 
+std::ostream & operator<<(std::ostream &s, const INDEX &x);
+std::istream & operator>>(std::istream &s, INDEX &x);
+
 
+int flatposition(int group, int index, const NRVec<indexgroup> &shape);
 int flatposition(const INDEX &i, const NRVec<indexgroup> &shape); //position of that index in FLATINDEX
 INDEX indexposition(int flatindex, const NRVec<indexgroup> &shape); //inverse to flatposition
@@ -188,12 +189,12 @@ public:
 //constructors
 Tensor() : myrank(-1) {};
 explicit Tensor(const T &x) : myrank(0), data(1) {data[0]=x;}; //scalar
-Tensor(const NRVec<indexgroup> &s) : shape(s) { data.resize(calcsize()); calcrank();}; //general tensor
-Tensor(const NRVec<indexgroup> &s, const NRVec<INDEXNAME> &newnames) : shape(s), names(newnames) { data.resize(calcsize()); calcrank(); if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}; //general tensor
-Tensor(const NRVec<indexgroup> &s, const NRVec<T> &mydata) : shape(s) { LA_largeindex dsize=calcsize(); calcrank(); if(mydata.size()!=dsize) laerror("inconsistent data size with shape"); data=mydata;}
-Tensor(const NRVec<indexgroup> &s, const NRVec<T> &mydata, const NRVec<INDEXNAME> &newnames) : shape(s), names(newnames) { LA_largeindex dsize=calcsize(); calcrank(); if(mydata.size()!=dsize) laerror("inconsistent data size with shape"); data=mydata; if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}
-Tensor(const indexgroup &g) {shape.resize(1); shape[0]=g; data.resize(calcsize()); calcrank();}; //tensor with a single index group
-Tensor(const indexgroup &g, const NRVec<INDEXNAME> &newnames) : names(newnames) {shape.resize(1); shape[0]=g; data.resize(calcsize()); calcrank(); if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}; //tensor with a single index group
+Tensor(const NRVec<indexgroup> &s) : shape(s) { data.resize(calcsize()); calcrank(); canonicalize_shape();}; //general tensor
+Tensor(const NRVec<indexgroup> &s, const NRVec<INDEXNAME> &newnames) : shape(s), names(newnames) { data.resize(calcsize()); calcrank(); canonicalize_shape(); if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}; //general tensor
+Tensor(const NRVec<indexgroup> &s, const NRVec<T> &mydata) : shape(s) { LA_largeindex dsize=calcsize(); calcrank(); canonicalize_shape(); if(mydata.size()!=dsize) laerror("inconsistent data size with shape"); data=mydata;}
+Tensor(const NRVec<indexgroup> &s, const NRVec<T> &mydata, const NRVec<INDEXNAME> &newnames) : shape(s), names(newnames) { LA_largeindex dsize=calcsize(); calcrank(); canonicalize_shape(); if(mydata.size()!=dsize) laerror("inconsistent data size with shape"); data=mydata; if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}
+Tensor(const indexgroup &g) {shape.resize(1); shape[0]=g; data.resize(calcsize()); calcrank(); canonicalize_shape();}; //tensor with a single index group
+Tensor(const indexgroup &g, const NRVec<INDEXNAME> &newnames) : names(newnames) {shape.resize(1); shape[0]=g; data.resize(calcsize()); calcrank(); canonicalize_shape(); if(names.size()!=myrank && names.size()!=0) laerror("bad number of index names");}; //tensor with a single index group
 Tensor(const Tensor &rhs): myrank(rhs.myrank), shape(rhs.shape), groupsizes(rhs.groupsizes), cumsizes(rhs.cumsizes), data(rhs.data), names(rhs.names) {};
 Tensor(int xrank, const NRVec<indexgroup> &xshape, const NRVec<LA_largeindex> &xgroupsizes, const NRVec<LA_largeindex> xcumsizes, const NRVec<T> &xdata) : myrank(xrank), shape(xshape), groupsizes(xgroupsizes), cumsizes(xcumsizes), data(xdata) {};
 Tensor(int xrank, const NRVec<indexgroup> &xshape, const NRVec<LA_largeindex> &xgroupsizes, const NRVec<LA_largeindex> xcumsizes, const NRVec<T> &xdata, const NRVec<INDEXNAME> &xnames) : myrank(xrank), shape(xshape), groupsizes(xgroupsizes), cumsizes(xcumsizes), data(xdata), names(xnames) {};
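All six shape-taking constructors now route the shape through canonicalize_shape() after calcrank(), so logically equivalent shapes get a unique representation: |symmetry| is clamped to 1 and a single-index group loses its meaningless symmetry flag. A small illustration, assuming the indexgroup fields used elsewhere in this diff:

indexgroup g;
g.number=1; g.symmetry=1; g.range=5; g.offset=0;
Tensor<double> t(g);	//stored with symmetry==0: one index cannot be (anti)symmetric
//likewise symmetry=7 would be stored as 1, and symmetry=-3 as -1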
@@ -212,6 +213,7 @@ public:
 void defaultnames() {names.resize(rank()); for(int i=0; i<rank(); ++i) sprintf(names[i].name,"i%03d",i);}
 int rank() const {return myrank;};
 int calcrank(); //is computed from shape
+void canonicalize_shape();
 LA_largeindex calcsize(); //set redundant data and return total size
 LA_largeindex size() const {return data.size();};
 void copyonwrite() {shape.copyonwrite(); groupsizes.copyonwrite(); cumsizes.copyonwrite(); data.copyonwrite(); names.copyonwrite();};
@@ -316,6 +318,7 @@ public:
 
 Tensor merge_index_groups(const NRVec<int> &groups) const;
 Tensor flatten(int group= -1) const; //split and uncompress a given group or all of them, leaving flat index order the same
+Tensor merge_indices(const INDEXLIST &il, int symmetry=0) const; //opposite to flatten (merging with optional symmetrization/antisymmetrization and compression)
 
 NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
 Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker