tensor: linear_transform implemented

This commit is contained in:
2025-11-20 18:17:34 +01:00
parent d136c2314d
commit ad1c4ee968
3 changed files with 122 additions and 23 deletions

47
t.cc
View File

@@ -4424,7 +4424,7 @@ cout <<z.norm()<<endl;
} }
if(1) if(0)
{ {
//tucker test order //tucker test order
int r,n; int r,n;
@@ -4451,7 +4451,52 @@ cout<<dec;
Tensor<double> y = x.inverseTucker(dec,inv); Tensor<double> y = x.inverseTucker(dec,inv);
cout <<"invTucker\n"<<y; cout <<"invTucker\n"<<y;
cout <<"Error = "<<(x0-y).norm()<<endl; cout <<"Error = "<<(x0-y).norm()<<endl;
if(inv==false)
{
//cross-check: transforming the Tucker core by the same factor matrices
//should reconstruct x0 just like inverseTucker above, so Error2 ~ 0
//(only done for inv==false; presumably linear_transform expects the
//non-inverted factor ordering -- confirm against its definition)
Tensor<double> z = x.linear_transform(dec);
cout <<"Error2 = "<<(x0-z).norm()<<endl;
}
} }
if(1)
{
//test linear transform
//reads n (index range) and sym (symmetry of the first group) from stdin,
//builds a rank-4 tensor with shape {3 indices of range n with symmetry sym,
//1 index of range n}, transforms it by two random (n+2) x n matrices, and
//compares against a brute-force element-by-element reference computation
int r=3;
int n,sym;
cin>>n>>sym;
NRVec<INDEXGROUP> shape(2);
{
shape[0].number=r;
shape[0].symmetry=sym;
shape[0].range=n;
shape[0].offset=0;
shape[1].number=1;
shape[1].symmetry=0;
shape[1].range=n;
shape[1].offset=0;
}
Tensor<double> x(shape);
x.randomize(1.);
//rectangular transform matrices: range n is mapped to range n+2
NRVec<NRMat<double> > t(2);
t[0].resize(n+2,n); t[0].randomize(1.);
t[1].resize(n+2,n); t[1].randomize(1.);
Tensor<double> xt=x.linear_transform(t);
//reference tensor y with the enlarged ranges (copyonwrite: shape may be
//shared with tensor x, so detach before modifying it)
shape.copyonwrite();
shape[0].range=n+2;
shape[1].range=n+2;
Tensor<double> y(shape);
//this is the most stupid way to compute the transform ;-)
//y(i,j,k,l) = sum_{ii,jj,kk,ll} x(ii,jj,kk,ll) t0(i,ii) t0(j,jj) t0(k,kk) t1(l,ll)
//i.e. the same matrix t[0] on each index of group 0 and t[1] on group 1
for(int i=0; i<n+2; ++i) for(int j=0; j<n+2; ++j) for(int k=0; k<n+2; ++k) for(int l=0; l<n+2; ++l)
{
y.lhs(i,j,k,l)=0;
for(int ii=0; ii<n; ++ii) for(int jj=0; jj<n; ++jj) for(int kk=0; kk<n; ++kk) for(int ll=0; ll<n; ++ll)
y.lhs(i,j,k,l) += x(ii,jj,kk,ll) * t[0](i,ii) * t[0](j,jj) * t[0](k,kk) * t[1](l,ll) ;
}
//Error should be ~ 0 (up to floating point roundoff) if linear_transform
//agrees with the brute-force reference
cout <<"Error = "<<(xt-y).norm()<<endl;
}
}//main }//main

View File

@@ -1785,7 +1785,6 @@ template<typename T>
Tensor<T> Tensor<T>::inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder) const Tensor<T> Tensor<T>::inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder) const
{ {
if(rank()!=x.size()) laerror("input of inverseTucker does not match rank"); if(rank()!=x.size()) laerror("input of inverseTucker does not match rank");
NRVec<INDEXNAME> names_saved = names;
Tensor<T> tmp(*this); Tensor<T> tmp(*this);
Tensor<T> r; Tensor<T> r;
if(!is_flat()) laerror("inverseTucker only for flat tensors as produced by Tucker"); if(!is_flat()) laerror("inverseTucker only for flat tensors as produced by Tucker");
@@ -1818,14 +1817,60 @@ for(int i=rank()-1; i>=0; --i)
if(inverseorder) if(inverseorder)
{ {
NRPerm<int> p(rank()); p.identity(); NRPerm<int> p(rank()); p.identity();
r.names=names_saved.permuted(p.reverse()); r.names=names.permuted(p.reverse());
} }
else else
r.names=names_saved; r.names=names;
return r; return r;
} }
//Linear transform of the tensor: each symmetry index group g is transformed
//by its own matrix x[g], the SAME matrix being applied to every index of
//that group, so the group's internal (anti)symmetry is preserved and is
//re-imposed by merge_indices after the per-index contractions.
//@param x one transformation matrix per index group; x[g].ncols() must equal
//         the range of group g (x[g].nrows() becomes the new range)
//@return the transformed tensor carrying over this tensor's index names
//NOTE(review): relies on contraction() consuming the designated (last) flat
//index and appending the matrix's free index, and on merge_indices()
//re-grouping the leftmost indices -- confirm against their definitions.
template<typename T>
Tensor<T> Tensor<T>::linear_transform(const NRVec<NRMat<T> > &x) const
{
if(x.size()!=shape.size()) laerror("wrong number of transformation matrices in linear_transform");
for(int i=0; i<shape.size(); ++i) if(x[i].ncols()!=shape[i].range) laerror("mismatch of transformation matrix size in linear_transform");
Tensor<T> tmp(*this); //working copy, repeatedly replaced by partial results
Tensor<T> r;
//do the groups in reverse order to preserve index ordering
for(int g=shape.size()-1; g>=0; --g)
	{
	Tensor<T> mat(x[g],true); //flat tensor from a matrix
	//contract x[g] into each of the group's shape[g].number indices;
	//the loop only counts repetitions, i itself is not used as an index
	for(int i=shape[g].number-1; i>=0; --i) //indices in the group in reverse order
		{
		r= tmp.contraction(indexposition(rank()-1,tmp.shape),mat,INDEX(0,0),(T)1,false,false); //always the last index
		if(i>0)
			{
			//more indices of this group remain: continue from the partial result
			tmp=r;
			r.deallocate();
			}
		}
	//the group's indices are now individual ones leftmost in tensor r, restore group size and symmetry
	if(shape[g].number>1)
		{
		INDEXLIST il(shape[g].number);
		for(int i=0; i<shape[g].number; ++i)
			{
			il[i].group=i;
			il[i].index=0;
			}
		r = r.merge_indices(il,shape[g].symmetry);
		}
	if(g>0)
		{
		//more groups remain: carry the result over into the working copy
		tmp=r;
		r.deallocate();
		}
	}
r.names=names; //index names are unchanged by the transform
return r;
}
template<typename T> template<typename T>
int Tensor<T>::findflatindex(const INDEXNAME nam) const int Tensor<T>::findflatindex(const INDEXNAME nam) const
{ {

View File

@@ -83,6 +83,15 @@ if(LA_traits<T>::is_complex()) //condition known at compile time
} }
//Check whether a Signedpointer target is valid. A NULL target marks a
//tensor element that does not exist in the symmetry-packed storage; writes
//through it are silently skipped (with a warning in DEBUG builds).
static inline bool ptr_ok(void *ptr)
{
const bool valid = (ptr != NULL);
#ifdef DEBUG
if(!valid) std::cout<<"Warning: write to nonexistent tensor element ignored\n";
#endif
return valid;
}
template<typename T> template<typename T>
class Signedpointer class Signedpointer
{ {
@@ -91,11 +100,11 @@ int sgn;
public: public:
Signedpointer(T *p, int s) : ptr(p),sgn(s) {}; Signedpointer(T *p, int s) : ptr(p),sgn(s) {};
//dereferencing *ptr should be ignored for sgn==0 //dereferencing *ptr should be ignored for sgn==0
const T operator=(const T rhs) {*ptr = signeddata(sgn,rhs); return rhs;} const T operator=(const T rhs) {if(ptr_ok(ptr)) *ptr = signeddata(sgn,rhs); return rhs;}
void operator*=(const T rhs) {*ptr *= rhs;} void operator*=(const T rhs) {if(ptr_ok(ptr)) *ptr *= rhs;}
void operator/=(const T rhs) {*ptr /= rhs;} void operator/=(const T rhs) {if(ptr_ok(ptr)) *ptr /= rhs;}
void operator+=(T rhs) {*ptr += signeddata(sgn,rhs);} void operator+=(T rhs) {if(ptr_ok(ptr)) *ptr += signeddata(sgn,rhs);}
void operator-=(T rhs) {*ptr -= signeddata(sgn,rhs);} void operator-=(T rhs) {if(ptr_ok(ptr)) *ptr -= signeddata(sgn,rhs);}
}; };
@@ -182,6 +191,8 @@ struct INDEX
int group; int group;
int index; int index;
bool operator==(const INDEX &rhs) const {return group==rhs.group && index==rhs.index;}; bool operator==(const INDEX &rhs) const {return group==rhs.group && index==rhs.index;};
INDEX() {};
INDEX(int g, int i): group(g), index(i) {};
}; };
typedef NRVec<INDEX> INDEXLIST; //collection of several indices typedef NRVec<INDEX> INDEXLIST; //collection of several indices
@@ -223,13 +234,16 @@ public:
int myrank; int myrank;
NRVec<LA_largeindex> groupsizes; //group sizes of symmetry index groups (a function of shape but precomputed for efficiency) NRVec<LA_largeindex> groupsizes; //group sizes of symmetry index groups (a function of shape but precomputed for efficiency)
NRVec<LA_largeindex> cumsizes; //cumulative sizes of symmetry index groups (a function of shape but precomputed for efficiency); always cumsizes[0]=1, index group 0 is the innermost-loop one NRVec<LA_largeindex> cumsizes; //cumulative sizes of symmetry index groups (a function of shape but precomputed for efficiency); always cumsizes[0]=1, index group 0 is the innermost-loop one
public:
NRVec<INDEXNAME> names; NRVec<INDEXNAME> names;
//indexing
LA_largeindex index(int *sign, const SUPERINDEX &I) const; //map the tensor indices to the position in data LA_largeindex index(int *sign, const SUPERINDEX &I) const; //map the tensor indices to the position in data
LA_largeindex index(int *sign, const FLATINDEX &I) const; //map the tensor indices to the position in data LA_largeindex index(int *sign, const FLATINDEX &I) const; //map the tensor indices to the position in data
LA_largeindex vindex(int *sign, LA_index i1, va_list args) const; //map list of indices to the position in data LA_largeindex vindex(int *sign, LA_index i1, va_list args) const; //map list of indices to the position in data
SUPERINDEX inverse_index(LA_largeindex s) const; //inefficient, but possible if needed SUPERINDEX inverse_index(LA_largeindex s) const; //inefficient, but possible if needed
int myflatposition(int group, int index) const {return flatposition(group,index,shape);}; //flat index of (group,index) within this tensor's shape
int myflatposition(const INDEX &i) const {return flatposition(i,shape);}; //flat index of INDEX i within this tensor's shape
INDEX myindexposition(int flatindex) const {return indexposition(flatindex,shape);}; //inverse mapping: flat index -> (group,index) for this tensor's shape
//constructors //constructors
Tensor() : myrank(-1) {}; Tensor() : myrank(-1) {};
@@ -273,22 +287,16 @@ public:
void deallocate() {data.resize(0); shape.resize(0); groupsizes.resize(0); cumsizes.resize(0); names.resize(0);}; void deallocate() {data.resize(0); shape.resize(0); groupsizes.resize(0); cumsizes.resize(0); names.resize(0);};
inline Signedpointer<T> lhs(const SUPERINDEX &I) {int sign; LA_largeindex i=index(&sign,I); inline Signedpointer<T> lhs(const SUPERINDEX &I) {int sign; LA_largeindex i=index(&sign,I);
#ifdef DEBUG if(sign==0) return Signedpointer<T>(NULL,sign);
if(sign==0) laerror("l-value pointer to nonexistent tensor element"); else return Signedpointer<T>(&data[i],sign);};
#endif
return Signedpointer<T>(&data[i],sign);};
inline T operator()(const SUPERINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; else return signeddata(sign,data[i]);}; inline T operator()(const SUPERINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; else return signeddata(sign,data[i]);};
inline Signedpointer<T> lhs(const FLATINDEX &I) {int sign; LA_largeindex i=index(&sign,I); inline Signedpointer<T> lhs(const FLATINDEX &I) {int sign; LA_largeindex i=index(&sign,I);
#ifdef DEBUG if(sign==0) return Signedpointer<T>(NULL,sign);
if(sign==0) laerror("l-value pointer to nonexistent tensor element"); else return Signedpointer<T>(&data[i],sign);};
#endif
return Signedpointer<T>(&data[i],sign);};
inline T operator()(const FLATINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; else return signeddata(sign,data[i]);}; inline T operator()(const FLATINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; else return signeddata(sign,data[i]);};
inline Signedpointer<T> lhs(LA_index i1...) {va_list args; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); inline Signedpointer<T> lhs(LA_index i1...) {va_list args; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args);
#ifdef DEBUG if(sign==0) return Signedpointer<T>(NULL,sign);
if(sign==0) laerror("l-value pointer to nonexistent tensor element"); else return Signedpointer<T>(&data[i],sign); };
#endif
return Signedpointer<T>(&data[i],sign); };
inline T operator()(LA_index i1...) const {va_list args; ; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); if(sign==0) return 0; else return signeddata(sign,data[i]);}; inline T operator()(LA_index i1...) const {va_list args; ; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); if(sign==0) return 0; else return signeddata(sign,data[i]);};
inline Tensor& operator=(const Tensor &rhs) {myrank=rhs.myrank; shape=rhs.shape; groupsizes=rhs.groupsizes; cumsizes=rhs.cumsizes; data=rhs.data; names=rhs.names; return *this;}; inline Tensor& operator=(const Tensor &rhs) {myrank=rhs.myrank; shape=rhs.shape; groupsizes=rhs.groupsizes; cumsizes=rhs.cumsizes; data=rhs.data; names=rhs.names; return *this;};
@@ -406,6 +414,7 @@ public:
NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=false); //HOSVD-Tucker decomposition, return core tensor in *this, flattened NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=false); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=false) const; //rebuild the original tensor from Tucker Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=false) const; //rebuild the original tensor from Tucker
Tensor linear_transform(const NRVec<NRMat<T> > &x) const; //linear transform by a different matrix per each index group, preserving group symmetries
}; };