bugfix unwind_index; Tucker works
This commit is contained in:
parent ba58060c2d
commit 12cf5b76a5
t.cc (83 lines changed)
@@ -3275,13 +3275,20 @@ if(0)
int n=5;
INDEXGROUP g;
g.number=4;
g.symmetry= -1;
g.symmetry= 0;
//g.symmetry= 1;
//g.symmetry= -1;
g.offset=0;
g.range=n;

Tensor<double> e(g);
e.randomize(1.);
Tensor<double> eu = e.unwind_index(0,1);
Tensor<double> eu2 = e.unwind_index(0,2);
Tensor<double> eu3 = e.unwind_index(0,3);

cout <<e<<endl;


for(int i=0; i<n; ++i)
        for(int j=0; j<n; ++j)
@@ -3289,11 +3296,47 @@ for(int i=0; i<n; ++i)
                        for(int l=0; l<n; ++l)
                                {
				if(e(i,j,k,l)!=eu(j,i,k,l)) laerror("error in unwind_index");
				if(e(i,j,k,l)!=eu2(k,i,j,l)) laerror("error2 in unwind_index");
				if(e(i,j,k,l)!=eu3(l,i,j,k)) laerror("error3 in unwind_index");
				}
cout <<e;
cout <<eu;
}

if(0)
{
int n=2;
NRVec<INDEXGROUP> g(4);
for(int i=0; i<4; ++i)
	{
	g[i].number=1;
	g[i].symmetry= 0;
	g[i].offset=0;
	g[i].range=n+i;
	}

Tensor<double> e(g);
e.randomize(1.);

cout<< "E shape = "<<e.shape<<endl;
Tensor<double> eu = e.unwind_index(1,0);
cout<< "Eu shape = "<<eu.shape<<endl;
Tensor<double> eu2 = e.unwind_index(2,0);
cout<< "Eu2 shape = "<<eu2.shape<<endl;
Tensor<double> eu3 = e.unwind_index(3,0);
cout<< "Eu3 shape = "<<eu3.shape<<endl;

for(int i=0; i<n; ++i)
        for(int j=0; j<n+1; ++j)
                for(int k=0; k<n+2; ++k)
                        for(int l=0; l<n+3; ++l)
                                {
                                if(e(i,j,k,l)!=eu(j,i,k,l)) laerror("error in unwind_index");
                                if(e(i,j,k,l)!=eu2(k,i,j,l)) laerror("error2 in unwind_index");
                                if(e(i,j,k,l)!=eu3(l,i,j,k)) laerror("error3 in unwind_index");
                                }
}




if(0)
{
@@ -3355,7 +3398,6 @@ for(int i=0; i<n; ++i)
//cout <<c;
}

//test Tensor apply_permutation_algebra

//test unwind_indices
if(0)
@@ -3390,7 +3432,8 @@ if(0)
int n=5;
INDEXGROUP g;
g.number=2;
g.symmetry= 1;
//g.symmetry= 1;
g.symmetry= 0;
g.offset=0;
g.range=n;

@@ -3557,7 +3600,7 @@ y.apply_permutation_algebra(rhsvec,b,false,1.,0.);
cout <<y;
}

if(1)
if(0)
{
//compact SVD
NRMat<double> a;
@@ -3576,5 +3619,33 @@ cout << "Error "<<(u*sdiag*vt-abak).norm()<<endl;

}

if(1)
{
//tucker of a flat tensor
int r,n;
cin>>r>>n;
NRVec<INDEXGROUP> shape(r);
for(int i=0; i<r; ++i)
	{
	shape[i].number=1;
	shape[i].symmetry=0;
	shape[i].range=n+i;
	shape[i].offset=0;
	}
Tensor<double> x(shape);
x.randomize(1.);
cout<<x;
Tensor<double> x0(x);
x0.copyonwrite();
bool inv=true;
NRVec<NRMat<double> > dec=x.Tucker(1e-12,inv);
cout<<"Tucker\n"<<x<<endl;
cout<<dec;

Tensor<double> y = x.inverseTucker(dec,inv);
cout <<"invTucker\n"<<y;
cout <<"Error = "<<(x0-y).norm()<<endl;
}


}//main
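
For readers outside the test file, here is a minimal standalone sketch of the unwind_index contract the loops above verify (assuming the Tensor/INDEXGROUP API declared in tensor.h and the header/namespace conventions of t.cc; this sketch is not part of the commit):

#include "tensor.h"
using namespace std;
using namespace LA;

int main(void)
{
INDEXGROUP g;
g.number=4;   // one group of four indices
g.symmetry=0; // no permutational symmetry within the group
g.offset=0;
g.range=3;
Tensor<double> e(g);
e.randomize(1.);
// unwind the third index of group 0: it becomes the leftmost standalone index
Tensor<double> eu = e.unwind_index(0,2);
for(int i=0; i<3; ++i) for(int j=0; j<3; ++j) for(int k=0; k<3; ++k) for(int l=0; l<3; ++l)
	if(e(i,j,k,l)!=eu(k,i,j,l)) laerror("unwind_index identity violated");
return 0;
}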
tensor.cc (167 lines changed)
@@ -34,7 +34,7 @@ int r=0;
for(int i=0; i<shape.size(); ++i)
        {
	const indexgroup *sh = &(* const_cast<const NRVec<indexgroup> *>(&shape))[i];
        if(sh->number==0) laerror("empty index group");
        if(sh->number<=0) laerror("empty index group"); //we do not support scalar as a trivial case
        r+=sh->number;
        }
myrank=r;
@@ -46,6 +46,7 @@ return r;
template<typename T>
LA_largeindex Tensor<T>::calcsize()
{
if(shape.size()==0) laerror("tensor must have rank at least 1");
groupsizes.resize(shape.size());
cumsizes.resize(shape.size());
LA_largeindex s=1;
@@ -330,11 +331,11 @@ calcsize();
}

template<typename T>
Tensor<T>::Tensor(const NRMat<T> &x)
Tensor<T>::Tensor(const NRMat<T> &x, bool flat)
: data(&x(0,0),x.nrows()*x.ncols())
{
myrank=2;
if(x.nrows()==x.ncols())
if(x.nrows()==x.ncols() && !flat)
	{
	shape.resize(1);
	shape[0].number=2;
@@ -542,7 +543,9 @@ help_t<T>->data[target] = *v;
template<typename T>
Tensor<T> Tensor<T>::permute_index_groups(const NRPerm<int> &p) const
{
NRVec<indexgroup> newshape=shape.permuted(p);
//std::cout <<"permute_index_groups permutation = "<<p<<std::endl;
NRVec<indexgroup> newshape=shape.permuted(p,true);
//std::cout <<"permute_index_groups newshape = "<<newshape<<std::endl;
Tensor<T> r(newshape);

//prepare statics for the callback
@@ -568,13 +571,8 @@ for(int i=0; i<I.size(); ++i)
return J;
}

int flatposition(const INDEX &i, const NRVec<indexgroup> &shape)
{
int ii=0;
for(int g=0; g<i.group; ++g) ii+= shape[g].number;
ii += i.index;
return ii;
}
int flatposition(const INDEX &i, const NRVec<indexgroup> &shape)
{return flatposition(i.group,i.index,shape);}

int flatposition(int group, int index, const NRVec<indexgroup> &shape)
{
@@ -584,12 +582,26 @@ ii += index;
return ii;
}

INDEX indexposition(int flatindex, const NRVec<indexgroup> &shape)
{
INDEX I={0,0};
if(flatindex<0) laerror("illegal index in indexposition");
for(int g=0; g<shape.size(); ++g)
	{
	I.group=g;
	if(flatindex<shape[g].number) {I.index=flatindex; return I;}
	flatindex-=shape[g].number;
	}
laerror("flatindex out of range");
return I;
}
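
A hedged round-trip sketch for the two helpers above (the shape setup mirrors the t.cc tests; the check itself is not part of the commit):

NRVec<indexgroup> shape(2);
shape[0].number=2; shape[0].symmetry=0; shape[0].offset=0; shape[0].range=4;
shape[1].number=3; shape[1].symmetry=0; shape[1].offset=0; shape[1].range=4;
for(int f=0; f<5; ++f) // 2+3 flat positions in total
	{
	INDEX I = indexposition(f,shape); // flat position -> (group,index)
	if(flatposition(I,shape)!=f) laerror("indexposition is not inverse to flatposition");
	}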


template<typename T>
static void unwind_callback(const SUPERINDEX &I, T *v)
{
FLATINDEX J = superindex2flat(I);
FLATINDEX JP = J.permuted(*help_p,true);
FLATINDEX JP = J.permuted(*help_p,false);
//std::cout <<"TEST unwind_callback: from "<<JP<<" TO "<<J<<std::endl;
*v = (*help_tt<T>)(JP); //rhs operator() generates the redundant elements for the unwinded lhs tensor
}
@@ -637,6 +649,8 @@ for(int i=0; i<shape.size(); ++i)
	else flatindex += shape[i].number;
	}

std::cout <<"unwind new shape = "<<newshape<<std::endl;

Tensor<T> r(newshape);
if(r.rank()!=rank()) laerror("internal error 2 in unwind_index");

@@ -656,6 +670,8 @@ if(!indexperm.is_valid())
	laerror("internal error 3 in unwind_index");
	}

std::cout <<"unwind permutation = "<<indexperm<<std::endl;

//loop recursively and do the unwinding
help_tt<T> = this;
help_p = &indexperm;
@@ -782,7 +798,7 @@ return r;
}

template<typename T>
static void auxmatmult(int nn, int mm, int kk, T *r, T *a, T *b,  T alpha=1, T beta=0, bool conjugate=false) //R(nn,mm) = A(nn,kk) * B^T(mm,kk)
static void auxmatmult(int nn, int mm, int kk, T *r, const T *a, const T *b,  T alpha=1, T beta=0, bool conjugate=false) //R(nn,mm) = A(nn,kk) * B^T(mm,kk)
{
for(int i=0; i<nn; ++i) for(int j=0; j<mm; ++j)
	{
@@ -793,13 +809,13 @@ for(int i=0; i<nn; ++i) for(int j=0; j<mm; ++j)


template<>
void auxmatmult<double>(int nn, int mm, int kk, double *r, double *a, double *b, double alpha, double beta, bool conjugate)
void auxmatmult<double>(int nn, int mm, int kk, double *r, const double *a, const double *b, double alpha, double beta, bool conjugate)
{
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasTrans, nn, mm, kk, alpha, a, kk, b, kk, beta, r, mm);
}

template<>
void auxmatmult<std::complex<double> >(int nn, int mm, int kk, std::complex<double> *r, std::complex<double> *a, std::complex<double> *b,  std::complex<double> alpha,  std::complex<double> beta, bool conjugate)
void auxmatmult<std::complex<double> >(int nn, int mm, int kk, std::complex<double> *r, const std::complex<double> *a, const std::complex<double> *b,  std::complex<double> alpha,  std::complex<double> beta, bool conjugate)
{
cblas_zgemm(CblasRowMajor, CblasNoTrans, (conjugate?CblasConjTrans:CblasTrans), nn, mm, kk, &alpha, a, kk, b, kk, &beta, r, mm);
}
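
For orientation, the generic loop body that the hunk above elides presumably computes the following (a reference sketch consistent with the R(nn,mm) = A(nn,kk) * B^T(mm,kk) comment and the BLAS specializations; the name auxmatmult_reference and the beta==0 overwrite convention are assumptions, and the conjugate flag is omitted):

template<typename T>
static void auxmatmult_reference(int nn, int mm, int kk, T *r, const T *a, const T *b, T alpha=1, T beta=0)
{
for(int i=0; i<nn; ++i) for(int j=0; j<mm; ++j)
	{
	T s=0;
	for(int k=0; k<kk; ++k) s += a[i*kk+k]*b[j*kk+k]; // row i of A dotted with row j of B
	r[i*mm+j] = (beta==(T)0 ? alpha*s : beta*r[i*mm+j]+alpha*s); // GEMM-style accumulation
	}
}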
@@ -823,9 +839,8 @@ if(rhsindex<0||rhsindex>=rhs.shape[rhsgroup].number)  laerror("wrong index numbe
if(rhs1.shape[group].offset != rhs.shape[rhsgroup].offset) laerror("incompatible index offset in contraction");
if(rhs1.shape[group].range != rhs.shape[rhsgroup].range) laerror("incompatible index range in contraction");

Tensor<T> u = rhs1.unwind_index(group,index);
if(conjugate1) u.conjugateme();
Tensor<T> rhsu = rhs.unwind_index(rhsgroup,rhsindex);
const Tensor<T> u = conjugate1? (rhs1.unwind_index(group,index)).conjugate() : rhs1.unwind_index(group,index);
const Tensor<T> rhsu = rhs.unwind_index(rhsgroup,rhsindex);


NRVec<indexgroup> newshape(u.shape.size()+rhsu.shape.size()-2);
@@ -1077,51 +1092,99 @@ return r;


template<typename T>
NRVec<NRMat<T> > Tensor<T>::Tucker(typename LA_traits<T>::normtype thr)
NRVec<NRMat<T> > Tensor<T>::Tucker(typename LA_traits<T>::normtype thr, bool inverseorder)
{
int r=rank();
NRVec<NRMat<T> > ret(r);
if(r<2) return ret;
if(r<1) laerror("illegal rank in Tucker");
copyonwrite();

int rr=0;
for(int i=0; i<shape.size(); ++i)
	for(int j=0; j<shape[i].number; ++j) //loop over all indices
if(r==1) //create an analogous output for the trivial case
	{
	ret[0]=NRMat<T>(data,data.size(),1);
	shape[0].range=1;
	data.resize(calcsize());
	calcrank();
	data[0]=1;
	return ret;
	}

//loop over all indices; relies on the fact that unwinding does not change order of remaining indices
for(int i=0; i<r; ++i)
	{
	INDEX I=indexposition(i,shape);
	NRMat<T> um;
	NRVec<indexgroup> ushape;
	{
	Tensor<T> u=unwind_index(I);
	ushape=u.shape; ushape.copyonwrite();
	um=u.matrix();
	}
	int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values
	NRMat<T> u(um.nrows(),mini),vt(mini,um.ncols());
	NRVec<typename LA_traits<T>::normtype> w(mini);
	singular_decomposition(um,&u,w,&vt,0);
	um.resize(0,0); //deallocate
	int preserve=mini;
	for(int k=0; k<mini; ++k) if(w[k]<thr) {preserve=k; break;}
	if(preserve==0) laerror("singular tensor in Tucker decomposition");
	NRMat<T> umnew;
	if(preserve<mini)
		{
		NRMat<T> um;
		NRVec<indexgroup> ushape;
		{
		Tensor<T> u=unwind_index(i,j);
		ushape=u.shape;
		um=u.matrix();
		}
		int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values
		NRMat<T> u(um.nrows(),mini),vt(mini,um.ncols());
		NRVec<typename LA_traits<T>::normtype> w(mini);
		singular_decomposition(um,&u,w,&vt,0);
		um.resize(0,0); //deallocate
		int preserve=mini;
		for(int k=0; k<mini; ++k) if(w[k]<thr) {preserve=k; break;}
		if(preserve==0) laerror("singular tensor in Tucker decomposition");
		NRMat<T> umnew;
		if(preserve<mini)
			{
			vt=vt.submatrix(0,preserve-1,0,um.ncols()-1);
			w=w.subvector(0,preserve-1);
			umnew=u.submatrix(0,um.nrows()-1,0,preserve-1);
			}
		else umnew=u;
		ret[rr++]=vt.transpose(true);
		umnew.diagmultr(w);
		//rebuild tensor of the preserved shape from matrix
		ushape[0].range=preserve;
		NRVec<T> newdata(umnew);
		*this = Tensor(ushape,newdata);
		vt=vt.submatrix(0,preserve-1,0,um.ncols()-1);
		w=w.subvector(0,preserve-1);
		umnew=u.submatrix(0,um.nrows()-1,0,preserve-1);
		}
	else umnew=u;
	ret[(inverseorder? r-i-1 : i)]=vt.transpose(true);
	umnew.diagmultr(w);
	//rebuild tensor of the preserved shape from matrix
	ushape[0].range=preserve;
	{
	NRVec<T> newdata(umnew);
	umnew.resize(0,0);//deallocate
	*this = Tensor(ushape,newdata);
	}
	}
if(!is_flat()) laerror("this should not happen");
if(!inverseorder)
	{
	NRPerm<int> p(r);
	for(int i=1; i<=r; ++i) p[r-i+1]=i;
	*this =  permute_index_groups(p);
	}
return ret;
}



template<typename T>
Tensor<T> Tensor<T>::inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder) const
{
if(rank()!=x.size()) laerror("input of inverseTucker does not match rank");
Tensor<T> tmp(*this);
Tensor<T> r;
if(!is_flat()) laerror("inverseTucker only for flat tensors as produced by Tucker");
for(int i=0; i<rank(); ++i)
	{
	Tensor<T> mat(x[i],true);
	r= tmp.contraction(i,0,mat,0,0,(T)1,false,false);
	if(i<rank()-1)
		{
		tmp=r;
		r.deallocate();
		}
	}
if(!inverseorder)
	{
	NRPerm<int> p(r.rank());
	for(int i=1; i<=r.rank(); ++i) p[r.rank()-i+1]=i;
	return r.permute_index_groups(p);
	}
else
	return r;
}
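
Taken together, the round trip exercised by the new t.cc test looks like this (a usage sketch, assuming a flat tensor x built with a shape as in t.cc; Tucker replaces *this by the core tensor and returns the per-index factor matrices):

Tensor<double> x(shape);
x.randomize(1.);
Tensor<double> x0(x);                              // keep the original for comparison
x0.copyonwrite();
NRVec<NRMat<double> > dec = x.Tucker(1e-12,true);  // x now holds the core tensor
Tensor<double> y = x.inverseTucker(dec,true);      // rebuild from core + factors
cout <<"reconstruction error = "<<(x0-y).norm()<<endl;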
tensor.h (17 lines changed)
@@ -35,6 +35,7 @@
#include "vec.h"
#include "mat.h"
#include "smat.h"
#include "fourindex.h"
#include "miscfunc.h"

//TODO:
@@ -44,6 +45,10 @@
//@@@ will need to store vector of INDEX to the original tensor for the result's flatindex
//@@@ will not be particularly efficient
//
//@@@conversions to/from fourindex
//
//@@@!!!!!!!!!!!const loopover and grouploopover
//
//@@@?general permutation of individual indices - check the indices in sym groups remain adjacent, calculate result's shape, loopover the result and permute using unwind_callback
//
//
@@ -122,7 +127,9 @@ int index;
};
typedef NRVec<INDEX> INDEXLIST; //collection of several indices

int flatposition(int group, int index, const NRVec<indexgroup> &shape);
int flatposition(const INDEX &i, const NRVec<indexgroup> &shape); //position of that index in FLATINDEX
INDEX indexposition(int flatindex, const NRVec<indexgroup> &shape); //inverse to flatposition

FLATINDEX superindex2flat(const SUPERINDEX &I);

@@ -150,7 +157,7 @@ public:
	Tensor(const Tensor &rhs): myrank(rhs.myrank), shape(rhs.shape), groupsizes(rhs.groupsizes), cumsizes(rhs.cumsizes), data(rhs.data) {};
	Tensor(int xrank, const NRVec<indexgroup> &xshape, const NRVec<LA_largeindex> &xgroupsizes,  const  NRVec<LA_largeindex> xcumsizes, const  NRVec<T> &xdata) :  myrank(xrank), shape(xshape), groupsizes(xgroupsizes), cumsizes(xcumsizes), data(xdata) {};
	explicit Tensor(const NRVec<T> &x);
	explicit Tensor(const NRMat<T> &x);
	explicit Tensor(const NRMat<T> &x, bool flat=false);
	explicit Tensor(const NRSMat<T> &x);
	NRMat<T> matrix() const {return NRMat<T>(data,data.size()/groupsizes[0],groupsizes[0],0);}; //reinterpret as matrix with column index being the tensor's leftmost index group (typically the unwound single index)

@@ -162,6 +169,7 @@ public:
	LA_largeindex size() const {return data.size();};
	void copyonwrite() {shape.copyonwrite(); groupsizes.copyonwrite(); cumsizes.copyonwrite(); data.copyonwrite();};
	void resize(const NRVec<indexgroup> &s) {shape=s; data.resize(calcsize()); calcrank();};
	void deallocate() {data.resize(0); shape.resize(0); groupsizes.resize(0); cumsizes.resize(0);};
	inline Signedpointer<T> lhs(const SUPERINDEX &I) {int sign; LA_largeindex i=index(&sign,I); return Signedpointer<T>(&data[i],sign);};
	inline T operator()(const SUPERINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; return sign>0 ?data[i] : -data[i];};
	inline Signedpointer<T> lhs(const FLATINDEX &I) {int sign; LA_largeindex i=index(&sign,I); return Signedpointer<T>(&data[i],sign);};
@@ -176,6 +184,8 @@ public:
	inline Tensor& operator/=(const T &a) {data/=a; return *this;};
	inline Tensor operator/(const T &a) const {Tensor r(*this); r /=a; return r;};

	typename LA_traits<T>::normtype norm() const {return data.norm();};

	inline Tensor operator*(const Tensor &rhs) const {return Tensor(rhs.shape.concat(shape),data.otimes2vec(rhs.data));} //outer product,  rhs indices will be the less significant

	Tensor& conjugateme() {data.conjugateme(); return *this;};
@@ -215,6 +225,7 @@ public:

	Tensor permute_index_groups(const NRPerm<int> &p) const; //rearrange the tensor storage permuting index groups as a whole
	Tensor unwind_index(int group, int index) const; //separate an index from a group and expand it to full range as the least significant one (the leftmost one)
	Tensor unwind_index(const INDEX &I) const {return unwind_index(I.group,I.index);};
	Tensor unwind_indices(const INDEXLIST &il) const; //the same for a list of indices
	void addcontraction(const Tensor &rhs1, int group, int index, const Tensor &rhs2, int rhsgroup, int rhsindex, T alpha=1, T beta=1, bool doresize=false, bool conjugate1=false, bool conjugate=false); //rhs1 will have more significant non-contracted indices in the result than rhs2
	inline Tensor contraction(int group, int index, const Tensor &rhs, int rhsgroup, int rhsindex, T alpha=1, bool conjugate1=false, bool conjugate=false) const {Tensor<T> r; r.addcontraction(*this,group,index,rhs,rhsgroup,rhsindex,alpha,0,true, conjugate1, conjugate); return r; };
@@ -232,8 +243,8 @@ public:
	void split_index_group(int group); //formal split of a non-symmetric index group WITHOUT the need for data reorganization
	void merge_adjacent_index_groups(int groupfrom, int groupto); //formal merge of non-symmetric index groups  WITHOUT the need for data reorganization
	Tensor merge_index_groups(const NRVec<int> &groups) const;
	NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
	NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
	Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker
};
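
One usage note on the new flat flag (an inference from the constructor change in tensor.cc, where a square NRMat becomes a single two-index group unless flat is set; inverseTucker passes flat=true so each factor matrix keeps two independent single-index groups for the contraction):

NRMat<double> m(3,3);
Tensor<double> t2(m);         // square matrix: one index group with number=2
Tensor<double> tflat(m,true); // flat: two separate single-index groups, as inverseTucker needs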