Tucker tested on compressed tensors, flattening implemented

parent a3ace7c757
commit cd09d93c27
t.cc (59 changes)
@@ -3619,7 +3619,7 @@ cout << "Error "<<(u*sdiag*vt-abak).norm()<<endl;
 }
 
-if(1)
+if(0)
 {
 //tucker of a flat tensor
 int r,n;
@@ -3647,5 +3647,62 @@ cout <<"invTucker\n"<<y;
 cout <<"Error = "<<(x0-y).norm()<<endl;
 }
 
+if(0)
+{
+//tucker of a non-flat non-symmetric tensor
+int r,n;
+cin>>r>>n;
+INDEXGROUP shape;
+        {
+        shape.number=r;
+        shape.symmetry=0;
+        shape.range=n;
+        shape.offset=0;
+        }
+Tensor<double> x(shape);
+x.randomize(1.);
+cout<<x;
+Tensor<double> x0(x);
+x0.copyonwrite();
+bool inv=true;
+NRVec<NRMat<double> > dec=x.Tucker(1e-12,inv);
+cout<<"Tucker\n"<<x<<endl;
+cout<<dec;
+
+Tensor<double> y = x.inverseTucker(dec,inv);
+cout <<"invTucker\n"<<y;
+x0.split_index_group(0);
+cout <<"Error = "<<(x0-y).norm()<<endl;
+}
+
+if(1)
+{
+//tucker of a non-flat symmetric tensor
+int r,n;
+cin>>r>>n;
+INDEXGROUP shape;
+        {
+        shape.number=r;
+        shape.symmetry= -1;
+        shape.range=n;
+        shape.offset=0;
+        }
+Tensor<double> x(shape);
+x.randomize(1.);
+cout<<x;
+Tensor<double> x0(x);
+x0.copyonwrite();
+bool inv=true;
+NRVec<NRMat<double> > dec=x.Tucker(1e-12,inv);
+cout<<"Tucker\n"<<x<<endl;
+cout<<dec;
+
+Tensor<double> y = x.inverseTucker(dec,inv);
+cout <<"invTucker\n"<<y;
+Tensor<double> x1=x0.flatten();
+cout <<"Error = "<<(x1-y).norm()<<endl;
+}
+
+
+
 }//main
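Note on the tests above: Tucker() leaves the (flat) core tensor in *this, so inverseTucker() rebuilds a fully flat tensor. The non-symmetric test therefore only needs the cheap split_index_group(0) on the reference copy, while the symmetric test must decompress it with the new flatten() before the error norm is meaningful. A minimal sketch of the same pattern (r=3, n=5 and the 1e-12 threshold are illustrative values, not prescribed ones):

    INDEXGROUP shape;
    shape.number=3; shape.symmetry= -1; shape.range=5; shape.offset=0; // antisymmetric triple of indices
    Tensor<double> x(shape);
    x.randomize(1.);
    Tensor<double> x0(x);
    x0.copyonwrite();                                  // keep an independent reference copy
    NRVec<NRMat<double> > dec = x.Tucker(1e-12,true);  // core tensor is left in x
    Tensor<double> y = x.inverseTucker(dec,true);      // reconstruction comes out flat
    cout << (x0.flatten()-y).norm() << endl;           // so compare in the flat representation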
tensor.cc (89 changes)
@@ -649,7 +649,7 @@ for(int i=0; i<shape.size(); ++i)
 	else flatindex += shape[i].number;
 	}
 
-std::cout <<"unwind new shape = "<<newshape<<std::endl;
+//std::cout <<"unwind new shape = "<<newshape<<std::endl;
 
 Tensor<T> r(newshape);
 if(r.rank()!=rank()) laerror("internal error 2 in unwind_index");
@@ -670,7 +670,7 @@ if(!indexperm.is_valid())
 	laerror("internal error 3 in unwind_index");
 	}
 
-std::cout <<"unwind permutation = "<<indexperm<<std::endl;
+//std::cout <<"unwind permutation = "<<indexperm<<std::endl;
 
 //loop recursively and do the unwinding
 help_tt<T> = this;
@@ -680,6 +680,79 @@ return r;
 }
 
 
+template<typename T>
+static void flatten_callback(const SUPERINDEX &I, T *v)
+{
+FLATINDEX J = superindex2flat(I);
+//std::cout <<"TEST flatten_callback: from "<<JP<<" TO "<<J<<std::endl;
+*v = (*help_tt<T>)(J); //rhs operator() generates the redundant elements for the unwound lhs tensor
+}
+//
+
+
+template<typename T>
+Tensor<T> Tensor<T>::flatten(int group) const
+{
+if(group>=shape.size()) laerror("too high group number in flatten");
+if(is_flat()) return *this;
+if(group>=0) //single group
+	{
+	if(shape[group].number==1) return *this;
+	if(shape[group].symmetry==0)
+		{
+		Tensor<T> r(*this);
+		r.split_index_group(group);
+		return r;
+		}
+	}
+if(group<0 && !is_compressed())
+	{
+	Tensor<T> r(*this);
+	for(int g=0; g<shape.size(); ++g) if(shape[g].number>1) r.split_index_group(g);
+	return r;
+	}
+
+//general case
+int newsize;
+if(group<0) newsize=rank();
+else newsize=shape.size()+shape[group].number-1;
+
+//build new shape
+NRVec<indexgroup> newshape(newsize);
+int gg=0;
+for(int g=0; g<shape.size(); ++g)
+	{
+	if((group<0 || g==group) && shape[g].number>1) //flatten this group
+		{
+		for(int i=0; i<shape[g].number; ++i)
+			{
+			newshape[gg].symmetry=0;
+			newshape[gg].number=1;
+			newshape[gg].range=shape[g].range;
+#ifndef LA_TENSOR_ZERO_OFFSET
+			newshape[gg].offset = shape[g].offset;
+#endif
+			gg++;
+			}
+		}
+	else //preserve this group
+		{
+		newshape[gg++] = shape[g];
+		}
+	}
+
+std::cout <<"Flatten new shape = "<<newshape<<std::endl;
+
+//decompress the tensor data
+Tensor<T> r(newshape);
+help_tt<T> = this;
+r.loopover(flatten_callback);
+return r;
+}
+
+
+
+
 template<typename T>
 Tensor<T> Tensor<T>::unwind_indices(const INDEXLIST &il) const
 {
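The flatten() added above takes three routes: a single non-symmetric group, or group<0 on an uncompressed tensor, reduces to the formal split_index_group() relabelling with no data movement; a genuinely compressed tensor gets a fully split, symmetry-free shape and is filled by loopover() with flatten_callback, whose operator() call on the source regenerates the redundant elements of each symmetric group. A hedged sketch of the decompression effect (shape values are illustrative):

    INDEXGROUP g;
    g.number=2; g.symmetry=1; g.range=4; g.offset=0; // symmetric index pair: 10 stored elements
    Tensor<double> t(g);
    t.randomize(1.);
    Tensor<double> tf = t.flatten(); // two single-index groups: full 4x4 = 16 elements
    // elements related by symmetry are now stored explicitly; tf agrees with t on the
    // non-redundant triangle and duplicates it on the other side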
@@ -1012,7 +1085,7 @@ void Tensor<T>::split_index_group(int group)
 {
 if(group<0||group >= shape.size()) laerror("illegal index group number");
 if(shape[group].number==1) return; //nothing to split
-if(shape[group].symmetry!=0) laerror("only non-symmetric index group can be splitted");
+if(shape[group].symmetry!=0) laerror("only a non-symmetric index group can be split, use flatten instead");
 
 NRVec<indexgroup> newshape(shape.size()+shape[group].number-1);
 int gg=0;
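Context for the amended error message above: split_index_group() can stay purely formal because a non-symmetric group of number indices over range values occupies exactly the same contiguous block of range^number elements, in the same order, as number separate single-index groups; a symmetric group stores only its non-redundant elements, so splitting it requires real decompression via flatten(). Illustratively (a hypothetical helper, assuming the usual nested-index layout):

    // a non-symmetric pair {number=2, range=n} addresses its elements like
    // two nested single indices -- one linear position formula serves both shapes:
    size_t flatpos(int i, int j, int n) { return (size_t)i*n + j; } // illustrative only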
@@ -1133,16 +1206,19 @@ for(int i=0; i<r; ++i)
 	//std::cout << "resulting U "<<u<<std::endl;
 	//std::cout << "resulting W "<<w<<std::endl;
 	//std::cout << "resulting VT "<<vt<<std::endl;
+	int umnr=um.nrows();
+	int umnc=um.ncols();
 	um.resize(0,0); //deallocate
 	int preserve=mini;
 	for(int k=0; k<mini; ++k) if(w[k]<thr) {preserve=k; break;}
 	if(preserve==0) laerror("singular tensor in Tucker decomposition");
 	NRMat<T> umnew;
+	//std::cout <<"TEST "<<i<<" mini preserve "<<mini<<" "<<preserve<<std::endl;
 	if(preserve<mini)
 		{
-		vt=vt.submatrix(0,preserve-1,0,um.ncols()-1);
+		vt=vt.submatrix(0,preserve-1,0,umnc-1);
 		w=w.subvector(0,preserve-1);
-		umnew=u.submatrix(0,um.nrows()-1,0,preserve-1);
+		umnew=u.submatrix(0,umnr-1,0,preserve-1);
 		}
 	else umnew=u;
 	ret[(inverseorder? r-i-1 : i)]=vt.transpose(true);
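The umnr/umnc caching above fixes a use-after-deallocation bug: um.resize(0,0) frees the matrix, yet the old code afterwards still read um.nrows() and um.ncols() as submatrix bounds, obtaining 0 instead of the original dimensions. A minimal sketch of the corrected order (the concrete sizes and the (rows,cols) NRMat constructor are illustrative assumptions):

    NRMat<double> um(7,5), u(7,5);
    int preserve=3;
    int umnr=um.nrows(), umnc=um.ncols(); // cache the dimensions first
    um.resize(0,0);                       // deallocate; nrows()/ncols() now report 0
    NRMat<double> umnew = u.submatrix(0, umnr-1, 0, preserve-1); // uses the cached 7, not 0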
@@ -1197,6 +1273,9 @@ else
 
 
 
+
+
+
 template class Tensor<double>;
 template class Tensor<std::complex<double> >;
 template std::ostream & operator<<(std::ostream &s, const Tensor<double> &x);
tensor.h (11 changes)
@@ -40,14 +40,16 @@
 
 //TODO:
 //@@@!!!!!! - implement index names and contractions, unwinding etc. by named index list
+//@@@index names flat or in groups ?
 //
 //@@@contraction inside one tensor - compute resulting shape, loopover the shape, create index into the original tensor + loop over the contr. index, do the summation, store result
 //@@@ will need to store vector of INDEX to the original tensor for the result's flatindex
 //@@@ will not be particularly efficient
 //
-//@@@conversions to/from fourindex
+//@@@conversions to/from fourindex, optional negative range for beta spin handling
+//@@@ optional distinguish covariant and contravariant check in contraction
 //
-//@@@!!!!!!!!!!!const loopover and grouploopover
+//maybe const loopover and grouploopover to avoid problems with shallowly copied tensors
 //
 //@@@?general permutation of individual indices - check the indices in sym groups remain adjacent, calculate result's shape, loopover the result and permute using unwind_callback
 //
@@ -162,6 +164,7 @@ public:
 	NRMat<T> matrix() const {return NRMat<T>(data,data.size()/groupsizes[0],groupsizes[0],0);}; //reinterpret as matrix with column index being the tensor's leftmost index group (typically the unwound single index)
 
 	bool is_flat() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1) return false; return true;};
+	bool is_compressed() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1&&shape[i].symmetry!=0) return true; return false;};
 	void clear() {data.clear();};
 	int rank() const {return myrank;};
 	int calcrank(); //is computed from shape
@@ -240,9 +243,11 @@ public:
 //								 Note that *this tensor can be e.g. antisymmetric while rhs is not and is being antisymmetrized by the PermutationAlgebra
 //								 The efficiency is not optimal, even when avoiding the outer product, the calculation is done indexing element by element
 //								 More efficient would be applying permutation algebra symbolically and efficiently computing term by term
-	void split_index_group(int group); //formal split of a non-symmetric index group WITHOUT the need for data reorganization
+
+	void split_index_group(int group); //formal in-place split of a non-symmetric index group WITHOUT the need for data reorganization
 	void merge_adjacent_index_groups(int groupfrom, int groupto); //formal merge of non-symmetric index groups WITHOUT the need for data reorganization
 	Tensor merge_index_groups(const NRVec<int> &groups) const;
+	Tensor flatten(int group= -1) const; //split and uncompress a given group or all of them
 	NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
 	Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker
 };
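The new is_compressed() predicate complements is_flat(): a tensor is flat when every index group holds a single index, and compressed when some multi-index group carries a nonzero symmetry flag and therefore stores only non-redundant elements. flatten() uses the pair to choose the free relabelling path over full decompression. Illustrative shapes (field values are examples only):

    INDEXGROUP flat1;  flat1.number=1;  flat1.symmetry=0;  flat1.range=4;  flat1.offset=0;
    INDEXGROUP plain3; plain3.number=3; plain3.symmetry=0; plain3.range=4; plain3.offset=0;
    INDEXGROUP anti3;  anti3.number=3;  anti3.symmetry=-1; anti3.range=4;  anti3.offset=0;
    // Tensor<double>(flat1):  is_flat()==true,  is_compressed()==false
    // Tensor<double>(plain3): is_flat()==false, is_compressed()==false -> flatten() relabels for free
    // Tensor<double>(anti3):  is_flat()==false, is_compressed()==true  -> flatten() must decompress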