small improvements in Tucker

This commit is contained in:
Jiri Pittner 2025-10-22 17:08:41 +02:00
parent 12cf5b76a5
commit d46c2d7235

View File

@ -1091,6 +1091,9 @@ return r;
} }
//NOTE: Tucker of rank=2 is inherently inefficient - result is a diagonal tensor stored in full and 2 calls to SVD
//we could avoid the second SVD, but the wasteful storage and reconstruction would remain
//
template<typename T> template<typename T>
NRVec<NRMat<T> > Tensor<T>::Tucker(typename LA_traits<T>::normtype thr, bool inverseorder) NRVec<NRMat<T> > Tensor<T>::Tucker(typename LA_traits<T>::normtype thr, bool inverseorder)
{ {
@ -1101,11 +1104,13 @@ copyonwrite();
if(r==1) //create an analogous output for the trivial case if(r==1) //create an analogous output for the trivial case
{ {
typename LA_traits<T>::normtype N=data.norm();
data*= (1/N);
ret[0]=NRMat<T>(data,data.size(),1); ret[0]=NRMat<T>(data,data.size(),1);
shape[0].range=1; shape[0].range=1;
data.resize(calcsize()); data.resize(calcsize());
calcrank(); calcrank();
data[0]=1; data[0]=(T)N;
return ret; return ret;
} }
@ -1123,7 +1128,11 @@ for(int i=0; i<r; ++i)
int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values
NRMat<T> u(um.nrows(),mini),vt(mini,um.ncols()); NRMat<T> u(um.nrows(),mini),vt(mini,um.ncols());
NRVec<typename LA_traits<T>::normtype> w(mini); NRVec<typename LA_traits<T>::normtype> w(mini);
std::cout << "decomposing "<<um<<std::endl;
singular_decomposition(um,&u,w,&vt,0); singular_decomposition(um,&u,w,&vt,0);
std::cout << "resulting U "<<u<<std::endl;
std::cout << "resulting W "<<w<<std::endl;
std::cout << "resulting VT "<<vt<<std::endl;
um.resize(0,0); //deallocate um.resize(0,0); //deallocate
int preserve=mini; int preserve=mini;
for(int k=0; k<mini; ++k) if(w[k]<thr) {preserve=k; break;} for(int k=0; k<mini; ++k) if(w[k]<thr) {preserve=k; break;}