Compare commits


36 Commits

Author SHA1 Message Date
26ed939901 tensor: bugfix in unwind_index and linear_transform of single group 2025-12-15 20:32:01 +01:00
671b924c8c lanczos: little bug fix 2025-12-15 14:36:16 +01:00
1965f4f653 lanczos: first working version 2025-12-12 15:54:32 +01:00
7e90168443 davidson - comment 2025-12-12 14:22:15 +01:00
4500752146 lanczos: skeleton initial commit 2025-12-10 17:12:00 +01:00
62d9e18043 diagonalize3 for n smaller than dimension 2025-12-10 17:11:26 +01:00
6fe5dd8be6 FIXED a recently introduced bug in vec.h 2025-12-09 17:45:38 +01:00
1cb536421f davidson: added root targeting 2025-12-09 16:03:04 +01:00
3d284d544a diagonalize3 wrapper for dsteqr 2025-12-09 15:21:18 +01:00
c70e5c5f18 davidson for complex hermitean 2025-12-05 18:14:55 +01:00
651f614c91 get_git_version bash 2025-12-02 13:16:52 +01:00
b84b4571cb SMat row sums 2025-12-01 17:40:30 +01:00
3a176ddb13 support for Orca format in binary put() and get() 2025-11-28 14:20:05 +01:00
253a554e8e fourindex: added comment/enum 2025-11-27 14:56:22 +01:00
76d00b6b2b fourindex: by default ignore assignments to zero elements due to antisymmetry 2025-11-27 14:11:54 +01:00
831404d08d fourindex: adding set() for compatibility with templated use 2025-11-26 17:46:26 +01:00
b556b06442 twoside_transform for complex and for non-symetric case 2025-11-26 16:57:21 +01:00
3da3efa57c tensor: improved diagnostics 2025-11-20 21:00:33 +01:00
ad1c4ee968 tensor: linear_transform implemented 2025-11-20 18:17:34 +01:00
d136c2314d tensor: index name preservation in tucker 2025-11-20 16:41:57 +01:00
e867a7a8c9 tensor: generalized reinterpretation as matrix 2025-11-20 16:24:17 +01:00
a342032b58 simplified (inverse)Tucker for non-iverting index order 2025-11-20 16:00:53 +01:00
54642a71cc increased storage for precomputed simplicial() 2025-11-20 15:28:17 +01:00
7ba44efaef added network sort up to size 8 2025-11-20 15:18:58 +01:00
37973a7161 tensor: added some timing 2025-11-19 16:19:45 +01:00
e0f5485b94 t.cc example for profiling 2025-11-19 15:45:57 +01:00
c67d07ace0 gitignore gmon.out 2025-11-19 15:45:29 +01:00
ba91c144fb gitignore for profiling files 2025-11-19 15:27:09 +01:00
c067029b1d tensor: bugfixes and hermiticity enforcement 2025-11-19 14:58:45 +01:00
78569ca701 tensor: permutation of indices inside a symmetry group 2025-11-18 18:09:04 +01:00
20a61e2fb9 tensor: support for complex (anti)hermitian tensors 2025-11-18 17:30:58 +01:00
417a7d1d1a tensor: add_permuted_contractions works 2025-11-18 15:11:18 +01:00
71c890c39b working on add_permuted_contractions 2025-11-16 14:56:51 +01:00
ba5adcd5e6 rename conjugate_by to conjugated_by 2025-11-16 14:56:29 +01:00
5f74028ab6 improved index out of range diagnostics 2025-11-16 09:25:09 +01:00
898645ed94 improved out of range diagnostics 2025-11-15 20:57:57 +01:00
23 changed files with 1527 additions and 191 deletions

.gitignore

@@ -58,4 +58,8 @@ test_reg
test_regsurf
*.tar.bz2
.*.swp
*.gcda
*.gcno
*.gcov
gmon.out
# CVS default ignores end

Makefile.am

@@ -1,5 +1,5 @@
lib_LTLIBRARIES = libla.la
include_HEADERS = LA_version.h tensor.h miscfunc.h simple.h vecmat3.h quaternion.h fortran.h cuda_la.h auxstorage.h davidson.h laerror.h mat.h qsort.h vec.h bisection.h diis.h la.h noncblas.h smat.h numbers.h bitvector.h fourindex.h la_traits.h la_random.h nonclass.h sparsemat.h sparsesmat.h csrmat.h conjgrad.h gmres.h matexp.h permutation.h polynomial.h contfrac.h graph.h reg.h regsurf.h
include_HEADERS = LA_version.h tensor.h miscfunc.h simple.h vecmat3.h quaternion.h fortran.h cuda_la.h auxstorage.h davidson.h lanczos.h laerror.h mat.h qsort.h vec.h bisection.h diis.h la.h noncblas.h smat.h numbers.h bitvector.h fourindex.h la_traits.h la_random.h nonclass.h sparsemat.h sparsesmat.h csrmat.h conjgrad.h gmres.h matexp.h permutation.h polynomial.h contfrac.h graph.h reg.h regsurf.h
libla_la_SOURCES = simple.cc tensor.cc miscfunc.cc quaternion.cc vecmat3.cc vec.cc mat.cc smat.cc sparsemat.cc sparsesmat.cc csrmat.cc laerror.cc noncblas.cc numbers.cc bitvector.cc strassen.cc nonclass.cc cuda_la.cc fourindex.cc permutation.cc polynomial.cc contfrac.cc graph.cc la_random.cc reg.cc regsurf.cc
nodist_libla_la_SOURCES = version.cc
check_PROGRAMS = t test tX test_reg test_regsurf

davidson.h

@@ -33,12 +33,13 @@ namespace LA {
//Note that for efficiency in a direct CI case the diagonalof() should cache its result
//@@@should work for complex hermitian-only too, but was not tested yet (maybe somewhere complex conjugation will have to be added)
//works for complex hermitian-only too, but does not converge so well for more than 1 root
//@@@ for large krylov spaces >200 the error 'convergence problem in sygv/syev in diagonalize()' can occur
//@@@options: left eigenvectors by matrix transpose, overridesymmetric (for nrmat)
//@@@small matrix gdiagonalize - shift complex roots up (option to gdiagonalize?)
//@@@test gdiagonalize whether it sorts the roots and what for complex ones
//@@@implement left eigenvectors for nonsymmetric case
//@@@implement start from larger Krylov space than a single vector - useful for multiroot solutions
//Davidson algorithm: J. Comp. Phys. 17:817 (1975)
@@ -48,7 +49,7 @@ template <typename T, typename Matrix>
extern void davidson(const Matrix &bigmat, NRVec<T> &eivals, NRVec<T> *eivecs, const char *eivecsfile,
int nroots=1, const bool verbose=0, const double eps=1e-6,
const bool incore=1, int maxit=100, const int maxkrylov = 500,
void (*initguess)(NRVec<T> &)=NULL)
void (*initguess)(NRVec<T> &)=NULL, const typename LA_traits<T>::normtype *target=NULL)
{
bool flag=0;
int n=bigmat.nrows();
@@ -155,15 +156,40 @@ else
for(int i=0; i<=krylovsize; ++i) {r[i]/=LA_traits<T>::realpart(beta[i]); ri[i]/=LA_traits<T>::realpart(beta[i]);}
}
if(target) //resort eigenpairs by distance from the target
{
NRVec<typename LA_traits<T>::normtype> key(krylovsize+1);
for(int i=0; i<=krylovsize; ++i) key[i] = abs(r[i] - *target);
NRPerm<int> p(krylovsize+1);
key.sort(0,p);
NRVec<typename LA_traits<T>::normtype> rp(maxkrylov);
NRMat<T> smallVp(maxkrylov,maxkrylov);
for(int i=0; i<=krylovsize; ++i)
{
rp[i]= r[p[i+1]-1];
for(int j=0; j<=krylovsize; ++j) smallVp(j,i) = smallV(j,p[i+1]-1);
}
r = rp;
smallV = smallVp;
}
typename LA_traits<T>::normtype eival_n=r[nroot];
oldnroot=nroot;
typename LA_traits<T>::normtype test=std::abs(smallV(krylovsize,nroot));
//std::cout <<"NROOT = "<<nroot<<" TEST = "<<test<<std::endl;
if(test>100.)
{
std::cout <<"Divergence in Davidson\n";
flag=1;
goto finished;
}
if(test<eps) nroot++;
if(it==0) nroot = 0;
for(int iroot=0; iroot<=std::min(krylovsize,nroots-1); ++iroot)
{
test = std::abs(smallV(krylovsize,iroot));
if(test>eps) nroot=std::min(nroot,iroot);
if(test>eps) nroot=std::min(nroot,iroot); //return to previous root
if(verbose && iroot<=std::max(oldnroot,nroot))
{
std::cout <<"Davidson: iter="<<it <<" dim="<<krylovsize<<" root="<<iroot<<" eigenvalue="<<r[iroot]<<"\n";
@@ -229,7 +255,8 @@ for(j=0; j<=krylovsize; ++j)
typename LA_traits<T>::normtype vnorm2;
if(!incore) s0->get(vec2,j);
do {
T ab = vec1*(incore?v0[j]:vec2) /smallS(j,j);
// For COMPLEX the order matters, the first one gets conjugated, so vec1 was moved to the right!
T ab = (incore?v0[j]:vec2)*vec1 /smallS(j,j);
vec1.axpy(-ab,incore?v0[j]:vec2);
vnorm2 = vec1.norm();
if(vnorm2==0)
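
The operand reordering in the orthogonalization step above follows the convention stated in the comment: the complex dot product conjugates its first factor (as in BLAS zdotc), so the basis vector must stand on the left when projecting out its component. A standalone illustration with plain std::complex, not the NRVec API:

#include <complex>
#include <cstddef>

//<u,v> = sum_i conj(u_i)*v_i -- the left factor is conjugated, which is why
//the coefficient above is computed as v0[j]*vec1 rather than vec1*v0[j]
std::complex<double> dotc(const std::complex<double> *u, const std::complex<double> *v, std::size_t n)
{
std::complex<double> s=0;
for(std::size_t i=0; i<n; ++i) s += std::conj(u[i])*v[i];
return s;
}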

fourindex.h

@@ -133,7 +133,7 @@ typedef enum {
T2ijab_unitary=7,
T2IjAb_unitary=8,
antisymtwoelectronrealdiracAB=9,rdm2AB=9,twoelectroncomplexmullikan_without_conjugation=9,
twoelectronrealmullikanreducedsymAA=10,
twoelectronrealmullikanreducedsymAA=10, rdm2SS=10,
twoelectronrealmullikanreducedsymAB=11,
twoelectroncomplexmullikan=12,
} fourindexsymtype; //only permutation-nonequivalent elements are stored
@@ -155,7 +155,7 @@ static const int fourindex_permutations[fourindex_n_symmetrytypes][max_fourindex
{{0,1,2,3,1},{2,3,0,1,-1}}, //T2IjAb_unitary (antihermitian exponent)
{{0,1,2,3,1},{2,3,0,1,1}}, //antisymtwoelectronrealdiracAB
{{0,1,2,3,1},{2,3,0,1,1},{1,0,3,2,1},{3,2,1,0,1}}, //twoelectronrealmullikanreducedsymAA (e.g. unitary downfolded CC eff. integrals)
{{0,1,2,3,1},{1,0,3,2,1}}, //twoelectronrealmullikanreducedsymAB (e.g. unitary downfolded CC eff. integrals)
{{0,1,2,3,1},{1,0,3,2,1}}, //twoelectronrealmullikanreducedsymAB (e.g. unitary downfolded CC eff. integrals, or RDM2 spin-summed)
{{0,1,2,3,1},{2,3,0,1,1},{1,0,3,2,2},{3,2,1,0,2}}, //twoelectroncomplexmullikan ... 2 means complex conjugation
};
@@ -1244,7 +1244,7 @@ if (!NRSMat<T>::v) laerror("access to unallocated fourindex_dense");
#endif
return (*this).NRSMat<T>::operator() ((j-1)*nnbas+i-1,(b-1)*nnbas+a-1);
}
inline void set(unsigned int i, unsigned int j, unsigned int k, unsigned int l, T elem) {this->operator()(i,j,k,l)=elem;};
void print(std::ostream &out) const
{
unsigned int i,j,a,b;
@@ -1385,7 +1385,12 @@ if(i<j) elem = -elem;
if(k<l) elem = -elem;
int I = ASMat_index_1(i,j);
int J = ASMat_index_1(k,l);
if (I<0 || J<0) laerror("assignment to nonexisting element");
if (I<0 || J<0)
#ifdef FORBID_NONEXISTENT_ASSIGNMENTS
laerror("assignment to nonexisting element");
#else
return;
#endif
#ifdef DEBUG
if (I>=NRSMat<T>::nn || J>=NRSMat<T>::nn) laerror("index out of range");
if (!NRSMat<T>::v) laerror("access to unallocated fourindex_dense");
@@ -1401,7 +1406,12 @@ if(i<j) elem = -elem;
if(k<l) elem = -elem;
int I = ASMat_index_1(i,j);
int J = ASMat_index_1(k,l);
if (I<0 || J<0) laerror("assignment to nonexisting element");
if (I<0 || J<0)
#ifdef FORBID_NONEXISTENT_ASSIGNMENTS
laerror("assignment to nonexisting element");
#else
return;
#endif
#ifdef DEBUG
if (I>=NRSMat<T>::nn || J>=NRSMat<T>::nn) laerror("index out of range");
if (!NRSMat<T>::v) laerror("access to unallocated fourindex_dense");
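
With the hunks above, an assignment whose packed index comes out negative (an element that vanishes by antisymmetry) is silently ignored unless FORBID_NONEXISTENT_ASSIGNMENTS is defined, in which case the original laerror() is kept. A minimal sketch of opting back into the strict behaviour, assuming fourindex.h has not already been pulled in elsewhere:

//compile-time switch restoring the error on assignments to elements that are
//identically zero by antisymmetry; without it such assignments are no-ops
#define FORBID_NONEXISTENT_ASSIGNMENTS
#include "fourindex.h"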

View File

@@ -1,4 +1,4 @@
#!/bin/csh -f
#!/bin/bash
cat <<!
#include "LA_version.h"
const char LA::version[] =

la.h

@@ -28,6 +28,7 @@
#include "bitvector.h"
#include "conjgrad.h"
#include "davidson.h"
#include "lanczos.h"
#include "diis.h"
#include "fourindex.h"
#include "gmres.h"

la_traits.h

@@ -300,11 +300,14 @@ static void copy(std::complex<C> *dest, std::complex<C> *src, size_t n) {memcpy(
static void clear(std::complex<C> *dest, size_t n) {memset(dest,0,n*sizeof(std::complex<C>));}
static void copyonwrite(std::complex<C> &x) {};
static bool is_plaindata() {return true;}
static bool is_complex() {return true;}
static void clearme(std::complex<C> &x) {x=0;};
static void deallocate(std::complex<C> &x) {};
static inline std::complex<C> conjugate(const std::complex<C> &x) {return std::complex<C>(x.real(),-x.imag());};
static inline C realpart(const std::complex<C> &x) {return x.real();}
static inline C imagpart(const std::complex<C> &x) {return x.imag();}
static inline void setrealpart(std::complex<C> &x, const C &y) {reinterpret_cast<C(&)[2]>(x)[0]=y;}
static inline void setimagpart(std::complex<C> &x, const C &y) {reinterpret_cast<C(&)[2]>(x)[1]=y;}
};
@@ -357,11 +360,14 @@ static void copy(C *dest, C *src, size_t n) {memcpy(dest,src,n*sizeof(C));}
static void clear(C *dest, size_t n) {memset(dest,0,n*sizeof(C));}
static void copyonwrite(C &x) {};
static bool is_plaindata() {return true;}
static bool is_complex() {return false;}
static void clearme(C &x) {x=0;};
static void deallocate(C &x) {};
static inline C conjugate(const C &x) {return x;};
static inline C realpart(const C &x) {return x;}
static inline C imagpart(const C &x) {return 0;}
static inline void setrealpart(C &x, const C &y) {x=y;}
static inline void setimagpart(C &x, const C &y) {}
};
@@ -386,14 +392,15 @@ static inline bool bigger(const X<C> &x, const X<C> &y) {return x>y;} \
static inline bool smaller(const X<C> &x, const X<C> &y) {return x<y;} \
static inline normtype norm (const X<C> &x) {return x.norm();} \
static inline void axpy (X<C>&s, const X<C> &x, const C c) {s.axpy(c,x);} \
static void put(int fd, const X<C> &x, bool dimensions=1, bool transp=0) {x.put(fd,dimensions,transp);} \
static void get(int fd, X<C> &x, bool dimensions=1, bool transp=0) {x.get(fd,dimensions,transp);} \
static void multiput(size_t n,int fd, const X<C> *x, bool dimensions=1) {for(size_t i=0; i<n; ++i) x[i].put(fd,dimensions);} \
static void multiget(size_t n,int fd, X<C> *x, bool dimensions=1) {for(size_t i=0; i<n; ++i) x[i].get(fd,dimensions);} \
static void put(int fd, const X<C> &x, bool dimensions=1, bool transp=0, bool orcaformat=false) {x.put(fd,dimensions,transp,orcaformat);} \
static void get(int fd, X<C> &x, bool dimensions=1, bool transp=0, bool orcaformat=false) {x.get(fd,dimensions,transp,orcaformat);} \
static void multiput(size_t n,int fd, const X<C> *x, bool dimensions=1, bool orcaformat=false) {for(size_t i=0; i<n; ++i) x[i].put(fd,dimensions,false,orcaformat);} \
static void multiget(size_t n,int fd, X<C> *x, bool dimensions=1, bool orcaformat=false) {for(size_t i=0; i<n; ++i) x[i].get(fd,dimensions,false,orcaformat);} \
static void copy(X<C> *dest, X<C> *src, size_t n) {for(size_t i=0; i<n; ++i) dest[i]=src[i];} \
static void clear(X<C> *dest, size_t n) {for(size_t i=0; i<n; ++i) dest[i].clear();}\
static void copyonwrite(X<C> &x) {x.copyonwrite();}\
static bool is_plaindata() {return false;}\
static bool is_complex() {return LA_traits<C>::is_complex();}\
static void clearme(X<C> &x) {x.clear();}\
static void deallocate(X<C> &x) {x.dealloc();}\
};
@@ -428,14 +435,15 @@ static inline bool bigger(const C &x, const C &y) {return x>y;} \
static inline bool smaller(const C &x, const C &y) {return x<y;} \
static inline normtype norm (const X<C> &x) {return x.norm();} \
static inline void axpy (X<C>&s, const X<C> &x, const C c) {s.axpy(c,x);} \
static void put(int fd, const X<C> &x, bool dimensions=1, bool transp=0) {x.put(fd,dimensions);} \
static void get(int fd, X<C> &x, bool dimensions=1, bool transp=0) {x.get(fd,dimensions);} \
static void multiput(size_t n,int fd, const X<C> *x, bool dimensions=1) {for(size_t i=0; i<n; ++i) x[i].put(fd,dimensions);} \
static void multiget(size_t n,int fd, X<C> *x, bool dimensions=1) {for(size_t i=0; i<n; ++i) x[i].get(fd,dimensions);} \
static void put(int fd, const X<C> &x, bool dimensions=1, bool transp=0, bool orcaformat=false) {x.put(fd,dimensions,false,orcaformat);} \
static void get(int fd, X<C> &x, bool dimensions=1, bool transp=0, bool orcaformat=false) {x.get(fd,dimensions,false,orcaformat);} \
static void multiput(size_t n,int fd, const X<C> *x, bool dimensions=1, bool orcaformat=false) {for(size_t i=0; i<n; ++i) x[i].put(fd,dimensions,false,orcaformat);} \
static void multiget(size_t n,int fd, X<C> *x, bool dimensions=1, bool orcaformat=false) {for(size_t i=0; i<n; ++i) x[i].get(fd,dimensions,false,orcaformat);} \
static void copy(C *dest, C *src, size_t n) {for(size_t i=0; i<n; ++i) dest[i]=src[i];} \
static void clear(C *dest, size_t n) {for(size_t i=0; i<n; ++i) dest[i].clear();} \
static void copyonwrite(X<C> &x) {x.copyonwrite();} \
static bool is_plaindata() {return false;}\
static bool is_complex() {return LA_traits<C>::is_complex();}\
static void clearme(X<C> &x) {x.clear();} \
static void deallocate(X<C> &x) {x.dealloc();} \
};
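
The new is_complex() trait is what allows conjugateme() and twoside_transform() further below to pick between the 't' and 'c' transpose codes, or to skip conjugation entirely for real element types. The pattern in isolation (an illustrative helper, not part of the library):

#include "la.h"
using namespace LA;

//choose the gemm transpose character: plain transpose for real types,
//conjugate transpose for complex ones (mirrors the tchar logic in twoside_transform)
template<typename T>
inline char dagger_char() {return LA_traits<T>::is_complex() ? 'c' : 't';}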

lanczos.h (new file)

@@ -0,0 +1,212 @@
/*
LA: linear algebra C++ interface library
Copyright (C) 2025 Jiri Pittner <jiri.pittner@jh-inst.cas.cz> or <jiri@pittnerovi.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _lanczos_h
#define _lanczos_h
#include "vec.h"
#include "smat.h"
#include "mat.h"
#include "sparsemat.h"
#include "nonclass.h"
#include "auxstorage.h"
//TODO:
//@@@implement restart when Krylov space is exceeded
//@@@implement initial guess of more than 1 vector (also in davidson) and block version of the methods
namespace LA {
//Lanczos diagonalization of hermitean matrix
//matrix can be any class which has nrows(), ncols(), diagonalof(), issymmetric(), and gemv() available
//does not even have to be explicitly stored - direct CI
//therefore the whole implementation must be a template in a header
template <typename T, typename Matrix>
extern void lanczos(const Matrix &bigmat, NRVec<T> &eivals, NRVec<T> *eivecs, const char *eivecsfile,
int nroots=1, const bool verbose=0, const double eps=1e-6,
const bool incore=1, int maxit=100, const int maxkrylov = 1000,
void (*initguess)(NRVec<T> &)=NULL, const typename LA_traits<T>::normtype *target=NULL)
{
if(!bigmat.issymmetric()) laerror("lanczos only for hermitean matrices");
bool flag=0;
int n=bigmat.nrows();
if ( n!= (int)bigmat.ncols()) laerror("non-square matrix in lanczos");
if(eivals.size()<nroots) laerror("too small eivals dimension in lanczos");
NRVec<T> vec1(n),vec2(n);
NRVec<T> *v0,*v1;
AuxStorage<T> *s0,*s1;
if(incore)
{
v0 = new NRVec<T>[maxkrylov];
}
else
{
s0 = new AuxStorage<T>;
}
int i,j;
if(nroots>=maxkrylov) nroots =maxkrylov-1;
int nroot=0;
int oldnroot;
//default guess based on lowest diagonal element of the matrix
if(initguess) initguess(vec1);
else
{
const T *diagonal = bigmat.diagonalof(vec2,false,true);
typename LA_traits<T>::normtype t=1e100; int i,j;
vec1=0;
for(i=0, j= -1; i<n; ++i) if(LA_traits<T>::realpart(diagonal[i])<t) {t=LA_traits<T>::realpart(diagonal[i]); j=i;}
vec1[j]=1;
}
NRVec<typename LA_traits<T>::normtype> alpha(maxkrylov),beta(maxkrylov-1);
//initial step
vec1.normalize();
if(incore) v0[0]=vec1; else s0->put(vec1,0);
bigmat.gemv(0,vec2,'n',1,vec1); //avoid bigmat.operator*(vec), since that needs to allocate another n-sized vector
{
T tmp=vec2*vec1;
alpha[0]= LA_traits<T>::realpart(tmp);
if(LA_traits<T>::imagpart(tmp)>1e-6) laerror("matrix probably not hermitian in lanczos");
}
vec2.axpy(-alpha[0],vec1);
NRVec<typename LA_traits<T>::normtype> r(1); r[0]=alpha[0];
NRVec<typename LA_traits<T>::normtype> rold;
NRMat<typename LA_traits<T>::normtype> smallV;
int krylovsize=1;
//iterative Lanczos
int it=0;
for(j=1; j<maxkrylov;++j)
{
++it;
if(it>maxit) {std::cout <<"too many iterations in lanczos\n"; flag=1; goto finished;}
++krylovsize;
//extend the Krylov space (last w vector expected in vec2, last v vector in vec1)
beta[j-1] = vec2.norm();
if(beta[j-1] > 0)
{
vec2 /= beta[j-1];
}
else
{
laerror("zero norm in lanczos");
//could generate an arbitrary vector and orthonormalize it
}
if(incore) v0[j]=vec2; else s0->put(vec2,j);
vec1 *= -beta[j-1];
bigmat.gemv(1,vec1,'n',1,vec2);
{
T tmp=vec1*vec2;
alpha[j]= LA_traits<T>::realpart(tmp);
if(LA_traits<T>::imagpart(tmp)>1e-6) laerror("matrix probably not hermitian in lanczos");
}
vec1.axpy(-alpha[j],vec2);
vec1.swap(vec2); //move new w vector to vec2, new v vector to vec1
//diagonalize the tridiagonal
smallV.resize(j+1,j+1);
rold=r;
r=alpha.subvector(0,j);
NRVec<typename LA_traits<T>::normtype> rr=beta.subvector(0,j-1);
diagonalize3(r,rr,&smallV);
if(target) //resort eigenpairs by distance from the target
{
NRVec<typename LA_traits<T>::normtype> key(j+1);
for(int i=0; i<=j; ++i) key[i] = abs(r[i] - *target);
NRPerm<int> p(j+1);
key.sort(0,p);
NRVec<typename LA_traits<T>::normtype> rp(j+1);
NRMat<typename LA_traits<T>::normtype> smallVp(j+1,j+1);
for(int i=0; i<=j; ++i)
{
rp[i]= r[p[i+1]-1];
for(int k=0; k<=j; ++k) smallVp(k,i) = smallV(k,p[i+1]-1);
}
r = rp;
smallV = smallVp;
}
if(verbose)
{
for(int iroot=0; iroot<std::min(krylovsize,nroots); ++iroot)
{
std::cout <<"Lanczos: iter="<<it <<" dim="<<krylovsize<<" root="<<iroot<<" eigenvalue="<<r[iroot]<<"\n";
}
std::cout.flush();
}
//convergence test when we have enough roots even in rold
if(krylovsize>nroots)
{
bool conv=true;
for(int iroot=0; iroot<nroots; ++iroot)
if(abs(r[iroot]-rold[iroot])>eps) conv=false;
if(conv)
{
flag=0;
goto converged;
}
}
}
flag=1;
goto finished;
converged:
AuxStorage<typename LA_traits<T>::elementtype> *ev;
if(eivecsfile) ev = new AuxStorage<typename LA_traits<T>::elementtype>(eivecsfile);
if(verbose) {std::cout << "Lanczos converged in "<<it<<" iterations.\n"; std::cout.flush();}
for(nroot=0; nroot<nroots; ++nroot)
{
eivals[nroot]=r[nroot];
if(eivecs)
{
vec1=0;
for(j=0; j<krylovsize; ++j )
{
if(!incore) s0->get(vec2,j);
vec1.axpy(smallV(j,nroot),incore?v0[j]:vec2);
}
vec1.normalize();
if(eivecs) eivecs[nroot]|=vec1;
if(eivecsfile)
{
ev->put(vec1,nroot);
}
}
}
if(eivecsfile) delete ev;
finished:
if(incore) {delete[] v0;}
else {delete s0;}
if(flag) laerror("no convergence in lanczos");
}
}//namespace
#endif
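
A brief usage sketch mirroring the test added to t.cc further below; any matrix-like class providing nrows(), ncols(), diagonalof(), issymmetric() and gemv() can stand in for NRSMat:

#include "la.h"
using namespace LA;

//compute the first m roots of a real symmetric matrix with the new lanczos() template
void lowest_roots(const NRSMat<double> &a, int m)
{
NRVec<double> r(m); //converged eigenvalues on output
NRVec<double> *eivecs = new NRVec<double>[m]; //optional eigenvectors (may be NULL)
lanczos(a,r,eivecs,NULL,m,true,1e-6,true,200,300); //verbose, eps=1e-6, in core, maxit=200, maxkrylov=300
delete[] eivecs;
}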

mat.cc

@@ -121,12 +121,12 @@ const NRVec<T> NRMat<T>::row(const int i, int l) const {
* @see NRVec<T>::put()
******************************************************************************/
template <typename T>
void NRMat<T>::put(int fd, bool dim, bool transp) const {
void NRMat<T>::put(int fd, bool dim, bool transp, bool orcaformat) const {
#ifdef CUDALA
if(location != cpu) {
NRMat<T> tmp = *this;
tmp.moveto(cpu);
tmp.put(fd, dim, transp);
tmp.put(fd, dim, transp, orcaformat);
return;
}
#endif
@@ -134,6 +134,11 @@ void NRMat<T>::put(int fd, bool dim, bool transp) const {
if(dim){
if(sizeof(int) != write(fd,&(transp?mm:nn),sizeof(int))) laerror("write failed");
if(sizeof(int) != write(fd,&(transp?nn:mm),sizeof(int))) laerror("write failed");
if(orcaformat)
{
int tmp=sizeof(T);
if(sizeof(int) != write(fd,&tmp,sizeof(int))) laerror("write failed");
}
}
if(transp){ //not particularly efficient
@@ -167,12 +172,12 @@ void NRMat<T>::put(int fd, bool dim, bool transp) const {
* @see NRVec<T>::get(), copyonwrite()
******************************************************************************/
template <typename T>
void NRMat<T>::get(int fd, bool dim, bool transp){
void NRMat<T>::get(int fd, bool dim, bool transp, bool orcaformat){
#ifdef CUDALA
if(location != cpu){
NRMat<T> tmp;
tmp.moveto(cpu);
tmp.get(fd, dim, transp);
tmp.get(fd, dim, transp,orcaformat);
tmp.moveto(getlocation());
*this = tmp;
return;
@@ -183,6 +188,12 @@ void NRMat<T>::get(int fd, bool dim, bool transp){
if(dim){
if(sizeof(int) != read(fd, &nn0, sizeof(int))) laerror("read failed");
if(sizeof(int) != read(fd, &mm0, sizeof(int))) laerror("read failed");
if(orcaformat)
{
int tmp;
if(sizeof(int) != read(fd, &tmp, sizeof(int))) laerror("read failed");
if(tmp!=sizeof(T)) laerror("mismatch in orca format");
}
if(transp) resize(mm0, nn0); else resize(nn0, mm0);
}else{
copyonwrite();
@@ -2137,6 +2148,8 @@ NRMat< std::complex<double> >::operator*(const NRSMat< std::complex<double> > &r
******************************************************************************/
template<typename T>
NRMat<T>& NRMat<T>::conjugateme() {
if(!LA_traits<T>::is_complex()) return *this;
copyonwrite();
#ifdef CUDALA
if(location != cpu) laerror("general conjugation only on CPU");
#endif
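
The orcaformat flag added to put()/get() above only inserts one extra int holding sizeof(T) after the two dimension records; everything else is unchanged. A sketch of the implied header layout, read with plain POSIX I/O (illustrative only, assuming double elements and host byte order):

#include <unistd.h>

//record layout written by NRMat<T>::put(fd,true,false,true):
//int rows, int cols, int sizeof(element), followed by the raw matrix data
bool read_orca_header(int fd, int &rows, int &cols)
{
int elemsize;
if(sizeof(int) != read(fd,&rows,sizeof(int))) return false;
if(sizeof(int) != read(fd,&cols,sizeof(int))) return false;
if(sizeof(int) != read(fd,&elemsize,sizeof(int))) return false;
return elemsize == (int)sizeof(double); //NRMat<T>::get() raises laerror() on this mismatch
}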

mat.h

@@ -305,9 +305,9 @@ public:
inline size_t size() const;
//! unformatted input
void get(int fd, bool dimensions = 1, bool transposed = false);
void get(int fd, bool dimensions = 1, bool transposed = false, bool orcaformat=false);
//! unformatted output
void put(int fd, bool dimensions = 1, bool transposed = false) const;
void put(int fd, bool dimensions = 1, bool transposed = false, bool orcaformat=false) const;
//! formatted output
void fprintf(FILE *f, const char *format, const int modulo) const;
//! formatted input
@@ -744,7 +744,9 @@ template <typename T>
inline T* NRMat<T>::operator[](const int i) {
#ifdef DEBUG
if (_LA_count_check && *count != 1) laerror("matrix with *count>1 used as l-value");
if (i < 0 || i >= nn) laerror("Mat [] out of range");
if(i<0||i>=nn) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<std::endl;
if (i < 0) laerror("Mat [] out of range - low");
if (i >= nn) laerror("Mat [] out of range - high");
if (!v) laerror("unallocated matrix");
#endif
#ifdef MATPTR
@@ -761,7 +763,9 @@ inline T* NRMat<T>::operator[](const int i) {
template <typename T>
inline const T* NRMat<T>::operator[](const int i) const {
#ifdef DEBUG
if (i < 0 || i >= nn) laerror("index out of range");
if(i<0||i>=nn) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<std::endl;
if (i < 0) laerror("index out of range - low");
if (i >= nn) laerror("index out of range - high");
if (!v) laerror("unallocated matrix");
#endif
NOT_GPU(*this);
@@ -783,7 +787,11 @@ template <typename T>
inline T& NRMat<T>::operator()(const int i, const int j){
#ifdef DEBUG
if (_LA_count_check && *count != 1) laerror("NRMat::operator(,) used as l-value for a matrix with count > 1");
if (i < 0 || i >= nn && nn > 0 || j < 0 || j >= mm && mm > 0) laerror("index out of range");
if(i<0||i>=nn||j<0||j>=mm) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<" "<<0<<" "<<j<<" "<<mm-1<<std::endl;
if (i < 0) laerror("first index out of range - low");
if (i >= nn) laerror("first index out of range - high");
if (j < 0) laerror("second index out of range - low");
if (j >= mm) laerror("second index out of range - high");
if (!v) laerror("unallocated matrix");
#endif
NOT_GPU(*this);
@@ -804,7 +812,11 @@ template <typename T>
inline const T& NRMat<T>::operator()(const int i, const int j) const{
T ret;
#ifdef DEBUG
if (i<0 || i>=nn && nn>0 || j<0 || j>=mm && mm>0) laerror("index out of range");
if(i<0||i>=nn||j<0||j>=mm) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<" "<<0<<" "<<j<<" "<<mm-1<<std::endl;
if (i < 0) laerror("first index out of range - low");
if (i >= nn) laerror("first index out of range - high");
if (j < 0) laerror("second index out of range - low");
if (j >= mm) laerror("second index out of range - high");
if (!v) laerror("unallocated matrix");
#endif
NOT_GPU(*this);
@@ -825,7 +837,11 @@ template <typename T>
inline const T NRMat<T>::get_ij(const int i, const int j) const{
T ret;
#ifdef DEBUG
if (i<0 || i>=nn || j<0 || j>=mm) laerror("index out of range");
if(i<0||i>=nn||j<0||j>=mm) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<" "<<0<<" "<<j<<" "<<mm-1<<std::endl;
if (i < 0) laerror("first index out of range - low");
if (i >= nn) laerror("first index out of range - high");
if (j < 0) laerror("second index out of range - low");
if (j >= mm) laerror("second index out of range - high");
if (!v) laerror("unallocated matrix");
#endif
#ifdef CUDALA
@@ -1388,8 +1404,12 @@ public:
inline const T& operator() (const int i, const int j) const {
#ifdef DEBUG
if (i<1 || i>NRMat<T>::nn || j<1 || j>NRMat<T>::mm) laerror("index out of range");
if (!NRMat<T>::v) laerror("unallocated matrix");
if(i<=0||i>NRMat<T>::nn||j<=0||j>NRMat<T>::mm) std::cout<<"INDEX PROBLEM "<<1<<" "<<i<<" "<<NRMat<T>::nn<<" "<<1<<" "<<j<<" "<<NRMat<T>::mm<<std::endl;
if (i < 1) laerror("first index out of range - low");
if (i > NRMat<T>::nn) laerror("first index out of range - high");
if (j < 1) laerror("second index out of range - low");
if (j > NRMat<T>::mm) laerror("second index out of range - high");
if (!NRMat<T>::v) laerror("unallocated matrix");
#endif
NOT_GPU(*this);
#ifdef MATPTR
@@ -1402,7 +1422,11 @@ public:
inline T& operator() (const int i, const int j) {
#ifdef DEBUG
if (_LA_count_check && *NRMat<T>::count != 1) laerror("matrix with *count > 1 used as l-value");
if (i<1 || i>NRMat<T>::nn || j<1 || j>NRMat<T>::mm) laerror("index out of range");
if(i<=0||i>NRMat<T>::nn||j<=0||j>NRMat<T>::mm) std::cout<<"INDEX PROBLEM "<<1<<" "<<i<<" "<<NRMat<T>::nn<<" "<<1<<" "<<j<<" "<<NRMat<T>::mm<<std::endl;
if (i < 1) laerror("first index out of range - low");
if (i > NRMat<T>::nn) laerror("first index out of range - high");
if (j < 1) laerror("second index out of range - low");
if (j > NRMat<T>::mm) laerror("second index out of range - high");
if (!NRMat<T>::v) laerror("unallocated matrix");
#endif
NOT_GPU(*this);
@@ -1416,7 +1440,11 @@ public:
inline const T get_ij(const int i, const int j) const {
T ret;
#ifdef DEBUG
if (i<1 || i>NRMat<T>::nn || j<1 || j>NRMat<T>::mm) laerror("index out of range");
if(i<=0||i>NRMat<T>::nn||j<=0||j>NRMat<T>::mm) std::cout<<"INDEX PROBLEM "<<1<<" "<<i<<" "<<NRMat<T>::nn<<" "<<1<<" "<<j<<" "<<NRMat<T>::mm<<std::endl;
if (i < 1) laerror("first index out of range - low");
if (i > NRMat<T>::nn) laerror("first index out of range - high");
if (j < 1) laerror("second index out of range - low");
if (j > NRMat<T>::mm) laerror("second index out of range - high");
if (!NRMat<T>::v) laerror("unallocated matrix");
#endif
#ifdef CUDALA

View File

@@ -767,7 +767,7 @@ return(bern[n]);
//
//enlarge storage size if needed
//
#define SIMPLICIAL_MAXD 8
#define SIMPLICIAL_MAXD 16
#define SIMPLICIAL_MAXN 1024
unsigned long long simplicial(int d, int n)
@@ -783,7 +783,7 @@ if(d==1) return n;
//resize precomputed part as needed
if(n>maxn)
{
if(n>=SIMPLICIAL_MAXN) laerror("storage overflow in simplicial");
if(n>=SIMPLICIAL_MAXN) laerror("storage overflow in simplicial, increase MAXN");
int newdim=2*n;
if(newdim<2*maxn) newdim=2*maxn;
if(newdim>=SIMPLICIAL_MAXN) newdim=SIMPLICIAL_MAXN-1;
@@ -797,7 +797,7 @@ if(n>maxn)
if(d>maxd)
{
if(d>=SIMPLICIAL_MAXD) laerror("storage overflow in simplicial");
if(d>=SIMPLICIAL_MAXD) laerror("storage overflow in simplicial, increase MAXD");
for(int dd=maxd+1; dd<=d; ++dd)
{
stored[dd-2][0]=0;
@@ -817,7 +817,7 @@ int inverse_simplicial(int d, unsigned long long s)
if(s==0) return 0;
if(d==0 || s==1) return 1;
if(d==1) return (int)s;
if(d>=SIMPLICIAL_MAXD) laerror("storage overflow in inverse_simplicial");
if(d>=SIMPLICIAL_MAXD) laerror("storage overflow in inverse_simplicial - increase MAXD");
static int maxd=0;
static Polynomial<double> polynomials[SIMPLICIAL_MAXD];

nonclass.cc

@@ -944,7 +944,31 @@ extern "C" void FORNAME(zggev)(const char *JOBVL, const char *JOBVR, const FINT
std::complex<double> *VL, const FINT *LDVL, std::complex<double> *VR, const FINT *LDVR,
std::complex<double> *WORK, const FINT *LWORK, double *RWORK, FINT *INFO );
//tridiagonal
extern "C" void FORNAME(dsteqr)(const char *compz, const FINT *n, double *d, double *d1, double *z, const FINT *ldz, double *work, FINT *info);
void diagonalize3(NRVec<double> &d, NRVec<double> &d1, NRMat<double> *v, const bool corder, int n0)
{
FINT n = n0? n0: d.size();
if(d1.size()<n-1) laerror("inconsistent dimensions in diagonalize3");
if(v) {if(n!=v->nrows()||n!=v->ncols()) laerror("inconsistent dimensions in diagonalize3");}
d.copyonwrite();
d1.copyonwrite();
if(v) v->copyonwrite();
FINT ldz=n;
FINT info;
int worksize=2*n-2;
if(worksize<1) worksize=1;
double *work = new double[worksize];
char job=v?'i':'n';
FORNAME(dsteqr)(&job,&n,&d[0],&d1[0],(v?&(*v)(0,0):NULL),&ldz,work,&info);
delete[] work;
if (v && corder) v->transposeme();
if (info < 0) laerror("illegal argument in dsteqr() for diagonalize3");
if (info > 0) laerror("convergence problem in dsteqr() for diagonalize3");
}
//statics for sorting
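
A short usage sketch of the new tridiagonal wrapper: d carries the n diagonal elements, the second argument the n-1 subdiagonal ones, and on return d is overwritten by the eigenvalues, which is exactly how lanczos.h calls it:

#include "la.h"
using namespace LA;

//eigen-decomposition of a real symmetric tridiagonal matrix via dsteqr
void tridiag_example(int n)
{
NRVec<double> d(n), e(n-1);
for(int i=0; i<n; ++i) d[i] = i+1.; //example diagonal
for(int i=0; i<n-1; ++i) e[i] = 0.5; //example subdiagonal
NRMat<double> v(n,n); //eigenvectors; pass NULL to compute eigenvalues only
diagonalize3(d,e,&v); //d now holds the eigenvalues in ascending order
}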

nonclass.h

@@ -68,22 +68,43 @@ return positive_power(z,i);
template <class T>
const NRSMat<T> twoside_transform(const NRSMat<T> &S, const NRMat<T> &C, bool transp=0) //calculate C^dagger S C
const NRSMat<T> twoside_transform(const NRSMat<T> &S, const NRMat<T> &C, bool transp=0) //calculate C^dagger S C for symmetric/hermitian S
{
char tchar='t';
if(LA_traits<T>::is_complex()) tchar='c';
if(transp)
{
NRMat<T> tmp = C * S;
NRMat<T> result(C.nrows(),C.nrows());
result.gemm((T)0,tmp,'n',C,'t',(T)1);
result.gemm((T)0,tmp,'n',C,tchar,(T)1);
return NRSMat<T>(result);
}
NRMat<T> tmp = S * C;
NRMat<T> result(C.ncols(),C.ncols());
result.gemm((T)0,C,'t',tmp,'n',(T)1);
result.gemm((T)0,C,tchar,tmp,'n',(T)1);
return NRSMat<T>(result);
}
template <class T>
const NRMat<T> twoside_transform(const NRMat<T> &S, const NRMat<T> &C, bool transp=0) //calculate C^dagger S C for non-symmetric S
{
char tchar='t';
if(LA_traits<T>::is_complex()) tchar='c';
if(transp)
{
NRMat<T> tmp = C * S;
NRMat<T> result(C.nrows(),C.nrows());
result.gemm((T)0,tmp,'n',C,tchar,(T)1);
return result;
}
NRMat<T> tmp = S * C;
NRMat<T> result(C.ncols(),C.ncols());
result.gemm((T)0,C,tchar,tmp,'n',(T)1);
return result;
}
template <class T>
@@ -162,6 +183,9 @@ extern void gdiagonalize(NRMat<std::complex<double> > &a, NRVec<double> &wr, NRV
NRMat<std::complex<double> > *vl=NULL, NRMat<std::complex<double> > *vr=NULL, const bool corder=1, int n=0, const int sorttype=0, const int biorthonormalize=0,
NRMat<std::complex<double> > *b=NULL, NRVec<std::complex<double> > *beta=NULL);
//diagonalization of symmetric real tridiagonal matrix
extern void diagonalize3(NRVec<double> &d, NRVec<double> &d1, NRMat<double> *v, const bool corder=1, int n0=0);
//complex,real,imaginary parts of various entities
template<typename T>
extern const typename LA_traits<T>::realtype realpart(const T&);

permutation.cc

@@ -189,7 +189,7 @@ return r;
template <typename T>
NRPerm<T> NRPerm<T>::conjugate_by(const NRPerm<T> &q, bool inverse) const
NRPerm<T> NRPerm<T>::conjugated_by(const NRPerm<T> &q, bool inverse) const
{
#ifdef DEBUG
if(!this->is_valid() || !q.is_valid()) laerror("multiplication of invalid permutation");
@@ -224,7 +224,7 @@ return r;
template <typename T>
CyclePerm<T> CyclePerm<T>::conjugate_by(const CyclePerm<T> &q) const
CyclePerm<T> CyclePerm<T>::conjugated_by(const CyclePerm<T> &q) const
{
#ifdef DEBUG
if(!this->is_valid() || !q.is_valid()) laerror("multiplication of invalid permutation");
@@ -780,10 +780,10 @@ return res;
}
template <typename T, typename R>
PermutationAlgebra<T,R> PermutationAlgebra<T,R>::conjugate_by(const NRPerm<T> &q, bool reverse) const
PermutationAlgebra<T,R> PermutationAlgebra<T,R>::conjugated_by(const NRPerm<T> &q, bool reverse) const
{
PermutationAlgebra<T,R> res(this->size());
for(int i=0; i<this->size(); ++i) {res[i].perm = (*this)[i].perm.conjugate_by(q,reverse); res[i].weight=(*this)[i].weight;}
for(int i=0; i<this->size(); ++i) {res[i].perm = (*this)[i].perm.conjugated_by(q,reverse); res[i].weight=(*this)[i].weight;}
return res;
}

permutation.h

@@ -73,7 +73,7 @@ public:
NRPerm operator*(const CyclePerm<T> &r) const;
template<typename R> PermutationAlgebra<T,R> operator*(const PermutationAlgebra<T,R> &pa) const;
NRPerm multiply(const NRPerm<T> &q, bool inverse) const; //multiplication but optionally q inversed
NRPerm conjugate_by(const NRPerm &q, bool reverse=false) const; //q^-1 p q or q p q^-1
NRPerm conjugated_by(const NRPerm &q, bool reverse=false) const; //q^-1 p q or q p q^-1
NRPerm commutator(const NRPerm &q, bool inverse=false) const; //p^-1 q^-1 p q or q^-1 p^-1 q p
int parity() const; //returns +/- 1
void randomize(void); //uniformly random by Fisher-Yates shuffle
@@ -199,7 +199,7 @@ public:
PermutationAlgebra operator*(const NRPerm<T> &rhs) const; //applied to all terms
PermutationAlgebra cutleft(int n) const; //applied to all terms
PermutationAlgebra cutright(int n) const; //applied to all terms
PermutationAlgebra conjugate_by(const NRPerm<T> &q, bool reverse=false) const; //q^-1 p q or q p q^-1 , applied to all terms
PermutationAlgebra conjugated_by(const NRPerm<T> &q, bool reverse=false) const; //q^-1 p q or q p q^-1 , applied to all terms
PermutationAlgebra commutator(const NRPerm<T> &q, bool inverse=false) const; //applied to all terms
PermutationAlgebra operator&(const PermutationAlgebra &rhs) const; //each term with each
PermutationAlgebra operator|(const PermutationAlgebra &rhs) const; //each term with each
@@ -237,7 +237,7 @@ public:
void readfrom(const std::string &line);
CyclePerm operator*(const CyclePerm &q) const; //q is rhs and applied first, this applied second
NRPerm<T> operator*(const NRPerm<T> &r) const;
CyclePerm conjugate_by(const CyclePerm &q) const; //q^-1 p q
CyclePerm conjugated_by(const CyclePerm &q) const; //q^-1 p q
PERM_RANK_TYPE order() const; //lcm of cycle lengths
bool operator==(const CyclePerm &rhs) const {return NRPerm<T>(*this) == NRPerm<T>(rhs);}; //cycle representation is not unique, cannot inherit operator== from NRVec
void simplify(bool keep1=false); //remove cycles of size 0 or 1

qsort.h

@@ -356,6 +356,35 @@ int parity=0;
return parity;
}
template<typename T>
inline int netsort_7(T *data)
{
int parity=0;
CONDSWAP(0,6);CONDSWAP(2,3);CONDSWAP(4,5);
CONDSWAP(0,2);CONDSWAP(1,4);CONDSWAP(3,6);
CONDSWAP(0,1);CONDSWAP(2,5);CONDSWAP(3,4);
CONDSWAP(1,2);CONDSWAP(4,6);
CONDSWAP(2,3);CONDSWAP(4,5);
CONDSWAP(1,2);CONDSWAP(3,4);CONDSWAP(5,6);
return parity;
}
template<typename T>
inline int netsort_8(T *data)
{
int parity=0;
CONDSWAP(0,2);CONDSWAP(1,3);CONDSWAP(4,6);CONDSWAP(5,7);
CONDSWAP(0,4);CONDSWAP(1,5);CONDSWAP(2,6);CONDSWAP(3,7);
CONDSWAP(0,1);CONDSWAP(2,3);CONDSWAP(4,5);CONDSWAP(6,7);
CONDSWAP(2,4);CONDSWAP(3,5);
CONDSWAP(1,4);CONDSWAP(3,6);
CONDSWAP(1,2);CONDSWAP(3,4);CONDSWAP(5,6);
return parity;
}
//see https://bertdobbelaere.github.io/sorting_networks.html for more
#undef CONDSWAP
@@ -384,6 +413,12 @@ switch(n)
case 6:
return netsort_6(data);
break;
case 7:
return netsort_7(data);
break;
case 8:
return netsort_8(data);
break;
default:
return ptrqsortup<T,int>(data,data+n-1);
break;
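
CONDSWAP itself is defined earlier in qsort.h and #undef'd after the networks; for orientation, a self-contained compare-exchange that tracks swap parity, which is what lets netsort() report the sign of the sorting permutation (illustrative, not the library's macro):

#include <utility>

//compare-exchange of data[i] and data[j]; every executed swap bumps the parity
//counter, from which callers such as the antisymmetric tensor code take a sign
template<typename T>
inline void condswap(T *data, int i, int j, int &parity)
{
if(data[i] > data[j]) {std::swap(data[i],data[j]); ++parity;}
}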

smat.cc

@@ -41,12 +41,12 @@ namespace LA {
* @see NRMat<T>::get(), NRSMat<T>::copyonwrite()
******************************************************************************/
template <typename T>
void NRSMat<T>::put(int fd, bool dim, bool transp) const {
void NRSMat<T>::put(int fd, bool dim, bool transp, bool orcaformat) const {
#ifdef CUDALA
if(location != cpu){
NRSMat<T> tmp= *this;
tmp.moveto(cpu);
tmp.put(fd,dim,transp);
tmp.put(fd,dim,transp,orcaformat);
return;
}
#endif
@@ -54,6 +54,11 @@ void NRSMat<T>::put(int fd, bool dim, bool transp) const {
if(dim){
if(sizeof(int) != write(fd,&nn,sizeof(int))) laerror("cannot write");
if(sizeof(int) != write(fd,&nn,sizeof(int))) laerror("cannot write");
if(orcaformat)
{
int tmp=sizeof(T);
if(sizeof(int) != write(fd,&tmp,sizeof(int))) laerror("cannot write");
}
}
LA_traits<T>::multiput((size_t)nn*(nn+1)/2,fd,v,dim);
}
@@ -66,12 +71,12 @@ void NRSMat<T>::put(int fd, bool dim, bool transp) const {
* @see NRSMat<T>::put(), NRSMat<T>::copyonwrite()
******************************************************************************/
template <typename T>
void NRSMat<T>::get(int fd, bool dim, bool transp) {
void NRSMat<T>::get(int fd, bool dim, bool transp, bool orcaformat) {
#ifdef CUDALA
if(location != cpu){
NRSMat<T> tmp;
tmp.moveto(cpu);
tmp.get(fd,dim,transp);
tmp.get(fd,dim,transp,orcaformat);
tmp.moveto(location);
*this = tmp;
return;
@@ -82,6 +87,12 @@ void NRSMat<T>::get(int fd, bool dim, bool transp) {
errno = 0;
if(dim){
if(2*sizeof(int) != read(fd,&nn0,2*sizeof(int))) laerror("cannot read");
if(orcaformat)
{
int tmp;
if(sizeof(int) != read(fd,&tmp,sizeof(int))) laerror("cannot read");
if(tmp!=sizeof(T)) laerror("mismatch in orca format");
}
resize(nn0[0]);
}else{
copyonwrite();
@@ -872,6 +883,9 @@ NRSMat<std::complex<double> > NRSMat<std::complex<double> >::inverse() {return
******************************************************************************/
template<typename T>
NRSMat<T>& NRSMat<T>::conjugateme() {
if(!LA_traits<T>::is_complex()) return *this;
copyonwrite();
#ifdef CUDALA
if(location != cpu) laerror("general conjugation only on CPU");
#endif

smat.h

@@ -154,6 +154,8 @@ public:
inline const T& operator[](const size_t ij) const;
inline T& operator[](const size_t ij);
//NOTE: it stores the matrix as symmetric and operator() assumes it is symmetric, does not support complex hermitean as that would require a smart pointer for l-value version
//complex conjugation options are available for BLAS routines to facilitate hermitean matrices
inline const T& operator()(const int i, const int j) const;
inline T& operator()(const int i, const int j);
@@ -179,10 +181,23 @@ public:
return s;
}
const NRVec<T> rsum() const
{
NRVec<T> r(nn);
for(int i=0; i<nn;++i)
{
r[i]=(T)0;
for(int j=0; j<nn; ++j) r[i] += (*this)(i,j);
}
return r;
}
const NRVec<T> csum() const {return rsum();};
const T trace() const;
void get(int fd, bool dimensions = 1, bool transp = 0);
void put(int fd, bool dimensions = 1, bool transp = 0) const;
void get(int fd, bool dimensions = 1, bool transp = 0, bool orcaformat=false);
void put(int fd, bool dimensions = 1, bool transp = 0, bool orcaformat=false) const;
void copyonwrite(bool detachonly=false, bool deep=true);
@@ -626,7 +641,9 @@ template <typename T>
inline T& NRSMat<T>::operator[](const size_t ij) {
#ifdef DEBUG
if(_LA_count_check && *count != 1) laerror("T& NRSMat<T>::operator[] used for matrix with count>1");
if(ij<0 || ij>=NN2) laerror("T& NRSMat<T>::operator[] out of range");
if(ij<0||ij>=NN2) std::cout<<"INDEX PROBLEM "<<0<<" "<<ij<<" "<<NN2-1<<std::endl;
if(ij<0) laerror("T& NRSMat<T>::operator[] out of range - low");
if(ij>=NN2) laerror("T& NRSMat<T>::operator[] out of range - high");
if(!v) laerror("T& NRSMat<T>::operator[] used for unallocated NRSmat<T> object");
#endif
NOT_GPU(*this);
@@ -645,7 +662,9 @@ inline T& NRSMat<T>::operator[](const size_t ij) {
template <typename T>
inline const T & NRSMat<T>::operator[](const size_t ij) const {
#ifdef DEBUG
if(ij<0 || ij>=NN2) laerror("T& NRSMat<T>::operator[] out of range");
if(ij<0||ij>=NN2) std::cout<<"INDEX PROBLEM "<<0<<" "<<ij<<" "<<NN2-1<<std::endl;
if(ij<0) laerror("T& NRSMat<T>::operator[] out of range - low");
if(ij>=NN2) laerror("T& NRSMat<T>::operator[] out of range - high");
if(!v) laerror("T& NRSMat<T>::operator[] used for unallocated NRSmat<T> object");
#endif
NOT_GPU(*this);
@@ -752,7 +771,11 @@ template <typename T>
inline T & NRSMat<T>::operator()(const int i, const int j) {
#ifdef DEBUG
if(_LA_count_check && *count != 1) laerror("T & NRSMat<T>::operator()(const int, const int) used for matrix with count > 1");
if(i<0 || i>=nn || j<0 || j>=nn) laerror("T & NRSMat<T>::operator()(const int, const int) out of range");
if(i<0||i>=nn||j<0||j>=nn) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<" "<<0<<" "<<j<<" "<<nn-1<<std::endl;
if(i<0) laerror("T & NRSMat<T>::operator()(const int, const int) first index out of range - low");
if(i>=nn) laerror("T & NRSMat<T>::operator()(const int, const int) first index out of range - high");
if(j<0) laerror("T & NRSMat<T>::operator()(const int, const int) second index out of range - low");
if(j>=nn) laerror("T & NRSMat<T>::operator()(const int, const int) second index out of range - high");
if(!v) laerror("T & NRSMat<T>::operator()(const int, const int) used for unallocated NRSmat<T> object");
#endif
NOT_GPU(*this);
@@ -770,7 +793,12 @@ inline T & NRSMat<T>::operator()(const int i, const int j) {
template <typename T>
inline const T & NRSMat<T>::operator()(const int i, const int j) const {
#ifdef DEBUG
if(i<0 || i>=nn || j<0 || j>=nn) laerror("T & NRSMat<T>::operator()(const int, const int) out of range");
if(i<0||i>=nn||j<0||j>=nn) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<" "<<0<<" "<<j<<" "<<nn-1<<std::endl;
if(i<0) laerror("T & NRSMat<T>::operator()(const int, const int) first index out of range - low");
if(i>=nn) laerror("T & NRSMat<T>::operator()(const int, const int) first index out of range - high");
if(j<0) laerror("T & NRSMat<T>::operator()(const int, const int) second index out of range - low");
if(j>=nn) laerror("T & NRSMat<T>::operator()(const int, const int) second index out of range - high");
if(!v) laerror("T & NRSMat<T>::operator()(const int, const int) used for unallocated NRSmat<T> object");
if(!v) laerror("T & NRSMat<T>::operator()(const int, const int) used for unallocated NRSmat<T> object");
#endif
NOT_GPU(*this);
@@ -1304,14 +1332,22 @@ public:
inline const T& operator() (const int i, const int j) const {
#ifdef DEBUG
if(i<=0||j<=0||i>NRSMat<T>::nn||j>NRSMat<T>::nn) laerror("index in const T& NRSMat<T>::operator() (const int, const int) out of range");
if(i<=0||i>NRSMat<T>::nn||j<=0||j>NRSMat<T>::nn) std::cout<<"INDEX PROBLEM "<<1<<" "<<i<<" "<<NRSMat<T>::nn<<" "<<1<<" "<<j<<" "<<NRSMat<T>::nn<<std::endl;
if(i<=0) laerror("index in const T& NRSMat_from1<T>::operator() (const int, const int) first indexout of range - low");
if(i>NRSMat<T>::nn) laerror("index in const T& NRSMat_from1<T>::operator() (const int, const int) first indexout of range - high");
if(j<=0) laerror("index in const T& NRSMat_from1<T>::operator() (const int, const int) second index out of range - low");
if(j>NRSMat<T>::nn) laerror("index in const T& NRSMat_from1<T>::operator() (const int, const int) second index out of range - high");
#endif
return NRSMat<T>::v[SMat_index_1(i,j)];
}
inline T& operator() (const int i, const int j){
#ifdef DEBUG
if(i<=0||j<=0||i>NRSMat<T>::nn||j>NRSMat<T>::nn) laerror("index in T& NRSMat<T>::operator() (const int, const int) out of range");
if(i<=0||i>NRSMat<T>::nn||j<=0||j>NRSMat<T>::nn) std::cout<<"INDEX PROBLEM "<<1<<" "<<i<<" "<<NRSMat<T>::nn<<" "<<1<<" "<<j<<" "<<NRSMat<T>::nn<<std::endl;
if(i<=0) laerror("index in const T& NRSMat_from1<T>::operator() (const int, const int) first indexout of range - low");
if(i>NRSMat<T>::nn) laerror("index in const T& NRSMat_from1<T>::operator() (const int, const int) first indexout of range - high");
if(j<=0) laerror("index in const T& NRSMat_from1<T>::operator() (const int, const int) second index out of range - low");
if(j>NRSMat<T>::nn) laerror("index in const T& NRSMat_from1<T>::operator() (const int, const int) second index out of range - high");
#endif
return NRSMat<T>::v[SMat_index_1(i,j)];
}

t.cc

@@ -1137,7 +1137,7 @@ if(0)
{
int n,m;
cin>>n >>m;
NRSMat<double> a(n,n);
NRSMat<double> a(n);
NRVec<double> rr(n);
for(int i=0;i<n;++i) for(int j=0;j<=i;++j)
@@ -3733,6 +3733,7 @@ INDEXGROUP shape;
{
shape.number=r;
shape.symmetry= 1;
//shape.symmetry= -1;
shape.range=n;
shape.offset=0;
}
@@ -4030,6 +4031,7 @@ cout <<"Error = "<<(xx-x).norm()<<endl;
}
if(0)
{
//check symmetrizer/antisymmetrizer in general case
@@ -4067,6 +4069,7 @@ cout <<"xx = "<<xx.shape<< " "<<xx.names<<endl;
cout <<"Error = "<<(xx-xxx).norm()<<endl;
}
if(0)
{
int r=4;
@@ -4169,6 +4172,52 @@ cout<<"resulting index order = "<<y.names<<endl;
}
if(0)
{
int nn;
cin>>nn;
int r=4;
NRVec<INDEXGROUP> s(r);
for(int i=0; i<r; ++i)
{
s[i].number=1;
s[i].symmetry=0;
s[i].range=nn;
s[i].offset=0;
}
Tensor<double> x(s); x.randomize(1.);
INDEXNAME xlist[] = {"i","j","k","l"};
x.names=NRVec<INDEXNAME>(xlist);
Tensor<double> y(s); y.randomize(1.);
INDEXNAME ylist[] = {"m","n","j","i"};
y.names=NRVec<INDEXNAME>(ylist);
Tensor<double>z(s); z.clear();
INDEXNAME zlist[] = {"m","n","k","l"};
z.names=NRVec<INDEXNAME>(zlist);
Tensor<double>zz(z);
INDEX i1[]={{0,0},{1,0}};
INDEX i2[]={{3,0},{2,0}};
NRVec<INDEX> il1(i1);
NRVec<INDEX> il2(i2);
Tensor<double> zzz = x.contractions(il1,y,il2,1,false,false);
//cout <<"zzz names = "<<zzz.names<<endl;
z.add_permuted_contractions("",x,y,1,0,false,false);
zz.copyonwrite();
for(int k=0; k<nn; ++k) for(int l=0; l<nn; ++l) for(int m=0; m<nn; ++m) for(int n=0; n<nn; ++n)
{
double s=0;
for(int i=0; i<nn; ++i) for(int j=0; j<nn; ++j) s += x(i,j,k,l)*y(m,n,j,i);
zz.lhs(m,n,k,l) = s;
//cout <<"test "<<k<<" "<<l<<" "<<m<<" "<<n<<" "<<zz(k,l,m,n)<<" "<<zzz(m,n,k,l)<< " : "<<z(m,n,k,l) <<endl;
}
cout <<"Error = "<<(z-zz).norm()<<endl;
}
if(0)
{
int nn;
@@ -4215,7 +4264,7 @@ for(int k=0; k<nn; ++k) for(int l=0; l<nn; ++l) for(int m=0; m<nn; ++m) for(int
cout <<"Error = "<<(z-zz).norm()<<endl;
}
if(1)
if(0)
{
int nn;
cin>>nn;
@@ -4254,12 +4303,323 @@ for(int k=0; k<nn; ++k) for(int l=0; l<nn; ++l) for(int m=0; m<nn; ++m) for(int
Tensor<double> zzz(s);
for(int k=0; k<nn; ++k) for(int l=0; l<nn; ++l) for(int m=0; m<nn; ++m) for(int n=0; n<nn; ++n)
{
zzz.lhs(k,l,m,n) = 1/3.*(zz(k,l,m,n)-zz(l,k,m,n)+zz(m,k,l,n));
zzz.lhs(k,l,m,n) = (zz(k,l,m,n)-zz(l,k,m,n)+zz(m,k,l,n));
}
cout <<"Error = "<<(z-zzz).norm()<<endl;
}
if(0)
{
//check symmetrizer/antisymmetrizer in general case
int r,n,sym;
cin>>r>>n>>sym;
NRVec<INDEXGROUP> shape(3);
shape[0].number=2;
shape[0].symmetry=0;
shape[0].range=n+1;
shape[0].offset=0;
shape[1].number=r;
shape[1].symmetry= sym;
shape[1].range=n;
shape[1].offset=0;
shape[2].number=2;
shape[2].symmetry=0;
shape[2].range=n+2;
shape[2].offset=0;
Tensor<complex<double> > x(shape); x.randomize(1.);
x.defaultnames();
cout <<"x= "<<x.shape << " "<<x.names<<endl;
Tensor<complex<double> > xf=x.flatten(1);
cout <<"xf= "<<xf.shape << " "<<xf.names<<endl;
Tensor<complex<double> > xxx=x.unwind_index_group(1);
cout <<"xxx= "<<xxx.shape<<" "<<xxx.names<<endl;
INDEXLIST il(r);
for(int i=0; i<r; ++i) il[i]= {1+i,0};
Tensor<complex<double> > xx = xf.merge_indices(il,sym);
cout <<"xx = "<<xx.shape<< " "<<xx.names<<endl;
cout <<"Error = "<<(xx-xxx).norm()<<endl;
}
if(0)
{
int nn;
cin>>nn;
int r=4;
NRVec<INDEXGROUP> s(r);
for(int i=0; i<r; ++i)
{
s[i].number=1;
s[i].symmetry=0;
s[i].range=nn;
s[i].offset=0;
}
Tensor<complex<double> > x(s); x.randomize(1.);
INDEXNAME xlist[] = {"i","j","k","l"};
x.names=NRVec<INDEXNAME>(xlist);
Tensor<complex<double> > y(s); y.randomize(1.);
INDEXNAME ylist[] = {"n","m","j","i"};
y.names=NRVec<INDEXNAME>(ylist);
Tensor<complex<double> >z(s); z.clear();
INDEXNAME zlist[] = {"k","l","m","n"};
z.names=NRVec<INDEXNAME>(zlist);
Tensor<complex<double> >zz(z);
z.add_permuted_contractions("k/l,m",x,y,1,0,false,false);
zz.copyonwrite();
for(int k=0; k<nn; ++k) for(int l=0; l<nn; ++l) for(int m=0; m<nn; ++m) for(int n=0; n<nn; ++n)
{
complex<double> s=0;
for(int i=0; i<nn; ++i) for(int j=0; j<nn; ++j) s += x(i,j,k,l)*y(n,m,j,i);
zz.lhs(k,l,m,n) = s;
}
Tensor<complex<double> > zzz(s);
for(int k=0; k<nn; ++k) for(int l=0; l<nn; ++l) for(int m=0; m<nn; ++m) for(int n=0; n<nn; ++n)
{
zzz.lhs(k,l,m,n) = (zz(k,l,m,n)-zz(l,k,m,n)+zz(m,k,l,n));
}
cout <<"Error = "<<(z-zzz).norm()<<endl;
}
if(0)
{
//for profiling and timing
int nn;
cin>>nn;
int r=4;
NRVec<INDEXGROUP> s(r);
for(int i=0; i<r; ++i)
{
s[i].number=1;
s[i].symmetry=0;
s[i].range=nn;
s[i].offset=0;
}
Tensor<double> x(s); x.randomize(1.);
INDEXNAME xlist[] = {"i","j","k","l"};
x.names=NRVec<INDEXNAME>(xlist);
Tensor<double> y(s); y.randomize(1.);
INDEXNAME ylist[] = {"n","m","j","i"};
y.names=NRVec<INDEXNAME>(ylist);
Tensor<double>z(s); z.clear();
INDEXNAME zlist[] = {"k","l","m","n"};
z.names=NRVec<INDEXNAME>(zlist);
z.add_permuted_contractions("k/l,m",x,y,1,0,false,false);
cout <<z.norm()<<endl;
}
if(0)
{
//tucker test order
int r,n;
bool inv;
cin>>r>>n>>inv;
NRVec<INDEXGROUP> shape(r);
for(int i=0; i<r; ++i)
{
shape[i].number=1;
shape[i].symmetry=0;
shape[i].range=n+i;
shape[i].offset=0;
}
Tensor<double> x(shape);
x.randomize(1.);
x.defaultnames();
cout<<x;
Tensor<double> x0(x);
x0.copyonwrite();
NRVec<NRMat<double> > dec=x.Tucker(1e-12,inv);
cout<<"Tucker\n"<<x<<endl;
cout<<dec;
Tensor<double> y = x.inverseTucker(dec,inv);
cout <<"invTucker\n"<<y;
cout <<"Error = "<<(x0-y).norm()<<endl;
if(inv==false)
{
Tensor<double> z = x.linear_transform(dec);
cout <<"Error2 = "<<(x0-z).norm()<<endl;
}
}
if(0)
{
//test linear transform
int r=3;
int n,sym;
cin>>n>>sym;
NRVec<INDEXGROUP> shape(2);
{
shape[0].number=r;
shape[0].symmetry=sym;
shape[0].range=n;
shape[0].offset=0;
shape[1].number=1;
shape[1].symmetry=0;
shape[1].range=n;
shape[1].offset=0;
}
Tensor<double> x(shape);
x.randomize(1.);
NRVec<NRMat<double> > t(2);
t[0].resize(n+2,n); t[0].randomize(1.);
t[1].resize(n+2,n); t[1].randomize(1.);
Tensor<double> xt=x.linear_transform(t);
shape.copyonwrite();
shape[0].range=n+2;
shape[1].range=n+2;
Tensor<double> y(shape);
//this is the most stupid way to compute the transform ;-)
for(int i=0; i<n+2; ++i) for(int j=0; j<n+2; ++j) for(int k=0; k<n+2; ++k) for(int l=0; l<n+2; ++l)
{
y.lhs(i,j,k,l)=0;
for(int ii=0; ii<n; ++ii) for(int jj=0; jj<n; ++jj) for(int kk=0; kk<n; ++kk) for(int ll=0; ll<n; ++ll)
y.lhs(i,j,k,l) += x(ii,jj,kk,ll) * t[0](i,ii) * t[0](j,jj) * t[0](k,kk) * t[1](l,ll) ;
}
cout <<"Error = "<<(xt-y).norm()<<endl;
}
if(0)
{
int n,m;
bool which;
cin>>n >>m >>which;
NRSMat<double> a(n);
NRVec<double> rr(n);
for(int i=0;i<n;++i) for(int j=0;j<=i;++j)
{
a(i,j)= RANDDOUBLE();
if(i==j) a(i,i)+= .5*(i-n*.5);
}
NRSMat<double> aa;
NRMat<double> vv(n,n);
aa=a; diagonalize(aa,rr,&vv);
NRVec<double> r(m);
NRVec<double> *eivecs = new NRVec<double>[m];
cout <<"Exact energies " <<rr<<endl;
if(which) lanczos(a,r,eivecs,NULL,m,1,1e-6,1,200,300);
else davidson(a,r,eivecs,NULL,m,1,1e-6,1,200,300);
cout <<"Iterative energies " <<r;
cout <<"Eigenvectors compare:\n";
for(int i=0; i<m; ++i)
{
cout <<eivecs[i];
for(int j=0; j<n;++j) cout <<vv[j][i]<<" ";
cout <<endl;
}
}
if(0)
{
int n,m;
bool which;
cin>>n >>m>>which ;
NRSMat<complex<double> > a(n);
a.randomize(.1);
for(int i=0;i<n;++i)
{
a(i,i)= RANDDOUBLE() + .5*(i-n);
}
//cout <<"test matrix = "<<a;
NRVec<double> rr(n);
NRSMat<complex<double> > aa;
NRMat<complex<double> > vv(n,n);
aa=a; diagonalize(aa,rr,&vv);
cout <<"Exact energies "<<rr;
NRVec<complex<double> > r(m);
NRVec<complex<double> > *eivecs = new NRVec<complex<double> >[m];
if(which) lanczos(a,r,eivecs,NULL,m,true,1e-5,true,10*n,n);
else davidson(a,r,eivecs,NULL,m,true,1e-5,true,10*n,n);
cout <<"Davidson/Lanczos energies " <<r;
cout <<"Exact energies "<<rr;
cout <<"Eigenvectors compare:\n";
for(int i=0; i<m; ++i)
{
cout <<eivecs[i];
for(int j=0; j<n;++j) cout <<vv[j][i]<<" ";
cout <<endl;
}
}
if(1)
{
int r,n,sym;
cin>>r>>n>>sym;
NRVec<INDEXGROUP> shape(3);
shape[0].number=2;
shape[0].symmetry=0;
shape[0].range=n+1;
shape[0].offset=0;
shape[1].number=r;
shape[1].symmetry= sym;
shape[1].range=n;
shape[1].offset=0;
shape[2].number=2;
shape[2].symmetry=0;
shape[2].range=n+2;
shape[2].offset=0;
Tensor<double> x(shape); x.randomize(1.);
x.defaultnames();
cout <<"x= "<<x.shape << " "<<x.names<<endl;
NRVec<INDEXGROUP> yshape(2);
yshape[0].number=1;
yshape[0].symmetry=0;
yshape[0].range=n;
yshape[0].offset=0;
yshape[1].number=1;
yshape[1].symmetry= 0;
yshape[1].range=n+3;
yshape[1].offset=0;
Tensor<double> y(yshape); y.randomize(1.);
INDEX posit(1,0);
//Tensor<double> z=x.unwind_index(1,1);
//Tensor<double> z=x.contraction(INDEX(1,1),y,INDEX(0,0),1,false,false);
Tensor<double> z=x.linear_transform(1,y);
cout <<z.shape;
//check
}
}//main

tensor.cc

@@ -24,6 +24,8 @@
#include <complex>
#include "bitvector.h"
#include <string.h>
#include <bits/stdc++.h>
#include <time.h>
namespace LA {
@@ -62,6 +64,8 @@ for(int i=0; i<shape.size(); ++i)
case 0:
s *= groupsizes[i] = longpow(sh->range,sh->number);
break;
case 2:
case -2:
case 1:
s *= groupsizes[i] = simplicial(sh->number,sh->range);
break;
@@ -85,7 +89,11 @@ LA_largeindex subindex(int *sign, const INDEXGROUP &g, const NRVec<LA_index> &I)
#ifdef DEBUG
if(I.size()<=0) laerror("empty index group in subindex");
if(g.number!=I.size()) laerror("mismatch in the number of indices in a group");
for(int i=0; i<I.size(); ++i) if(I[i]<g.offset || I[i] >= g.offset+g.range) laerror("index out of range in tensor subindex");
for(int i=0; i<I.size(); ++i) if(I[i]<g.offset || I[i] >= g.offset+g.range)
{
std::cout<<"TENSOR INDEX PROBLEM in group " <<g<<" with index "<<I<<std::endl;
laerror("index out of range in tensor subindex");
}
#endif
switch(I.size()) //a few special cases for efficiency
@@ -100,18 +108,17 @@ switch(I.size()) //a few special cases for efficiency
break;
case 2:
{
*sign=1;
if(g.symmetry==0) return (I[1]-g.offset)*g.range+I[0]-g.offset;
if(g.symmetry==0) {*sign=1; return (I[1]-g.offset)*g.range+I[0]-g.offset;};
LA_index i0,i1;
if(I[0]>I[1]) {i1=I[0]; i0=I[1]; if(g.symmetry<0) *sign = -1;} else {i1=I[1]; i0=I[0];}
if(I[0]>I[1]) {i1=I[0]; i0=I[1]; *sign=g.symmetry;} else {i1=I[1]; i0=I[0]; *sign=1;}
i0 -= g.offset;
i1 -= g.offset;
if(g.symmetry<0)
if(g.symmetry == -1) //antisymmetric
{
if(i0==i1) {*sign=0; return -1;}
return i1*(i1-1)/2+i0;
}
else
else //symmetric, hermitian, antihermitian
{
return i1*(i1+1)/2+i0;
}
@@ -120,10 +127,9 @@ switch(I.size()) //a few special cases for efficiency
default: //general case
{
*sign=1;
if(g.symmetry==0) //rectangular case
{
*sign=1;
LA_largeindex r=0;
for(int i=I.size()-1; i>=0; --i)
{
@@ -138,9 +144,9 @@ switch(I.size()) //a few special cases for efficiency
NRVec<LA_index> II(I);
II.copyonwrite();
if(g.offset!=0) II -= g.offset;
int parity=netsort(II.size(),&II[0]);
if(g.symmetry<0 && (parity&1)) *sign= -1;
if(g.symmetry<0) //antisymmetric
int nswaps=netsort(II.size(),&II[0]);
*sign= (nswaps&1) ? g.symmetry : 1;
if(g.symmetry == -1) //antisymmetric - do not store zero diagonal
{
for(int i=0; i<I.size()-1; ++i)
if(II[i]==II[i+1])
@@ -150,7 +156,7 @@ switch(I.size()) //a few special cases for efficiency
for(int i=0; i<II.size(); ++i) r += simplicial(i+1,II[i]-i);
return r;
}
else //symmetric
else //symmetric, hermitian, antihermitian
{
LA_largeindex r=0;
for(int i=0; i<II.size(); ++i) r += simplicial(i+1,II[i]);
@@ -177,6 +183,8 @@ switch(g.symmetry)
s /= g.range;
}
break;
case 2:
case -2:
case 1:
for(int i=g.number; i>0; --i)
{
@@ -217,6 +225,21 @@ for(int g=shape.size()-1; g>=0; --g)
return I;
}
//group-like multiplication table to combine symmetry adjustments due to several index groups
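//the combined sign codes appear to mean: 0 = element vanishes, 1 = identity,
//-1 = sign flip, 2 = complex conjugation, -2 = conjugation combined with a sign flip
//(matching the hermitian/antihermitian symmetry values 2/-2 handled above)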
static const int signmultab[5][5] = {
{1,2,0,-2,-1},
{2,1,0,-1,-2},
{0,0,0,0,0},
{-2,-1,0,1,2},
{-1,-2,0,2,1}
};
static inline int signmult(int s1, int s2)
{
return signmultab[s1+2][s2+2];
}
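The table acts as a small multiplication rule for the sign codes: 1 is the identity, -1 negation, 2 conjugation, -2 negated conjugation, and 0 marks a structurally zero element. A self-contained sketch of the composition (the table is copied from above; signmult_demo is an illustrative stand-in for the static signmult, not part of the library):
#include <cassert>

static const int signmultab_demo[5][5] = {
    {1,2,0,-2,-1},
    {2,1,0,-1,-2},
    {0,0,0,0,0},
    {-2,-1,0,1,2},
    {-1,-2,0,2,1}
};
static inline int signmult_demo(int s1, int s2) {return signmultab_demo[s1+2][s2+2];}

int main()
{
assert(signmult_demo(-1,2) == -2); //negation composed with conjugation gives negated conjugation
assert(signmult_demo(2,2)  ==  1); //conjugation is an involution
assert(signmult_demo(0,-2) ==  0); //a structurally zero element stays zero
return 0;
}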
template<typename T>
@@ -232,7 +255,7 @@ for(int i=0; i<I.size(); ++i)
{
if(I[i][j] <shape[i].offset || I[i][j] >= shape[i].offset+shape[i].range)
{
std::cerr<<"error in index group no. "<<i<<" index no. "<<j<<std::endl;
std::cout<<"TENSOR INDEX PROBLEM group no. "<<i<<" index no. "<<j<<" should be between "<<shape[i].offset<<" and "<<shape[i].offset+shape[i].range-1<<std::endl;
laerror("tensor index out of range");
}
}
@@ -246,7 +269,7 @@ for(int g=0; g<shape.size(); ++g) //loop over index groups
int gsign;
LA_largeindex groupindex = subindex(&gsign,shape[g],I[g]);
//std::cout <<"INDEX TEST group "<<g<<" cumsizes "<< cumsizes[g]<<" groupindex "<<groupindex<<std::endl;
*sign *= gsign;
if(LA_traits<T>::is_complex()) *sign = signmult(*sign,gsign); else *sign *= gsign;
if(groupindex == -1) return -1;
r += groupindex * cumsizes[g];
}
@@ -272,7 +295,7 @@ for(int g=0; g<shape.size(); ++g) //loop over index groups
gstart=gend+1;
LA_largeindex groupindex = subindex(&gsign,shape[g],subI);
//std::cout <<"FLATINDEX TEST group "<<g<<" cumsizes "<< cumsizes[g]<<" groupindex "<<groupindex<<std::endl;
*sign *= gsign;
if(LA_traits<T>::is_complex()) *sign = signmult(*sign,gsign); else *sign *= gsign;
if(groupindex == -1) return -1;
r += groupindex * cumsizes[g];
}
@@ -404,6 +427,8 @@ switch(sh->symmetry)
istart= sh->offset;
iend= sh->offset+sh->range-1;
break;
case 2:
case -2:
case 1:
istart= sh->offset;
if(igroup==sh->number-1) iend= sh->offset+sh->range-1;
@@ -469,6 +494,8 @@ switch(sh->symmetry)
istart= sh->offset;
iend= sh->offset+sh->range-1;
break;
case 2:
case -2:
case 1:
istart= sh->offset;
if(igroup==sh->number-1) iend= sh->offset+sh->range-1;
@@ -686,6 +713,9 @@ return q;
template<typename T>
Tensor<T> Tensor<T>::permute_index_groups(const NRPerm<int> &p) const
{
if(p.size()!=shape.size()) laerror("permutation size mismatch in permute_index_groups");
if(p.is_identity()) return *this;
//std::cout <<"permute_index_groups permutation = "<<p<<std::endl;
NRVec<INDEXGROUP> newshape=shape.permuted(p,true);
//std::cout <<"permute_index_groups newshape = "<<newshape<<std::endl;
@@ -809,6 +839,7 @@ if(group==0 && index==0 && shape[0].symmetry==0) //formal split of 1 index witho
}
if(shape[group].number==1) return unwind_index_group(group); //single index in the group
//general case - recalculate the shape and allocate the new tensor
NRVec<INDEXGROUP> newshape(shape.size()+1);
newshape[0].number=1;
@@ -829,7 +860,10 @@ for(int i=0; i<shape.size(); ++i)
--newshape[i+1].number;
flatindex += index;
}
else flatindex += shape[i].number;
else
{
if(i<group) flatindex += shape[i].number;
}
}
@@ -874,7 +908,7 @@ template<typename T>
static void flatten_callback(const SUPERINDEX &I, T *v)
{
FLATINDEX J = superindex2flat(I);
//std::cout <<"TEST flatten_callback: from "<<JP<<" TO "<<J<<std::endl;
//std::cout <<"TEST flatten_callback: "<<J<<std::endl;
*v = (*help_tt<T>)(J); //rhs operator() generates the redundant elements for the unwinded lhs tensor
}
//
@@ -1112,13 +1146,21 @@ for(int i=0; i<nn; ++i) for(int j=0; j<mm; ++j)
template<>
void auxmatmult<double>(int nn, int mm, int kk, double *r, const double *a, const double *b, double alpha, double beta, bool conjugate)
{
clock_t t0=clock();
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasTrans, nn, mm, kk, alpha, a, kk, b, kk, beta, r, mm);
#ifdef TIMING
std::cout<<"cblas_dgemm CPU time " << (clock()-t0)/(double)CLOCKS_PER_SEC<<std::endl;
#endif
}
template<>
void auxmatmult<std::complex<double> >(int nn, int mm, int kk, std::complex<double> *r, const std::complex<double> *a, const std::complex<double> *b, std::complex<double> alpha, std::complex<double> beta, bool conjugate)
{
clock_t t0=clock();
cblas_zgemm(CblasRowMajor, CblasNoTrans, (conjugate?CblasConjTrans:CblasTrans), nn, mm, kk, &alpha, a, kk, b, kk, &beta, r, mm);
#ifdef TIMING
std::cout<<"cblas_zgemm CPU time " << (clock()-t0)/(double)CLOCKS_PER_SEC<<std::endl;
#endif
}
@@ -1131,7 +1173,7 @@ if(rhsgroup<0||rhsgroup>=rhs.shape.size()) laerror("wrong rhsgroup number in con
if(rhs1.shape[group].offset != rhs.shape[rhsgroup].offset) laerror("incompatible index offset in contraction");
if(rhs1.shape[group].range != rhs.shape[rhsgroup].range) laerror("incompatible index range in contraction");
if(rhs1.shape[group].symmetry != rhs.shape[rhsgroup].symmetry) laerror("incompatible index symmetry in addgroupcontraction");
if(rhs1.shape[group].symmetry == 1) laerror("addgroupcontraction not implemented for symmetric index groups");
if(rhs1.shape[group].symmetry !=0 && rhs1.shape[group].symmetry != -1) laerror("addgroupcontraction only implemented for nonsymmetric and antisymmetric index groups");
#ifdef LA_TENSOR_INDEXPOSITION
if((rhs1.shape[group].upperindex ^ rhs.shape[rhsgroup].upperindex) == false) laerror("can contract only upper with lower index");
#endif
@@ -1175,6 +1217,8 @@ if(kk!=rhsu.groupsizes[0]) laerror("internal error in addgroupcontraction");
T factor=alpha;
if(u.shape[0].symmetry== -1) factor=alpha*(T)factorial(u.shape[0].number);
if(u.shape[0].symmetry== 1) laerror("addgroupcontraction not implemented for symmetric index groups");
if(u.shape[0].symmetry== 2) laerror("addgroupcontraction not implemented for hermitean index groups");
if(u.shape[0].symmetry== -2) laerror("addgroupcontraction not implemented for antihermitean index groups");
nn=1; for(int i=1; i<u.shape.size(); ++i) nn*= u.groupsizes[i];
mm=1; for(int i=1; i<rhsu.shape.size(); ++i) mm*= rhsu.groupsizes[i];
data.copyonwrite();
@@ -1278,8 +1322,12 @@ for(int i=0; i<il1.size(); ++i)
for(int j=0; j<i; ++j) if(il1[i]==il1[j]||il2[i]==il2[j]) laerror("repeated index in the list");
}
clock_t t0=clock();
const Tensor<T> u = conjugate1? (rhs1.unwind_indices(il1)).conjugateme() : rhs1.unwind_indices(il1);
const Tensor<T> rhsu = rhs2.unwind_indices(il2);
#ifdef TIMING
std::cout <<"unwinding CPU time = "<<(clock()-t0)/(double)CLOCKS_PER_SEC<<std::endl;
#endif
NRVec<INDEXGROUP> newshape(u.shape.size()+rhsu.shape.size()-2*il1.size());
@@ -1405,6 +1453,7 @@ template<typename T>
static const PermutationAlgebra<int,T> *help_pa;
static bool help_inverse;
static bool help_conjugate;
template<typename T>
static T help_alpha;
@@ -1416,7 +1465,10 @@ FLATINDEX J = superindex2flat(I);
for(int p=0; p<help_pa<T>->size(); ++p)
{
FLATINDEX Jp = J.permuted((*help_pa<T>)[p].perm,help_inverse);
*v += help_alpha<T> * (*help_pa<T>)[p].weight * (*help_tt<T>)(Jp);
bool conj=false;
if(help_conjugate) conj= ((*help_pa<T>)[p].perm).parity()<0;
T tmp= (*help_tt<T>)(Jp);
*v += help_alpha<T> * (*help_pa<T>)[p].weight * (conj ? LA_traits<T>::conjugate(tmp) : tmp);
}
}
@@ -1449,7 +1501,7 @@ for(int p=0; p<help_pa<T>->size(); ++p)
template<typename T>
void Tensor<T>::apply_permutation_algebra(const Tensor<T> &rhs, const PermutationAlgebra<int,T> &pa, bool inverse, T alpha, T beta)
void Tensor<T>::apply_permutation_algebra(const Tensor<T> &rhs, const PermutationAlgebra<int,T> &pa, bool inverse, T alpha, T beta, bool conjugate)
{
if(beta!=(T)0) {if(beta!=(T)1) *this *= beta;} else clear();
if(alpha==(T)0) return;
@@ -1462,9 +1514,12 @@ if(rhs.is_named())
NRVec<INDEXNAME> namperm = rhs.names.permuted(pa[0].perm,!inverse);
if(is_named())
{
std::cout <<"LHS names = "<<names<<std::endl;
std::cout <<"permuted RHS names = "<<namperm<<std::endl;
if(names!=namperm) laerror("inconsistent index names in apply_permutation_algebra");
if(names!=namperm)
{
std::cout <<"LHS names = "<<names<<std::endl;
std::cout <<"permuted RHS names = "<<namperm<<std::endl;
laerror("inconsistent index names in apply_permutation_algebra");
}
}
else
names=namperm;
@@ -1473,6 +1528,7 @@ if(rhs.is_named())
help_tt<T> = &rhs;
help_pa<T> = &pa;
help_inverse = inverse;
help_conjugate= LA_traits<T>::is_complex() ? conjugate : false;
help_alpha<T> = alpha;
loopover(permutationalgebra_callback);
}
@@ -1641,7 +1697,7 @@ if(is_named() && rhs.is_named() && names!=rhs.names) laerror("incompatible tenso
T factor=1;
for(int i=0; i<shape.size(); ++i)
{
if(shape[i].symmetry==1) laerror("unsupported index group symmetry in dot");
if(shape[i].symmetry==1||shape[i].symmetry==2||shape[i].symmetry== -2) laerror("unsupported index group symmetry in dot");
if(shape[i].symmetry== -1) factor *= (T)factorial(shape[i].number);
}
return factor * data.dot(rhs.data);
@@ -1673,15 +1729,17 @@ if(r==1) //create an analogous output for the trivial case
}
//loop over all indices; relies on the fact that unwinding does not change order of remaining indices
//for inverseorder=false, to avoid inverting order by permute_index_groups we repeatedly unwind the LAST index, and all indices rotate at this position with the first one in the last iteration due to the unwind!
NRVec<INDEXNAME> names_saved = names;
for(int i=0; i<r; ++i)
{
INDEX I=indexposition(i,shape);
INDEX I= inverseorder? indexposition(i,shape) : indexposition(r-1,shape);
NRMat<T> um;
NRVec<INDEXGROUP> ushape;
{
Tensor<T> uu=unwind_index(I);
ushape=uu.shape; //ushape.copyonwrite(); should not be needed
um=uu.matrix();
ushape=uu.shape;
um=uu.matrix(1);
}
int mini=um.nrows(); if(um.ncols()<mini) mini=um.ncols(); //compact SVD, expect descendingly sorted values
NRMat<T> u(um.nrows(),mini),vt(mini,um.ncols());
@@ -1706,28 +1764,30 @@ for(int i=0; i<r; ++i)
umnew=u.submatrix(0,umnr-1,0,preserve-1);
}
else umnew=u;
ret[(inverseorder? r-i-1 : i)]=vt.transpose(true);
ret[r-i-1]=vt.transpose(true);
umnew.diagmultr(w);
//rebuild tensor of the preserved shape from matrix
ushape.copyonwrite();
ushape[0].range=preserve;
{
NRVec<T> newdata(umnew);
umnew.resize(0,0);//deallocate
*this = Tensor(ushape,newdata);
names=names_saved;
}
}
if(!is_flat()) laerror("this should not happen");
if(!inverseorder)
if(inverseorder)
{
NRPerm<int> p(r);
for(int i=1; i<=r; ++i) p[r-i+1]=i;
*this = permute_index_groups(p);
NRPerm<int> p(rank()); p.identity();
names=names_saved.permuted(p.reverse());
}
else
names=names_saved;
return ret;
}
template<typename T>
Tensor<T> Tensor<T>::inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder) const
{
@@ -1735,9 +1795,11 @@ if(rank()!=x.size()) laerror("input of inverseTucker does not match rank");
Tensor<T> tmp(*this);
Tensor<T> r;
if(!is_flat()) laerror("inverseTucker only for flat tensors as produced by Tucker");
if(inverseorder)
{
for(int i=0; i<rank(); ++i)
{
Tensor<T> mat(x[i],true);
Tensor<T> mat(x[i],true); //flat tensor from a matrix
r= tmp.contraction(i,0,mat,0,0,(T)1,false,false);
if(i<rank()-1)
{
@@ -1745,17 +1807,134 @@ for(int i=0; i<rank(); ++i)
r.deallocate();
}
}
if(!inverseorder)
}
else //not inverseorder
{
for(int i=rank()-1; i>=0; --i)
{
NRPerm<int> p(r.rank());
for(int i=1; i<=r.rank(); ++i) p[r.rank()-i+1]=i;
return r.permute_index_groups(p);
Tensor<T> mat(x[i],true); //flat tensor from a matrix
r= tmp.contraction(rank()-1,0,mat,0,0,(T)1,false,false); //the current index will be the last after previous contractions
if(i>0)
{
tmp=r;
r.deallocate();
}
}
}
if(inverseorder)
{
NRPerm<int> p(rank()); p.identity();
r.names=names.permuted(p.reverse());
}
else
return r;
r.names=names;
return r;
}
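A minimal roundtrip sketch (assuming tensor.h from this library; the shapes are illustrative): Tucker() replaces the tensor by its flattened core and returns one factor matrix per index, and with the new default inverseorder=false inverseTucker() rebuilds the original tensor:
#include "tensor.h"
using namespace LA;

void demo_tucker()
{
NRVec<INDEXGROUP> shape(3);
for(int i=0; i<3; ++i) {shape[i].number=1; shape[i].symmetry=0; shape[i].range=4+i; shape[i].offset=0;}
Tensor<double> t(shape); t.randomize(1.); t.defaultnames();
Tensor<double> core(t); //keep t for comparison
NRVec<NRMat<double> > factors = core.Tucker(1e-12); //core now holds the core tensor
Tensor<double> back = core.inverseTucker(factors); //should reproduce t up to numerical noise
}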
template<typename T>
Tensor<T> Tensor<T>::linear_transform(const NRVec<NRMat<T> > &x) const
{
if(x.size()!=shape.size()) laerror("wrong number of transformation matrices in linear_transform");
for(int i=0; i<shape.size(); ++i) if(x[i].ncols()!=shape[i].range) laerror("mismatch of transformation matrix size in linear_transform");
Tensor<T> tmp(*this);
Tensor<T> r;
//do the groups in reverse order to preserve index ordering
for(int g=shape.size()-1; g>=0; --g)
{
Tensor<T> mat(x[g],true); //flat tensor from a matrix
for(int i=shape[g].number-1; i>=0; --i) //indices in the group in reverse order
{
#ifdef LA_TENSOR_INDEXPOSITION
//what should we do with index position?
//either set upperindex of mat appropriately, or request ignoring that in contractions?
#endif
r= tmp.contraction(indexposition(rank()-1,tmp.shape),mat,INDEX(0,0),(T)1,false,false); //always the last index
if(i>0)
{
tmp=r;
r.deallocate();
}
}
//the group's indices are now individual ones leftmost in tensor r, restore group size and symmetry
if(shape[g].number>1)
{
INDEXLIST il(shape[g].number);
for(int i=0; i<shape[g].number; ++i)
{
il[i].group=i;
il[i].index=0;
}
r = r.merge_indices(il,shape[g].symmetry);
}
if(g>0)
{
tmp=r;
r.deallocate();
}
}
r.names=names;
return r;
}
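A usage sketch of the per-group transform (assuming tensor.h; the matrices are illustrative placeholders, each must have ncols equal to the range of its group):
#include "tensor.h"
using namespace LA;

void demo_linear_transform()
{
NRVec<INDEXGROUP> shape(2);
shape[0].number=2; shape[0].symmetry= -1; shape[0].range=4; shape[0].offset=0; //antisymmetric pair
shape[1].number=1; shape[1].symmetry=0; shape[1].range=5; shape[1].offset=0;
Tensor<double> t(shape); t.randomize(1.);

NRVec<NRMat<double> > x(2);
x[0]=NRMat<double>(4,4); //transforms the antisymmetric pair, ncols matches range 4
x[1]=NRMat<double>(5,5); //transforms the single index, ncols matches range 5
//... fill x[0] and x[1] with the desired transformation matrices ...

Tensor<double> r = t.linear_transform(x); //same shape and group symmetries as t
}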
template<typename T>
Tensor<T> Tensor<T>::linear_transform(const int g, const Tensor<T> &x) const
{
if(g<0||g>=shape.size()) laerror("wrong index group number in linear_transform");
if(x.rank()!=2 || x.shape.size()!=2 || x.shape[0].number!=1||x.shape[1].number!=1) laerror("wrong tensor shape for linear_transform");
if(x.shape[0].range!=shape[g].range) laerror("index range mismatch in linear_transform");
Tensor<T> tmp(*this);
Tensor<T> r;
//contract all indices in the reverse order
int gnow=g;
for(int i=shape[g].number-1; i>=0; --i) //indices in the group in reverse order
{
r= tmp.contraction(INDEX(gnow,i),x,INDEX(0,0),(T)1,false,false);
++gnow; //new group number in r, one index was added left
if(i>0)
{
tmp=r;
r.deallocate();
}
}
//the group's indices are now individual ones leftmost in tensor r, restore group size and symmetry
if(shape[g].number>1)
{
INDEXLIST il(shape[g].number);
for(int i=0; i<shape[g].number; ++i)
{
il[i].group=i;
il[i].index=0;
}
r = r.merge_indices(il,shape[g].symmetry);
}
//permute the group (now 0) to its original position
if(g!=0)
{
NRPerm<int> p(r.shape.size());
for(int i=0; i<g; ++i) p[i+1] = (i+1)+1;
p[g+1]=1;
for(int i=g+1; i<r.shape.size();++i) p[i+1] = i+1;
//std::cout<<"perm "<<p;
r=r.permute_index_groups(p);
}
//preserve names
r.names=names;
return r;
}
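A sketch of the single-group variant (assuming tensor.h), e.g. contracting one antisymmetric group with a metric-like matrix while keeping its position and symmetry; the NRMat convenience overload declared in tensor.h wraps the matrix into a flat rank-2 tensor:
#include "tensor.h"
using namespace LA;

void demo_group_transform()
{
NRVec<INDEXGROUP> shape(2);
shape[0].number=1; shape[0].symmetry=0; shape[0].range=4; shape[0].offset=0;
shape[1].number=2; shape[1].symmetry= -1; shape[1].range=6; shape[1].offset=0;
Tensor<double> t(shape); t.randomize(1.);

NRMat<double> g(6,6); //metric-like matrix, ncols must equal shape[1].range
//... fill g ...
Tensor<double> r = t.linear_transform(1,g); //group 1 transformed, its position and antisymmetry preserved
}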
template<typename T>
int Tensor<T>::findflatindex(const INDEXNAME nam) const
{
@@ -1855,6 +2034,7 @@ if(!basicperm.is_valid()) laerror("internal error in merge_indices");
//prepare permutation algebra
PermutationAlgebra<int,T> pa;
bool doconjugate=false;
if(sym==0)
{
pa.resize(1);
@@ -1863,6 +2043,7 @@ if(sym==0)
}
else
{
doconjugate = sym>=2||sym<= -2;
PermutationAlgebra<int,int> sa = sym>0 ? symmetrizer<int>(il.size()) : antisymmetrizer<int>(il.size());
//std::cout <<"SA = "<<sa<<std::endl;
pa.resize(sa.size());
@@ -1878,11 +2059,13 @@ else
//std::cout <<"Use PA = "<<pa<<std::endl;
Tensor<T> r(newshape);
r.apply_permutation_algebra(*this,pa,false,(T)1/(T)pa.size(),0);
r.apply_permutation_algebra(*this,pa,false,(T)1/(T)pa.size(),0,doconjugate);
if(is_named()) r.names=names.permuted(basicperm,true);
return r;
}
template<typename T>
void Tensor<T>::canonicalize_shape()
{
@@ -1891,8 +2074,8 @@ const INDEXGROUP *sh = &(* const_cast<const NRVec<INDEXGROUP> *>(&shape))[0];
for(int i=0; i<shape.size(); ++i)
{
if(sh[i].number==1 && sh[i].symmetry!=0) {shape.copyonwrite(); shape[i].symmetry=0;}
if(sh[i].symmetry>1 ) {shape.copyonwrite(); shape[i].symmetry=1;}
if(sh[i].symmetry<-1) {shape.copyonwrite(); shape[i].symmetry= -1;}
int maxlegal = LA_traits<T>::is_complex() ? 2 : 1;
if(sh[i].symmetry> maxlegal || sh[i].symmetry< -maxlegal) laerror("illegal index group symmetry specified");
}
}
@@ -1999,7 +2182,7 @@ foundname:
finished:
names0= NRVec<INDEXNAME> (names);
std::cout <<"names parsed "<<names0;
//std::cout <<"names parsed "<<names0;
groups0.resize(groups.size());
int i=0;
@@ -2007,7 +2190,7 @@ for(typename std::list<std::list<int> >::const_iterator ii=groups.begin(); ii!=g
{
groups0[i++] = NRVec_from1<int>(*ii);
}
std::cout<<"groups parsed "<<groups0;
//std::cout<<"groups parsed "<<groups0;
free(txt);
}
@@ -2033,10 +2216,14 @@ for(int i=0; i<rhs1.names.size(); ++i) for(int j=0; j<rhs1.names.size(); ++j)
}
INDEXLIST il1(nc),il2(nc);
bitvector is_c_index1(rhs1.names.size());
bitvector is_c_index2(rhs2.names.size());
int ii=0;
for(int i=0; i<rhs1.names.size(); ++i) for(int j=0; j<rhs1.names.size(); ++j)
for(int i=0; i<rhs1.names.size(); ++i) for(int j=0; j<rhs2.names.size(); ++j)
if(rhs1.names[i]==rhs2.names[j])
{
is_c_index1.set(i);
is_c_index2.set(j);
il1[ii] = indexposition(i,rhs1.shape);
il2[ii] = indexposition(j,rhs2.shape);
++ii;
@@ -2045,8 +2232,11 @@ for(int i=0; i<rhs1.names.size(); ++i) for(int j=0; j<rhs1.names.size(); ++j)
//std::cout<<"contraction list1 = "<<il1<<std::endl;
//std::cout<<"contraction list2 = "<<il2<<std::endl;
Tensor<T> tmp=rhs1.contractions(il1,rhs2,il2,alpha,conjugate1,conjugate2);
if(rank()!=tmp.rank()) laerror("rank mismatch in add_permuted_contractions");
//make a dry-run to get only the list of names, concatenate names of both tensors unless they are the contraction ones
std::list<INDEXNAME> listnames;
for(int j=0; j<rhs2.names.size(); ++j) if(!is_c_index2[j]) listnames.push_back(rhs2.names[j]);
for(int j=0; j<rhs1.names.size(); ++j) if(!is_c_index1[j]) listnames.push_back(rhs1.names[j]);
NRVec<INDEXNAME> tmpnames(listnames);
//generate the antisymmetrizer, adding also indices not involved as a constant subpermutation
//
@@ -2056,14 +2246,14 @@ NRVec<NRVec_from1<int> > antigroups;
parse_antisymmetrizer(antisymmetrizer,antigroups,antinames);
//check the names make sense and fill in the possibly missing ones as separate group
if(antinames.size()>tmp.names.size()) laerror("too many indices in the antisymmetrizet");
bitvector isexplicit(tmp.names.size());
if(antinames.size()>tmpnames.size()) laerror("too many indices in the antisymmetrizer");
bitvector isexplicit(tmpnames.size());
isexplicit.clear();
for(int i=0; i<antinames.size(); ++i)
{
for(int j=0; j<i; ++j) if(antinames[i]==antinames[j]) laerror("repeated names in the antisymmetrizer");
for(int j=0; j<tmp.names.size(); ++j)
if(antinames[i]==tmp.names[j])
for(int j=0; j<tmpnames.size(); ++j)
if(antinames[i]==tmpnames[j])
{
isexplicit.set(j);
goto namefound;
@@ -2074,15 +2264,15 @@ for(int i=0; i<antinames.size(); ++i)
if(isexplicit.population()!=antinames.size()) laerror("internal error in add_permuted_contractions");
//fill in additional names
if(antinames.size()<tmp.names.size())
if(antinames.size()<tmpnames.size())
{
int lastgroup=antigroups.size()-1;
int lastclass;
if(antigroups.size()==0) lastclass=0;
else lastclass=antigroups[antigroups.size()-1][antigroups[antigroups.size()-1].size()];
int lastname=antinames.size()-1;
antinames.resize(tmp.names.size(),true);
antigroups.resize(antigroups.size()+tmp.names.size()-antinames.size(),true);
antigroups.resize(antigroups.size()+tmpnames.size()-antinames.size(),true);
antinames.resize(tmpnames.size(),true);
for(int j=0; j<names.size(); ++j)
if(!isexplicit[j])
{
@@ -2090,40 +2280,164 @@ if(antinames.size()<tmp.names.size())
++lastname;
++lastgroup;
antigroups[lastgroup].resize(1);
antigroups[lastgroup][0]=lastclass;
antinames[lastname] = tmp.names[j];
antigroups[lastgroup][1]=lastclass;
antinames[lastname] = tmpnames[j];
}
}
std::cout <<"final antisymmmetrizer names and groups"<<antinames<<antigroups;
//std::cout <<"final antisymmmetrizer names and groups"<<antinames<<antigroups;
//std::cout <<"LHS names = "<<names<<std::endl;
//std::cout <<"TMP names = "<<tmpnames<<std::endl;
//prepare the antisymmetrizer
PermutationAlgebra<int,int> pa = general_antisymmetrizer(antigroups,-2,true);
//std::cout <<"initial antisymmetrizer = "<<pa;
//find permutation between antisym and TMP index order
NRPerm<int> antiperm=find_indexperm(antinames,tmp.names);
NRPerm<int> antiperm=find_indexperm(antinames,tmpnames);
//std::cout<<"permutation from rhs to antisymmetrizer = "<<antiperm<<std::endl;
//@@@conjugate the PA by antiperm or its inverse
//conjugate the PA by antiperm or its inverse
pa= pa.conjugated_by(antiperm,true);
//@@@recast the PA
//find permutation which will bring indices of tmp to the order as in *this: this->names[i] == tmpnames[p[i]]
NRPerm<int> basicperm=find_indexperm(names,tmpnames);
PermutationAlgebra<int,T> pb = basicperm.inverse()*pa;
//std::cout <<"permutation from rhs to lhs = "<<basicperm<<std::endl;
//@@@permutationalgebra::is_identity() and if so, do not apply it but return the tmp tensor, or do an axpy with it
//save some work if the PA is trivial - contract only
if(pb.is_identity())
{
addcontractions(rhs1,il1,rhs2,il2,alpha,beta,false,conjugate1,conjugate2);
return;
}
//std::cout <<"LHS names = "<<names<<std::endl;
//std::cout <<"TMP names = "<<tmp.names<<std::endl;
//create an intermediate contracted tensor and the permute it
clock_t t0=clock();
Tensor<T> tmp=rhs1.contractions(il1,rhs2,il2,alpha,conjugate1,conjugate2);
#ifdef TIMING
std::cout<<"contractions CPU time = "<<(1.*clock()-t0)/CLOCKS_PER_SEC<<std::endl;
#endif
//find permutation which will bring indices of tmp to the order as in *this: this->names[i] == tmp.names[p[i]]
NRPerm<int> basicperm=find_indexperm(names,tmp.names);
std::cout <<"Basic permutation = "<<basicperm<<std::endl;
//@@@probably only one of those will work
PermutationAlgebra<int,T> pb = basicperm*pa;
apply_permutation_algebra(tmp,pb,false,(T)1/(T)pb.size(),beta);
//equivalently possible (for trivial pa)
//PermutationAlgebra<int,T> pb = basicperm.inverse()*pa;
//apply_permutation_algebra(tmp,pb,true,(T)1/(T)pb.size(),beta);
if(rank()!=tmp.rank()) laerror("rank mismatch in add_permuted_contractions");
if(tmp.names!=tmpnames) laerror("internal error in add_permuted_contractions");
clock_t t1=clock();
apply_permutation_algebra(tmp,pb,true,(T)1,beta);
#ifdef TIMING
std::cout<<"apply_permutation_algebra CPU time = "<<(1.*clock()-t1)/CLOCKS_PER_SEC<<std::endl;
#endif
}
template<typename T>
void Tensor<T>::permute_inside_group(int g, const NRPerm<int> &p, bool inverse)
{
if(g<0||g>=shape.size()) laerror("group out of range");
if(shape[g].symmetry==0) laerror("permutation possible only inside symmetric index groups");
if(shape[g].number!=p.size()) laerror("permutation size mismatch to index number");
if(!p.is_valid()) laerror("invalid permutation in permute_inside_group");
int par=p.parity();
if(par<0)
{
switch(shape[g].symmetry)
{
case 1:
break;
case -1:
data.negateme();
break;
case 2:
data.conjugateme();
break;
case -2:
data.negateconjugateme();
break;
}
}
if(is_named())
{
int ii=0;
for(int i=0; i<g; ++i) ii+= shape[i].number;
NRVec<INDEXNAME> tmp=(names.subvector(ii,ii+p.size()-1)).permuted(p,inverse);
for(int i=0; i<p.size(); ++i) names[ii+i]=tmp[i];
}
}
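A sketch of the effect (assuming tensor.h and the 1-based NRPerm class used elsewhere in the library): an odd permutation inside an antisymmetric group just negates the stored data in place:
#include "tensor.h"
using namespace LA;

void demo_permute_inside_group()
{
NRVec<INDEXGROUP> shape(1);
shape[0].number=3; shape[0].symmetry= -1; shape[0].range=5; shape[0].offset=0;
Tensor<double> t(shape); t.randomize(1.);
double before = t(0,1,2);
NRPerm<int> p(3); p[1]=2; p[2]=1; p[3]=3; //transposition of the first two indices (odd)
t.permute_inside_group(0,p);
double after = t(0,1,2); //expected: after == -before
}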
static void checkindex(const NRVec<INDEXGROUP> &shape, const SUPERINDEX &I, bool &zeroreal, bool &zeroimag)
{
#ifdef DEBUG
if(shape.size()!=I.size()) laerror("inconsistent shape and index in checkindex");
#endif
zeroreal=zeroimag=false;
for(int g=0; g<I.size(); ++g)
{
#ifdef DEBUG
if(I[g].size()!=shape[g].number) laerror("inconsistent2 shape and index in checkindex");
#endif
if(shape[g].symmetry == 2 || shape[g].symmetry == -2)
{
bool sameindex=false;
for(int i=1; i<shape[g].number; ++i) for(int j=0; j<i; ++j)
if(I[g][i]==I[g][j])
{
sameindex=true;
goto checkfailed; //for this group, but do the remaining ones for further restrictions
}
checkfailed:
if(sameindex)
{
if(shape[g].symmetry == 2) zeroimag=true;
if(shape[g].symmetry == -2) zeroreal=true;
}
}
}
}
template<typename T>
static void hermiticity_callback(const SUPERINDEX &I, T *v)
{
bool zeroimag;
bool zeroreal;
checkindex(help_t<T>->shape,I,zeroreal,zeroimag);
if(zeroreal) LA_traits<T>::setrealpart(*v,0);
if(zeroimag) LA_traits<T>::setimagpart(*v,0);
}
template<typename T>
static void hermiticity_callback2(const SUPERINDEX &I, const T *v)
{
bool zeroimag;
bool zeroreal;
checkindex(help_tt<T>->shape,I,zeroreal,zeroimag);
if(zeroreal && LA_traits<T>::realpart(*v)!=(T)0 || zeroimag && LA_traits<T>::imagpart(*v)!=(T)0) throw true;
}
template<typename T>
void Tensor<T>::enforce_hermiticity()
{
if(!has_hermiticity()) return;
help_t<T> = this;
loopover(hermiticity_callback);
}
template<typename T>
bool Tensor<T>::fulfills_hermiticity() const
{
if(!has_hermiticity()) return true;
help_tt<T> = this;
try {
constloopover(hermiticity_callback2);
}
catch(bool failed) {return false;}
return true;
}
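A sketch for the complex case (assuming tensor.h and a std::complex<double> instantiation of Tensor): a hermitian index pair stores only one triangle, and since randomize() now calls enforce_hermiticity(), repeated-index elements get a zero imaginary part and the check passes:
#include <complex>
#include "tensor.h"
using namespace LA;

void demo_hermiticity()
{
NRVec<INDEXGROUP> shape(1);
shape[0].number=2; shape[0].symmetry=2; shape[0].range=3; shape[0].offset=0; //hermitian pair
Tensor<std::complex<double> > h(shape);
h.randomize(1.); //randomize() calls enforce_hermiticity()
bool ok = h.fulfills_hermiticity(); //expected: true
}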
template class Tensor<double>;

tensor.h (142 changed lines)

@@ -19,8 +19,9 @@
//a simple tensor class with arbitrary symmetry of index subgroups
//stored in an efficient way
//indices can optionally have names and by handled by name
//each index group has a specific symmetry (nosym,sym,antisym)
//indices can optionally have names and be handled by name
//each index group has a specific symmetry (antihermitean= -2, antisym= -1, nosymmetry= 0, symmetric= 1, hermitean= 2)
//NOTE: diagonal elements of antihermitean and hermitean matrices are stored including the zero imag/real part; the zeroness is NOT checked, and similarly for higher rank tensors
//additional symmetry between index groups (like in 2-electron integrals) is not supported directly, you would need to nest the class to Tensor<Tensor<T> >
//leftmost index is least significant (changing fastest) in the storage order
//presently only a rudimentary implementation
@@ -48,6 +49,54 @@
namespace LA {
template<typename T>
static inline T signeddata(const int sgn, const T &data)
{
if(LA_traits<T>::is_complex()) //condition known at compile time
{
switch(sgn)
{
case 2:
return LA_traits<T>::conjugate(data);
break;
case 1:
return data;
break;
case -1:
return -data;
break;
case -2:
return -LA_traits<T>::conjugate(data);
break;
case 0:
return 0;
break;
}
return 0;
}
else // for real
{
if(sgn>0) return data;
if(sgn<0) return -data;
return 0;
}
}
static bool didwarn=false;
static inline bool ptr_ok(void *ptr)
{
#ifdef DEBUG
if(ptr==NULL && !didwarn)
{
std::cout<<"Warning: write to zero element of antisym tensor ignored\n";
std::cerr<<"Warning: write to zero element of antisym tensor ignored\n";
didwarn=true;
}
#endif
return (bool)ptr;
}
template<typename T>
class Signedpointer
@@ -57,19 +106,11 @@ int sgn;
public:
Signedpointer(T *p, int s) : ptr(p),sgn(s) {};
//dereferencing *ptr should be ignored for sgn==0
const T operator=(const T rhs)
{
if(sgn>0) *ptr = rhs;
if(sgn<0) *ptr = -rhs;
#ifdef DEBUG
if(sgn==0) laerror("dereferencing lhs Signedpointer to nonexistent tensor element");
#endif
return rhs;
}
T& operator*=(const T rhs) {*ptr *= rhs; return *ptr;}
T& operator/=(const T rhs) {*ptr /= rhs; return *ptr;}
T& operator+=(const T rhs) {if(sgn>0) *ptr += rhs; else *ptr -= rhs; return *ptr;}
T& operator-=(const T rhs) {if(sgn>0) *ptr -= rhs; else *ptr += rhs; return *ptr;}
const T operator=(const T rhs) {if(ptr_ok(ptr)) *ptr = signeddata(sgn,rhs); return rhs;}
void operator*=(const T rhs) {if(ptr_ok(ptr)) *ptr *= rhs;}
void operator/=(const T rhs) {if(ptr_ok(ptr)) *ptr /= rhs;}
void operator+=(T rhs) {if(ptr_ok(ptr)) *ptr += signeddata(sgn,rhs);}
void operator-=(T rhs) {if(ptr_ok(ptr)) *ptr -= signeddata(sgn,rhs);}
};
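A sketch of the resulting lhs() semantics (assuming tensor.h): writes through a Signedpointer honour the group symmetry, and a write to a structurally zero element of an antisymmetric group is now silently ignored (with a one-time DEBUG warning) instead of raising an error:
#include "tensor.h"
using namespace LA;

void demo_signedpointer()
{
NRVec<INDEXGROUP> shape(1);
shape[0].number=2; shape[0].symmetry= -1; shape[0].range=3; shape[0].offset=0;
Tensor<double> t(shape);
t.clear();
t.lhs(0,1) = 2.5; //stored element
double a = t(1,0); //reads -2.5 by antisymmetry
t.lhs(1,1) = 7.; //diagonal of an antisymmetric pair is a structural zero, the write is ignored
}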
@@ -104,7 +145,7 @@ class LA_traits<INDEXNAME> {
typedef class INDEXGROUP {
public:
int number; //number of indices
int symmetry; //-1 0 or 1, later 2 for hermitian and -2 for antihermitian? - would need change in operator() and Signedpointer
int symmetry; //-1 0 or 1, later 2 for hermitian and -2 for antihermitian
#ifdef LA_TENSOR_ZERO_OFFSET
static const LA_index offset = 0; //compiler can optimize away some computations
#else
@@ -156,6 +197,8 @@ struct INDEX
int group;
int index;
bool operator==(const INDEX &rhs) const {return group==rhs.group && index==rhs.index;};
INDEX() {};
INDEX(int g, int i): group(g), index(i) {};
};
typedef NRVec<INDEX> INDEXLIST; //collection of several indices
@@ -197,13 +240,16 @@ public:
int myrank;
NRVec<LA_largeindex> groupsizes; //group sizes of symmetry index groups (a function of shape but precomputed for efficiency)
NRVec<LA_largeindex> cumsizes; //cumulative sizes of symmetry index groups (a function of shape but precomputed for efficiency); always cumsizes[0]=1, index group 0 is the innermost-loop one
public:
NRVec<INDEXNAME> names;
//indexing
LA_largeindex index(int *sign, const SUPERINDEX &I) const; //map the tensor indices to the position in data
LA_largeindex index(int *sign, const FLATINDEX &I) const; //map the tensor indices to the position in data
LA_largeindex vindex(int *sign, LA_index i1, va_list args) const; //map list of indices to the position in data
SUPERINDEX inverse_index(LA_largeindex s) const; //inefficient, but possible if needed
int myflatposition(int group, int index) const {return flatposition(group,index,shape);};
int myflatposition(const INDEX &i) const {return flatposition(i,shape);};
INDEX myindexposition(int flatindex) const {return indexposition(flatindex,shape);};
//constructors
Tensor() : myrank(-1) {};
@@ -222,13 +268,19 @@ public:
explicit Tensor(const NRVec<T> &x);
explicit Tensor(const NRMat<T> &x, bool flat=false);
explicit Tensor(const NRSMat<T> &x);
NRMat<T> matrix() const {return NRMat<T>(data,data.size()/groupsizes[0],groupsizes[0],0);}; //reinterpret as matrix with column index being the tensor's leftmost index group (typically the unwound single index)
NRMat<T> matrix(int n=1) const //reinterpret as matrix with column index being the tensor's n leftmost index groups (typically the unwound single index)
{
if(n<0||n>shape.size()) laerror("wrong number n in tensor:: matrix()");
LA_largeindex ncol = (n==shape.size()) ? data.size() : cumsizes[n];
return NRMat<T>(data,data.size()/ncol,ncol,0);
};
bool is_named() const {if(names.size()==0) return false; if(names.size()!=myrank) laerror("bad number of index names"); return true;};
bool is_uniquely_named() const; //no repeated names
bool is_flat() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1) return false; return true;};
bool is_compressed() const {for(int i=0; i<shape.size(); ++i) if(shape[i].number>1&&shape[i].symmetry!=0) return true; return false;};
bool has_symmetry() const {for(int i=0; i<shape.size(); ++i) if(shape[i].symmetry!=0) return true; return false;};
bool has_hermiticity() const {if(!LA_traits<T>::is_complex()) return false; for(int i=0; i<shape.size(); ++i) if(shape[i].symmetry < -1 || shape[i].symmetry > 1) return true; return false;};
void clear() {data.clear();};
void defaultnames(const char *basename="i") {names.resize(rank()); for(int i=0; i<rank(); ++i) sprintf(names[i].name,"%s%03d",basename,i);}
int rank() const {return myrank;};
@@ -239,12 +291,19 @@ public:
void copyonwrite() {shape.copyonwrite(); groupsizes.copyonwrite(); cumsizes.copyonwrite(); data.copyonwrite(); names.copyonwrite();};
void resize(const NRVec<INDEXGROUP> &s) {shape=s; data.resize(calcsize()); calcrank(); names.clear();};
void deallocate() {data.resize(0); shape.resize(0); groupsizes.resize(0); cumsizes.resize(0); names.resize(0);};
inline Signedpointer<T> lhs(const SUPERINDEX &I) {int sign; LA_largeindex i=index(&sign,I); return Signedpointer<T>(&data[i],sign);};
inline T operator()(const SUPERINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; return sign>0 ?data[i] : -data[i];};
inline Signedpointer<T> lhs(const FLATINDEX &I) {int sign; LA_largeindex i=index(&sign,I); return Signedpointer<T>(&data[i],sign);};
inline T operator()(const FLATINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; return sign>0 ?data[i] : -data[i];};
inline Signedpointer<T> lhs(LA_index i1...) {va_list args; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); return Signedpointer<T>(&data[i],sign); };
inline T operator()(LA_index i1...) const {va_list args; ; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); if(sign==0) return 0; return sign>0 ?data[i] : -data[i];};
inline Signedpointer<T> lhs(const SUPERINDEX &I) {int sign; LA_largeindex i=index(&sign,I);
if(sign==0) return Signedpointer<T>(NULL,sign);
else return Signedpointer<T>(&data[i],sign);};
inline T operator()(const SUPERINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; else return signeddata(sign,data[i]);};
inline Signedpointer<T> lhs(const FLATINDEX &I) {int sign; LA_largeindex i=index(&sign,I);
if(sign==0) return Signedpointer<T>(NULL,sign);
else return Signedpointer<T>(&data[i],sign);};
inline T operator()(const FLATINDEX &I) const {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; else return signeddata(sign,data[i]);};
inline Signedpointer<T> lhs(LA_index i1...) {va_list args; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args);
if(sign==0) return Signedpointer<T>(NULL,sign);
else return Signedpointer<T>(&data[i],sign); };
inline T operator()(LA_index i1...) const {va_list args; ; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); if(sign==0) return 0; else return signeddata(sign,data[i]);};
inline Tensor& operator=(const Tensor &rhs) {myrank=rhs.myrank; shape=rhs.shape; groupsizes=rhs.groupsizes; cumsizes=rhs.cumsizes; data=rhs.data; names=rhs.names; return *this;};
@@ -267,23 +326,29 @@ public:
NRVec<INDEX> findindexlist(const NRVec<INDEXNAME> &names) const;
void renameindex(const INDEXNAME namfrom, const INDEXNAME nameto) {int i=findflatindex(namfrom); names[i]=nameto;};
inline Tensor& operator+=(const Tensor &rhs)
Tensor& operator+=(const Tensor &rhs)
{
#ifdef DEBUG
if(shape!=rhs.shape) laerror("incompatible tensors for operation");
#endif
if(is_named() && rhs.is_named() && names!=rhs.names) laerror("incompatible names for operation");
data+=rhs.data;
return *this;
}
inline Tensor& operator-=(const Tensor &rhs)
Tensor& operator-=(const Tensor &rhs)
{
#ifdef DEBUG
if(shape!=rhs.shape) laerror("incompatible tensors for operation");
#endif
if(is_named() && rhs.is_named() && names!=rhs.names) laerror("incompatible names for operation");
data-=rhs.data;
return *this;
}
Tensor& axpy(const T alpha, const Tensor &rhs)
{
if(shape!=rhs.shape) laerror("incompatible tensors for operation");
if(is_named() && rhs.is_named() && names!=rhs.names) laerror("incompatible names for operation");
data.axpy(alpha,rhs.data);
return *this;
}
inline Tensor operator+(const Tensor &rhs) const {Tensor r(*this); r+=rhs; return r;};
inline Tensor operator-(const Tensor &rhs) const {Tensor r(*this); r-=rhs; return r;};
@@ -292,7 +357,9 @@ public:
void put(int fd, bool with_names=false) const;
void get(int fd, bool with_names=false);
inline void randomize(const typename LA_traits<T>::normtype &x) {data.randomize(x);};
void enforce_hermiticity(); //zero out real/imag parts for repeated indices appropriately
bool fulfills_hermiticity() const; //check it is so
inline void randomize(const typename LA_traits<T>::normtype &x) {data.randomize(x); enforce_hermiticity();};
void loopover(void (*callback)(const SUPERINDEX &, T *)); //loop over all elements
void constloopover(void (*callback)(const SUPERINDEX &, const T *)) const; //loop over all elements
@@ -333,7 +400,7 @@ public:
Tensor innercontraction(const NRVec<INDEXNAME> &nl1, const NRVec<INDEXNAME> &nl2) const {return innercontraction(findindexlist(nl1),findindexlist(nl2));};
Tensor innercontraction(const INDEXNAME &n1, const INDEXNAME &n2) const {return innercontraction(findindex(n1),findindex(n2));};
void apply_permutation_algebra(const Tensor &rhs, const PermutationAlgebra<int,T> &pa, bool inverse=false, T alpha=1, T beta=0); //general (not optimally efficient) symmetrizers, antisymmetrizers etc. acting on the flattened index list:
void apply_permutation_algebra(const Tensor &rhs, const PermutationAlgebra<int,T> &pa, bool inverse=false, T alpha=1, T beta=0, bool conjugate_by_parity=false); //general (not optimally efficient) symmetrizers, antisymmetrizers etc. acting on the flattened index list:
void apply_permutation_algebra(const NRVec<Tensor> &rhsvec, const PermutationAlgebra<int,T> &pa, bool inverse=false, T alpha=1, T beta=0); //avoids explicit outer product but not vectorized, rather inefficient
// this *=beta; for I over this: this(I) += alpha * sum_P c_P rhs(P(I))
// PermutationAlgebra can represent e.g. general_antisymmetrizer in Kucharski-Bartlett notation, or Grassmann products building RDM from cumulants
@@ -344,14 +411,18 @@ public:
void split_index_group(int group); //formal in-place split of a non-symmetric index group WITHOUT the need for data reorganization or names rearrangement
void split_index_group1(int group); //formal in-place split of the leftmost index in a non-symmetric index group WITHOUT the need for data reorganization or names rearrangement
void merge_adjacent_index_groups(int groupfrom, int groupto); //formal merge of non-symmetric index groups WITHOUT the need for data reorganization or names rearrangement
void permute_inside_group(int group, const NRPerm<int> &p, bool inverse=false); //permute indices inside a symmetric index group only
Tensor merge_index_groups(const NRVec<int> &groups) const;
Tensor flatten(int group= -1) const; //split and uncompress a given group or all of them, leaving flat index order the same
Tensor merge_indices(const INDEXLIST &il, int symmetry=0) const; //opposite to flatten (merging with optional symmetrization/antisymmetrization and compression)
Tensor merge_indices(const NRVec<INDEXNAME> &nl, int symmetry=0) const {return merge_indices(findindexlist(nl),symmetry);};
NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=true); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=true) const; //rebuild the original tensor from Tucker
NRVec<NRMat<T> > Tucker(typename LA_traits<T>::normtype thr=1e-12, bool inverseorder=false); //HOSVD-Tucker decomposition, return core tensor in *this, flattened
Tensor inverseTucker(const NRVec<NRMat<T> > &x, bool inverseorder=false) const; //rebuild the original tensor from Tucker
Tensor linear_transform(const NRVec<NRMat<T> > &x) const; //linear transform by a different matrix per each index group, preserving group symmetries
Tensor linear_transform(const int g, const Tensor<T> &x) const; //linear transform a single group, preserve its position and symmetry, x must be rank-2 flat tensor (this is useful for raising/lowering indices)
Tensor linear_transform(const int g, const NRMat<T> &x) const {Tensor<T> mat(x,true); return linear_transform(g,mat);}; //linear transform a single group, preserve its position and symmetry
};
@@ -615,7 +686,6 @@ f=fourindex_dense<antisymtwoelectronrealdirac,T,I>(range,NRSMat<T>(mat)); //symm
template <typename T>
std::ostream & operator<<(std::ostream &s, const Tensor<T> &x);

vec.cc (114 changed lines)

@@ -869,6 +869,7 @@ return -1;
******************************************************************************/
template<typename T>
NRVec<T>& NRVec<T>::conjugateme() {
if(!LA_traits<T>::is_complex()) return *this;
copyonwrite();
#ifdef CUDALA
if(location != cpu) laerror("general conjugation only on CPU");
@@ -877,6 +878,29 @@ copyonwrite();
return *this;
}
template<typename T>
NRVec<T>& NRVec<T>::negateconjugateme() {
if(!LA_traits<T>::is_complex()) return negateme();
copyonwrite();
#ifdef CUDALA
if(location != cpu) laerror("general conjugation only on CPU");
#endif
for(int i=0; i<nn; ++i) v[i] = -LA_traits<T>::conjugate(v[i]);
return *this;
}
template<typename T>
NRVec<T>& NRVec<T>::negateme() {
copyonwrite();
#ifdef CUDALA
if(location != cpu) laerror("general conjugation only on CPU");
#endif
for(int i=0; i<nn; ++i) v[i] = -v[i];
return *this;
}
/***************************************************************************//**
* conjugate this complex vector
@@ -912,6 +936,96 @@ NRVec<std::complex<float> >& NRVec<std::complex<float> >::conjugateme() {
return *this;
}
template<>
NRVec<std::complex<double> >& NRVec<std::complex<double> >::negateconjugateme() {
copyonwrite();
#ifdef CUDALA
if(location == cpu){
#endif
cblas_dscal((size_t)nn, -1.0, ((double *)v) , 2);
#ifdef CUDALA
}else{
cublasDscal((size_t)nn, -1.0, ((double *)v) , 2);
}
#endif
return *this;
}
template<>
NRVec<std::complex<float> >& NRVec<std::complex<float> >::negateconjugateme() {
copyonwrite();
#ifdef CUDALA
if(location == cpu){
#endif
cblas_sscal((size_t)nn, -1.0, ((float *)v) , 2);
#ifdef CUDALA
}else{
cublasSscal((size_t)nn, -1.0, ((float *)v) , 2);
}
#endif
return *this;
}
template<>
NRVec<std::complex<double> >& NRVec<std::complex<double> >::negateme() {
copyonwrite();
#ifdef CUDALA
if(location == cpu){
#endif
cblas_dscal((size_t)nn*2, -1.0, ((double *)v) , 1);
#ifdef CUDALA
}else{
cublasDscal((size_t)nn*2, -1.0, ((double *)v) , 1);
}
#endif
return *this;
}
template<>
NRVec<std::complex<float> >& NRVec<std::complex<float> >::negateme() {
copyonwrite();
#ifdef CUDALA
if(location == cpu){
#endif
cblas_sscal((size_t)nn*2, -1.0, ((float *)v) , 1);
#ifdef CUDALA
}else{
cublasSscal((size_t)nn*2, -1.0, ((float *)v) , 1);
}
#endif
return *this;
}
template<>
NRVec<double>& NRVec<double>::negateme() {
copyonwrite();
#ifdef CUDALA
if(location == cpu){
#endif
cblas_dscal((size_t)nn, -1.0, v , 1);
#ifdef CUDALA
}else{
cublasDscal((size_t)nn, -1.0, v , 1);
}
#endif
return *this;
}
template<>
NRVec<float>& NRVec<float>::negateme() {
copyonwrite();
#ifdef CUDALA
if(location == cpu){
#endif
cblas_sscal((size_t)nn, -1.0, v , 1);
#ifdef CUDALA
}else{
cublasSscal((size_t)nn, -1.0, v , 1);
}
#endif
return *this;
}
/***************************************************************************//**
* sum up the elements of current vector of general type <code>T</code>

vec.h (80 changed lines)

@@ -303,15 +303,18 @@ public:
NRVec& conjugateme();
inline NRVec conjugate() const {NRVec r(*this); r.conjugateme(); return r;};
NRVec& negateconjugateme();
NRVec& negateme();
//! determine the actual value of the reference counter
inline int getcount() const {return count?*count:0;}
//! compute the Euclidean inner product (with conjugation in complex case)
inline const T operator*(const NRVec &rhs) const;
inline const T dot(const NRVec &rhs) const {return *this * rhs;};
//! compute the Euclidean inner product (with conjugation in complex case) with a stride-vector
inline const T dot(const T *a, const int stride = 1) const;
inline const T dot(const T *a, const int stride = 1, bool conjugate=true) const;
inline const T dot(const NRVec &rhs, bool conjugate=true) const {return dot(&(rhs[0]),1,conjugate);};
void gemv(const T beta, const NRMat<T> &a, const char trans, const T alpha, const NRVec &x);
void gemv(const T beta, const NRSMat<T> &a, const char trans /**< just for compatibility reasons */, const T alpha, const NRVec &x);
@@ -452,12 +455,12 @@ public:
//! routine for formatted output
void fprintf(FILE *f, const char *format, const int modulo) const;
//! routine for unformatted output
void put(int fd, bool dimensions=1, bool transp=0) const;
void put(int fd, bool dimensions=1, bool transp=0, bool orca_format=false) const;
//! routine for formatted input
void fscanf(FILE *f, const char *format);
//! routine for unformatted input
void get(int fd, bool dimensions=1, bool transp=0);
void get(int fd, bool dimensions=1, bool transp=0, bool orca_format=false);
//! constructor creating vector from sparse matrix
explicit NRVec(const SparseMat<T> &rhs);
@@ -620,7 +623,9 @@ template <typename T>
inline T& NRVec_from1<T>::operator[](const int i) {
#ifdef DEBUG
if(_LA_count_check && *NRVec<T>::count != 1) laerror("possible use of NRVec[] with count>1 as l-value");
if(i < 1 || i > NRVec<T>::nn) laerror("out of range");
if(i<1||i > NRVec<T>::nn) std::cout<<"INDEX PROBLEM "<<1<<" "<<i<<" "<< NRVec<T>::nn<<std::endl;
if(i < 1) laerror("out of range - low");
if(i > NRVec<T>::nn) laerror("out of range - high");
if(!NRVec<T>::v) laerror("unallocated NRVec");
#endif
NOT_GPU(*this);
@@ -637,7 +642,9 @@ inline T& NRVec_from1<T>::operator[](const int i) {
template <typename T>
inline const T& NRVec_from1<T>::operator[](const int i) const {
#ifdef DEBUG
if(i < 1 || i > NRVec<T>::nn) laerror("out of range");
if(i<1||i > NRVec<T>::nn) std::cout<<"INDEX PROBLEM "<<1<<" "<<i<<" "<< NRVec<T>::nn<<std::endl;
if(i < 1) laerror("out of range - low");
if(i > NRVec<T>::nn) laerror("out of range - high");
if(!NRVec<T>::v) laerror("unallocated NRVec");
#endif
NOT_GPU(*this);
@@ -1066,10 +1073,25 @@ inline const T NRVec<T>::operator*(const NRVec<T> &rhs) const {
NOT_GPU(rhs);
T dot(0);
for(register int i=0; i<nn; ++i) dot += v[i]*rhs.v[i];
if(LA_traits<T>::is_complex()) for(register int i=0; i<nn; ++i) dot += LA_traits<T>::conjugate(v[i])*rhs.v[i];
else for(register int i=0; i<nn; ++i) dot += v[i]*rhs.v[i];
return dot;
}
/***************************************************************************
* compute dot product with optional conjugation
*/
template<typename T>
inline const T NRVec<T>::dot(const T *a, const int stride , bool conjugate) const
{
NOT_GPU(*this);
T dot(0);
if(LA_traits<T>::is_complex() && conjugate) for(register int i=0; i<nn; ++i) dot += LA_traits<T>::conjugate(v[i])*a[i*stride];
else for(register int i=0; i<nn; ++i) dot += v[i]*a[i*stride];
return dot;
}
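A sketch of the new flag (assuming vec.h): conjugate=true gives the hermitian inner product, conjugate=false the plain bilinear sum:
#include <complex>
#include "vec.h"
using namespace LA;

void demo_dot()
{
NRVec<std::complex<double> > x(2), y(2);
x[0]=std::complex<double>(0.,1.); x[1]=std::complex<double>(1.,0.);
y[0]=std::complex<double>(0.,1.); y[1]=std::complex<double>(2.,0.);
std::complex<double> h = x.dot(y,true); //conjugated: conj(i)*i + 1*2 = 3
std::complex<double> b = x.dot(y,false); //plain: i*i + 1*2 = 1
}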
/***************************************************************************//**
* indexing operator giving the element at given position with range checking in
* the DEBUG mode
@@ -1080,7 +1102,9 @@ template <typename T>
inline T& NRVec<T>::operator[](const int i) {
#ifdef DEBUG
if(_LA_count_check && *count != 1) laerror("possible use of NRVec[] with count>1 as l-value");
if(i < 0 || i >= nn) laerror("out of range");
if(i<0||i >= nn) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<std::endl;
if(i < 0) laerror("out of range - low");
if(i >= nn) laerror("out of range - high");
if(!v) laerror("unallocated NRVec");
#endif
NOT_GPU(*this);
@@ -1097,7 +1121,9 @@ inline T& NRVec<T>::operator[](const int i) {
template <typename T>
inline const T& NRVec<T>::operator[](const int i) const {
#ifdef DEBUG
if(i < 0 || i >= nn) laerror("out of range");
if(i<0||i >= nn) std::cout<<"INDEX PROBLEM "<<0<<" "<<i<<" "<<nn-1<<std::endl;
if(i < 0) laerror("out of range - low");
if(i >= nn) laerror("out of range - high");
if(!v) laerror("unallocated NRVec");
#endif
NOT_GPU(*this);
@@ -1790,9 +1816,9 @@ inline const std::complex<double> NRVec<std::complex<double> >::operator*(const
* @return \f$\sum_{i=1}^N\vec{x}_{i}\cdot y_{\mathrm{stride}\cdot(i-1) + 1}\f$
******************************************************************************/
template<>
inline const double NRVec<double>::dot(const double *y, const int stride) const {
inline const double NRVec<double>::dot(const double *y, const int stride, bool conjugate) const {
NOT_GPU(*this);
return cblas_ddot(nn, y, stride, v, 1);
return cblas_ddot(nn,v, 1,y,stride);
}
/***************************************************************************//**
@@ -1802,10 +1828,11 @@ inline const double NRVec<double>::dot(const double *y, const int stride) const
* @return \f$\sum_{i=1}^N\vec{x}_{i}\cdot \overbar{y_{\mathrm{stride}\cdot(i-1) + 1}}\f$
******************************************************************************/
template<>
inline const std::complex<double> NRVec<std::complex<double> >::dot(const std::complex<double> *y, const int stride) const {
inline const std::complex<double> NRVec<std::complex<double> >::dot(const std::complex<double> *y, const int stride, bool conjugate) const {
std::complex<double> dot;
NOT_GPU(*this);
cblas_zdotc_sub(nn, y, stride, v, 1, &dot);
if(conjugate) cblas_zdotc_sub(nn, v,1,y, stride, &dot);
else cblas_zdotu_sub(nn, v,1,y, stride, &dot);
return dot;
}
@@ -2004,12 +2031,12 @@ inline const std::complex<double> NRVec<std::complex<double> >::amin() const {
* @see NRMat<T>::put()
******************************************************************************/
template <typename T>
void NRVec<T>::put(int fd, bool dim, bool transp) const {
void NRVec<T>::put(int fd, bool dim, bool transp, bool orcaformat) const {
#ifdef CUDALA
if(location != cpu){
NRVec<T> tmp = *this;
tmp.moveto(cpu);
tmp.put(fd,dim,transp);
tmp.put(fd,dim,transp,orcaformat);
return;
}
#endif
@@ -2017,7 +2044,15 @@ void NRVec<T>::put(int fd, bool dim, bool transp) const {
int pad(1); //align at least 8-byte
if(dim){
if(sizeof(int) != write(fd,&nn,sizeof(int))) laerror("write failed");
if(sizeof(int) != write(fd,&pad,sizeof(int))) laerror("write failed");
if(!orcaformat)
{
if(sizeof(int) != write(fd,&pad,sizeof(int))) laerror("write failed");
}
if(orcaformat)
{
int tmp=sizeof(T);
if(sizeof(int) != write(fd,&tmp,sizeof(int))) laerror("write failed");
}
}
LA_traits<T>::multiput(nn,fd,v,dim);
}
@@ -2030,12 +2065,12 @@ void NRVec<T>::put(int fd, bool dim, bool transp) const {
* @see NRMat<T>::get(), copyonwrite()
******************************************************************************/
template <typename T>
void NRVec<T>::get(int fd, bool dim, bool transp) {
void NRVec<T>::get(int fd, bool dim, bool transp, bool orcaformat) {
#ifdef CUDALA
if(location != cpu){
NRVec<T> tmp;
tmp.moveto(cpu);
tmp.get(fd,dim,transp);
tmp.get(fd,dim,transp,orcaformat);
tmp.moveto(location);
*this = tmp;
return;
@@ -2044,7 +2079,14 @@ void NRVec<T>::get(int fd, bool dim, bool transp) {
int nn0[2]; //align at least 8-byte
errno = 0;
if(dim){
if(2*sizeof(int) != read(fd,&nn0,2*sizeof(int))) laerror("read failed");
int ndims=orcaformat?1:2;
if(ndims*sizeof(int) != read(fd,&nn0,ndims*sizeof(int))) laerror("read failed");
if(orcaformat)
{
int tmp;
if(sizeof(int) != read(fd,&tmp,sizeof(int))) laerror("read failed");
if(tmp!=sizeof(T)) laerror("mismatch in orca format");
}
resize(nn0[0]);
}else{
copyonwrite();