working on tensor : stream I/O
parent 74a96d4eb6
commit 0ff55b66bb
t.cc (6 changed lines)
@@ -3206,9 +3206,10 @@ INDEXGROUP g;
 g.number=3;
 g.symmetry= -1;
 g.offset=1;
-g.range=10;
+g.range=5;
 
 Tensor<double> epsilon(g);
+epsilon.clear();
 cout <<epsilon.size()<<endl;
 
 for(LA_largeindex s=0; s<epsilon.size(); ++s)
@@ -3237,6 +3238,9 @@ epsilon.lhs(1,2,3) -= 1.;
 
 cout <<epsilon(1,2,3)<<" "<<epsilon(3,2,1)<<endl;
 
+for(int i=0; i<epsilon.data.size(); ++i) epsilon.data[i]=10*i;
+cout <<epsilon.data;
+cout <<epsilon;
 }
 
 }
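The test above only exercises the new output operator. Below is a minimal round-trip sketch of writing a tensor and reading it back through the matching operator>> introduced in tensor.cc; the file name "epsilon.dat" is hypothetical and the enclosing namespace is assumed to be LA, as elsewhere in the library.

#include <iostream>
#include <fstream>
#include "tensor.h"
using namespace LA;   // assumption: the library namespace

int main()
{
INDEXGROUP g;
g.number=3; g.symmetry= -1; g.offset=1; g.range=5;
Tensor<double> epsilon(g);
epsilon.clear();
epsilon.lhs(1,2,3) = 1.;                // same element the existing test manipulates
{
std::ofstream f("epsilon.dat");         // hypothetical file name
f << epsilon;                           // shape header, then one "indices value" line per stored element
}
Tensor<double> copy;
std::ifstream f("epsilon.dat");
f >> copy;                              // resizes from the shape header, fills elements via lhs()
std::cout << copy(1,2,3) <<" "<< copy(3,2,1) << std::endl;  // should match the original tensor
return 0;
}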
tensor.cc (179 changed lines)
@@ -35,6 +35,7 @@ for(int i=0; i<shape.size(); ++i)
 if(shape[i].number==0) laerror("empty index group");
 r+=shape[i].number;
 }
+myrank=r;
 return r;
 }
 
@@ -310,10 +311,188 @@ data.get(fd,true);
 }
 
 
+
+template<typename T>
+Tensor<T>::Tensor(const NRVec<T> &x)
+: data(x)
+{
+myrank=1;
+shape.resize(1);
+shape[0].number=1;
+shape[0].symmetry=0;
+#ifndef LA_TENSOR_ZERO_OFFSET
+shape[0].offset=0;
+#endif
+shape[0].range=x.size();
+calcsize();
+}
+
+template<typename T>
+Tensor<T>::Tensor(const NRMat<T> &x)
+: data(&x(0,0),x.nrows()*x.ncols())
+{
+myrank=2;
+if(x.nrows()==x.ncols())
+{
+shape.resize(1);
+shape[0].number=2;
+shape[0].symmetry=0;
+#ifndef LA_TENSOR_ZERO_OFFSET
+shape[0].offset=0;
+#endif
+shape[0].range=x.nrows();
+}
+else
+{
+shape.resize(2);
+shape[0].number=1; shape[1].number=1;
+shape[0].symmetry=0; shape[1].symmetry=0;
+#ifndef LA_TENSOR_ZERO_OFFSET
+shape[0].offset=0; shape[1].offset=0;
+#endif
+shape[0].range=x.ncols();
+shape[1].range=x.nrows();
+}
+calcsize();
+}
+
+
+template<typename T>
+Tensor<T>::Tensor(const NRSMat<T> &x)
+: data(NRVec<T>(x))
+{
+myrank=2;
+shape.resize(1);
+shape[0].number=2;
+shape[0].symmetry=1;
+#ifndef LA_TENSOR_ZERO_OFFSET
+shape[0].offset=0;
+#endif
+shape[0].range=x.nrows();
+calcsize();
+}
+
+
+template<typename T>
+void loopingroups(Tensor<T> &t, int ngroup, int igroup, T **p, SUPERINDEX &I, void (*callback)(const SUPERINDEX &, T *))
+{
+LA_index istart,iend;
+switch(t.shape[ngroup].symmetry)
+{
+case 0:
+istart= t.shape[ngroup].offset;
+iend= t.shape[ngroup].offset+t.shape[ngroup].range-1;
+break;
+case 1:
+istart= t.shape[ngroup].offset;
+if(igroup==t.shape[ngroup].number-1) iend= t.shape[ngroup].offset+t.shape[ngroup].range-1;
+else iend = I[ngroup][igroup+1];
+break;
+case -1:
+istart= t.shape[ngroup].offset + igroup;
+if(igroup==t.shape[ngroup].number-1) iend= t.shape[ngroup].offset+t.shape[ngroup].range-1;
+else iend = I[ngroup][igroup+1]-1;
+break;
+}
+
+for(LA_index i = istart; i<=iend; ++i)
+{
+I[ngroup][igroup]=i;
+if(ngroup==0 && igroup==0)
+{
+int sign;
+//std::cout <<"TEST "<<t.index(&sign,I)<<" ";
+(*callback)(I,(*p)++);
+}
+else
+{
+int newigroup= igroup-1;
+int newngroup=ngroup;
+if(newigroup<0)
+{
+--newngroup;
+newigroup=t.shape[newngroup].number-1;
+}
+loopingroups(t,newngroup,newigroup,p,I,callback);
+}
+}
+}
+
+
+template<typename T>
+void Tensor<T>::loopover(void (*callback)(const SUPERINDEX &, T *))
+{
+SUPERINDEX I(shape.size());
+for(int i=0; i<I.size(); ++i) {I[i].resize(shape[i].number); I[i] = shape[i].offset;}
+T *pp=&data[0];
+loopingroups(*this,shape.size()-1,shape[shape.size()-1].number-1,&pp,I,callback);
+}
+
+
+static std::ostream *sout;
+template<typename T>
+static void outputcallback(const SUPERINDEX &I, T *v)
+{
+//print indices flat
+for(int i=0; i<I.size(); ++i)
+for(int j=0; j<I[i].size(); ++j) *sout << I[i][j]<<" ";
+*sout<<" "<< *v<<std::endl;
+}
+
+
+std::ostream & operator<<(std::ostream &s, const INDEXGROUP &x)
+{
+s<<x.number <<" "<<x.symmetry<<" ";
+#ifndef LA_TENSOR_ZERO_OFFSET
+s<<x.offset<<" ";
+#endif
+s<< x.range<<std::endl;
+return s;
+}
+
+std::istream & operator>>(std::istream &s, INDEXGROUP &x)
+{
+s>>x.number>>x.symmetry;
+#ifndef LA_TENSOR_ZERO_OFFSET
+s>>x.offset;
+#endif
+s>>x.range;
+return s;
+}
+
+
+
+template<typename T>
+std::ostream & operator<<(std::ostream &s, const Tensor<T> &x)
+{
+s<<x.shape;
+sout= &s;
+const_cast<Tensor<T> *>(&x)->loopover(&outputcallback<T>);
+return s;
+}
+
+template <typename T>
+std::istream & operator>>(std::istream &s, Tensor<T> &x)
+{
+s>>x.shape;
+x.data.resize(x.calcsize()); x.calcrank();
+FLATINDEX I(x.rank());
+for(LA_largeindex i=0; i<x.data.size(); ++i)
+{
+for(int j=0; j<I.size(); ++j) s>>I[j];
+T val; s>>val;
+x.lhs(I) = val;
+}
+return s;
+}
+
 
 
 template class Tensor<double>;
 template class Tensor<std::complex<double> >;
+
+template std::ostream & operator<<(std::ostream &s, const Tensor<double> &x);
+template std::ostream & operator<<(std::ostream &s, const Tensor<std::complex<double> > &x);
+template std::istream & operator>>(std::istream &s, Tensor<double> &x);
+template std::istream & operator>>(std::istream &s, Tensor<std::complex<double> > &x);
 
 
 }//namespace
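The new loopover() visits every independent (stored) element exactly once, passing the full SUPERINDEX and a pointer to the element; the stream output operator is just loopover() driven by outputcallback(). A hedged sketch of a user-supplied callback follows; the function name, the values and the namespace LA are illustrative assumptions, not part of the commit.

#include <iostream>
#include "tensor.h"
using namespace LA;   // assumption: the library namespace

// called once per stored element, in storage order
static void scale_element(const SUPERINDEX &I, double *v)
{
*v *= 2.;             // I[group][i] holds the actual index values; *v is writable
}

void demo()
{
INDEXGROUP g;
g.number=2; g.symmetry=1; g.offset=0; g.range=4;   // one symmetric pair of indices (offset exists unless LA_TENSOR_ZERO_OFFSET is defined)
Tensor<double> t(g);
t.randomize(1.);
t.loopover(&scale_element);   // doubles every independent element
std::cout << t;               // shape line(s), then "indices value" per element
}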
tensor.h (58 changed lines)
@@ -19,6 +19,8 @@
 
 //a simple tensor class with arbitrary symmetry of index subgroups
 //stored in an efficient way
+//each index group has a specific symmetry (nosym,sym,antisym)
+//additional symmetry between index groups (like in 2-electron integrals) is not supported directly, you would need to nest the class to Tensor<Tensor<T> >
 //presently only a rudimentary implementation
 //presently limited to 2G data size due to NRVec - maybe use a typedef LA_index
 //to uint64_t in the future in vector and matrix classes
@@ -30,6 +32,8 @@
 #include <stdint.h>
 #include <cstdarg>
 #include "vec.h"
+#include "mat.h"
+#include "smat.h"
 #include "miscfunc.h"
 
 
@@ -70,6 +74,12 @@ LA_index range; //indices span this range
 inline bool operator!=(const indexgroup &rhs) const {return !((*this)==rhs);};
 } INDEXGROUP;
 
+
+std::ostream & operator<<(std::ostream &s, const INDEXGROUP &x);
+std::istream & operator>>(std::istream &s, INDEXGROUP &x);
+
+
+
 template<>
 class LA_traits<indexgroup> {
 public:
@@ -91,11 +101,13 @@ typedef NRVec<NRVec<LA_index> > SUPERINDEX;
 
 template<typename T>
 class Tensor {
-int myrank;
+public:
 NRVec<indexgroup> shape;
+NRVec<T> data;
+private:
+int myrank;
 NRVec<LA_largeindex> groupsizes; //group sizes of symmetry index groups (a function of shape but precomputed for efficiency)
 NRVec<LA_largeindex> cumsizes; //cumulative sizes of symmetry index groups (a function of shape but precomputed for efficiency)
-NRVec<T> data;
 
 public:
 LA_largeindex index(int *sign, const SUPERINDEX &I) const; //map the tensor indices to the position in data
@@ -105,10 +117,13 @@ public:
 
 //constructors
 Tensor() : myrank(0) {};
-Tensor(const NRVec<indexgroup> &s) : shape(s), data((int)calcsize()), myrank(calcrank()) {}; //general tensor
-Tensor(const indexgroup &g) {shape.resize(1); shape[0]=g; data.resize(calcsize()); myrank=calcrank();}; //tensor with a single index group
+Tensor(const NRVec<indexgroup> &s) : shape(s), data((int)calcsize()) {calcrank();}; //general tensor
+Tensor(const indexgroup &g) {shape.resize(1); shape[0]=g; data.resize(calcsize()); calcrank();}; //tensor with a single index group
 Tensor(const Tensor &rhs): myrank(rhs.myrank), shape(rhs.shape), groupsizes(rhs.groupsizes), cumsizes(rhs.cumsizes), data(rhs.data) {};
 Tensor(int xrank, const NRVec<indexgroup> &xshape, const NRVec<LA_largeindex> &xgroupsizes, const NRVec<LA_largeindex> xcumsizes, const NRVec<T> &xdata) : myrank(xrank), shape(xshape), groupsizes(xgroupsizes), cumsizes(xcumsizes), data(xdata) {};
+explicit Tensor(const NRVec<T> &x);
+explicit Tensor(const NRMat<T> &x);
+explicit Tensor(const NRSMat<T> &x);
 
 void clear() {data.clear();};
 int rank() const {return myrank;};
@@ -131,8 +146,23 @@ public:
 inline Tensor operator/(const T &a) const {Tensor r(*this); r /=a; return r;};
 
 
-inline Tensor& operator+=(const Tensor &rhs) {if(shape!=rhs.shape) laerror("incompatible tensors for operation"); data+=rhs.data; return *this;}
-inline Tensor& operator-=(const Tensor &rhs) {if(shape!=rhs.shape) laerror("incompatible tensors for operation"); data-=rhs.data; return *this;}
+inline Tensor& operator+=(const Tensor &rhs)
+{
+#ifdef DEBUG
+if(shape!=rhs.shape) laerror("incompatible tensors for operation");
+#endif
+data+=rhs.data;
+return *this;
+}
+
+inline Tensor& operator-=(const Tensor &rhs)
+{
+#ifdef DEBUG
+if(shape!=rhs.shape) laerror("incompatible tensors for operation");
+#endif
+data-=rhs.data;
+return *this;
+}
 inline Tensor operator+(const Tensor &rhs) const {Tensor r(*this); r+=rhs; return r;};
 inline Tensor operator-(const Tensor &rhs) const {Tensor r(*this); r-=rhs; return r;};
 
@@ -143,19 +173,25 @@ public:
 
 inline void randomize(const typename LA_traits<T>::normtype &x) {data.randomize(x);};
 
+void loopover(void (*callback)(const SUPERINDEX &, T *));
+
 //@@@TODO - unwinding to full size in a specified index
 //@@@contraction by a whole index group or by individual single index
 //@@@TODO - contractions - basic and efficient? first contraction in a single index; between a given group+index in group at each tensor
-//@@@symmetrize a group, antisymmetrize a group, expand a (anti)symmetric group - in general any symmetry change except +1 to -1 is possible
 //@@@outer product and product with a contraction
-//@@@@permuteindexgroups
-//@@@@@@explicit constructors from vec mat smat and dense fourindex
-//@@@@@@ double recursive loopover with a callback - or an iterator with a next function???
-//@@@@@@ stream i/o based on this
+//@@@@symmetrize a group, antisymmetrize a group, expand a (anti)symmetric group - in general any symmetry change except +1 to -1 is possible
+//@@@@@@permute index groups
 };
 
 
 
+template <typename T>
+std::ostream & operator<<(std::ostream &s, const Tensor<T> &x);
+
+template <typename T>
+std::istream & operator>>(std::istream &s, Tensor<T> &x);
+
+
 
 
 
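The header now also declares explicit conversion constructors from NRVec, NRMat and NRSMat. A brief hedged sketch of how they might be used; the sizes, values and namespace LA are assumptions for illustration, and NRMat/NRSMat element access via operator() is assumed to behave as elsewhere in the library.

#include <iostream>
#include "tensor.h"
using namespace LA;   // assumption: the library namespace

void conversions()
{
NRMat<double> m(3,2);                       // non-square matrix -> two nonsymmetric index groups
for(int i=0; i<3; ++i) for(int j=0; j<2; ++j) m(i,j) = 10*i+j;
Tensor<double> tm(m);                       // rank-2 tensor over the matrix elements

NRSMat<double> s(4);                        // packed symmetric matrix
for(int i=0; i<4; ++i) for(int j=0; j<=i; ++j) s(i,j) = i+j;
Tensor<double> ts(s);                       // rank 2, one symmetric group of two indices

std::cout << tm << ts;                      // the new stream output
}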