working on tensor

parent 87dd0c5b65
commit 2b20bff532

t.cc (3 lines changed)
@@ -3212,8 +3212,9 @@ II[0][1]=2;
 II[0][2]=1;
 
 epsilon *= 2.;
+epsilon.lhs(1,2,3) -= 1.;
 
-cout <<epsilon(II)<<endl;
+cout <<epsilon(1,2,3)<<" "<<epsilon(3,2,1)<<endl;
 
 }
 }
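The t.cc changes exercise the new variadic element access: lhs(1,2,3) writes through a sign-aware pointer, while operator()(1,2,3) and (3,2,1) read the same antisymmetric element with opposite signs. A minimal sketch of that usage, assuming the Tensor/indexgroup API declared in tensor.h later in this commit; the epsilon construction and offset/range values here are illustrative, not t.cc's actual setup:

// sketch: rank-3 antisymmetric tensor accessed the way the new t.cc lines do;
// the general-offset build (no LA_TENSOR_ZERO_OFFSET) is assumed
#include <iostream>
#include "tensor.h"

int main()
{
indexgroup g;
g.number = 3;     // one group of three indices
g.symmetry = -1;  // fully antisymmetric
g.offset = 1;     // indices run 1..3 (assignable only in the general-offset build)
g.range = 3;

Tensor<double> epsilon(g);       // data is cleared by the constructor
epsilon.lhs(1,2,3) -= 1.;        // write through the signed pointer, as in t.cc
std::cout << epsilon(1,2,3) << " " << epsilon(3,2,1) << std::endl;  // expect values of opposite sign
return 0;
}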
tensor.cc (40 lines changed)
@@ -83,7 +83,7 @@ switch(I.size()) //a few special cases for efficiency
 //compressed storage case
 NRVec<LA_index> II(I);
 II.copyonwrite();
-II -= g.offset;
+if(g.offset!=0) II -= g.offset;
 int parity=netsort(II.size(),&II[0]);
 if(g.symmetry<0 && (parity&1)) *sign= -1;
 if(g.symmetry<0) //antisymmetric
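The guarded offset subtraction and the netsort call above canonicalize an antisymmetric index group: the indices are brought to sorted order and the permutation parity decides the sign. A standalone illustration of that idea with a plain swap-counting sort; sort_with_parity is an illustrative stand-in, not the library's netsort:

#include <iostream>
#include <vector>
#include <utility>

// sort the indices of one group with a swap-counting bubble sort;
// the parity of the swap count plays the role of netsort's return value
int sort_with_parity(std::vector<int> &idx)
{
int swaps = 0;
for(size_t i = 0; i + 1 < idx.size(); ++i)
	for(size_t j = 0; j + 1 < idx.size() - i; ++j)
		if(idx[j] > idx[j+1]) { std::swap(idx[j], idx[j+1]); ++swaps; }
return swaps;
}

int main()
{
std::vector<int> I = {3, 2, 1};   // indices of an antisymmetric group, out of order
int parity = sort_with_parity(I); // I becomes {1,2,3}; parity is 3 (odd)
int sign = (parity & 1) ? -1 : 1; // same test as (parity&1) in subindex
std::cout << "sign = " << sign << std::endl;   // prints -1, matching epsilon(3,2,1) = -epsilon(1,2,3)
return 0;
}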
@@ -137,7 +137,7 @@ for(int g=0; g<shape.size(); ++g) //loop over index groups
 {
 int gsign;
 LA_largeindex groupindex = subindex(&gsign,shape[g],I[g]);
-std::cout <<"INDEX TEST group "<<g<<" cumsizes "<< cumsizes[g]<<" groupindex "<<groupindex<<std::endl;
+//std::cout <<"INDEX TEST group "<<g<<" cumsizes "<< cumsizes[g]<<" groupindex "<<groupindex<<std::endl;
 *sign *= gsign;
 if(groupindex == -1) return -1;
 r += groupindex * cumsizes[g];
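The accumulation r += groupindex * cumsizes[g] combines the per-group indices into a single storage position, each group weighted by the product of the sizes of the groups before it (a mixed-radix scheme). A standalone sketch with made-up group sizes, not the library's actual numbers:

#include <iostream>
#include <vector>

int main()
{
std::vector<long> groupsize  = {6, 10, 3};   // sizes of three index groups (illustrative)
std::vector<long> cumsizes   = {1, 6, 60};   // 1, 6, 6*10: cumulative products of the preceding sizes
std::vector<long> groupindex = {4, 7, 2};    // index within each group

long r = 0;
for(size_t g = 0; g < groupindex.size(); ++g)
	r += groupindex[g] * cumsizes[g];    // same accumulation as in index()
std::cout << "flat position = " << r << std::endl;   // 4 + 42 + 120 = 166
return 0;
}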
@@ -145,16 +145,45 @@ for(int g=0; g<shape.size(); ++g) //loop over index groups
 return r;
 }
 
-//@@@@todo flatindex
+
 template<typename T>
 LA_largeindex Tensor<T>::index(int *sign, const FLATINDEX &I) const
 {
+#ifdef DEBUG
+if(rank()!=I.size()) laerror("tensor rank mismatch in index");
+#endif
+
+LA_largeindex r=0;
+*sign=1;
+int gstart=0;
+for(int g=0; g<shape.size(); ++g) //loop over index groups
+	{
+	int gsign;
+	int gend= gstart+shape[g].number-1;
+	NRVec<LA_index> subI = I.subvector(gstart,gend);
+	gstart=gend+1;
+	LA_largeindex groupindex = subindex(&gsign,shape[g],subI);
+	//std::cout <<"FLATINDEX TEST group "<<g<<" cumsizes "<< cumsizes[g]<<" groupindex "<<groupindex<<std::endl;
+	*sign *= gsign;
+	if(groupindex == -1) return -1;
+	r += groupindex * cumsizes[g];
+	}
+return r;
 }
 
-//@@@@todo vindex
+
 template<typename T>
-LA_largeindex Tensor<T>::vindex(int *sign, int i1, va_list args) const
+LA_largeindex Tensor<T>::vindex(int *sign, LA_index i1, va_list args) const
 {
+NRVec<LA_index> I(rank());
+I[0]=i1;
+for(int i=1; i<rank(); ++i)
+	{
+	I[i] = va_arg(args,LA_index);
+	}
+va_end(args);
+
+return index(sign,I);
 }
 
 
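vindex gathers the variadic arguments into a flat index vector and delegates to index(&sign, I); note that it now calls va_end itself, which is why the "@@@must call va_end" remark disappears from the declaration in tensor.h below. A standalone sketch of the same gather-and-delegate pattern; collect and flatsum are illustrative names, and here the caller keeps the va_end, unlike vindex:

#include <cstdarg>
#include <iostream>
#include <vector>

// gather n indices into a vector: the first one arrives explicitly, the rest via va_arg
std::vector<int> collect(int n, int i1, va_list args)
{
std::vector<int> I(n);
I[0] = i1;
for(int i=1; i<n; ++i) I[i] = va_arg(args, int);
return I;
}

// variadic front end, mimicking operator()(LA_index i1 ...): start the list, delegate, end it
int flatsum(int i1, ...)
{
va_list args;
va_start(args, i1);
std::vector<int> I = collect(3, i1, args);   // rank 3 assumed for the illustration
va_end(args);                                // the commit moves this call into vindex instead
return I[0] + I[1] + I[2];
}

int main()
{
std::cout << flatsum(1,2,3) << std::endl;    // prints 6
return 0;
}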
@@ -172,6 +201,7 @@ template<typename T>
 void Tensor<T>::get(int fd)
 {
 shape.get(fd,true);
+myrank=calcrank(); //is not stored but recomputed
 cumsizes.get(fd,true);
 data.get(fd,true);
 }
tensor.h (26 lines changed)
@@ -59,7 +59,11 @@ typedef class indexgroup {
 public:
 int number; //number of indices
 int symmetry; //-1 0 or 1
-LA_index offset; //indices start at
+#ifdef LA_TENSOR_ZERO_OFFSET
+static const LA_index offset = 0; //compiler can optimize away some computations
+#else
+LA_index offset; //indices start at a general offset
+#endif
 LA_index range; //indices span this range
 } INDEXGROUP;
 
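The #ifdef above lets a build with LA_TENSOR_ZERO_OFFSET replace the per-group offset by a compile-time constant zero, so the offset subtraction guarded in tensor.cc can be folded away. A standalone sketch of the same switch; 'group' and ZERO_OFFSET_BUILD are illustrative stand-ins, not the real indexgroup or macro usage:

#include <iostream>

//#define ZERO_OFFSET_BUILD            // uncomment to mimic -DLA_TENSOR_ZERO_OFFSET

struct group {
	int number;                    // number of indices in the group
	int symmetry;                  // -1, 0 or 1
#ifdef ZERO_OFFSET_BUILD
	static const int offset = 0;   // compile-time constant, no storage, foldable
#else
	int offset;                    // general starting index, stored per group
#endif
	int range;                     // indices span this range
};

int shift(const group &g, int i)
{
if(g.offset != 0) i -= g.offset;       // same guard the commit adds in subindex
return i;
}

int main()
{
group g;
g.number = 3; g.symmetry = -1; g.range = 3;
#ifndef ZERO_OFFSET_BUILD
g.offset = 1;
#endif
std::cout << shift(g, 3) << std::endl; // 2 in the general build, 3 in the zero-offset build
return 0;
}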
@@ -83,6 +87,7 @@ typedef NRVec<NRVec<LA_index> > SUPERINDEX; //all indices in the INDEXGROUP stru
 
 template<typename T>
 class Tensor {
+int myrank;
 NRVec<indexgroup> shape;
 NRVec<LA_largeindex> cumsizes; //cumulative sizes of symmetry index groups (a function of shape but precomputed for efficiency)
 NRVec<T> data;
@@ -90,15 +95,17 @@ class Tensor {
 private:
 LA_largeindex index(int *sign, const SUPERINDEX &I) const; //map the tensor indices to the position in data
 LA_largeindex index(int *sign, const FLATINDEX &I) const; //map the tensor indices to the position in data
-LA_largeindex vindex(int *sign, int i1, va_list args) const; //map list of indices to the position in data @@@must call va_end
+LA_largeindex vindex(int *sign, LA_index i1, va_list args) const; //map list of indices to the position in data
+//@@@reversed index
 
 public:
 //constructors
-Tensor() {};
+Tensor() : myrank(0) {};
-Tensor(const NRVec<indexgroup> &s) : shape(s), data((int)getsize()) {data.clear();}; //general tensor
+Tensor(const NRVec<indexgroup> &s) : shape(s), data((int)getsize()), myrank(calcrank()) {data.clear();}; //general tensor
-Tensor(const indexgroup &g) {shape.resize(1); shape[0]=g; data.resize(getsize()); data.clear();}; //tensor with a single index group
+Tensor(const indexgroup &g) {shape.resize(1); shape[0]=g; data.resize(getsize()); myrank=calcrank(); data.clear();}; //tensor with a single index group
 
-int getrank() const; //is computed from shape
+int rank() const {return myrank;};
+int calcrank(); //is computed from shape
 LA_largeindex getsize(); //set redundant data and return total size
 LA_largeindex size() const {return data.size();};
 void copyonwrite() {shape.copyonwrite(); data.copyonwrite();};
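The constructors now also fill in the cached myrank. When reading the initializer-list form, keep in mind that C++ initializes members in declaration order (myrank, shape, cumsizes, data), not in the order they appear in the list, so myrank(calcrank()) runs before shape(s); the single-group constructor assigns myrank after the shape is set instead. A sketch of constructing a general tensor through this interface, assuming the declarations above, an NRVec size constructor, and the general-offset build; the concrete numbers are made up:

// sketch: a rank-3 tensor built from two index groups, an antisymmetric pair plus one
// ordinary index; NRVec's integer size constructor is assumed
#include "tensor.h"

int main()
{
NRVec<indexgroup> s(2);
s[0].number = 2; s[0].symmetry = -1; s[0].offset = 1; s[0].range = 4;  // antisymmetric pair
s[1].number = 1; s[1].symmetry = 0;  s[1].offset = 1; s[1].range = 5;  // single plain index
// (offset is only assignable without LA_TENSOR_ZERO_OFFSET)

Tensor<double> t(s);
// rank() should report 3 here, the total number of indices over both groups
return 0;
}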
@@ -106,8 +113,8 @@ public:
 inline T operator()(const SUPERINDEX &I) {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; return sign>0 ?data[i] : -data[i];};
 inline Signedpointer<T> lhs(const FLATINDEX &I) {int sign; LA_largeindex i=index(&sign,I); return Signedpointer<T>(&data[i],sign);};
 inline T operator()(const FLATINDEX &I) {int sign; LA_largeindex i=index(&sign,I); if(sign==0) return 0; return sign>0 ?data[i] : -data[i];};
-inline Signedpointer<T> lhs(int i1...) {va_list args; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); return Signedpointer<T>(&data[i],sign); };
+inline Signedpointer<T> lhs(LA_index i1...) {va_list args; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); return Signedpointer<T>(&data[i],sign); };
-inline T operator()(int i1...) {va_list args; ; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); if(sign==0) return 0; return sign>0 ?data[i] : -data[i];};
+inline T operator()(LA_index i1...) {va_list args; ; int sign; LA_largeindex i; va_start(args,i1); i= vindex(&sign, i1,args); if(sign==0) return 0; return sign>0 ?data[i] : -data[i];};
 
 inline Tensor& operator*=(const T &a) {data*=a; return *this;};
 inline Tensor& operator/=(const T &a) {data/=a; return *this;};
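The variadic entry points now take LA_index rather than int for the explicit first index, so the leading index and the ones vindex extracts with va_arg(args,LA_index) are read as one and the same type. A standalone reminder of the underlying rule, using a plain typedef; index_t is illustrative, not the LA_index definition: the type named in va_arg must match the (promoted) type the caller actually passed.

#include <cstdarg>
#include <iostream>

typedef long index_t;                      // stand-in for an index type wider than int

index_t sum_indices(int n, index_t i1, ...)
{
va_list args;
va_start(args, i1);
index_t s = i1;
for(int i = 1; i < n; ++i) s += va_arg(args, index_t);   // read back with the same type
va_end(args);
return s;
}

int main()
{
// callers must also pass index_t values, here via the long literal suffix
std::cout << sum_indices(3, 1L, 2L, 3L) << std::endl;    // prints 6
return 0;
}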
@@ -119,6 +126,7 @@ public:
 //@@@TODO - contractions - basic and efficient
 //@@@ finish the indices
 //@@@ double recursive loopover with a callback - or an iterator with a next function???
+//@@@ or an inverse index function?
 //@@@ stream i/o based on this
 };
 
@@ -126,7 +134,7 @@ public:
 
 
 template<typename T>
-int Tensor<T>:: getrank() const
+int Tensor<T>:: calcrank()
 {
 int r=0;
 for(int i=0; i<shape.size(); ++i)