#ifndef BGEOT_SPARSE_TENSORS
#define BGEOT_SPARSE_TENSORS

#include <bitset>
#include <iostream>
#include <memory>
#include "gmm/gmm_except.h"
#include "bgeot_config.h"
#include "dal_bit_vector.h"

namespace bgeot {
  typedef gmm::uint32_type index_type;
  typedef gmm::int32_type stride_type;
  class tensor_ranges : public std::vector<index_type> {
  public:
    tensor_ranges() : std::vector<index_type>() {}
    tensor_ranges(size_type n) : std::vector<index_type>(n) {}
    tensor_ranges(size_type n, index_type V) : std::vector<index_type>(n,V) {}
    bool is_zero_size() const {
      for (dim_type i=0; i < this->size(); ++i)
        if ((*this)[i] == 0)
          return true;
      return false;
    }
  };
  typedef std::vector<stride_type> tensor_strides;
  typedef std::vector<dim_type> index_set;

  typedef scalar_type * TDIter;

  std::ostream& operator<<(std::ostream& o, const tensor_ranges& r);
  /* minimalistic loop over a set of ranges (the first index varies fastest) */
  struct tensor_ranges_loop {
    tensor_ranges sz;
    tensor_ranges cnt;
    bool finished_;
    tensor_ranges_loop(const tensor_ranges& t)
      : sz(t), cnt(t.size()), finished_(t.size() == 0) {
      std::fill(cnt.begin(), cnt.end(), 0);
    }
    index_type index(dim_type i) { return cnt[i]; }
    bool finished() const { return finished_; }
    bool next() {
      index_type i = 0;
      while (++cnt[i] >= sz[i]) {
        cnt[i] = 0; ++i;
        if (i >= sz.size()) { finished_ = true; break; }
      }
      return finished_;
    }
  };
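  /* Usage sketch (illustrative, not part of the library): visit all
     multi-indices of a 2x3 range; the first index varies fastest.

       tensor_ranges r(2); r[0] = 2; r[1] = 3;
       for (tensor_ranges_loop l(r); !l.finished(); l.next())
         std::cout << l.index(0) << "," << l.index(1) << "\n";
  */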
  /* handle a binary mask over a given set of tensor indices */
  class tensor_mask {
    tensor_ranges r;
    index_set idxs;
    std::vector<bool> m;
    tensor_strides s; /* strides in m */
    mutable index_type card_; /* cached number of set entries in m */
    mutable bool card_uptodate;
  public:
    tensor_mask() { set_card(0); }
    explicit tensor_mask(const tensor_ranges& r_, const index_set& idxs_) {
      assign(r_, idxs_);
    }
    /* merge constructor */
    explicit tensor_mask(const tensor_mask& tm1, const tensor_mask& tm2,
                         bool and_op);
    explicit tensor_mask(const std::vector<const tensor_mask*>& tm);
    explicit tensor_mask(const std::vector<const tensor_mask*> tm1,
                         const std::vector<const tensor_mask*> tm2, bool and_op);
    void swap(tensor_mask &tm) {
      r.swap(tm.r); idxs.swap(tm.idxs);
      m.swap(tm.m); s.swap(tm.s);
      std::swap(card_, tm.card_);
      std::swap(card_uptodate, tm.card_uptodate);
    }
    void assign(const tensor_ranges& r_, const index_set& idxs_) {
      r = r_; idxs = idxs_; eval_strides(); m.assign(size(), false);
      set_card(0);
    }
    void assign(const tensor_mask& tm) {
      r = tm.r; idxs = tm.idxs; m = tm.m; s = tm.s;
      card_ = tm.card_; card_uptodate = tm.card_uptodate;
    }
    void assign(const std::vector<const tensor_mask* >& tm);
    void assign(const tensor_mask& tm1, const tensor_mask& tm2, bool and_op);
    void clear() { r.resize(0); idxs.resize(0); m.clear(); s.resize(0); set_card(0); }
    const tensor_ranges& ranges() const { return r; }
    const index_set& indexes() const { return idxs; }
    const tensor_strides& strides() const { return s; }
    index_set& indexes() { return idxs; }
    void eval_strides() {
      s.resize(r.size()+1); s[0]=1;
      for (index_type i=0; i < r.size(); ++i) {
        s[i+1] = s[i]*r[i];
      }
    }
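    /* e.g. for r == {3,4}: eval_strides() yields s == {1,3,12}, so the
       linear position of (i,j) inside m is i + 3*j, and size() == 12. */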
    index_type ndim() const { return index_type(r.size()); }
    index_type size() const { return s[r.size()]; }
    void set_card(index_type c) const { card_ = c; card_uptodate = true; }
    void unset_card() const { card_uptodate = false; }
    index_type card(bool just_look=false) const {
      if (!card_uptodate || just_look) {
        index_type c = index_type(std::count_if(m.begin(), m.end(),
                                                [](const auto &x) {return x == true;}));
        if (just_look) return c;
        card_ = c;
      }
      return card_;
    }
    index_type pos(tensor_ranges& global_r) const {
      index_type p = 0;
      for (index_type i=0; i < r.size(); ++i)
        p += s[i]*global_r[idxs[i]];
      return p;
    }
    index_type lpos(tensor_ranges& local_r) const {
      index_type p = 0;
      for (index_type i=0; i < r.size(); ++i)
        p += s[i]*local_r[i];
      return p;
    }
    bool operator()(tensor_ranges& global_r) const {
      return m[pos(global_r)];
    }
    bool operator()(stride_type p) const { return m[p]; }
    void set_mask_val(stride_type p, bool v) { m[p]=v; card_uptodate = false; }
    struct Slice {
      dim_type dim; index_type i0;
      Slice(dim_type d, index_type i0_) : dim(d), i0(i0_) {}
    };

    /* create a slice mask */
    void set_slice(index_type dim, index_type range, index_type islice) {
      r.resize(1); r[0] = range;
      idxs.resize(1); idxs[0] = dim_type(dim);
      m.clear(); m.assign(range, false); m[islice] = 1; set_card(1);
      eval_strides();
    }
    explicit tensor_mask(index_type range, Slice slice) {
      set_slice(slice.dim, range, slice.i0);
    }
    struct Diagonal {
      dim_type i0, i1;
      Diagonal(dim_type i0_, dim_type i1_) : i0(i0_), i1(i1_) {}
    };

    /* create a diagonal mask */
    void set_diagonal(index_type n, index_type i0, index_type i1) {
      assert(n);
      r.resize(2); r[0] = r[1] = n;
      idxs.resize(2); idxs[0] = dim_type(i0); idxs[1] = dim_type(i1);
      m.assign(n*n, false);
      for (index_type i=0; i < n; ++i) m[n*i+i] = true;
      set_card(n); eval_strides();
    }
    explicit tensor_mask(index_type n, Diagonal diag) {
      set_diagonal(n, diag.i0, diag.i1);
    }
    void set_triangular(index_type n, index_type i0, index_type i1) {
      assert(n);
      r.resize(2); r[0] = r[1] = n;
      idxs.resize(2); idxs[0] = dim_type(i0); idxs[1] = dim_type(i1);
      m.assign(n*n, false); unset_card();
      for (index_type i=0; i < n; ++i)
        for (index_type j=i; j < n; ++j) m[i*n+j] = true;
      eval_strides();
    }
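    /* e.g. set_triangular(2, 0, 1) sets the 3 == n*(n+1)/2 positions with
       j >= i; the cardinal is recomputed lazily on the next call to card(),
       since unset_card() was invoked. */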
    void set_full(index_type dim, index_type range) {
      r.resize(1); r[0] = range;
      idxs.resize(1); idxs[0] = dim_type(dim);
      m.assign(range, true); set_card(range);
      eval_strides();
    }
    void set_empty(index_type dim, index_type range) {
      r.resize(1); r[0] = range;
      idxs.resize(1); idxs[0] = dim_type(dim);
      m.assign(range, false); set_card(0);
      eval_strides();
    }
    explicit tensor_mask(index_type dim, index_type range) {
      set_full(dim, range);
    }
    void set_zero() {
      m.assign(size(), false); set_card(0);
    }
    void shift_dim_num_ge(dim_type dim, int shift) {
      for (dim_type i=0; i < idxs.size(); ++i) {
        if (idxs[i] >= dim) idxs[i] = dim_type(idxs[i] + shift);
      }
      check_assertions();
    }
    void gen_mask_pos(tensor_strides& p) const {
      check_assertions();
      p.resize(card());
      index_type i = 0;
      for (tensor_ranges_loop l(r); !l.finished(); l.next()) {
        if (m[lpos(l.cnt)]) p[i++] = lpos(l.cnt);
      }
      assert(i == card());
    }
    void unpack_strides(const tensor_strides& packed, tensor_strides& unpacked) const;
    int max_dim() const {
      index_set::const_iterator it = std::max_element(idxs.begin(), idxs.end());
      return (it == idxs.end() ? -1 : *it);
    }
    void check_assertions() const;
    void print(std::ostream &o) const;
    void print_() const { print(cerr); }
  };
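  /* Usage sketch (illustrative, not part of the library): a mask over a
     single tensor index of range 5 that keeps only position 2.

       tensor_mask sl(5, tensor_mask::Slice(0, 2));
       // sl.card() == 1, and sl(stride_type(2)) == true
  */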
  typedef std::vector<tensor_mask> tensor_mask_container;
  struct tensor_index_to_mask {
    short_type mask_num;
    short_type mask_dim;
    tensor_index_to_mask() : mask_num(short_type(-1)),
                             mask_dim(short_type(-1)) {}
    bool is_valid() { return mask_num != short_type(-1) &&
                             mask_dim != short_type(-1); }
  };
  /* defines the "shape" of a sparse tensor: a set of masks over its
     indices; each tensor index belongs to exactly one mask */
  class tensor_shape {
    mutable std::vector<tensor_index_to_mask> idx2mask;
    tensor_mask_container masks_;

    /* if one mask is completely empty then the whole tensor is
       identically zero, hence every mask is cleared */
    void check_empty_mask() {
      if (card() == 0) {
        for (dim_type i=0; i < masks_.size(); ++i) {
          masks_[i].set_zero();
        }
      }
    }
    static void find_linked_masks(dim_type mnum, const tensor_shape &ts1,
                                  const tensor_shape &ts2,
                                  dal::bit_vector& treated1, dal::bit_vector& treated2,
                                  std::vector<const tensor_mask*>& lstA,
                                  std::vector<const tensor_mask*>& lstB) {
      /* recursive, but the recursion depth is bounded by the number of masks */
      assert(mnum < ts1.masks().size());
      assert(!treated1[mnum]);
      treated1.add(mnum);
      lstA.push_back(&ts1.mask(mnum));
      for (dim_type i=0; i < ts1.mask(mnum).indexes().size(); ++i) {
        dim_type ii = ts1.mask(mnum).indexes()[i];
        if (ts2.index_is_valid(ii) && !treated2[ts2.index_to_mask_num(ii)])
          find_linked_masks(ts2.index_to_mask_num(ii), ts2, ts1,
                            treated2, treated1, lstB, lstA);
      }
    }
  protected:
    dim_type index_to_mask_num(dim_type ii) const {
      if (index_is_valid(ii))
        return dim_type(idx2mask[ii].mask_num);
      else return dim_type(-1);
    }
  public:
    void clear() { masks_.resize(0); idx2mask.resize(0); }
    void swap(tensor_shape& ts) {
      idx2mask.swap(ts.idx2mask);
      masks_.swap(ts.masks_);
    }
    dim_type ndim() const { return dim_type(idx2mask.size()); }
    bool index_is_valid(dim_type ii) const {
      assert(ii < idx2mask.size()); return idx2mask[ii].is_valid();
    }
    const tensor_mask& index_to_mask(dim_type ii) const {
      assert(index_is_valid(ii)); return masks_[idx2mask[ii].mask_num];
    }
    dim_type index_to_mask_dim(dim_type ii) const {
      assert(index_is_valid(ii)); return dim_type(idx2mask[ii].mask_dim);
    }
    index_type dim(dim_type ii) const {
      assert(index_is_valid(ii));
      return index_to_mask(ii).ranges()[index_to_mask_dim(ii)];
    }
    tensor_mask_container& masks() { return masks_; }
    const tensor_mask_container& masks() const { return masks_; }
    const tensor_mask& mask(dim_type i) const {
      assert(i < masks_.size()); return masks_[i];
    }
    stride_type card(bool just_look=false) const {
      stride_type n = 1;
      for (dim_type i=0; i < masks().size(); ++i)
        n *= masks()[i].card(just_look);
      return n;
    }
    void push_mask(const tensor_mask& m) { masks_.push_back(m); update_idx2mask(); }
    /* be careful with this function: remove only unused masks! */
    void remove_mask(dim_type mdim) {
      masks_.erase(masks_.begin()+mdim);
      update_idx2mask();
    }
    void remove_unused_dimensions() {
      dim_type nd = 0;
      for (dim_type i=0; i < ndim(); ++i) {
        if (index_is_valid(i))
          masks_[idx2mask[i].mask_num].indexes()[idx2mask[i].mask_dim] = nd++;
      }
      set_ndim_noclean(nd);
      update_idx2mask();
    }
    void update_idx2mask() const {
      std::fill(idx2mask.begin(), idx2mask.end(), tensor_index_to_mask());
      for (dim_type i=0; i < masks_.size(); ++i) {
        for (dim_type j=0; j < masks_[i].indexes().size(); ++j) {
          dim_type k = masks_[i].indexes()[j];
          GMM_ASSERT3(k < idx2mask.size() && !idx2mask[k].is_valid(), "");
          idx2mask[k].mask_num = i; idx2mask[k].mask_dim = j;
        }
      }
    }
    void assign_shape(const tensor_shape& other) {
      masks_ = other.masks_;
      idx2mask = other.idx2mask;
    }
    void set_ndim(dim_type n) {
      clear();
      idx2mask.resize(n); update_idx2mask();
    }
    void set_ndim_noclean(dim_type n) { idx2mask.resize(n); }
    tensor_shape() {}

    /* create an "empty" shape of dimension nd */
    explicit tensor_shape(dim_type nd) : idx2mask(nd, tensor_index_to_mask()) {
      masks_.reserve(16);
    }
    /* create a full shape from a set of ranges */
    explicit tensor_shape(const tensor_ranges& r) {
      set_full(r);
    }
    void set_full(const tensor_ranges& r) {
      idx2mask.resize(r.size());
      masks_.resize(r.size());
      for (dim_type i=0; i < r.size(); ++i) masks_[i].set_full(i, r[i]);
      update_idx2mask();
    }
    void set_empty(const tensor_ranges& r) {
      idx2mask.resize(r.size());
      masks_.resize(r.size());
      for (dim_type i=0; i < r.size(); ++i) masks_[i].set_empty(i, r[i]);
      update_idx2mask();
    }
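    /* Usage sketch (illustrative, not part of the library): a full shape
       has every position set, an empty one none.

         tensor_ranges rr(2, 3);   // two indices, range 3 each
         tensor_shape full(rr);    // full.card() == 9
    */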
    /* merge with another shape */
    void merge(const tensor_shape &ts2, bool and_op = true) {
      /* basic sanity checks */
      GMM_ASSERT3(ts2.ndim() == ndim(), "");
      if (ts2.ndim() == 0) return; /* the tensor is a scalar */
      for (dim_type i = 0; i < ndim(); ++i)
        if (index_is_valid(i) && ts2.index_is_valid(i))
          GMM_ASSERT3(ts2.dim(i) == dim(i), "");

      tensor_mask_container new_mask;
      dal::bit_vector mask_treated1; mask_treated1.sup(0, masks().size());
      dal::bit_vector mask_treated2; mask_treated2.sup(0, ts2.masks().size());
      std::vector<const tensor_mask*> lstA, lstB; lstA.reserve(10); lstB.reserve(10);
      for (dim_type i = 0; i < ndim(); ++i) {
        dim_type i1 = dim_type(index_to_mask_num(i));
        dim_type i2 = dim_type(ts2.index_to_mask_num(i));
        lstA.clear(); lstB.clear();
        if (index_is_valid(i) && !mask_treated1[i1])
          find_linked_masks(i1, *this, ts2, mask_treated1, mask_treated2,
                            lstA, lstB);
        else if (ts2.index_is_valid(i) && !mask_treated2[i2])
          find_linked_masks(i2, ts2, *this, mask_treated2, mask_treated1,
                            lstB, lstA);
        else continue;
        GMM_ASSERT3(lstA.size() || lstB.size(), "");
        new_mask.push_back(tensor_mask(lstA, lstB, and_op));
      }
      masks() = new_mask;
      update_idx2mask();
      check_empty_mask();
    }
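    /* Illustrative sketch (not part of the library): with and_op == true
       the merged masks keep only positions set in both shapes, so
       intersecting a full 3x3 shape with its diagonal leaves 3 of the 9
       positions:

         tensor_ranges rr(2, 3);
         tensor_shape ts(rr);                                  // card() == 9
         ts.merge(ts.diag_shape(tensor_mask::Diagonal(0,1)));  // card() == 3
    */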
    void shift_dim_num_ge(dim_type dim_num, int shift) {
      for (dim_type m = 0; m < masks().size(); ++m)
        masks()[m].shift_dim_num_ge(dim_num, shift);
    }
    /* the permutation vector may be longer than the current ndim, in which
       case some indices remain unused (those with p[i] == dim_type(-1)) */
    void permute(const std::vector<dim_type> p, bool revert=false) {
      std::vector<dim_type> invp(ndim()); std::fill(invp.begin(), invp.end(), dim_type(-1));

      /* build the inverse permutation and check that p really is one */
      for (dim_type i=0; i < p.size(); ++i) {
        if (p[i] != dim_type(-1)) {
          assert(invp[p[i]] == dim_type(-1));
          invp[p[i]] = i;
        }
      }
      for (dim_type i=0; i < invp.size(); ++i) assert(invp[i] != dim_type(-1));

      /* apply the permutation to every mask */
      for (dim_type m=0; m < masks().size(); ++m) {
        for (dim_type i=0; i < masks()[m].indexes().size(); ++i) {
          if (!revert)
            masks()[m].indexes()[i] = invp[masks()[m].indexes()[i]];
          else
            masks()[m].indexes()[i] = p[masks()[m].indexes()[i]];
        }
      }
      set_ndim_noclean(dim_type(p.size()));
      update_idx2mask();
    }
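    /* e.g. (reading of the code above): permute({1,0}) exchanges the two
       indices of a rank-2 shape, i.e. produces the shape of the transpose;
       revert == true applies the inverse mapping instead. */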
    /* shape of a slice (the shape applied to a tensor to extract that
       slice; note that the mask may end up empty if the slice is taken
       at a masked-out position!) */
    tensor_shape slice_shape(tensor_mask::Slice slice) const {
      assert(slice.dim < ndim() && slice.i0 < dim(slice.dim));
      tensor_shape ts(ndim());
      ts.push_mask(tensor_mask(dim(slice.dim), slice));
      ts.merge(*this);
      return ts;
    }

    tensor_shape diag_shape(tensor_mask::Diagonal diag) const {
      assert(diag.i1 != diag.i0 && diag.i0 < ndim() && diag.i1 < ndim());
      assert(dim(diag.i0) == dim(diag.i1));
      tensor_shape ts(ndim());
      ts.push_mask(tensor_mask(dim(diag.i0), diag));
      ts.merge(*this);
      return ts;
    }
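    /* Usage sketch (illustrative, not part of the library): the shape of
       column 1 of a 3x4 tensor; every position outside the slice is masked.

         tensor_shape col = ts.slice_shape(tensor_mask::Slice(1, 1));
         // col.card() == 3, assuming ts is the full 3x4 shape
    */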
    void print(std::ostream& o) const;
    void print_() const { print(cerr); }
  };
  /* reference to a tensor: a shape, a data pointer and a set of strides */
  class tensor_ref : public tensor_shape {
    std::vector< tensor_strides > strides_;
    TDIter *pbase_; /* pointer to a pointer to the data: allows changing
                       the base of a whole set of tensor_refs at once */
    stride_type base_shift_;

    void remove_mask(dim_type mdim) {
      tensor_shape::remove_mask(mdim);
      assert(strides_[mdim].size() == 0 ||
             (strides_[mdim].size() == 1 && strides_[mdim][0] == 0)); /* sanity check */
      strides_.erase(strides_.begin()+mdim);
    }
  public:
    void swap(tensor_ref& tr) {
      tensor_shape::swap(tr);
      strides_.swap(tr.strides_);
      std::swap(pbase_, tr.pbase_);
      std::swap(base_shift_, tr.base_shift_);
    }
    const std::vector< tensor_strides >& strides() const { return strides_; }
    std::vector< tensor_strides >& strides() { return strides_; }
    TDIter base() const { return (pbase_ ? (*pbase_) : 0); }
    TDIter *pbase() const { return pbase_; }
    stride_type base_shift() const { return base_shift_; }
    void set_base(TDIter &new_base) { pbase_ = &new_base; base_shift_ = 0; }

    void clear() { strides_.resize(0); pbase_ = 0; base_shift_ = 0; tensor_shape::clear(); }
    /* ensure that the first stride of every mask is zero, folding any
       common offset into base_shift_ */
    void ensure_0_stride() {
      for (index_type i=0; i < strides_.size(); ++i) {
        if (strides_[i].size() >= 1 && strides_[i][0] != 0) {
          stride_type s = strides_[i][0];
          base_shift_ += s;
          for (index_type j=0; j < strides_[i].size(); ++j) strides_[i][j] -= s;
        }
      }
    }
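    /* e.g. if strides_[i] == {5, 8, 11}, then base_shift_ += 5 and the row
       becomes {0, 3, 6}: absolute positions are unchanged, but the first
       position of the mask now has stride 0. */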
    /* constructor from a shape: beware, this does NOT allocate the data! */
    explicit tensor_ref(const tensor_shape& ts)
      : tensor_shape(ts), pbase_(0), base_shift_(0) {
      strides_.reserve(16);
      init_strides();
    }
    /* constructor from a set of ranges (full tensor) */
    explicit tensor_ref(const tensor_ranges& r, TDIter *pbase__=0)
      : tensor_shape(r), pbase_(pbase__), base_shift_(0) {
      strides_.reserve(16);
      init_strides();
    }
    void init_strides() {
      strides_.resize(masks().size());
      stride_type s = 1;
      for (dim_type i = 0; i < strides_.size(); ++i) {
        index_type n = mask(i).card();
        strides_[i].resize(n);
        for (index_type j=0; j < n; ++j)
          strides_[i][j] = j*s;
        s *= n;
      }
    }
    tensor_ref() : pbase_(0), base_shift_(0) { strides_.reserve(16); }
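    /* Usage sketch (illustrative, not part of the library): view a dense
       3x4 buffer through a tensor_ref; the double indirection (TDIter*)
       lets the underlying buffer be relocated without recomputing strides.

         std::vector<scalar_type> data(12);
         TDIter pdata = &data[0];
         tensor_ranges rr(2); rr[0] = 3; rr[1] = 4;
         tensor_ref tr(rr, &pdata);
    */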
    void set_sub_tensor(const tensor_ref& tr, const tensor_shape& sub);

    /* view a sub-tensor of tr with shape sub (no data copy, just another
       view on the same data) */
    explicit tensor_ref(const tensor_ref& tr, const tensor_shape& sub) {
      set_sub_tensor(tr, sub);
    }

    /* slice of tr at dimension slice.dim, position slice.i0 */
    explicit tensor_ref(const tensor_ref& tr, tensor_mask::Slice slice);

    /* diagonal of another tensor */
    explicit tensor_ref(const tensor_ref& tr, tensor_mask::Diagonal diag) {
      set_sub_tensor(tr, tr.diag_shape(diag));
      ensure_0_stride();
    }
    void print(std::ostream& o) const;
    void print_() const { print(cerr); }
  };
  std::ostream& operator<<(std::ostream& o, const tensor_mask& m);
  std::ostream& operator<<(std::ostream& o, const tensor_shape& ts);
  std::ostream& operator<<(std::ostream& o, const tensor_ref& tr);
  /* minimalistic data for iterating over sparse tensors */
  struct packed_range {
    const stride_type *pinc;
    const stride_type *begin, *end;
    index_type n;
  };
  /* additional data */
  struct packed_range_info {
    index_type range;
    dim_type original_masknum;
    dim_type n;
    std::vector<stride_type> mask_pos; /* for iteration with update of the index values */
    bool operator<(const packed_range_info& pi) const {
      return n < pi.n;
    }
    stride_type mean_increm; /* mean increment value (used for sorting) */
    tensor_strides inc; /* not strides, but increments to the next position in
                           the mask, with inc[range-1] == -sum(inc[0..range-2])
                           (automatic rewinding!); if the mask is full,
                           inc[i] == strides[i] */
    std::bitset<32> have_regular_strides;
  };
  /* the big one: iterate simultaneously over several tensors */
  class multi_tensor_iterator {
    index_type N; /* number of simultaneous tensors */
    std::vector<packed_range> pr;
    std::vector<packed_range_info> pri;

    std::vector<index_type> bloc_rank;
    std::vector<index_type> bloc_nelt;

    std::vector<TDIter> it;
    std::vector<TDIter*> pit0;
    tensor_strides itbase;
    struct index_value_data {
      dim_type cnt_num;
      const stride_type **ppinc; /* points to pr[cnt_num].pinc, initialized by
                                    rewind() and not before (to avoid trouble
                                    when copying a multi_tensor_iterator);
                                    allows recovering the counter value as
                                    (*ppinc - pincbase) / nn */
      const stride_type *pincbase;
      const stride_type *pposbase; /* points into pri[cnt_num].mask_pos, with
                                      the same rule as above */
      index_type div, mod, nn;
      stride_type pos_; /* stores the position when the index is not part of
                           the pri array (the index then has a single value,
                           and pposbase == &pos_) */
    };
    std::vector<index_value_data> idxval;
    std::vector<stride_type> vectorized_strides_; /* if the tensors have regular
                                                     strides, the mti may be
                                                     vectorizable */
    index_type vectorized_size_;  /* size of each vectorizable chunk */
    index_type vectorized_pr_dim; /* all pr[i], i >= vectorized_pr_dim, can be
                                     accessed via vectorized_strides */
  public:
    void clear() {
      N = 0; pr.clear(); pri.clear(); bloc_rank.clear(); bloc_nelt.clear();
      it.clear(); pit0.clear(); itbase.clear(); idxval.clear();
    }
    void swap(multi_tensor_iterator& m) {
      std::swap(N, m.N); pr.swap(m.pr); pri.swap(m.pri);
      bloc_rank.swap(m.bloc_rank); bloc_nelt.swap(m.bloc_nelt);
      it.swap(m.it); pit0.swap(m.pit0); itbase.swap(m.itbase);
      idxval.swap(m.idxval);
    }
    void rewind() {
      for (dim_type i=0; i < pr.size(); ++i) {
        pr[i].pinc = pr[i].begin = &pri[i].inc[0];
        pr[i].end = pr[i].begin + pri[i].inc.size();
      }
      for (dim_type n=0; n < N; ++n)
        it[n] = *(pit0[n]) + itbase[n];
      for (dim_type i=0; i < idxval.size(); ++i) {
        if (idxval[i].cnt_num != dim_type(-1)) {
          idxval[i].ppinc = &pr[idxval[i].cnt_num].pinc;
          idxval[i].pincbase = &pri[idxval[i].cnt_num].inc[0];
          idxval[i].pposbase = &pri[idxval[i].cnt_num].mask_pos[0];
          idxval[i].nn = (N - pri[idxval[i].cnt_num].n);
        } else {
          static const stride_type *null = 0;
          idxval[i].ppinc = &null;
          idxval[i].pincbase = 0;
          idxval[i].pposbase = &idxval[i].pos_;
          idxval[i].nn = 1;
        }
      }
    }
    dim_type ndim() const { return dim_type(idxval.size()); }
    /* get back the value of an index at the current iterator position */
    index_type index(dim_type ii) {
      index_value_data& iv = idxval[ii];
      index_type cnt = index_type((*iv.ppinc - iv.pincbase)/iv.nn);
      return ((iv.pposbase[cnt]) % iv.mod) / iv.div;
    }
    index_type vectorized_size() const { return vectorized_size_; }
    const std::vector<stride_type>& vectorized_strides() const { return vectorized_strides_; }
    bool next(unsigned i_stop = unsigned(-1), unsigned i0_ = unsigned(-2)) {
      unsigned i0 = unsigned(i0_ == unsigned(-2) ? pr.size()-1 : i0_);
      while (i0 != i_stop) {
        for (unsigned n = pr[i0].n; n < N; ++n) {
          it[n] += *pr[i0].pinc; pr[i0].pinc++;
        }
        if (pr[i0].pinc != pr[i0].end) {
          return true;
        } else {
          pr[i0].pinc = pr[i0].begin; i0--;
        }
      }
      return false;
    }
    bool vnext() { return next(unsigned(-1), vectorized_pr_dim); }
    bool bnext(dim_type b) { return next(bloc_rank[b]-1, bloc_rank[b+1]-1); }
    bool bnext_useful(dim_type b) { return bloc_rank[b] != bloc_rank[b+1]; }
    /* special versions for iterating over tensors of the same shape
       (should be slightly faster) */
    bool qnext1() {
      if (pr.size() == 0) return false;
      std::vector<packed_range>::reverse_iterator p_ = pr.rbegin();
      while (p_ != pr.rend()) {
        it[0] += *(p_->pinc++);
        if (p_->pinc != p_->end) {
          return true;
        } else {
          p_->pinc = p_->begin; p_++;
        }
      }
      return false;
    }
    bool qnext2() {
      if (pr.size() == 0) return false;
      std::vector<packed_range>::reverse_iterator p_ = pr.rbegin();
      while (p_ != pr.rend()) {
        it[0] += *(p_->pinc++);
        it[1] += *(p_->pinc++);
        if (p_->pinc != p_->end) {
          return true;
        } else {
          p_->pinc = p_->begin; p_++;
        }
      }
      return false;
    }
    scalar_type& p(dim_type n) { return *it[n]; }
    multi_tensor_iterator() {}
    multi_tensor_iterator(std::vector<tensor_ref> trtab, bool with_index_values) {
      init(trtab, with_index_values);
    }
    void assign(std::vector<tensor_ref> trtab, bool with_index_values) {
      multi_tensor_iterator m(trtab, with_index_values);
      swap(m);
    }
    multi_tensor_iterator(const tensor_ref& tr0, bool with_index_values) {
      std::vector<tensor_ref> trtab(1); trtab[0] = tr0;
      init(trtab, with_index_values);
    }
    void assign(const tensor_ref& tr0, bool with_index_values) {
      multi_tensor_iterator m(tr0, with_index_values);
      swap(m);
    }
    multi_tensor_iterator(const tensor_ref& tr0, const tensor_ref& tr1,
                          bool with_index_values) {
      std::vector<tensor_ref> trtab(2); trtab[0] = tr0; trtab[1] = tr1;
      init(trtab, with_index_values);
    }
    void assign(const tensor_ref& tr0, const tensor_ref& tr1,
                bool with_index_values) {
      multi_tensor_iterator m(tr0, tr1, with_index_values);
      swap(m);
    }
    multi_tensor_iterator(const tensor_ref& tr0, const tensor_ref& tr1,
                          const tensor_ref& tr2, bool with_index_values) {
      std::vector<tensor_ref> trtab(3); trtab[0] = tr0; trtab[1] = tr1; trtab[2] = tr2;
      init(trtab, with_index_values);
    }
    void assign(const tensor_ref& tr0, const tensor_ref& tr1, const tensor_ref& tr2,
                bool with_index_values) {
      multi_tensor_iterator m(tr0, tr1, tr2, with_index_values);
      swap(m);
    }
    void init(std::vector<tensor_ref> trtab, bool with_index_values);
  };
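  /* Usage sketch (illustrative, not part of the library): accumulate all
     entries of a tensor_ref tr. qnext1() is the fast path for a single
     tensor; p(0) dereferences the current position of tensor 0.

       multi_tensor_iterator mti(tr, false);
       mti.rewind();
       scalar_type sum = 0;
       do { sum += mti.p(0); } while (mti.qnext1());
  */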
  /* handles a tree of reductions; the tree is used when more than two
     tensors are reduced, i.e. z(:,:) = t(:,i).u(i,j).v(j,:): the reduction
     over j can first be performed on w(:,:) = u(:,j).v(j,:), and then
     z(:,:) = t(:,i).w(i,:) */
  struct tensor_reduction {
    struct tref_or_reduction {
      tensor_ref tr_;
      std::shared_ptr<tensor_reduction> reduction;
      tensor_ref &tr() { return tr_; }
      const tensor_ref &tr() const { return tr_; }
      explicit tref_or_reduction(const tensor_ref &tr__, const std::string& s)
        : tr_(tr__), ridx(s) {}
      explicit tref_or_reduction(const std::shared_ptr<tensor_reduction> &p,
                                 const std::string& s)
        : reduction(p), ridx(s) {
        reduction->result(tr_);
      }
      bool is_reduction() const { return reduction != nullptr; }
      void swap(tref_or_reduction &other) {
        tr_.swap(other.tr_); std::swap(reduction, other.reduction);
      }
      std::string ridx; /* reduction indexes; an index cannot appear twice
                           within the same tensor */
      std::vector<dim_type> gdim; /* mapping to the global virtual tensor
                                     whose range is the union of the ranges
                                     of each reduced tensor */
      std::vector<dim_type> rdim; /* mapping to the dimensions of the reduced
                                     tensor (= dim_type(-1) for dimensions i
                                     such that ridx[i] != ' ') */
    };
    tensor_ranges reduced_range;
    std::string reduction_chars; /* list of all indexes used for reduction */
    tensor_ref trres;
    typedef std::vector<tref_or_reduction>::iterator trtab_iterator;
    std::vector<tref_or_reduction> trtab;
    multi_tensor_iterator mti;
    std::vector<scalar_type> out_data; /* optional storage of the output */
    TDIter pout_data;
    tensor_reduction() { clear(); }
    virtual ~tensor_reduction() { clear(); }
    void clear();
    /* returns the diagonalized shape; ideally this function would take
       the shapes of all argument tensors into account */
    static void diag_shape(tensor_shape& ts, const std::string& s) {
      for (index_type i=0; i < s.length(); ++i) {
        size_type pos = s.find(s[i]);
        if (s[i] != ' ' && pos != i) // this is not cheating!
          ts = ts.diag_shape(tensor_mask::Diagonal(dim_type(pos), dim_type(i)));
      }
    }
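    /* e.g. (reading of the loop above): for an index string "ii", the
       second 'i' (position 1) finds its first occurrence at position 0,
       so ts is restricted with Diagonal(0,1) before the reduction. */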
    void insert(const tensor_ref& tr_, const std::string& s);
    void prepare(const tensor_ref* tr_out = NULL);
    void do_reduction();
    void result(tensor_ref& res) const {
      res = trres;
      res.remove_unused_dimensions();
    }
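    /* Usage sketch (illustrative, assuming the ridx convention documented
       with rdim above: a blank keeps a dimension, a letter reduces it
       against the same letter of the other tensors):

         tensor_reduction red;
         red.insert(t, " j"); // t(:,j): keep dim 0, reduce dim 1
         red.insert(u, "j");  // u(j): reduced against t's second dimension
         red.prepare();
         red.do_reduction();
         red.result(z);       // z(:) = sum_j t(:,j)*u(j)
    */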
  private:
    void insert(const tref_or_reduction& tr_, const std::string& s);
    void update_reduction_chars();
    void make_sub_reductions();
    size_type find_best_sub_reduction(dal::bit_vector &best_lst,
                                      std::string &best_idxset);
  };

} /* namespace bgeot */

#endif /* BGEOT_SPARSE_TENSORS */