vmmlib  1.7.0
 All Classes Namespaces Functions Pages
tucker3_tensor.hpp
1 /*
2  * Copyright (c) 2006-2014, Visualization and Multimedia Lab,
3  * University of Zurich <http://vmml.ifi.uzh.ch>,
4  * Eyescale Software GmbH,
5  * Blue Brain Project, EPFL
6  *
7  * This file is part of VMMLib <https://github.com/VMML/vmmlib/>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  *
12  * Redistributions of source code must retain the above copyright notice, this
13  * list of conditions and the following disclaimer. Redistributions in binary
14  * form must reproduce the above copyright notice, this list of conditions and
15  * the following disclaimer in the documentation and/or other materials provided
16  * with the distribution. Neither the name of the Visualization and Multimedia
17  * Lab, University of Zurich nor the names of its contributors may be used to
18  * endorse or promote products derived from this software without specific prior
19  * written permission.
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* @author Susanne Suter
34  * @author Jonas Boesch
35  *
36  * The Tucker3 tensor class is consists of the same components (core tensor, basis matrices u1-u3) as the tucker3 model described in:
37  * - Tucker, 1966: Some mathematical notes on three-mode factor analysis, Psychometrika.
38  * - De Lathauwer, De Moor, Vandewalle, 2000a: A multilinear singular value decomposition, SIAM J. Matrix Anal. Appl.
39  * - De Lathauwer, De Moor, Vandewalle, 2000b: On the Best rank-1 and Rank-(R_1, R_2, ..., R_N) Approximation and Applications of Higher-Order Tensors, SIAM J. Matrix Anal. Appl.
40  * - Kolda & Bader, 2009: Tensor Decompositions and Applications, SIAM Review.
41  *
42  * see also quantized Tucker3 tensor (qtucker3_tensor.hpp)
43  */
44 
45 #ifndef __VMML__TUCKER3_TENSOR__HPP__
46 #define __VMML__TUCKER3_TENSOR__HPP__
47 
48 #include <vmmlib/t3_hooi.hpp>
49 #include <vmmlib/t3_ihooi.hpp>
50 
51 
52 namespace vmml {
53 
54  template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value = float, typename T_coeff = double >
56  public:
57  typedef float T_internal;
58 
61 
67 
73 
74  static const size_t SIZE = R1*R2*R3 + I1*R1 + I2*R2 + I3*R3;
75 
78  tucker3_tensor(t3_core_type& core, u1_type& U1, u2_type& U2, u3_type& U3);
79  tucker3_tensor(const t3_type& data_, u1_type& U1, u2_type& U2, u3_type& U3);
80  tucker3_tensor(const tucker3_type& other);
81  ~tucker3_tensor();
82 
83  void set_core(t3_core_type& core) {
84  _core = t3_core_type(core);
85  _core_comp.cast_from(core);
86  };
87 
88  void set_u1(u1_type& U1) {
89  *_u1 = U1;
90  _u1_comp->cast_from(U1);
91  };
92 
93  void set_u2(u2_type& U2) {
94  *_u2 = U2;
95  _u2_comp->cast_from(U2);
96  };
97 
98  void set_u3(u3_type& U3) {
99  *_u3 = U3;
100  _u3_comp->cast_from(U3);
101  };
102 
        // Accessors for the externally visible (T_coeff) decomposition parts.

        // Returns the core tensor by value.
        t3_core_type get_core() const {
            return _core;
        }; //be careful: creates copy!

        // Copies the core tensor into data_ (avoids the by-value copy above
        // only in the sense that the caller controls the destination).
        void get_core(t3_core_type& data_) const {
            data_ = _core;
        };

        // Copy the mode-1 basis matrix into U1.
        void get_u1(u1_type& U1) const {
            U1 = *_u1;
        };

        // Copy the mode-2 basis matrix into U2.
        void get_u2(u2_type& U2) const {
            U2 = *_u2;
        };

        // Copy the mode-3 basis matrix into U3.
        void get_u3(u3_type& U3) const {
            U3 = *_u3;
        };
122 
123  void set_core_comp(t3_core_comp_type& core) {
124  _core_comp = t3_core_comp_type(core);
125  _core.cast_from(_core_comp);
126  };
127 
128  void set_u1_comp(u1_comp_type& U1) {
129  *_u1_comp = U1;
130  _u1->cast_from(U1);
131  };
132 
133  void set_u2_comp(u2_comp_type& U2) {
134  *_u2_comp = U2;
135  _u2->cast_from(U2);
136  };
137 
138  void set_u3_comp(u3_comp_type& U3) {
139  *_u3_comp = U3;
140  _u3->cast_from(U3);
141  };
142 
        // Accessors for the high-precision (T_internal) computation copies.

        // Copy the internal-precision core tensor into data_.
        void get_core_comp(t3_core_comp_type& data_) const {
            data_ = _core_comp;
        };

        // Copy the internal-precision mode-1 basis matrix into U1.
        void get_u1_comp(u1_comp_type& U1) const {
            U1 = *_u1_comp;
        };

        // Copy the internal-precision mode-2 basis matrix into U2.
        void get_u2_comp(u2_comp_type& U2) const {
            U2 = *_u2_comp;
        };

        // Copy the internal-precision mode-3 basis matrix into U3.
        void get_u3_comp(u3_comp_type& U3) const {
            U3 = *_u3_comp;
        };
158 
        //get number of nonzeros for tensor decomposition
        size_t nnz() const;                       // nonzeros over all factors + core
        size_t nnz(const T_value& threshold) const; // nonzeros above threshold
        size_t nnz_core() const;                  // nonzeros of the core only
        size_t size_core() const;                 // total element count of the core

        // Total number of coefficients of the decomposition (core + 3 factors).
        size_t size() const {
            return SIZE;
        };

        // Zero out small core coefficients until at most nnz_core_ remain
        // (nnz_core_is_ reports the achieved count), or by a fixed threshold.
        void threshold_core(const size_t& nnz_core_, size_t& nnz_core_is_);
        void threshold_core(const T_coeff& threshold_value_, size_t& nnz_core_);
171 
172  template< size_t J1, size_t J2, size_t J3 >
174  set_sub_core(const tensor3<J1, J2, J3, T_coeff >& sub_data_,
175  size_t row_offset, size_t col_offset, size_t slice_offset);
176 
        // Reconstruct the full tensor from core and basis matrices into data_.
        void reconstruct(t3_type& data_);
        // Relative Frobenius reconstruction error, in percent.
        double error(t3_type& original) const;

        // Generic entry point; forwards to tucker_als.
        // NOTE(review): default tolerance here is 10, while the als variants
        // below default to 1e-04 — looks inconsistent; confirm intent before
        // relying on the default.
        template< typename T_init>
        tensor_stats decompose(const t3_type& data_, T_init init, const size_t max_iterations = 10, const float tolerance = 10);

        // Tucker-ALS (HOOI) decomposition of data_.
        template< typename T_init>
        tensor_stats tucker_als(const t3_type& data_, T_init init, const size_t max_iterations = 10, const float tolerance = 1e-04);

        // Incremental (block-wise) Tucker-ALS with NBLOCKS blocks.
        template< size_t NBLOCKS, typename T_init>
        tensor_stats i_tucker_als(const t3_type& data_, T_init init, const size_t max_iterations = 10, const float tolerance = 1e-04);

        // Incremental CP-Tucker ALS with CP-rank R and NBLOCKS blocks.
        template< size_t R, size_t NBLOCKS, typename T_init>
        tensor_stats i_cp_tucker_als(const t3_type& data_, T_init init, const size_t max_iterations = 10, const float tolerance = 1e-04);

        // Convenience wrapper: tucker_als with random initialization.
        void als_rand(const t3_type& data_);
        //template< typename T_init>
        // void tucker_als(const t3_type& data_, T_init init);
        // template< size_t NBLOCKS, typename T_init >
        // void incr_block_diag_als(const t3_type& data_, T_init init);
        // void incr_block_diag_als( T_init init );


        // Copy from a higher-rank decomposition, keeping only the first
        // R1/R2/R3 columns and core entries.
        template< size_t K1, size_t K2, size_t K3>
        void reduce_ranks(const tucker3_tensor< K1, K2, K3, I1, I2, I3, T_value, T_coeff >& other); //call TuckerJI.reduce_ranks(TuckerKI) K1 -> R1, K2 -> R2, K3 -> R3

        // Subsample basis-matrix rows by 'factor' (every factor-th row).
        template< size_t K1, size_t K2, size_t K3>
        void subsampling(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor);

        // Like subsampling, but each kept row is the average of up to
        // 'factor' consecutive source rows.
        template< size_t K1, size_t K2, size_t K3>
        void subsampling_on_average(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor);

        // Restrict the decomposition to a sub-volume given by per-mode
        // [start, end) row ranges of the source basis matrices.
        template< size_t K1, size_t K2, size_t K3>
        void region_of_interest(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other,
                const size_t& start_index1, const size_t& end_index1,
                const size_t& start_index2, const size_t& end_index2,
                const size_t& start_index3, const size_t& end_index3);
214 
        // Stream the basis matrices and the core tensor in human-readable form.
        // The basis matrices are heap-allocated because they can be large
        // (I x R); NOTE(review): the new/delete pair is not exception-safe —
        // if streaming throws, u1..u3 leak.
        friend std::ostream& operator <<(std::ostream& os, const tucker3_type& t3) {
            t3_core_type core;
            t3.get_core(core);
            u1_type* u1 = new u1_type;
            t3.get_u1(*u1);
            u2_type* u2 = new u2_type;
            t3.get_u2(*u2);
            u3_type* u3 = new u3_type;
            t3.get_u3(*u3);

            os << "U1: " << std::endl << *u1 << std::endl
                    << "U2: " << std::endl << *u2 << std::endl
                    << "U3: " << std::endl << *u3 << std::endl
                    << "core: " << std::endl << core << std::endl;

            delete u1;
            delete u2;
            delete u3;
            return os;
        }
235 
        // Refresh the T_coeff members from the T_internal computation copies,
        // and vice versa.
        void cast_members();
        void cast_comp_members();

    protected:

        // NOTE(review): this "assignment operator" returns *this without
        // copying anything from the argument — it effectively disables
        // assignment while still compiling. Confirm this is intentional.
        tucker3_type operator=(const tucker3_type&) {
            return (*this);
        };

    private:
        //t3_core_type* _core ;
        u1_type* _u1;        // mode-1 basis matrix (I1 x R1), heap-allocated
        u2_type* _u2;        // mode-2 basis matrix (I2 x R2), heap-allocated
        u3_type* _u3;        // mode-3 basis matrix (I3 x R3), heap-allocated
        t3_core_type _core;  // core tensor (R1 x R2 x R3)

        //used only internally for computations to have a higher precision
        t3_core_comp_type _core_comp;
        u1_comp_type* _u1_comp;
        u2_comp_type* _u2_comp;
        u3_comp_type* _u3_comp;
258  }; // class tucker3_tensor
259 
260 
261 
262 #define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value, typename T_coeff >
263 #define VMML_TEMPLATE_CLASSNAME tucker3_tensor< R1, R2, R3, I1, I2, I3, T_value, T_coeff >
264 
265  VMML_TEMPLATE_STRING
266  VMML_TEMPLATE_CLASSNAME::tucker3_tensor() {
267  _core.zero();
268  _u1 = new u1_type();
269  _u1->zero();
270  _u2 = new u2_type();
271  _u2->zero();
272  _u3 = new u3_type();
273  _u3->zero();
274  _core_comp.zero();
275  _u1_comp = new u1_comp_type();
276  _u1_comp->zero();
277  _u2_comp = new u2_comp_type();
278  _u2_comp->zero();
279  _u3_comp = new u3_comp_type();
280  _u3_comp->zero();
281  }
282 
283  VMML_TEMPLATE_STRING
284  VMML_TEMPLATE_CLASSNAME::tucker3_tensor(t3_core_type& core) {
285  _core = core;
286  _u1 = new u1_type();
287  _u1->zero();
288  _u2 = new u2_type();
289  _u2->zero();
290  _u3 = new u3_type();
291  _u3->zero();
292  _u1_comp = new u1_comp_type();
293  _u1_comp->zero();
294  _u2_comp = new u2_comp_type();
295  _u2_comp->zero();
296  _u3_comp = new u3_comp_type();
297  _u3_comp->zero();
298  _core_comp.cast_from(core);
299  }
300 
301  VMML_TEMPLATE_STRING
302  VMML_TEMPLATE_CLASSNAME::tucker3_tensor(t3_core_type& core, u1_type& U1, u2_type& U2, u3_type& U3) {
303  _core = core;
304  _u1 = new u1_type(U1);
305  _u2 = new u2_type(U2);
306  _u3 = new u3_type(U3);
307  _u1_comp = new u1_comp_type();
308  _u2_comp = new u2_comp_type();
309  _u3_comp = new u3_comp_type();
310  cast_comp_members();
311  }
312 
    // Construct from a full data tensor and given (orthogonal) basis matrices:
    // the core is derived by projecting data_ onto U1/U2/U3.
    VMML_TEMPLATE_STRING
    VMML_TEMPLATE_CLASSNAME::tucker3_tensor(const t3_type& data_, u1_type& U1, u2_type& U2, u3_type& U3) {
        _u1 = new u1_type(U1);
        _u2 = new u2_type(U2);
        _u3 = new u3_type(U3);
        _u1_comp = new u1_comp_type();
        _u2_comp = new u2_comp_type();
        _u3_comp = new u3_comp_type();

        // derive the core from data and the given bases
        t3_hooi_type::derive_core_orthogonal_bases(data_, *_u1, *_u2, *_u3, _core);

        cast_comp_members();  // sync the high-precision computation copies
    }
326 
327  VMML_TEMPLATE_STRING
328  VMML_TEMPLATE_CLASSNAME::tucker3_tensor(const tucker3_type& other) {
329  _u1 = new u1_type();
330  _u2 = new u2_type();
331  _u3 = new u3_type();
332  _u1_comp = new u1_comp_type();
333  _u2_comp = new u2_comp_type();
334  _u3_comp = new u3_comp_type();
335 
336  other.get_core(_core);
337  other.get_u1(*_u1);
338  other.get_u2(*_u2);
339  other.get_u3(*_u3);
340 
341  cast_comp_members();
342  }
343 
    // Refresh the externally visible T_coeff members from the high-precision
    // T_internal computation copies (call after a computation updated *_comp).
    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::cast_members() {
        _u1->cast_from(*_u1_comp);
        _u2->cast_from(*_u2_comp);
        _u3->cast_from(*_u3_comp);
        _core.cast_from(_core_comp);
    }

    // Opposite direction: refresh the computation copies from the T_coeff
    // members (call after the T_coeff members were set externally).
    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::cast_comp_members() {
        _u1_comp->cast_from(*_u1);
        _u2_comp->cast_from(*_u2);
        _u3_comp->cast_from(*_u3);
        _core_comp.cast_from(_core);
    }
361 
    // Number of nonzero coefficients in the (high-precision) core tensor.
    VMML_TEMPLATE_STRING
    size_t
    VMML_TEMPLATE_CLASSNAME::nnz_core() const {
        return _core_comp.nnz();
    }

    // Total number of coefficients in the core tensor (R1*R2*R3).
    VMML_TEMPLATE_STRING
    size_t
    VMML_TEMPLATE_CLASSNAME::size_core() const {
        return _core_comp.size();
    }
373 
    // Release all heap-allocated basis matrices (owned raw pointers).
    VMML_TEMPLATE_STRING
    VMML_TEMPLATE_CLASSNAME::~tucker3_tensor() {
        delete _u1;
        delete _u2;
        delete _u3;
        delete _u1_comp;
        delete _u2_comp;
        delete _u3_comp;
    }
383 
    // Reconstruct the full tensor (core x1 U1 x2 U2 x3 U3) into data_.
    // The multiplication runs in internal precision; the result is then
    // converted back to T_value.
    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::reconstruct(t3_type& data_) {
        t3_comp_type data;
        data.cast_from(data_);  // NOTE(review): 'data' is overwritten by the TTM below; this initial cast looks redundant — confirm
        t3_ttm::full_tensor3_matrix_multiplication(_core_comp, *_u1_comp, *_u2_comp, *_u3_comp, data);

        // TODO have a closer look at this. It's mangling the error computation (after the cast, frobenius_norm() changes substantially)
        //convert reconstructed data, which is in type T_internal (double, float) to T_value (uint8 or uint16)
        if ((sizeof (T_value) == 1) || (sizeof (T_value) == 2)) {
            data_.float_t_to_uint_t(data);  // 1- or 2-byte T_value: assume unsigned integer quantization
        } else {
            data_.cast_from(data);
        }

    }
400 
401  VMML_TEMPLATE_STRING
402  double
403  VMML_TEMPLATE_CLASSNAME::error(t3_type& original) const {
404 
405  t3_comp_type data;
406  t3_ttm::full_tensor3_matrix_multiplication(_core_comp, *_u1_comp, *_u2_comp, *_u3_comp, data);
407 
408  double err = data.frobenius_norm(original) / original.frobenius_norm() * 100;
409  return err;
410  }
411 
    // Repeatedly threshold the core with a growing cutoff until at most
    // nnz_core_ nonzeros remain; nnz_core_is_ reports the achieved count.
    // The cutoff escalation schedule below is heuristic (hand-tuned steps).
    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::threshold_core(const size_t& nnz_core_, size_t& nnz_core_is_) {
        nnz_core_is_ = _core_comp.nnz();
        T_coeff threshold_value = 0.00001;  // start with a tiny cutoff
        while (nnz_core_is_ > nnz_core_) {
            _core_comp.threshold(threshold_value);
            nnz_core_is_ = _core_comp.nnz();

            //threshold value scheme: grow the cutoff ever more aggressively
            //so the loop is guaranteed to terminate
            if (threshold_value < 0.01) {
                threshold_value *= 10;
            } else if (threshold_value < 0.2) {
                threshold_value += 0.05;
            } else if (threshold_value < 1) {
                threshold_value += 0.25;
            } else if (threshold_value < 10) {
                threshold_value += 1;
            } else if (threshold_value < 50) {
                threshold_value += 10;
            } else if (threshold_value < 200) {
                threshold_value += 50;
            } else if (threshold_value < 500) {
                threshold_value += 100;
            } else if (threshold_value < 2000) {
                threshold_value += 500;
            } else if (threshold_value < 5000) {
                threshold_value += 3000;
            } else if (threshold_value >= 5000) {
                threshold_value += 5000;
            }
        }
        _core.cast_from(_core_comp);  // publish the thresholded core
    }
446 
447  VMML_TEMPLATE_STRING
448  void
449  VMML_TEMPLATE_CLASSNAME::threshold_core(const T_coeff& threshold_value_, size_t& nnz_core_) {
450  _core_comp.threshold(threshold_value_);
451  nnz_core_ = _core_comp.nnz();
452  _core.cast_from(_core_comp);
453  }
454 
    // Overwrite a sub-block of the core tensor with sub_data_ at the given
    // offsets; only enabled when the sub-block fits (J* <= I*). The T_coeff
    // core is refreshed from the computation copy afterwards.
    VMML_TEMPLATE_STRING
    template< size_t J1, size_t J2, size_t J3 >
    typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type*
    VMML_TEMPLATE_CLASSNAME::
    set_sub_core(const tensor3<J1, J2, J3, T_coeff >& sub_data_,
            size_t row_offset, size_t col_offset, size_t slice_offset) {
        _core_comp.set_sub_tensor3(sub_data_, row_offset, col_offset, slice_offset);
        _core.cast_from(_core_comp);
    }
464 
    // Convenience wrapper: run Tucker-ALS with random initialization.
    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::als_rand(const t3_type& data_) {
        typedef t3_hooi< R1, R2, R3, I1, I2, I3, T_internal > hooi_type;
        tucker_als(data_, typename hooi_type::init_random());
    }
471 
472 
    // Generic decomposition entry point; simply forwards to tucker_als.
    VMML_TEMPLATE_STRING
    template< typename T_init>
    tensor_stats
    VMML_TEMPLATE_CLASSNAME::decompose(const t3_type& data_, T_init init, const size_t max_iterations, const float tolerance) {
        return tucker_als(data_, init, max_iterations, tolerance);
    }
479 
    // Tucker-ALS (HOOI) decomposition of data_: casts the input to internal
    // precision, runs t3_hooi::als into the computation copies, then
    // publishes the result into the T_coeff members.
    VMML_TEMPLATE_STRING
    template< typename T_init>
    tensor_stats
    VMML_TEMPLATE_CLASSNAME::tucker_als(const t3_type& data_, T_init init, const size_t max_iterations, const float tolerance) {
        tensor_stats result;

        t3_comp_type data;
        data.cast_from(data_);  // work in T_internal precision

        typedef t3_hooi< R1, R2, R3, I1, I2, I3, T_internal > hooi_type;
        result += hooi_type::als(data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance);

        cast_members();  // publish computation results to T_coeff members

        return result;
    }
496 
    // Incremental (block-wise) Tucker-ALS with NBLOCKS blocks; otherwise the
    // same casting/publishing pattern as tucker_als.
    VMML_TEMPLATE_STRING
    template< size_t NBLOCKS, typename T_init>
    tensor_stats
    VMML_TEMPLATE_CLASSNAME::i_tucker_als(const t3_type& data_, T_init init, const size_t max_iterations, const float tolerance) {
        tensor_stats result;

        t3_comp_type data;
        data.cast_from(data_);  // work in T_internal precision

        typedef t3_ihooi< R1, R2, R3, NBLOCKS, I1, I2, I3, T_internal > ihooi_type;
        result += ihooi_type::i_als(data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance);

        cast_members();  // publish computation results to T_coeff members

        return result;
    }
513 
    // Incremental CP-Tucker ALS: CP rank R, NBLOCKS blocks; same
    // casting/publishing pattern as the other als variants.
    VMML_TEMPLATE_STRING
    template< size_t R, size_t NBLOCKS, typename T_init>
    tensor_stats
    VMML_TEMPLATE_CLASSNAME::i_cp_tucker_als(const t3_type& data_, T_init init, const size_t max_iterations, const float tolerance) {
        tensor_stats result;

        t3_comp_type data;
        data.cast_from(data_);  // work in T_internal precision

        typedef t3_ihooi< R1, R2, R3, NBLOCKS, I1, I2, I3, T_internal > ihooi_type;
        // 'template' keyword required: i_cp_als is a member template of a dependent type
        result += ihooi_type::template i_cp_als < R > (data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance);

        cast_members();  // publish computation results to T_coeff members

        return result;
    }
530 
    // Copy from a higher-rank decomposition 'other' (ranks K1/K2/K3), keeping
    // only the first R1/R2/R3 basis columns and core entries. The index
    // dimensions I1/I2/I3 are unchanged.
    VMML_TEMPLATE_STRING
    template< size_t K1, size_t K2, size_t K3>
    void
    VMML_TEMPLATE_CLASSNAME::reduce_ranks(const tucker3_tensor< K1, K2, K3, I1, I2, I3, T_value, T_coeff >& other)
    //TuckerJI.rank_reduction(TuckerKI) K1 -> R1, K2 -> R2, K3 -> R3; I1, I2, I3 stay the same
    {
        // the target ranks must not exceed the source ranks
        assert(R1 <= K1);
        assert(R2 <= K2);
        assert(R3 <= K3);

        //reduce basis matrices: keep the first R* columns of each
        //(heap-allocated because the temporaries can be large)
        matrix< I1, K1, T_coeff >* u1 = new matrix< I1, K1, T_coeff > ();
        other.get_u1(*u1);
        for (size_t r1 = 0; r1 < R1; ++r1) {
            _u1->set_column(r1, u1->get_column(r1));
        }

        matrix< I2, K2, T_coeff >* u2 = new matrix< I2, K2, T_coeff > ();
        other.get_u2(*u2);
        for (size_t r2 = 0; r2 < R2; ++r2) {
            _u2->set_column(r2, u2->get_column(r2));
        }

        matrix< I3, K3, T_coeff >* u3 = new matrix< I3, K3, T_coeff > ();
        other.get_u3(*u3);
        for (size_t r3 = 0; r3 < R3; ++r3) {
            _u3->set_column(r3, u3->get_column(r3));
        }

        //reduce core: keep the leading R1 x R2 x R3 block
        tensor3<K1, K2, K3, T_coeff > other_core;
        other.get_core(other_core);

        for (size_t r3 = 0; r3 < R3; ++r3) {
            for (size_t r1 = 0; r1 < R1; ++r1) {
                for (size_t r2 = 0; r2 < R2; ++r2) {
                    _core.at(r1, r2, r3) = other_core.at(r1, r2, r3);
                }
            }
        }


        cast_comp_members();  // sync the high-precision copies

        delete u1;
        delete u2;
        delete u3;
    }
579 
580  VMML_TEMPLATE_STRING
581  template< size_t K1, size_t K2, size_t K3>
582  void
583  VMML_TEMPLATE_CLASSNAME::subsampling(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor) {
584  assert(I1 <= K1);
585  assert(I1 <= K2);
586  assert(I1 <= K3);
587 
588  //subsample basis matrices
589  matrix< K1, R1, T_coeff >* u1 = new matrix< K1, R1, T_coeff > ();
590  other.get_u1(*u1);
591  for (size_t i1 = 0, i = 0; i1 < K1; i1 += factor, ++i) {
592  _u1->set_row(i, u1->get_row(i1));
593  }
594 
595  matrix< K2, R2, T_coeff >* u2 = new matrix< K2, R2, T_coeff > ();
596  other.get_u2(*u2);
597  for (size_t i2 = 0, i = 0; i2 < K2; i2 += factor, ++i) {
598  _u2->set_row(i, u2->get_row(i2));
599  }
600 
601  matrix< K3, R3, T_coeff >* u3 = new matrix< K3, R3, T_coeff > ();
602  other.get_u3(*u3);
603  for (size_t i3 = 0, i = 0; i3 < K3; i3 += factor, ++i) {
604  _u3->set_row(i, u3->get_row(i3));
605  }
606 
607  other.get_core(_core);
608 
609  cast_comp_members();
610  delete u1;
611  delete u2;
612  delete u3;
613  }
614 
615  VMML_TEMPLATE_STRING
616  template< size_t K1, size_t K2, size_t K3>
617  void
618  VMML_TEMPLATE_CLASSNAME::subsampling_on_average(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor) {
619  assert(I1 <= K1);
620  assert(I1 <= K2);
621  assert(I1 <= K3);
622 
623 
624  //subsample basis matrices
625  matrix< K1, R1, T_coeff >* u1 = new matrix< K1, R1, T_coeff > ();
626  other.get_u1(*u1);
627  for (size_t i1 = 0, i = 0; i1 < K1; i1 += factor, ++i) {
628  vector< R1, T_internal > tmp_row = u1->get_row(i1);
629  T_internal num_items_averaged = 1;
630  for (size_t j = i1 + 1; (j < (factor + i1)) & (j < K1); ++j, ++num_items_averaged)
631  tmp_row += u1->get_row(j);
632 
633  tmp_row /= num_items_averaged;
634  _u1->set_row(i, tmp_row);
635  }
636 
637  matrix< K2, R2, T_coeff >* u2 = new matrix< K2, R2, T_coeff > ();
638  other.get_u2(*u2);
639  for (size_t i2 = 0, i = 0; i2 < K2; i2 += factor, ++i) {
640  vector< R2, T_internal > tmp_row = u2->get_row(i2);
641  T_internal num_items_averaged = 1;
642  for (size_t j = i2 + 1; (j < (factor + i2)) & (j < K2); ++j, ++num_items_averaged)
643  tmp_row += u2->get_row(j);
644 
645  tmp_row /= num_items_averaged;
646  _u2->set_row(i, u2->get_row(i2));
647  }
648 
649  matrix< K3, R3, T_coeff >* u3 = new matrix< K3, R3, T_coeff > ();
650  other.get_u3(*u3);
651  for (size_t i3 = 0, i = 0; i3 < K3; i3 += factor, ++i) {
652  vector< R3, T_internal > tmp_row = u3->get_row(i3);
653  T_internal num_items_averaged = 1;
654  for (size_t j = i3 + 1; (j < (factor + i3)) & (j < K3); ++j, ++num_items_averaged)
655  tmp_row += u3->get_row(j);
656 
657  tmp_row /= num_items_averaged;
658  _u3->set_row(i, u3->get_row(i3));
659  }
660 
661  other.get_core(_core);
662  cast_comp_members();
663  delete u1;
664  delete u2;
665  delete u3;
666  }
667 
668  VMML_TEMPLATE_STRING
669  template< size_t K1, size_t K2, size_t K3>
670  void
671  VMML_TEMPLATE_CLASSNAME::region_of_interest(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other,
672  const size_t& start_index1, const size_t& end_index1,
673  const size_t& start_index2, const size_t& end_index2,
674  const size_t& start_index3, const size_t& end_index3) {
675  assert(I1 <= K1);
676  assert(I1 <= K2);
677  assert(I1 <= K3);
678  assert(start_index1 < end_index1);
679  assert(start_index2 < end_index2);
680  assert(start_index3 < end_index3);
681  assert(end_index1 < K1);
682  assert(end_index2 < K2);
683  assert(end_index3 < K3);
684 
685  //region_of_interes of basis matrices
686  matrix< K1, R1, T_internal >* u1 = new matrix< K1, R1, T_internal > ();
687  other.get_u1_comp(*u1);
688  for (size_t i1 = start_index1, i = 0; i1 < end_index1; ++i1, ++i) {
689  _u1_comp->set_row(i, u1->get_row(i1));
690  }
691 
692  matrix< K2, R2, T_internal>* u2 = new matrix< K2, R2, T_internal > ();
693  other.get_u2_comp(*u2);
694  for (size_t i2 = start_index2, i = 0; i2 < end_index2; ++i2, ++i) {
695  _u2_comp->set_row(i, u2->get_row(i2));
696  }
697 
698  matrix< K3, R3, T_internal >* u3 = new matrix< K3, R3, T_internal > ();
699  other.get_u3_comp(*u3);
700  for (size_t i3 = start_index3, i = 0; i3 < end_index3; ++i3, ++i) {
701  _u3_comp->set_row(i, u3->get_row(i3));
702  }
703 
704  other.get_core_comp(_core_comp);
705 
706  //cast_comp_members();
707  delete u1;
708  delete u2;
709  delete u3;
710  }
711 
712  VMML_TEMPLATE_STRING
713  size_t
714  VMML_TEMPLATE_CLASSNAME::nnz() const {
715  size_t counter = 0;
716 
717  counter += _u1_comp->nnz();
718  counter += _u2_comp->nnz();
719  counter += _u3_comp->nnz();
720  counter += _core_comp.nnz();
721 
722  return counter;
723  }
724 
725  VMML_TEMPLATE_STRING
726  size_t
727  VMML_TEMPLATE_CLASSNAME::nnz(const T_value& threshold) const {
728  size_t counter = 0;
729 
730  counter += _u1_comp->nnz(threshold);
731  counter += _u2_comp->nnz(threshold);
732  counter += _u3_comp->nnz(threshold);
733  counter += _core_comp.nnz(threshold);
734 
735  return counter;
736  }
737 
738 
739 #undef VMML_TEMPLATE_STRING
740 #undef VMML_TEMPLATE_CLASSNAME
741 
742 } // namespace vmml
743 
744 #endif