vmmlib  1.7.0
tensor3.hpp
/*
 * Copyright (c) 2006-2014, Visualization and Multimedia Lab,
 * University of Zurich <http://vmml.ifi.uzh.ch>,
 * Eyescale Software GmbH,
 * Blue Brain Project, EPFL
 *
 * This file is part of VMMLib <https://github.com/VMML/vmmlib/>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer. Redistributions in binary
 * form must reproduce the above copyright notice, this list of conditions and
 * the following disclaimer in the documentation and/or other materials provided
 * with the distribution. Neither the name of the Visualization and Multimedia
 * Lab, University of Zurich nor the names of its contributors may be used to
 * endorse or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* @author Susanne Suter
 * @author Jonas Boesch
 *
 * A tensor is a generalization of a multi-dimensional array.
 * A tensor3 is a tensor data structure with three modes I1, I2 and I3.
 */
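
/* Usage sketch (illustrative only, not part of the original header):
 *
 *   vmml::tensor3< 3, 4, 2, float > t;   // 3 rows, 4 columns, 2 frontal slices
 *   t.zero();
 *   t(1, 2, 0) = 5.0f;                   // row 1, column 2, slice 0
 *   float v = t.at(1, 2, 0);             // bounds-checked if VMMLIB_SAFE_ACCESSORS is defined
 */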

#ifndef __VMML__TENSOR3__HPP__
#define __VMML__TENSOR3__HPP__

#include <fstream> // file I/O
#include <vmmlib/tensor3_iterator.hpp>
#include <vmmlib/enable_if.hpp>
#include <vmmlib/blas_dot.hpp>
#include <fcntl.h>
#include <limits>
#ifdef VMMLIB_USE_OPENMP
#include <omp.h>
#endif
#undef min
#undef max


namespace vmml {

    // tensor with three modes, containing a series of I3 vmml matrices of size I1 x I2;
    // I1 is the number of rows, I2 the number of columns and I3 the number of tubes

    template< size_t I1, size_t I2, size_t I3, typename T = float >
    class tensor3 {
    public:
        typedef T value_type;
        typedef T* pointer;
        typedef T& reference;
        typedef float T_blas;

        typedef typename matrix< I1, I2, T >::iterator matrix_iterator;

        typedef typename vmml::tensor3_iterator< tensor3< I1, I2, I3, T > > iterator;
        typedef typename vmml::tensor3_const_iterator< tensor3< I1, I2, I3, T > > const_iterator;

        typedef typename vmml::tensor3_iterator< tensor3< I1, I2, I3, T > > reverse_iterator;
        typedef typename vmml::tensor3_iterator< tensor3< I1, I2, I3, T > > const_reverse_iterator;

        typedef matrix< I1, I2, T > front_slice_type; //fwd: forward cycling (after Kiers, 2000)
        typedef matrix< I3, I1, T > lat_slice_type;
        typedef matrix< I2, I3, T > horiz_slice_type;

        typedef matrix< I1, I2*I3, T > fwd_front_unfolding_type;
        typedef matrix< I2, I3*I1, T > fwd_horiz_unfolding_type;
        typedef matrix< I3, I1*I2, T > fwd_lat_unfolding_type;

        typedef matrix< I2, I1, T > bwd_front_slice_type; //bwd: backward cycling (after Lathauwer et al., 2000a)
        typedef matrix< I1, I3, T > bwd_lat_slice_type;
        typedef matrix< I3, I2, T > bwd_horiz_slice_type;

        typedef matrix< I2, I1*I3, T > bwd_front_unfolding_type;
        typedef matrix< I1, I3*I2, T > bwd_lat_unfolding_type;
        typedef matrix< I3, I2*I1, T > bwd_horiz_unfolding_type;
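
        // note: an unfolding (matricization) lays the slices of one
        // orientation side by side in a single matrix, e.g. the backward
        // frontal unfolding of an I1 x I2 x I3 tensor is an I2 x (I1*I3)
        // matrix; the *_unfolding_type typedefs above give the exact shapes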

        static const size_t ROWS = I1;
        static const size_t COLS = I2;
        static const size_t SLICES = I3;
        static const size_t MATRIX_SIZE = I1 * I2;
        static const size_t SIZE = MATRIX_SIZE * I3;

        static size_t get_array_size_in_bytes();

        // WARNING: dangerous. Use before destruction if you want to prevent
        // a delete call for the assigned T* _array in the destructor.
        void clear_array_pointer();

        // accessors
        inline T& operator()(size_t i1, size_t i2, size_t i3);
        inline const T& operator()(size_t i1, size_t i2, size_t i3) const;

        inline T& at(size_t i1, size_t i2, size_t i3);
        inline const T& at(size_t i1, size_t i2, size_t i3) const;

        // element iterators - NOTE: column-major order
        iterator begin();
        iterator end();

        const_iterator begin() const;
        const_iterator end() const;

#if 0
        reverse_iterator rbegin();
        reverse_iterator rend();
        const_reverse_iterator rbegin() const;
        const_reverse_iterator rend() const;
#endif

        // ctors
        tensor3();

        explicit tensor3(void* memory);

        tensor3(const tensor3& source);

        template< typename U >
        tensor3(const tensor3< I1, I2, I3, U >& source_);

        template< size_t J1, size_t J2, size_t J3 >
        tensor3(const tensor3< J1, J2, J3, T >& source_);

        ~tensor3();

        size_t size() const; // returns I1 * I2 * I3

        template< size_t J1, size_t J2, size_t J3 >
        tensor3< J1, J2, J3, T >
        get_sub_tensor3(size_t row_offset, size_t col_offset, size_t slice_offset = 0,
                typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type* = 0) const;

        template< size_t J1, size_t J2, size_t J3 >
        typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type*
        get_sub_tensor3(tensor3< J1, J2, J3, T >& result,
                size_t row_offset = 0, size_t col_offset = 0, size_t slice_offset = 0) const;

        template< size_t J1, size_t J2, size_t J3 >
        typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type*
        set_sub_tensor3(const tensor3< J1, J2, J3, T >& sub_data_,
                size_t row_offset = 0, size_t col_offset = 0, size_t slice_offset = 0);

        void get_sub_tensor3( char* data_,
                const size_t i1_start, const size_t i1_end,
                const size_t i2_start, const size_t i2_end,
                const size_t i3_start, const size_t i3_end ) const;

        void set_sub_tensor3( const char* data_,
                const size_t i1_start, const size_t i1_end,
                const size_t i2_start, const size_t i2_end,
                const size_t i3_start, const size_t i3_end );

        inline void get_I1_vector(size_t i2, size_t i3, vector< I1, T >& data) const; // I1_vector is a column vector with all values i1 at i2 and i3
        inline void get_I2_vector(size_t i1, size_t i3, vector< I2, T >& data) const; // I2_vector is a row vector with all values i2 at i1 and i3
        inline void get_I3_vector(size_t i1, size_t i2, vector< I3, T >& data) const; // I3_vector is a vector with all values i3 at a given i1 and i2

        inline void get_row(size_t i1, size_t i3, vector< I2, T >& data) const; // same as get_I2_vector
        inline void get_column(size_t i2, size_t i3, vector< I1, T >& data) const; // same as get_I1_vector
        inline void get_tube(size_t i1, size_t i2, vector< I3, T >& data) const; // same as get_I3_vector

        inline void set_I1_vector(size_t i2, size_t i3, const vector< I1, T >& data); // I1_vector is a column vector with all values i1 at i2 and i3
        inline void set_I2_vector(size_t i1, size_t i3, const vector< I2, T >& data); // I2_vector is a row vector with all values i2 at i1 and i3
        inline void set_I3_vector(size_t i1, size_t i2, const vector< I3, T >& data); // I3_vector is a vector with all values i3 at a given i1 and i2

        inline void set_row(size_t i1, size_t i3, const vector< I2, T >& data); // same as set_I2_vector
        inline void set_column(size_t i2, size_t i3, const vector< I1, T >& data); // same as set_I1_vector
        inline void set_tube(size_t i1, size_t i2, const vector< I3, T >& data); // same as set_I3_vector

        inline void get_frontal_slice_fwd(size_t i3, front_slice_type& data) const;
        inline void get_lateral_slice_bwd(size_t i2, bwd_lat_slice_type& data) const;
        inline void get_horizontal_slice_fwd(size_t i1, horiz_slice_type& data) const;

        inline void get_frontal_slice_bwd(size_t i3, bwd_front_slice_type& data) const;
        inline void get_lateral_slice_fwd(size_t i2, lat_slice_type& data) const;
        inline void get_horizontal_slice_bwd(size_t i1, bwd_horiz_slice_type& data) const;

        inline void set_frontal_slice_fwd(size_t i3, const front_slice_type& data);
        inline void set_lateral_slice_bwd(size_t i2, const bwd_lat_slice_type& data);
        inline void set_horizontal_slice_fwd(size_t i1, const horiz_slice_type& data);

        inline void set_frontal_slice_bwd(size_t i3, const bwd_front_slice_type& data);
        inline void set_lateral_slice_fwd(size_t i2, const lat_slice_type& data);
        inline void set_horizontal_slice_bwd(size_t i1, const bwd_horiz_slice_type& data);

        inline front_slice_type& get_frontal_slice_fwd(size_t index);
        inline const front_slice_type& get_frontal_slice_fwd(size_t index) const;

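        // note: frontal slices are I1 x I2 matrices (one per i3), horizontal
        // slices are I2 x I3 matrices (one per i1), and lateral slices are
        // I3 x I1 matrices (one per i2); the bwd_* types are the transposed
        // orientations
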
        // sets all elements to fill_value
        void operator=(T fill_value); //@SUS: todo
        void fill(T fill_value); // special case of the set method (all values are set to the same value!)

        // fills the tensor with random values:
        // if seed >= 0, srand( seed ) is called with the given seed first;
        // if seed is -1, srand() is assumed to have been seeded outside of fill_random
        void fill_random(int seed = -1);
        void fill_random_signed(int seed = -1);
        void fill_increasing_values();
        void fill_rand_sym_slices(int seed = -1);
        void fill_rand_sym(int seed = -1);

        const tensor3& operator=(const tensor3& source_);

        template< size_t R >
        typename enable_if< R == I1 && R == I2 && R == I3 >::type*
        diag(const vector< R, T >& diag_values_);

        void range_threshold(tensor3< I1, I2, I3, T >& other_, const T& start_value, const T& end_value) const;

        template< size_t K1, size_t K2, size_t K3 >
        void average_8to1(tensor3< K1, K2, K3, T >& other) const;

        // note: this function copies elements until either the tensor3 is full or
        // the iterator equals end_.
        template< typename input_iterator_t >
        void set(input_iterator_t begin_, input_iterator_t end_,
                bool row_major_layout = true);
        void zero();

        T get_min() const;
        T get_max() const;
        T get_abs_min() const;
        T get_abs_max() const;

        // returns the number of non-zeros
        size_t nnz() const;
        size_t nnz(const T& threshold_) const;
        void threshold(const T& threshold_value_);

        // note: move to t3_converter
        template< typename TT >
        void quantize(tensor3< I1, I2, I3, TT >& quantized_, T& min_value_, T& max_value_) const;
        template< typename TT >
        void quantize_to(tensor3< I1, I2, I3, TT >& quantized_, tensor3< I1, I2, I3, char >& signs_, T& min_value_, T& max_value_, const TT& tt_range_) const;
        template< typename TT >
        void quantize_to(tensor3< I1, I2, I3, TT >& quantized_, const T& min_value_, const T& max_value_) const;
        template< typename TT >
        void quantize_log(tensor3< I1, I2, I3, TT >& quantized_, tensor3< I1, I2, I3, char >& signs_, T& min_value_, T& max_value_, const TT& tt_range_) const;
        template< typename TT >
        void dequantize(tensor3< I1, I2, I3, TT >& dequantized_, const TT& min_value_, const TT& max_value_) const;
        template< typename TT >
        void dequantize_log(tensor3< I1, I2, I3, TT >& dequantized_, const tensor3< I1, I2, I3, char >& signs_, const TT& min_value_, const TT& max_value_) const;
        template< typename TT >
        void dequantize(tensor3< I1, I2, I3, TT >& dequantized_, const tensor3< I1, I2, I3, char >& signs_, const TT& min_value_, const TT& max_value_) const;

        bool operator==(const tensor3& other) const;
        bool operator!=(const tensor3& other) const;

        // due to limited precision, two 'identical' tensor3 instances might seem different;
        // this function allows one to specify a tolerance when comparing them.
        bool equals(const tensor3& other, T tolerance) const;
        // this version takes a comparison functor to compare the components of
        // the two tensor3 data structures
        template< typename compare_t >
        bool equals(const tensor3& other, compare_t& cmp) const;

        // NOTE: the tensor times matrix multiplications (TTM) have been moved to t3_ttm

        // apply spherical weights
        template< typename float_t >
        void apply_spherical_weights(tensor3< I1, I2, I3, float_t >& other);
        void get_sphere();

        void horizontal_unfolding_bwd(bwd_horiz_unfolding_type& unfolding) const;
        void horizontal_unfolding_fwd(fwd_horiz_unfolding_type& unfolding) const;
        void lateral_unfolding_bwd(bwd_lat_unfolding_type& unfolding) const;
        void lateral_unfolding_fwd(fwd_lat_unfolding_type& unfolding) const;
        void frontal_unfolding_bwd(bwd_front_unfolding_type& unfolding) const;
        void frontal_unfolding_fwd(fwd_front_unfolding_type& unfolding) const;

        void horizontal_folding_bwd(const bwd_horiz_unfolding_type& unfolding);
        void lateral_folding_bwd(const bwd_lat_unfolding_type& unfolding);
        void frontal_folding_bwd(const bwd_front_unfolding_type& unfolding);

        // reconstruction of a Kruskal tensor => inversion of CP (Candecomp/Parafac).
        // please note that the parameter U will be overwritten.
        // temp is simply a required workspace matrix; it can be empty or uninitialized,
        // but it is passed as a parameter to prevent potentially superfluous allocations.
        template< size_t R >
        void reconstruct_CP(const vmml::vector< R, T >& lambda,
                vmml::matrix< R, I1, T >& U,
                const vmml::matrix< R, I2, T >& V,
                const vmml::matrix< R, I3, T >& W,
                vmml::matrix< R, I2 * I3, T >& temp
                ); //-> tensor outer product
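
        // for illustration (standard CP semantics, not part of the original
        // header): the reconstruction computes
        //   t3(i1, i2, i3) = sum_r lambda(r) * U(r, i1) * V(r, i2) * W(r, i3)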

        template< size_t R, typename TT >
        double tensor_inner_product(
                const vmml::vector< R, TT >& lambda,
                const vmml::matrix< I1, R, TT >& U,
                const vmml::matrix< I2, R, TT >& V,
                const vmml::matrix< I3, R, TT >& W) const;

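        // for illustration (not part of the original header): this returns
        //   < t3, sum_r lambda(r) * u_r o v_r o w_r >,
        // where u_r = U(:, r), v_r = V(:, r), w_r = W(:, r) and 'o' denotes
        // the vector outer product
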
        // error computation
        double frobenius_norm() const;
        double frobenius_norm(const tensor3< I1, I2, I3, T >& other) const;
        double avg_frobenius_norm() const;
        double mse(const tensor3< I1, I2, I3, T >& other) const; // mean-squared error
        double rmse(const tensor3< I1, I2, I3, T >& other) const; // root mean-squared error
        double compute_psnr(const tensor3< I1, I2, I3, T >& other, const T& max_value_) const; // peak signal-to-noise ratio
        void mean(T& mean_) const;
        double mean() const;
        double variance() const;
        double stdev() const;

        template< typename TT >
        void cast_from(const tensor3< I1, I2, I3, TT >& other);

        template< size_t J1, size_t J2, size_t J3, typename TT >
        void cast_from(const tensor3< J1, J2, J3, TT >& other, const long& slice_idx_start_ = 0);

        template< typename TT >
        void float_t_to_uint_t(const tensor3< I1, I2, I3, TT >& other);

        // note: these have been moved to t3_converter
        //void write_to_raw( const std::string& dir_, const std::string& filename_ ) const;
        //void read_from_raw( const std::string& dir_, const std::string& filename_ );
        //void write_datfile( const std::string& dir_, const std::string& filename_ ) const;
        //void write_to_csv( const std::string& dir_, const std::string& filename_ ) const;
        //void remove_normals_from_raw( const std::string& dir_, const std::string& filename_ );
        //void remove_uct_cylinder( const size_t radius_offset_, int seed_ = 0 );

        inline tensor3 operator+(T scalar) const;
        inline tensor3 operator-(T scalar) const;

        void operator+=(T scalar);
        void operator-=(T scalar);

        inline tensor3 operator+(const tensor3& other) const;
        inline tensor3 operator-(const tensor3& other) const;

        template< size_t J1, size_t J2, size_t J3 >
        typename enable_if< J1 < I1 && J2 < I2 && J3 < I3 >::type*
        operator+=(const tensor3< J1, J2, J3, T >& other);

        void operator+=(const tensor3& other);
        void operator-=(const tensor3& other);

        //
        // tensor3-scalar operations / scaling
        //
        tensor3 operator*(T scalar);
        void operator*=(T scalar);

        tensor3 operator/(T scalar);
        void operator/=(T scalar);

        //
        // matrix-vector operations
        //
        // transform column vector by matrix ( vec = matrix * vec )
        vector< I1, T > operator*(const vector< I2, T >& other) const;

        // transform column vector by matrix ( vec = matrix * vec )
        // assume homogeneous coords, e.g. vec3 = mat4x4 * vec3, with w = 1.0
        template< size_t O >
        vector< O, T > operator*(const vector< O, T >& vector_) const;

        inline tensor3< I1, I2, I3, T > operator-() const;
        tensor3< I1, I2, I3, T > negate() const;

        friend std::ostream& operator<<(std::ostream& os, const tensor3< I1, I2, I3, T >& t3) {
            for (size_t i = 0; i < I3; ++i) {
                os << t3.get_frontal_slice_fwd(i) << " *** " << std::endl;
            }
            return os;
        }

        // static members
        static void tensor3_allocate_data(T*& array_);
        static void tensor3_deallocate_data(T*& array_);

        static const tensor3< I1, I2, I3, T > ZERO;

        T* get_array_ptr();
        const T* get_array_ptr() const;

        // computes the array index for direct access
        inline size_t compute_index(size_t i1, size_t i2, size_t i3) const;

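        // storage layout (see at()): column-major within each frontal slice,
        // slices stored contiguously, i.e.
        //   compute_index(i1, i2, i3) = i3 * I1 * I2 + i2 * I1 + i1
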
    protected:
        front_slice_type& _get_slice(size_t index_);
        const front_slice_type& _get_slice(size_t index_) const;

        T* _array;

    }; // class tensor3

#define VMML_TEMPLATE_STRING template< size_t I1, size_t I2, size_t I3, typename T >
#define VMML_TEMPLATE_CLASSNAME tensor3< I1, I2, I3, T >

    // WARNING: make sure that memory is a pointer to a memory block of
    // sufficient size (that is, at least get_array_size_in_bytes())

    VMML_TEMPLATE_STRING
    VMML_TEMPLATE_CLASSNAME::tensor3(void* memory)
    : _array(reinterpret_cast<T*> (memory)) {
        assert(_array);
    }

    VMML_TEMPLATE_STRING
    VMML_TEMPLATE_CLASSNAME::tensor3()
    : _array() {
        tensor3_allocate_data(_array);
    }

    VMML_TEMPLATE_STRING
    VMML_TEMPLATE_CLASSNAME::tensor3(const tensor3& source_)
    : _array() {
        tensor3_allocate_data(_array);
        (*this) = source_;
    }

    VMML_TEMPLATE_STRING
    template< typename U >
    VMML_TEMPLATE_CLASSNAME::tensor3(const tensor3< I1, I2, I3, U >& source_) {
        const U* s_array = source_.get_array_ptr();
        tensor3_allocate_data(_array);
        for (size_t index = 0; index < I1 * I2 * I3; ++index) {
            _array[ index ] = static_cast<T> (s_array[ index ]);
        }
    }

    VMML_TEMPLATE_STRING
    template< size_t J1, size_t J2, size_t J3 >
    VMML_TEMPLATE_CLASSNAME::tensor3(const tensor3< J1, J2, J3, T >& source_) {
        const size_t minL = J1 < I1 ? J1 : I1;
        const size_t minC = J2 < I2 ? J2 : I2;
        const size_t minS = J3 < I3 ? J3 : I3;

        tensor3_allocate_data(_array); // bug fix: allocate before zero() touches the data
        zero();

        for (size_t i = 0; i < minL; i++) {
            for (size_t j = 0; j < minC; j++) {
                for (size_t k = 0; k < minS; k++) {
                    at(i, j, k) = source_(i, j, k);
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    VMML_TEMPLATE_CLASSNAME::~tensor3() {
        tensor3_deallocate_data(_array);
    }

    VMML_TEMPLATE_STRING
    inline T&
    VMML_TEMPLATE_CLASSNAME::at(size_t i1, size_t i2, size_t i3) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i1 >= I1 || i2 >= I2 || i3 >= I3)
            VMMLIB_ERROR("at( i1, i2, i3 ) - index out of bounds", VMMLIB_HERE);
#endif
        // index = slice_index * slice_size + col_index * n_rows + row_index
        return _array[ i3 * MATRIX_SIZE + i2 * ROWS + i1 ];
    }

    VMML_TEMPLATE_STRING
    inline const T&
    VMML_TEMPLATE_CLASSNAME::at(size_t i1, size_t i2, size_t i3) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i1 >= I1 || i2 >= I2 || i3 >= I3)
            VMMLIB_ERROR("at( i1, i2, i3 ) - index out of bounds", VMMLIB_HERE);
#endif
        return _array[ i3 * MATRIX_SIZE + i2 * ROWS + i1 ];
    }

    VMML_TEMPLATE_STRING
    inline T&
    VMML_TEMPLATE_CLASSNAME::operator()(size_t i1, size_t i2, size_t i3) {
        return at(i1, i2, i3);
    }

    VMML_TEMPLATE_STRING
    inline const T&
    VMML_TEMPLATE_CLASSNAME::operator()(size_t i1, size_t i2, size_t i3) const {
        return at(i1, i2, i3);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_I2_vector(size_t i1, size_t i3, vector< I2, T >& data) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("get_I2_vector() - i3 index out of bounds.", VMMLIB_HERE);
#endif
        _get_slice(i3).get_row(i1, data);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_I1_vector(size_t i2, size_t i3, vector< I1, T >& data) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("get_I1_vector() - i3 index out of bounds.", VMMLIB_HERE);
#endif
        _get_slice(i3).get_column(i2, data);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_I3_vector(size_t i1, size_t i2, vector< I3, T >& data) const {
        for (size_t i3 = 0; i3 < I3; ++i3) {
            data[ i3 ] = _get_slice(i3).at(i1, i2);
        }
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_row(size_t i1, size_t i3, vector< I2, T >& data) const {
        get_I2_vector(i1, i3, data);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_column(size_t i2, size_t i3, vector< I1, T >& data) const {
        get_I1_vector(i2, i3, data);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_tube(size_t i1, size_t i2, vector< I3, T >& data) const {
        get_I3_vector(i1, i2, data);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_I2_vector(size_t i1, size_t i3, const vector< I2, T >& data) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("set_I2_vector() - i3 index out of bounds.", VMMLIB_HERE);
#endif
        _get_slice(i3).set_row(i1, data); //@SUS: bug fix
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_I1_vector(size_t i2, size_t i3, const vector< I1, T >& data) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("set_I1_vector() - i3 index out of bounds.", VMMLIB_HERE);
#endif
        _get_slice(i3).set_column(i2, data);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_I3_vector(size_t i1, size_t i2, const vector< I3, T >& data) {
        for (size_t i3 = 0; i3 < I3; ++i3) {
            _get_slice(i3).at(i1, i2) = data[ i3 ];
        }
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_row(size_t i1, size_t i3, const vector< I2, T >& data) {
        set_I2_vector(i1, i3, data);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_column(size_t i2, size_t i3, const vector< I1, T >& data) {
        set_I1_vector(i2, i3, data);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_tube(size_t i1, size_t i2, const vector< I3, T >& data) {
        set_I3_vector(i1, i2, data);
    }

    VMML_TEMPLATE_STRING
    inline typename VMML_TEMPLATE_CLASSNAME::front_slice_type&
    VMML_TEMPLATE_CLASSNAME::
    get_frontal_slice_fwd(size_t i3) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("get_frontal_slice_fwd() - index out of bounds.", VMMLIB_HERE);
#endif
        return _get_slice(i3);
    }

    VMML_TEMPLATE_STRING
    inline const typename VMML_TEMPLATE_CLASSNAME::front_slice_type&
    VMML_TEMPLATE_CLASSNAME::
    get_frontal_slice_fwd(size_t i3) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("get_frontal_slice_fwd() - index out of bounds.", VMMLIB_HERE);
#endif
        return _get_slice(i3);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_frontal_slice_fwd(size_t i3, front_slice_type& data) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("get_frontal_slice_fwd() - index out of bounds.", VMMLIB_HERE);
#endif
        data = _get_slice(i3);
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_lateral_slice_bwd(size_t i2, bwd_lat_slice_type& data) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i2 >= I2)
            VMMLIB_ERROR("get_lateral_slice_bwd() - index out of bounds.", VMMLIB_HERE);
#endif
        for (size_t i3 = 0; i3 < I3; ++i3) {
            data.set_column(i3, _get_slice(i3).get_column(i2));
        }
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_horizontal_slice_fwd(size_t i1, horiz_slice_type& data) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i1 >= I1)
            VMMLIB_ERROR("get_horizontal_slice_fwd() - index out of bounds.", VMMLIB_HERE);
#endif
        for (size_t i3 = 0; i3 < I3; ++i3) {
            data.set_column(i3, _get_slice(i3).get_row(i1)); //or for every i2 get/set column
        }
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_frontal_slice_bwd(size_t i3, bwd_front_slice_type& data) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("get_frontal_slice_bwd() - index out of bounds.", VMMLIB_HERE);
#endif
        front_slice_type* data_t = new front_slice_type();
        *data_t = _get_slice(i3);
        data_t->transpose_to(data);
        delete data_t;
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_lateral_slice_fwd(size_t i2, lat_slice_type& data) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i2 >= I2)
            VMMLIB_ERROR("get_lateral_slice_fwd() - index out of bounds.", VMMLIB_HERE);
#endif
        bwd_lat_slice_type* data_t = new bwd_lat_slice_type();
        for (size_t i3 = 0; i3 < I3; ++i3) {
            data_t->set_column(i3, _get_slice(i3).get_column(i2));
        }
        data_t->transpose_to(data);
        delete data_t;
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    get_horizontal_slice_bwd(size_t i1, bwd_horiz_slice_type& data) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i1 >= I1)
            VMMLIB_ERROR("get_horizontal_slice_bwd() - index out of bounds.", VMMLIB_HERE);
#endif
        horiz_slice_type* data_t = new horiz_slice_type();
        for (size_t i3 = 0; i3 < I3; ++i3) {
            data_t->set_column(i3, _get_slice(i3).get_row(i1)); //or for every i2 get/set column
        }
        data_t->transpose_to(data);
        delete data_t;
    }

    // setters

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_frontal_slice_fwd(size_t i3, const front_slice_type& data) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("set_frontal_slice_fwd() - index out of bounds.", VMMLIB_HERE);
#endif
        _get_slice(i3) = data;
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_lateral_slice_bwd(size_t i2, const bwd_lat_slice_type& data) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i2 >= I2)
            VMMLIB_ERROR("set_lateral_slice_bwd() - index out of bounds.", VMMLIB_HERE);
#endif
        for (size_t i3 = 0; i3 < I3; ++i3) {
            _get_slice(i3).set_column(i2, data.get_column(i3));
        }
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_horizontal_slice_fwd(size_t i1, const horiz_slice_type& data) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i1 >= I1)
            VMMLIB_ERROR("set_horizontal_slice_fwd() - index out of bounds.", VMMLIB_HERE);
#endif
        for (size_t i3 = 0; i3 < I3; ++i3) {
            _get_slice(i3).set_row(i1, data.get_column(i3));
        }
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_frontal_slice_bwd(size_t i3, const bwd_front_slice_type& data) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i3 >= I3)
            VMMLIB_ERROR("set_frontal_slice_bwd() - index out of bounds.", VMMLIB_HERE);
#endif
        front_slice_type* data_t = new front_slice_type();
        data.transpose_to(*data_t);
        _get_slice(i3) = *data_t;
        delete data_t;
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_lateral_slice_fwd(size_t i2, const lat_slice_type& data) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i2 >= I2)
            VMMLIB_ERROR("set_lateral_slice_fwd() - index out of bounds.", VMMLIB_HERE);
#endif
        bwd_lat_slice_type* data_t = new bwd_lat_slice_type();
        data.transpose_to(*data_t);
        for (size_t i3 = 0; i3 < I3; ++i3) {
            _get_slice(i3).set_column(i2, data_t->get_column(i3));
        }
        delete data_t;
    }

    VMML_TEMPLATE_STRING
    inline void
    VMML_TEMPLATE_CLASSNAME::
    set_horizontal_slice_bwd(size_t i1, const bwd_horiz_slice_type& data) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (i1 >= I1)
            VMMLIB_ERROR("set_horizontal_slice_bwd() - index out of bounds.", VMMLIB_HERE);
#endif
        horiz_slice_type* data_t = new horiz_slice_type();
        data.transpose_to(*data_t);
        for (size_t i3 = 0; i3 < I3; ++i3) {
            _get_slice(i3).set_row(i1, data_t->get_column(i3));
        }
        delete data_t;
    }

    // fill

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::
    fill(T fillValue) {
        for (size_t i3 = 0; i3 < I3; ++i3) {
            _get_slice(i3).fill(fillValue);
        }
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::
    fill_random(int seed) {
        if (seed >= 0)
            srand(seed);

        double fillValue = 0.0;
        for (size_t index = 0; index < I1 * I2 * I3; ++index) {
            fillValue = rand();
            fillValue /= RAND_MAX;
            fillValue *= std::numeric_limits< T >::max();
            _array[ index ] = static_cast<T> (fillValue);
        }
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::
    fill_random_signed(int seed) {
        if (seed >= 0)
            srand(seed);

        double fillValue = 0.0;
        for (size_t i3 = 0; i3 < I3; ++i3) {
            for (size_t i1 = 0; i1 < I1; ++i1) {
                for (size_t i2 = 0; i2 < I2; ++i2) {
                    fillValue = rand();
                    fillValue /= RAND_MAX;
                    fillValue *= std::numeric_limits< T >::max();
                    // note: operator% below assumes an integral T
                    T fillValue2 = static_cast<T> (fillValue) % std::numeric_limits< T >::max();
                    fillValue2 -= std::numeric_limits< T >::max() / 2;
                    at(i1, i2, i3) = fillValue2;
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::
    fill_rand_sym(int seed) {
        if (seed >= 0)
            srand(seed);
        assert(I1 == I2);
        assert(I1 == I3);

        double fillValue = 0.0;
        T t_fill_value = 0;
        for (size_t i3 = 0; i3 < I3; ++i3) {
            for (size_t i1 = i3; i1 < I1; ++i1) {
                for (size_t i2 = i1; i2 < I2; ++i2) {
                    fillValue = rand();
                    fillValue /= RAND_MAX;
                    fillValue *= std::numeric_limits< T >::max(); //add fillValue += 0.5; for rounding
                    t_fill_value = static_cast<T> (fillValue);

                    at(i1, i2, i3) = t_fill_value;

                    // mirror the value to all distinct permutations of (i1, i2, i3)
                    if (i1 != i2 || i1 != i3 || i2 != i3) {
                        if (i1 != i2)
                            at(i2, i1, i3) = t_fill_value;
                        if (i2 != i3)
                            at(i1, i3, i2) = t_fill_value;
                        if (i1 != i3)
                            at(i3, i2, i1) = t_fill_value;

                        if (i1 != i2 && i1 != i3 && i2 != i3) {
                            at(i2, i3, i1) = t_fill_value;
                            at(i3, i1, i2) = t_fill_value;
                        }
                    }
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::
    fill_rand_sym_slices(int seed) {
        if (seed >= 0)
            srand(seed);
        assert(I1 == I2);

        double fillValue = 0.0;
        for (size_t i3 = 0; i3 < I3; ++i3) {
            for (size_t i1 = 0; i1 < I1; ++i1) {
                for (size_t i2 = i1; i2 < I2; ++i2) {
                    fillValue = rand();
                    fillValue /= RAND_MAX;
                    fillValue *= std::numeric_limits< T >::max();
                    at(i1, i2, i3) = static_cast<T> (fillValue);
                    at(i2, i1, i3) = static_cast<T> (fillValue);
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::
    fill_increasing_values() {
        double fillValue = 0.0;
        for (size_t i3 = 0; i3 < I3; ++i3) {
            for (size_t i1 = 0; i1 < I1; ++i1) {
                for (size_t i2 = 0; i2 < I2; ++i2) {
                    at(i1, i2, i3) = static_cast<T> (fillValue);
                    fillValue++;
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::range_threshold(tensor3< I1, I2, I3, T >& other_, const T& start_value, const T& end_value) const {
        for (size_t i3 = 0; i3 < I3; ++i3) {
            for (size_t i1 = 0; i1 < I1; ++i1) {
                for (size_t i2 = 0; i2 < I2; ++i2) {
                    T value = at(i1, i2, i3);
                    if (value >= start_value && value <= end_value)
                        other_.at(i1, i2, i3) = value;
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    template< size_t R >
    typename enable_if< R == I1 && R == I2 && R == I3 >::type*
    VMML_TEMPLATE_CLASSNAME::diag(const vector< R, T >& diag_values_) {
        zero();
        for (size_t r = 0; r < R; ++r) {
            at(r, r, r) = static_cast<T> (diag_values_.at(r));
        }
        return 0;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::zero() {
        fill(static_cast<T> (0.0));
    }

    VMML_TEMPLATE_STRING
    bool
    VMML_TEMPLATE_CLASSNAME::operator==(const tensor3< I1, I2, I3, T >& other) const {
        for (size_t index = 0; index < I1 * I2 * I3; ++index) {
            if (_array[ index ] != other._array[ index ])
                return false;
        }
        return true;
    }

    VMML_TEMPLATE_STRING
    bool
    VMML_TEMPLATE_CLASSNAME::operator!=(const tensor3< I1, I2, I3, T >& other) const {
        return !operator==(other);
    }

    VMML_TEMPLATE_STRING
    bool equals(const tensor3< I1, I2, I3, T >& t3_0, const tensor3< I1, I2, I3, T >& t3_1, T tolerance) {
        return t3_0.equals(t3_1, tolerance);
    }

    VMML_TEMPLATE_STRING
    bool
    VMML_TEMPLATE_CLASSNAME::equals(const tensor3< I1, I2, I3, T >& other, T tolerance) const {
        bool is_ok = true;
        for (size_t i3 = 0; is_ok && i3 < I3; ++i3) {
            is_ok = _get_slice(i3).equals(other.get_frontal_slice_fwd(i3), tolerance);
        }
        return is_ok;
    }

    VMML_TEMPLATE_STRING
    size_t
    VMML_TEMPLATE_CLASSNAME::size() const {
        return I1 * I2 * I3;
    }

    VMML_TEMPLATE_STRING
    template< size_t J1, size_t J2, size_t J3 >
    tensor3< J1, J2, J3, T >
    VMML_TEMPLATE_CLASSNAME::
    get_sub_tensor3(size_t row_offset, size_t col_offset, size_t slice_offset,
            typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type*) const {
        tensor3< J1, J2, J3, T > result;
        get_sub_tensor3(result, row_offset, col_offset, slice_offset);
        return result;
    }

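    // usage sketch (illustrative): copy the 2x2x2 block starting at row 1,
    // column 0, slice 0 out of a larger tensor t:
    //   tensor3< 2, 2, 2, float > block = t.get_sub_tensor3< 2, 2, 2 >(1, 0, 0);
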
    VMML_TEMPLATE_STRING
    template< size_t J1, size_t J2, size_t J3 >
    typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type*
    VMML_TEMPLATE_CLASSNAME::
    get_sub_tensor3(tensor3< J1, J2, J3, T >& result,
            size_t row_offset, size_t col_offset, size_t slice_offset) const {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (J1 + row_offset > I1 || J2 + col_offset > I2 || J3 + slice_offset > I3)
            VMMLIB_ERROR("index out of bounds.", VMMLIB_HERE);
#endif
        for (size_t slice = 0; slice < J3; ++slice) {
            for (size_t row = 0; row < J1; ++row) {
                for (size_t col = 0; col < J2; ++col) {
                    result.at(row, col, slice)
                            = at(row_offset + row, col_offset + col, slice_offset + slice);
                }
            }
        }
        return 0;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::
    get_sub_tensor3( char* data_,
            const size_t i1_start, const size_t i1_end,
            const size_t i2_start, const size_t i2_end,
            const size_t i3_start, const size_t i3_end ) const
    {
        T* t_ptr = reinterpret_cast<T*>( data_ );
        for ( size_t slice = i3_start; slice <= i3_end; ++slice ) {
            for ( size_t col = i2_start; col <= i2_end; ++col ) {
                for ( size_t row = i1_start; row <= i1_end; ++row ) {
                    *t_ptr = at( row, col, slice );
                    ++t_ptr;
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    template< size_t J1, size_t J2, size_t J3 >
    typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type*
    VMML_TEMPLATE_CLASSNAME::
    set_sub_tensor3(const tensor3< J1, J2, J3, T >& sub_data_,
            size_t row_offset, size_t col_offset, size_t slice_offset) {
#ifdef VMMLIB_SAFE_ACCESSORS
        if (J1 + row_offset > I1 || J2 + col_offset > I2 || J3 + slice_offset > I3)
            VMMLIB_ERROR("index out of bounds.", VMMLIB_HERE);
#endif
        for (size_t slice = 0; slice < J3; ++slice) {
            for (size_t row = 0; row < J1; ++row) {
                for (size_t col = 0; col < J2; ++col) {
                    at(row_offset + row, col_offset + col, slice_offset + slice) = sub_data_.at(row, col, slice);
                }
            }
        }
        return 0;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::
    set_sub_tensor3( const char* data_,
            const size_t i1_start, const size_t i1_end,
            const size_t i2_start, const size_t i2_end,
            const size_t i3_start, const size_t i3_end )
    {
        const T* t_ptr = reinterpret_cast<const T*>( data_ ); // bug fix: do not cast away constness
        for ( size_t slice = i3_start; slice <= i3_end; ++slice ) {
            for ( size_t col = i2_start; col <= i2_end; ++col ) {
                for ( size_t row = i1_start; row <= i1_end; ++row ) {
                    at( row, col, slice ) = *t_ptr;
                    ++t_ptr;
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    typename VMML_TEMPLATE_CLASSNAME::iterator
    VMML_TEMPLATE_CLASSNAME::begin() {
        return iterator(*this, true);
    }

    VMML_TEMPLATE_STRING
    typename VMML_TEMPLATE_CLASSNAME::iterator
    VMML_TEMPLATE_CLASSNAME::end() {
        return iterator(*this, false);
    }

    VMML_TEMPLATE_STRING
    typename VMML_TEMPLATE_CLASSNAME::const_iterator
    VMML_TEMPLATE_CLASSNAME::begin() const {
        return const_iterator(*this, true);
    }

    VMML_TEMPLATE_STRING
    typename VMML_TEMPLATE_CLASSNAME::const_iterator
    VMML_TEMPLATE_CLASSNAME::end() const {
        return const_iterator(*this, false);
    }

    VMML_TEMPLATE_STRING
    template< typename input_iterator_t >
    void
    VMML_TEMPLATE_CLASSNAME::set(input_iterator_t begin_, input_iterator_t end_, bool row_major_layout) {
        input_iterator_t it(begin_);
        if (row_major_layout) {
            for (size_t i3 = 0; i3 < I3; ++i3) {
                for (size_t i1 = 0; i1 < I1; ++i1) {
                    for (size_t i2 = 0; i2 < I2; ++i2, ++it) {
                        if (it == end_)
                            return;
                        at(i1, i2, i3) = static_cast<T> (*it);
                    }
                }
            }
        } else {
            std::copy(it, it + (I1 * I2 * I3), begin());
        }
    }

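    // usage sketch (illustrative): fill a 2x2x2 tensor from a flat row-major
    // array:
    //   const float raw[ 8 ] = { 0, 1, 2, 3, 4, 5, 6, 7 };
    //   tensor3< 2, 2, 2, float > t;
    //   t.set(raw, raw + 8, true);
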
    VMML_TEMPLATE_STRING
    inline VMML_TEMPLATE_CLASSNAME
    VMML_TEMPLATE_CLASSNAME::operator+(const tensor3< I1, I2, I3, T >& other) const {
        tensor3< I1, I2, I3, T > result(*this);
        result += other;
        return result;
    }

    VMML_TEMPLATE_STRING
    template< size_t J1, size_t J2, size_t J3 >
    typename enable_if< J1 < I1 && J2 < I2 && J3 < I3 >::type*
    VMML_TEMPLATE_CLASSNAME::operator+=(const tensor3< J1, J2, J3, T >& other) {
        for (size_t i3 = 0; i3 < J3; ++i3) {
            for (size_t i1 = 0; i1 < J1; ++i1) {
                for (size_t i2 = 0; i2 < J2; ++i2) {
                    at(i1, i2, i3) += other.at(i1, i2, i3);
                }
            }
        }
        return 0;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::operator+=(const tensor3< I1, I2, I3, T >& other) {
        iterator it = begin(), it_end = end();
        const_iterator other_it = other.begin();
        for (; it != it_end; ++it, ++other_it) {
            *it += *other_it;
        }
    }

    VMML_TEMPLATE_STRING
    inline VMML_TEMPLATE_CLASSNAME
    VMML_TEMPLATE_CLASSNAME::operator-(const tensor3< I1, I2, I3, T >& other) const {
        tensor3< I1, I2, I3, T > result(*this);
        result -= other;
        return result;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::operator-=(const tensor3< I1, I2, I3, T >& other) {
        iterator it = begin(), it_end = end();
        const_iterator other_it = other.begin();
        for (; it != it_end; ++it, ++other_it) {
            *it -= *other_it;
        }
    }

    // sum with scalar

    VMML_TEMPLATE_STRING
    inline VMML_TEMPLATE_CLASSNAME
    VMML_TEMPLATE_CLASSNAME::operator+(T scalar) const {
        tensor3< I1, I2, I3, T > result(*this);
        result += scalar;
        return result;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::operator+=(T scalar) {
        iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            *it += scalar;
        }
    }

    VMML_TEMPLATE_STRING
    inline VMML_TEMPLATE_CLASSNAME
    VMML_TEMPLATE_CLASSNAME::operator-(T scalar) const {
        tensor3< I1, I2, I3, T > result(*this);
        result -= scalar;
        return result;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::operator-=(T scalar) {
        iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            *it -= scalar;
        }
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::horizontal_unfolding_bwd(bwd_horiz_unfolding_type& unfolding) const {
        bwd_horiz_slice_type* horizontal_slice = new bwd_horiz_slice_type();
        for (size_t i = 0; i < I1; ++i) {
            get_horizontal_slice_bwd(i, *horizontal_slice);
            for (size_t col = 0; col < I2; ++col) {
                unfolding.set_column(i * I2 + col, horizontal_slice->get_column(col));
            }
        }
        delete horizontal_slice;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::horizontal_unfolding_fwd(fwd_horiz_unfolding_type& unfolding) const {
        horiz_slice_type* horizontal_slice = new horiz_slice_type();
        for (size_t i = 0; i < I1; ++i) {
            get_horizontal_slice_fwd(i, *horizontal_slice);
            for (size_t col = 0; col < I3; ++col) {
                unfolding.set_column(i * I3 + col, horizontal_slice->get_column(col));
            }
        }
        delete horizontal_slice;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::lateral_unfolding_bwd(bwd_lat_unfolding_type& unfolding) const {
        bwd_lat_slice_type* lateral_slice = new bwd_lat_slice_type();
        for (size_t i = 0; i < I2; ++i) {
            get_lateral_slice_bwd(i, *lateral_slice);
            for (size_t col = 0; col < I3; ++col) {
                unfolding.set_column(i * I3 + col, lateral_slice->get_column(col));
            }
        }
        delete lateral_slice;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::lateral_unfolding_fwd(fwd_lat_unfolding_type& unfolding) const {
        lat_slice_type* lateral_slice = new lat_slice_type();
        for (size_t i = 0; i < I2; ++i) {
            get_lateral_slice_fwd(i, *lateral_slice);
            for (size_t col = 0; col < I1; ++col) {
                unfolding.set_column(i * I1 + col, lateral_slice->get_column(col));
            }
        }
        delete lateral_slice;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::frontal_unfolding_bwd(bwd_front_unfolding_type& unfolding) const {
        bwd_front_slice_type* frontal_slice = new bwd_front_slice_type();
        for (size_t i = 0; i < I3; ++i) {
            get_frontal_slice_bwd(i, *frontal_slice);
            for (size_t col = 0; col < I1; ++col) {
                unfolding.set_column(i * I1 + col, frontal_slice->get_column(col));
            }
        }
        delete frontal_slice;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::frontal_unfolding_fwd(fwd_front_unfolding_type& unfolding) const {
        front_slice_type* frontal_slice = new front_slice_type();
        for (size_t i = 0; i < I3; ++i) {
            get_frontal_slice_fwd(i, *frontal_slice);
            for (size_t col = 0; col < I2; ++col) {
                unfolding.set_column(i * I2 + col, frontal_slice->get_column(col));
            }
        }
        delete frontal_slice;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::horizontal_folding_bwd(const bwd_horiz_unfolding_type& unfolding) {
        bwd_horiz_slice_type* horizontal_slice = new bwd_horiz_slice_type;
        for (size_t i = 0; i < I1; ++i) {
            for (size_t col = 0; col < I2; ++col) {
                horizontal_slice->set_column(col, unfolding.get_column(i * I2 + col));
            }
            set_horizontal_slice_bwd(i, *horizontal_slice);
        }
        delete horizontal_slice;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::frontal_folding_bwd(const bwd_front_unfolding_type& unfolding) {
        bwd_front_slice_type* frontal_slice = new bwd_front_slice_type();
        for (size_t i = 0; i < I3; ++i) {
            for (size_t col = 0; col < I1; ++col) {
                frontal_slice->set_column(col, unfolding.get_column(i * I1 + col));
            }
            set_frontal_slice_bwd(i, *frontal_slice);
        }
        delete frontal_slice;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::lateral_folding_bwd(const bwd_lat_unfolding_type& unfolding) {
        bwd_lat_slice_type* lateral_slice = new bwd_lat_slice_type();
        for (size_t i = 0; i < I2; ++i) {
            for (size_t col = 0; col < I3; ++col) {
                lateral_slice->set_column(col, unfolding.get_column(i * I3 + col));
            }
            set_lateral_slice_bwd(i, *lateral_slice);
        }
        delete lateral_slice;
    }

    VMML_TEMPLATE_STRING
    tensor3< I1, I2, I3, T >
    VMML_TEMPLATE_CLASSNAME::operator*(T scalar) {
        tensor3< I1, I2, I3, T > result;
        for (size_t index = 0; index < I1 * I2 * I3; ++index) {
            result._array[ index ] = _array[ index ] * scalar;
        }
        return result; // bug fix: the result was computed but never returned
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::operator*=(T scalar) {
        for (size_t index = 0; index < I1 * I2 * I3; ++index) {
            _array[ index ] *= scalar;
        }
    }

    VMML_TEMPLATE_STRING
    tensor3< I1, I2, I3, T >
    VMML_TEMPLATE_CLASSNAME::operator/(T scalar) {
        tensor3< I1, I2, I3, T > result;

        for (size_t slice_idx = 0; slice_idx < I3; ++slice_idx) {
            for (size_t row_index = 0; row_index < I1; ++row_index) {
                for (size_t col_index = 0; col_index < I2; ++col_index) {
                    result.at(row_index, col_index, slice_idx) = at(row_index, col_index, slice_idx) / scalar;
                }
            }
        }
        return result;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::operator/=(T scalar) {
        for (size_t slice_idx = 0; slice_idx < I3; ++slice_idx) {
            for (size_t row_index = 0; row_index < I1; ++row_index) {
                for (size_t col_index = 0; col_index < I2; ++col_index) {
                    at(row_index, col_index, slice_idx) /= scalar;
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    inline tensor3< I1, I2, I3, T >
    VMML_TEMPLATE_CLASSNAME::operator-() const {
        return negate();
    }

    VMML_TEMPLATE_STRING
    tensor3< I1, I2, I3, T >
    VMML_TEMPLATE_CLASSNAME::negate() const {
        tensor3< I1, I2, I3, T > result(*this); // bug fix: negate a copy of *this, not an uninitialized tensor
        result *= -1.0;
        return result;
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::frobenius_norm(const tensor3< I1, I2, I3, T >& other_) const {
        double f_norm = 0.0;
        T abs_diff = 0;
        const_iterator it = begin(), it_end = end();
        const_iterator it_other = other_.begin();
        for (; it != it_end; ++it, ++it_other) {
            abs_diff = fabs(*it - *it_other);
            f_norm += abs_diff * abs_diff;
        }

        return sqrt(f_norm);
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::frobenius_norm() const {
        // sqrt of the sum of all squared values
        double f_norm = 0.0;
        for (long i3 = 0; i3 < long(I3); ++i3) {
            for (long i1 = 0; i1 < long(I1); ++i1) {
                for (long i2 = 0; i2 < long(I2); ++i2) {
                    f_norm += at(i1, i2, i3) * at(i1, i2, i3);
                }
            }
        }

        return sqrt(f_norm);
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::avg_frobenius_norm() const {
        double af_norm = 0.0;
        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it)
            af_norm += *it * *it;

        af_norm /= size();
        return sqrt(af_norm);
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::mse(const tensor3< I1, I2, I3, T >& other) const {
        double mse_val = 0.0;
        double diff = 0.0;
        const_iterator it = begin(), it_end = end();
        const_iterator other_it = other.begin();
        for (; it != it_end; ++it, ++other_it) {
            diff = double(*it) - double(*other_it); // bug fix: the MSE uses the difference of values, not of magnitudes
            mse_val += diff * diff;
        }

        mse_val /= (double) size();

        return mse_val;
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::rmse(const tensor3< I1, I2, I3, T >& other) const {
        return sqrt(mse(other));
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::mean() const {
        double val = 0;
        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            val += double(*it); // bug fix: the arithmetic mean (as used by variance()) sums signed values, not magnitudes
        }

        return ( val / size());
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::mean(T& mean_) const {
        mean_ = static_cast<T> (mean());
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::variance() const {
        double val = 0.0;
        double sum_val = 0.0;
        double mean_val = mean();
        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            val = double(*it) - mean_val;
            val *= val;
            sum_val += val;
        }

        return double(sum_val / (size() - 1)); // sample variance (n - 1 denominator)
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::stdev() const {
        return sqrt(variance());
    }

    VMML_TEMPLATE_STRING
    double
    VMML_TEMPLATE_CLASSNAME::compute_psnr(const tensor3< I1, I2, I3, T >& other, const T& max_value_) const {
        double rmse_val = rmse(other);
        double psnr_val = log10(max_value_ / rmse_val); // bug fix: PSNR = 20 * log10(max / RMSE), not the natural log
        psnr_val *= 20;

        return fabs(psnr_val);
    }

    VMML_TEMPLATE_STRING
    template< typename TT >
    void
    VMML_TEMPLATE_CLASSNAME::cast_from(const tensor3< I1, I2, I3, TT >& other) {
        // parallelize the outer loop only; an 'omp parallel for' on every
        // nested loop level would oversubscribe the machine
#pragma omp parallel for
        for (long slice_idx = 0; slice_idx < (long) I3; ++slice_idx) {
            for (long row_index = 0; row_index < (long) I1; ++row_index) {
                for (long col_index = 0; col_index < (long) I2; ++col_index) {
                    at(row_index, col_index, slice_idx) = static_cast<T> (other.at(row_index, col_index, slice_idx));
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    template< size_t J1, size_t J2, size_t J3, typename TT >
    void
    VMML_TEMPLATE_CLASSNAME::cast_from(const tensor3< J1, J2, J3, TT >& other, const long& slice_idx_start_) {
#pragma omp parallel for
        for (long slice_idx = slice_idx_start_; slice_idx < (long) J3; ++slice_idx) {
            for (long row_index = 0; row_index < (long) J1; ++row_index) {
                for (long col_index = 0; col_index < (long) J2; ++col_index) {
                    at(row_index, col_index, slice_idx) = static_cast<T> (other.at(row_index, col_index, slice_idx));
                }
            }
        }
    }

    VMML_TEMPLATE_STRING
    template< typename TT >
    void
    VMML_TEMPLATE_CLASSNAME::float_t_to_uint_t(const tensor3< I1, I2, I3, TT >& other) {
        typedef tensor3< I1, I2, I3, TT > t3_tt_type;
        typedef typename t3_tt_type::const_iterator tt_const_iterator;

        if (sizeof (T) == 1 || sizeof (T) == 2) {
            iterator it = begin(), it_end = end();
            tt_const_iterator other_it = other.begin();
            for (; it != it_end; ++it, ++other_it) {
                *it = T(std::min(std::max(int(0), int( *other_it + 0.5)), int(std::numeric_limits< T >::max())));
            }
        } else {
            //std::cout << "Warning: use a different target type (uint8 or uint16). No conversion done.\n" << std::endl;
            this->cast_from(other);
            return;
        }
    }

    VMML_TEMPLATE_STRING
    T
    VMML_TEMPLATE_CLASSNAME::get_min() const {
        T tensor3_min = static_cast<T> (std::numeric_limits< T >::max());

        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            if (*it < tensor3_min) {
                tensor3_min = *it;
            }
        }
        return tensor3_min;
    }

    VMML_TEMPLATE_STRING
    T
    VMML_TEMPLATE_CLASSNAME::get_max() const {
        T tensor3_max = static_cast<T> (0); // note: assumes the tensor contains at least one non-negative value

        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            if (*it > tensor3_max) {
                tensor3_max = *it;
            }
        }
        return tensor3_max;
    }

    VMML_TEMPLATE_STRING
    T
    VMML_TEMPLATE_CLASSNAME::get_abs_min() const {
        T tensor3_min = static_cast<T> (std::numeric_limits< T >::max());

        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            if (fabs(*it) < fabs(tensor3_min)) {
                tensor3_min = fabs(*it);
            }
        }
        return tensor3_min;
    }

    VMML_TEMPLATE_STRING
    T
    VMML_TEMPLATE_CLASSNAME::get_abs_max() const {
        T tensor3_max = static_cast<T> (0);

        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            if (fabs(*it) > fabs(tensor3_max)) {
                tensor3_max = fabs(*it);
            }
        }
        return tensor3_max;
    }

    VMML_TEMPLATE_STRING
    size_t
    VMML_TEMPLATE_CLASSNAME::nnz() const {
        size_t counter = 0;

        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            if (*it != 0) {
                ++counter;
            }
        }

        return counter;
    }

    VMML_TEMPLATE_STRING
    size_t
    VMML_TEMPLATE_CLASSNAME::nnz(const T& threshold_) const {
        size_t counter = 0;

        const_iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            if (fabs(*it) > threshold_) {
                ++counter;
            }
        }

        return counter;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::threshold(const T& threshold_value_) {
        iterator it = begin(), it_end = end();
        for (; it != it_end; ++it) {
            if (fabs(*it) <= threshold_value_) {
                *it = static_cast<T> (0);
            }
        }
    }

1754  VMML_TEMPLATE_STRING
1755  template< typename TT >
1756  void
1757  VMML_TEMPLATE_CLASSNAME::quantize_to(tensor3< I1, I2, I3, TT >& quantized_,
1758  const T& min_value_, const T& max_value_) const {
1759  double max_tt_range = double(std::numeric_limits< TT >::max());
1760  double min_tt_range = double(std::numeric_limits< TT >::min());
1761  double tt_range = max_tt_range - min_tt_range;
1762  double t_range = max_value_ - min_value_;
1763 
1764  //std::cout << "tt min= " << min_tt_range << ", tt max= " << max_tt_range << ", t min= " << min_value_ << ", t max= " << max_value_ << std::endl;
1765  //std::cout << "tt range=" << tt_range << ", t range= " << t_range << std::endl;
1766 
1767  typedef tensor3< I1, I2, I3, TT > t3_tt_type;
1768  typedef typename t3_tt_type::iterator tt_iterator;
1769  tt_iterator it_quant = quantized_.begin();
1770  const_iterator it = begin(), it_end = end();
1771 
1772  for (; it != it_end; ++it, ++it_quant) {
1773  if (std::numeric_limits<TT>::is_signed) {
1774  *it_quant = TT(std::min(std::max(min_tt_range, double((*it * tt_range / t_range) + 0.5)), max_tt_range));
1775  } else {
1776  *it_quant = TT(std::min(std::max(min_tt_range, double(((*it - min_value_) * tt_range / t_range) + 0.5)), max_tt_range));
1777  }
1778  //std::cout << "original value= " << double(*it) << ", converted value= " << double(*it_quant ) << std::endl;
1779  }
1780  }
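 // For an unsigned target TT the loop above implements
 //   q = clamp( round( (v - min) * (TT_max - TT_min) / (max - min) ), TT_min, TT_max ).
 // Worked example: v = 0.5 in [0, 1] quantized to uint8 (TT_max = 255) yields
 // q = round(0.5 * 255) = 128; the signed branch applies no min offset.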
1781 
1782  VMML_TEMPLATE_STRING
1783  template< typename TT >
1784  void
1785  VMML_TEMPLATE_CLASSNAME::quantize(tensor3< I1, I2, I3, TT >& quantized_, T& min_value_, T& max_value_) const {
1786  min_value_ = get_min();
1787  max_value_ = get_max();
1788 
1789  quantize_to(quantized_, min_value_, max_value_);
1790  }
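 // A minimal usage sketch for quantize(), with illustrative sizes; the
 // out-parameters receive the value range that was used for the mapping.
 //
 //   vmml::tensor3< 4, 4, 4, float > data;    // assumed filled
 //   vmml::tensor3< 4, 4, 4, unsigned char > q;
 //   float min_v = 0.0f, max_v = 0.0f;
 //   data.quantize( q, min_v, max_v );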
1791 
1792  VMML_TEMPLATE_STRING
1793  template< typename TT >
1794  void
1795  VMML_TEMPLATE_CLASSNAME::quantize_log(tensor3< I1, I2, I3, TT >& quantized_, tensor3< I1, I2, I3, char >& signs_, T& min_value_, T& max_value_, const TT& tt_range_) const {
1796  double max_tt_range = double(tt_range_);
1797  double min_tt_range = 0;
1798 
1799  min_value_ = 0;
1800  max_value_ = get_abs_max();
1801  double t_range = max_value_ - min_value_;
1802 
1803  typedef tensor3< I1, I2, I3, TT > t3_tt_type;
1804  typedef typename t3_tt_type::iterator tt_iterator;
1805  tt_iterator it_quant = quantized_.begin();
1806  const_iterator it = begin(), it_end = end();
1807 
1808  typedef tensor3< I1, I2, I3, char > t3_sign_type;
1809  typedef typename t3_sign_type::iterator sign_iterator;
1810  sign_iterator it_sign = signs_.begin();
1811 
1812  for (; it != it_end; ++it, ++it_quant, ++it_sign) {
1813  T value = fabs(*it);
1814  *it_sign = ((*it) < 0.f) ? 0 : 1;
1815  T quant_value = 0;
1816  if (std::numeric_limits<TT>::is_signed) {
1817  quant_value = log2(1 + value) / log2(1 + t_range) * tt_range_;
1818  *it_quant = TT(std::min(std::max(min_tt_range, double(quant_value + 0.5)), max_tt_range));
1819  } else {
1820  quant_value = log2(1 + (value - min_value_)) / log2(1 + t_range) * tt_range_;
1821  *it_quant = TT(std::min(std::max(min_tt_range, double(quant_value + 0.5)), max_tt_range));
1822  }
1823  }
1824  }
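 // The logarithmic mapping above is
 //   q = round( log2(1 + |v|) / log2(1 + range) * tt_range ),
 // which spends more quantization steps on small magnitudes than the linear
 // quantize_to(); the sign is kept separately in signs_ (0 = negative,
 // 1 = non-negative).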
1825 
1826  VMML_TEMPLATE_STRING
1827  template< typename TT >
1828  void
1829  VMML_TEMPLATE_CLASSNAME::quantize_to(tensor3< I1, I2, I3, TT >& quantized_,
1830  tensor3< I1, I2, I3, char >& signs_,
1831  T& min_value_, T& max_value_,
1832  const TT& tt_range_) const {
1833  double max_tt_range = double(tt_range_);
1834  double min_tt_range = 0;
1835 
1836  min_value_ = get_abs_min();
1837  max_value_ = get_abs_max();
1838  double t_range = max_value_ - min_value_;
1839 
1840  typedef tensor3< I1, I2, I3, TT > t3_tt_type;
1841  typedef typename t3_tt_type::iterator tt_iterator;
1842  tt_iterator it_quant = quantized_.begin();
1843  const_iterator it = begin(), it_end = end();
1844 
1845  typedef tensor3< I1, I2, I3, char > t3_sign_type;
1846  typedef typename t3_sign_type::iterator sign_iterator;
1847  sign_iterator it_sign = signs_.begin();
1848 
1849  for (; it != it_end; ++it, ++it_quant, ++it_sign) {
1850  T value = fabs(*it);
1851  *it_sign = ((*it) < 0.f) ? 0 : 1;
1852  if (std::numeric_limits<TT>::is_signed) {
1853  *it_quant = TT(std::min(std::max(min_tt_range, double((value * tt_range_ / t_range) + 0.5)), max_tt_range));
1854  } else {
1855  *it_quant = TT(std::min(std::max(min_tt_range, double(((value - min_value_) * tt_range_ / t_range) + 0.5)), max_tt_range));
1856  }
1857  }
1858  }
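 // Same linear mapping as the two-argument quantize_to() above, but applied
 // to the magnitudes |v|, with the sign stored separately in signs_.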
1859 
1860  VMML_TEMPLATE_STRING
1861  template< typename TT >
1862  void
1863  VMML_TEMPLATE_CLASSNAME::dequantize(tensor3< I1, I2, I3, TT >& dequantized_,
1864  const tensor3< I1, I2, I3, char >& signs_,
1865  const TT& min_value_, const TT& max_value_) const {
1866  T max_t_range = get_max();
1867  T min_t_range = get_min();
1868  long t_range = long(max_t_range) - long(min_t_range);
1869 
1870  TT tt_range = max_value_ - min_value_;
1871 
1872  typedef tensor3< I1, I2, I3, TT > t3_tt_type;
1873  typedef typename t3_tt_type::iterator tt_iterator;
1874  tt_iterator it_dequant = dequantized_.begin();
1875  const_iterator it = begin(), it_end = end();
1876 
1877  typedef tensor3< I1, I2, I3, char > t3_sign_type;
1878  typedef typename t3_sign_type::const_iterator sign_iterator;
1879  sign_iterator it_sign = signs_.begin();
1880 
1881  float sign = 0;
1882  for (; it != it_end; ++it, ++it_dequant, ++it_sign) {
1883  sign = ((*it_sign) == 0) ? -1 : 1;
1884  if (std::numeric_limits<T>::is_signed) {
1885  *it_dequant = sign * std::min(std::max(min_value_, TT((TT(*it) / t_range) * tt_range)), max_value_);
1886  } else {
1887  *it_dequant = sign * std::min(std::max(min_value_, TT((((TT(*it) / t_range)) * tt_range) + min_value_)), max_value_);
1888  }
1889  }
1890  }
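 // Counterpart of the four-argument quantize_to(): the stored magnitude is
 // mapped back linearly and the sign tensor restores the final sign.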
1891 
1892  VMML_TEMPLATE_STRING
1893  template< typename TT >
1894  void
1895  VMML_TEMPLATE_CLASSNAME::dequantize_log(tensor3< I1, I2, I3, TT >& dequantized_,
1896  const tensor3< I1, I2, I3, char >& signs_,
1897  const TT& min_value_, const TT& max_value_) const {
1898  T max_t_range = get_max();
1899  T min_t_range = get_min();
1900  long t_range = long(max_t_range) - long(min_t_range);
1901 
1902  TT tt_range = max_value_ - min_value_;
1903 
1904  typedef tensor3< I1, I2, I3, TT > t3_tt_type;
1905  typedef typename t3_tt_type::iterator tt_iterator;
1906  tt_iterator it_dequant = dequantized_.begin();
1907  const_iterator it = begin(), it_end = end();
1908 
1909  typedef tensor3< I1, I2, I3, char > t3_sign_type;
1910  typedef typename t3_sign_type::const_iterator sign_iterator;
1911  sign_iterator it_sign = signs_.begin();
1912 
1913  float sign = 0;
1914  for (; it != it_end; ++it, ++it_dequant, ++it_sign) {
1915  TT value = TT(*it);
1916  TT dequant_value = 0;
1917  sign = ((*it_sign) == 0) ? -1 : 1;
1918  if (std::numeric_limits<T>::is_signed) {
1919  dequant_value = exp2((value / t_range) * log2(1 + tt_range)) - 1;
1920  *it_dequant = sign * (std::min(std::max(min_value_, dequant_value), max_value_));
1921  } else {
1922  dequant_value = exp2((value / t_range) * log2(1 + tt_range)) - 1;
1923  *it_dequant = sign * (std::min(std::max(min_value_, dequant_value + min_value_), max_value_));
1924  }
1925  }
1926  }
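 // This undoes the companding of quantize_log():
 //   v = sign * ( 2^( (q / range) * log2(1 + tt_range) ) - 1 ),
 // where range is the value range of the stored quantized data.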
1927 
1928  VMML_TEMPLATE_STRING
1929  template< typename TT >
1930  void
1931  VMML_TEMPLATE_CLASSNAME::dequantize(tensor3< I1, I2, I3, TT >& dequantized_, const TT& min_value_, const TT& max_value_) const {
1932  T max_t_range = get_max();
1933  T min_t_range = get_min();
1934  long t_range = long(max_t_range) - long(min_t_range);
1935 
1936  TT tt_range = max_value_ - min_value_;
1937 
1938  typedef tensor3< I1, I2, I3, TT > t3_tt_type;
1939  typedef typename t3_tt_type::iterator tt_iterator;
1940  tt_iterator it_dequant = dequantized_.begin();
1941  const_iterator it = begin(), it_end = end();
1942  for (; it != it_end; ++it, ++it_dequant) {
1943  if (std::numeric_limits<T>::is_signed) {
1944  *it_dequant = std::min(std::max(min_value_, TT((TT(*it) / t_range) * tt_range)), max_value_);
1945  } else {
1946  *it_dequant = std::min(std::max(min_value_, TT((((TT(*it) / t_range)) * tt_range) + min_value_)), max_value_);
1947  }
1948  }
1949  }
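 // A hedged round-trip sketch, continuing the quantize() example further
 // above; dequantize() is only an approximate inverse because of rounding
 // and the integer value range used for the reconstruction.
 //
 //   vmml::tensor3< 4, 4, 4, float > restored;
 //   q.dequantize( restored, min_v, max_v );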
1950 
1951  VMML_TEMPLATE_STRING
1952  const VMML_TEMPLATE_CLASSNAME&
1953  VMML_TEMPLATE_CLASSNAME::operator=(const VMML_TEMPLATE_CLASSNAME& source_) {
 1954  if (this != &source_) memcpy(_array, source_._array, I1 * I2 * I3 * sizeof ( T)); // guard against self-assignment
1955 
1956  return *this;
1957  }
1958 
1959 
1960 
1961 #if 0
1962 
 1963  std::string format_path(const std::string& dir_, const std::string& filename_, const std::string& format_) {
 1964  std::string path = dir_;
 1965  int dir_length = dir_.size() - 1;
 1966  int last_separator = dir_.find_last_of("/");
 1968  if (last_separator < dir_length) {
 1969  path.append("/");
 1970  }
 1971  path.append(filename_);
 1972  //check for format suffix
 1973  if (filename_.find(format_, filename_.size() - 3) == std::string::npos) {
 1974  path.append(".");
 1975  path.append(format_);
 1976  }
 1977  return path;
 1978  }
1979 
1980 #endif
1981 
1982  VMML_TEMPLATE_STRING
 1983  typename VMML_TEMPLATE_CLASSNAME::front_slice_type&
 1984  VMML_TEMPLATE_CLASSNAME::
1985  _get_slice(size_t index_) {
1986  typedef matrix< I1, I2, T > matrix_type;
1987  return *reinterpret_cast<matrix_type*> (_array + I1 * I2 * index_);
1988  }
1989 
1990  VMML_TEMPLATE_STRING
 1991  const typename VMML_TEMPLATE_CLASSNAME::front_slice_type&
 1992  VMML_TEMPLATE_CLASSNAME::
1993  _get_slice(size_t index_) const {
1994  typedef matrix< I1, I2, T > matrix_type;
1995  return *reinterpret_cast<const matrix_type*> (_array + I1 * I2 * index_);
1996  }
1997 
1998  VMML_TEMPLATE_STRING
1999  void
2000  VMML_TEMPLATE_CLASSNAME::
2001  tensor3_allocate_data(T*& array_) {
2002  array_ = new T[ I1 * I2 * I3];
2003  }
2004 
2005  VMML_TEMPLATE_STRING
2006  void
2007  VMML_TEMPLATE_CLASSNAME::
2008  tensor3_deallocate_data(T*& array_) {
2009  if (array_) {
2010  delete[] array_;
2011  }
2012  }
2013 
2014  VMML_TEMPLATE_STRING
2015  T*
2016  VMML_TEMPLATE_CLASSNAME::get_array_ptr() {
2017  return _array;
2018  }
2019 
2020  VMML_TEMPLATE_STRING
2021  const T*
2022  VMML_TEMPLATE_CLASSNAME::get_array_ptr() const {
2023  return _array;
2024  }
2025 
2026  VMML_TEMPLATE_STRING
2027  template< size_t R >
2028  void
2029  VMML_TEMPLATE_CLASSNAME::
2030  reconstruct_CP(
2031  const vmml::vector< R, T>& lambda,
 2032  vmml::matrix< R, I1, T >& U,
 2033  const vmml::matrix< R, I2, T >& V,
2034  const vmml::matrix< R, I3, T >& W,
 2035  vmml::matrix< R, I2 * I3, T >& temp
 2036  ) {
2037  for (size_t j = 0; j < I2; j++) {
2038  for (size_t k = 0; k < I3; k++) {
2039  for (size_t r = 0; r < R; r++) {
2040  temp(r, j + k * I2) = V(r, j) * W(r, k);
2041  }
2042  }
2043  }
2044 
2045  for (size_t i = 0; i < I1; i++) {
2046  for (size_t r = 0; r < R; r++) {
2047  U(r, i) = lambda[r] * U(r, i);
2048  }
2049  }
2050 
2051  vector< R, T > ui;
2052  vector< R, T > tmpi;
2053  blas_dot< R, T > bdot;
2054  for (size_t k = 0; k < I3; k++) {
2055  for (size_t j = 0; j < I2; j++) {
2056  for (size_t i = 0; i < I1; i++) {
2057  T& value = at(i, j, k);
2058  value = static_cast<T> (0.0);
2059 
2060 #if 0
2061  ui = U.get_column(i);
2062  tmpi = temp.get_column(j + k * I2);
2063  bdot.compute(ui, tmpi, value);
2064 
2065 #else
2066  for (size_t r = 0; r < R; ++r)
2067  value += U(r, i) * temp(r, j + k * I2);
2068 
2069 #endif
2070  }
2071  }
2072  }
2073  }
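 // A minimal usage sketch for reconstruct_CP() with a rank-2 model on a
 // 2x3x4 tensor; sizes and the fill step are illustrative. Note that U is
 // scaled in place by lambda, so pass a copy if the factors must be kept.
 //
 //   vmml::tensor3< 2, 3, 4, float > t;
 //   vmml::vector< 2, float > lambda;     // per-rank weights
 //   vmml::matrix< 2, 2, float > U;       // R x I1
 //   vmml::matrix< 2, 3, float > V;       // R x I2
 //   vmml::matrix< 2, 4, float > W;       // R x I3
 //   vmml::matrix< 2, 12, float > temp;   // R x (I2 * I3) scratch
 //   // ... fill lambda, U, V, W ...
 //   t.reconstruct_CP( lambda, U, V, W, temp );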
2074 
2075  VMML_TEMPLATE_STRING
2076  template< typename float_t>
2077  void
2078  VMML_TEMPLATE_CLASSNAME::
2079  apply_spherical_weights(tensor3< I1, I2, I3, float_t >& other) {
2080  //piecewise multiplication of every frontal slice with the weights (spherical)
2081  for (size_t i3 = 0; i3 < I3; ++i3) {
 2082  long k3 = long(i3) - long(I3 / 2); // signed: i3 - I3/2 underflows size_t left of the center
 2083  for (size_t i1 = 0; i1 < I1; ++i1) {
 2084  long k1 = long(i1) - long(I1 / 2);
 2085  for (size_t i2 = 0; i2 < I2; ++i2) {
 2086  long k2 = long(i2) - long(I2 / 2);
 2087  float_t weight = sqrtf(float(k1 * k1 + k2 * k2 + k3 * k3)) + 0.0000001f;
2088  weight = exp(-weight); //or try exp(- weight * factor)
2089  float_t value = static_cast<float_t> (at(i1, i2, i3));
2090  //value = (value > 35) ? (value - 35) : 0;
2091  other.at(i1, i2, i3) = static_cast<float_t> (weight * value);
2092  }
2093  }
2094  }
2095  }
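 // The weight used above is w = exp( -(sqrt(k1^2 + k2^2 + k3^2) + 1e-7) )
 // with coordinates centered on the tensor midpoint, so entries are damped
 // exponentially with their distance from the center.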
2096 
2097  VMML_TEMPLATE_STRING
2098  void
2099  VMML_TEMPLATE_CLASSNAME::
2100  get_sphere() {
2101  for (size_t i3 = 0; i3 < I3; ++i3) {
 2102  long k3 = long(i3) - long(I3 / 2); // signed: avoids size_t underflow
 2103  for (size_t i1 = 0; i1 < I1; ++i1) {
 2104  long k1 = long(i1) - long(I1 / 2);
 2105  for (size_t i2 = 0; i2 < I2; ++i2) {
 2106  long k2 = long(i2) - long(I2 / 2);
 2107  float radius = sqrtf(float(k1 * k1 + k2 * k2 + k3 * k3));
 2108  // FIXME: choose an appropriate bound per mode (only I1 / 2 is used below)
2109  if (radius >= (I1 / 2))
2110  at(i1, i2, i3) = 0;
2111  }
2112  }
2113  }
2114  }
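 // get_sphere() zeroes every entry whose centered distance from the tensor
 // midpoint reaches I1 / 2, keeping only the inscribed ball (see the FIXME
 // above: the bound currently uses the first mode only).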
2115 
2116  VMML_TEMPLATE_STRING
2117  template< size_t R, typename TT >
2118  double
2119  VMML_TEMPLATE_CLASSNAME::tensor_inner_product(
2120  const vmml::vector< R, TT>& lambda,
2121  const vmml::matrix< I1, R, TT >& U,
2122  const vmml::matrix< I2, R, TT >& V,
2123  const vmml::matrix< I3, R, TT >& W) const {
 2124  double inner_prod(0); // accumulate in double, matching the declared return type
2125  for (size_t r = 0; r < R; ++r) {
2126  for (size_t k = 0; k < I3; ++k) {
2127  for (size_t j = 0; j < I2; ++j) {
2128  for (size_t i = 0; i < I1; ++i) {
2129  inner_prod += at(i, j, k) * U(i, r) * V(j, r) * W(k, r) * lambda.at(r);
2130  }
2131  }
2132  }
2133  }
2134  return inner_prod;
2135  }
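 // The quadruple loop above evaluates the inner product of the tensor with a
 // rank-R CP model,
 //   < A, [[lambda; U, V, W]] > = sum_r lambda_r sum_{i,j,k} a_ijk u_ir v_jr w_kr,
 // at O(R * I1 * I2 * I3) cost.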
2136 
2137  VMML_TEMPLATE_STRING
2138  template< size_t K1, size_t K2, size_t K3 >
2139  void
2140  VMML_TEMPLATE_CLASSNAME::average_8to1(tensor3< K1, K2, K3, T >& other) const {
2141  assert(I1 / 2 >= K1);
2142  assert(I2 / 2 >= K2);
2143  assert(I3 / 2 >= K3);
2144 
2145  typedef matrix< K1, K2, T > other_slice_type;
2146  typedef matrix< K1, K2, float > other_slice_float_type;
2147  typedef matrix< K1, I2, T> sub_row_slice_type;
2148 
2149  front_slice_type* slice0 = new front_slice_type;
2150  front_slice_type* slice1 = new front_slice_type;
2151  sub_row_slice_type* sub_row_slice = new sub_row_slice_type;
2152  other_slice_type* slice_other = new other_slice_type;
2153  other_slice_float_type* slice_float_other = new other_slice_float_type;
2154 
2155  other.zero();
2156 
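 // note: i3 also advances inside the body (get_frontal_slice_fwd(i3++, ...)),
 // so each pass of this loop consumes a pair of frontal slices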
2157  for (size_t i3 = 0, k3 = 0; i3 < I3; ++i3, ++k3) {
2158  get_frontal_slice_fwd(i3++, *slice0);
2159  if (i3 < I3) {
2160  get_frontal_slice_fwd(i3, *slice1);
2161 
2162  *slice0 += *slice1;
2163  slice0->sum_rows(*sub_row_slice);
2164  sub_row_slice->sum_columns(*slice_other);
2165 
2166  *slice_float_other = *slice_other;
2167  *slice_float_other /= 8.0;
2168  *slice_float_other += 0.5;
2169 
2170  slice_other->cast_from(*slice_float_other);
2171 
2172  other.set_frontal_slice_fwd(k3, *slice_other);
2173  }
2174  }
2175 
2176  delete slice0;
2177  delete slice1;
2178  delete slice_other;
 2179  delete sub_row_slice; delete slice_float_other;
2180  }
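 // A minimal usage sketch: average_8to1() halves every mode by averaging
 // 2x2x2 blocks (eight source voxels per target voxel); sizes illustrative.
 //
 //   vmml::tensor3< 8, 8, 8, unsigned char > fine;  // assumed filled
 //   vmml::tensor3< 4, 4, 4, unsigned char > coarse;
 //   fine.average_8to1( coarse );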
2181 
2182  VMML_TEMPLATE_STRING
2183  size_t
2184  VMML_TEMPLATE_CLASSNAME::get_array_size_in_bytes() {
2185  return (sizeof (T) * SIZE);
2186  }
2187 
2188 
2189 
2190  // WARNING: dangerous. Use before destruction if you want to prevent
2191  // a delete call for the assigned T* _array in the destructor.
2192 
2193  VMML_TEMPLATE_STRING
2194  void
2195  VMML_TEMPLATE_CLASSNAME::clear_array_pointer() {
2196  _array = 0;
2197  }
2198 
2199 
2200 
2201 #undef VMML_TEMPLATE_STRING
2202 #undef VMML_TEMPLATE_CLASSNAME
2203 
2204 } // namespace vmml
2205 
2206 #endif
2207