// vmmlib 1.7.0
// All Classes | Namespaces | Functions | Pages
// tucker3_exporter.hpp
1 /*
2  * Copyright (c) 2006-2014, Visualization and Multimedia Lab,
3  * University of Zurich <http://vmml.ifi.uzh.ch>,
4  * Eyescale Software GmbH,
5  * Blue Brain Project, EPFL
6  *
7  * This file is part of VMMLib <https://github.com/VMML/vmmlib/>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  *
12  * Redistributions of source code must retain the above copyright notice, this
13  * list of conditions and the following disclaimer. Redistributions in binary
14  * form must reproduce the above copyright notice, this list of conditions and
15  * the following disclaimer in the documentation and/or other materials provided
16  * with the distribution. Neither the name of the Visualization and Multimedia
17  * Lab, University of Zurich nor the names of its contributors may be used to
18  * endorse or promote products derived from this software without specific prior
19  * written permission.
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* @author Susanne Suter
34  *
35  * Export tool for Tucker3 tensor and quantized Tucker3 tensor
36  *
37  */
38 
39 
40 #ifndef __VMML__TUCK3_EXPORTER__HPP__
41 #define __VMML__TUCK3_EXPORTER__HPP__
42 
#include <vmmlib/qtucker3_tensor.hpp>

#include <cstddef> // size_t
#include <cstring> // memcpy
#include <vector>
44 
45 
46 /* FIXME:
47  *
48  * - T_internal
49  * - const input argument for tucker3 data
50  */
51 
52 namespace vmml
53 {
54 
55  template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value = float, typename T_coeff = float >
57  {
58  public:
59 
60  typedef float T_internal; //FIXME! should match with tucker3 tensor
61 
64 
68 
70  typedef typename u1_type::iterator u1_iterator;
71  typedef typename u1_type::const_iterator u1_const_iterator;
72 
74  typedef typename u2_type::iterator u2_iterator;
75  typedef typename u2_type::const_iterator u2_const_iterator;
76 
78  typedef typename u3_type::iterator u3_iterator;
79  typedef typename u3_type::const_iterator u3_const_iterator;
80 
82 
83  template< typename T >
84  static void export_to( std::vector< T >& data_, tucker3_type& tuck3_data_ );
85 
86  //previous version, but works only with 16bit quantization
87  static void export_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ );
88 
89  //use this version, works with a better quantization for the core tensor:
90  //logarithmic quantization and separate high energy core vale
91  //suitable for voxelwise reconstruction
92  static void export_hot_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ );
93 
94  //use this version for the ttm export/import (core: backward cyclic), without plain hot value
95  static void export_ttm_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ );
96 
97 
98  }; //end tucker3 exporter class
99 
100 #define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value, typename T_coeff >
101 #define VMML_TEMPLATE_CLASSNAME tucker3_exporter< R1, R2, R3, I1, I2, I3, T_value, T_coeff >
102 
103 
104 VMML_TEMPLATE_STRING
105 template< typename T >
106 void
107 VMML_TEMPLATE_CLASSNAME::export_to( std::vector< T >& data_, tucker3_type& tuck3_data_ )
108 {
109 
110  data_.clear();
111 
112  u1_type* u1 = new u1_type;
113  u2_type* u2 = new u2_type;
114  u3_type* u3 = new u3_type;
115  t3_core_type core;
116 
117  tuck3_data_.get_u1( *u1 );
118  tuck3_data_.get_u2( *u2 );
119  tuck3_data_.get_u3( *u3 );
120  tuck3_data_.get_core( core );
121 
122  tuck3_data_.cast_members();
123 
124  u1_const_iterator it = u1->begin(),
125  it_end = u1->end();
126  for( ; it != it_end; ++it )
127  {
128  data_.push_back( static_cast< T >( *it) );
129  }
130 
131  u2_const_iterator u2_it = u2->begin(),
132  u2_it_end = u2->end();
133  for( ; u2_it != u2_it_end; ++u2_it )
134  {
135  data_.push_back(static_cast< T >(*u2_it) );
136  }
137 
138  u3_const_iterator u3_it = u3->begin(),
139  u3_it_end = u3->end();
140  for( ; u3_it != u3_it_end; ++u3_it )
141  {
142  data_.push_back(static_cast< T >( *u3_it) );
143  }
144 
145  t3_core_iterator it_core = core.begin(),
146  it_core_end = core.end();
147  for( ; it_core != it_core_end; ++it_core )
148  {
149  data_.push_back(static_cast< T >( *it_core) );
150  }
151 
152  delete u1;
153  delete u2;
154  delete u3;
155 }
156 
157 
158 
159 
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::export_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ )
{
    // Serializes a linearly quantized Tucker3 tensor ("previous" 16-bit-style
    // format) into data_out_ as raw bytes. Layout written below, in order:
    //   [u_min][u_max][core_min][core_max]   4 x T_internal header
    //   [U1][U2][U3]                         T_coeff elements, memcpy'd
    //   [core]                               frontal slices, forward order
    //quantize tucker3 components (u1-u3 and core)
    size_t len_t_comp = sizeof( T_internal );
    // NOTE(review): 8 * len_t_comp header bytes are reserved, but only 4
    // T_internal values are written below — the trailing 4 * len_t_comp bytes
    // of the buffer stay uninitialized yet are still exported. Confirm the
    // matching importer expects this padding.
    size_t len_export_data = tuck3_data_.SIZE * sizeof(T_coeff) + 8 * len_t_comp;
    char * data = new char[ len_export_data ];
    size_t end_data = 0; // running write offset into data

    //quantize basis matrices and copy min-max values
    T_internal u_min, u_max;
    tuck3_data_.quantize_basis_matrices( u_min, u_max);
    memcpy( data, &u_min, len_t_comp ); end_data = len_t_comp;
    memcpy( data + end_data, &u_max, len_t_comp ); end_data += len_t_comp;

    // Heap-allocated: the factor matrices may be too large for the stack.
    u1_type* u1 = new u1_type;
    u2_type* u2 = new u2_type;
    u3_type* u3 = new u3_type;
    t3_core_type core;

    tuck3_data_.get_u1( *u1 );
    tuck3_data_.get_u2( *u2 );
    tuck3_data_.get_u3( *u3 );
    tuck3_data_.get_core( core );

    //quantize core and copy min-max values
    T_internal core_min, core_max;
    tuck3_data_.quantize_core( core_min, core_max );
    memcpy( data + end_data, &core_min, len_t_comp ); end_data += len_t_comp;
    memcpy( data + end_data, &core_max, len_t_comp ); end_data += len_t_comp;

    //copy data for u1
    // memcpy from the matrix object itself — relies on the vmml matrix type
    // storing its elements contiguously as its first data member.
    size_t len_u1 = I1 * R1 * sizeof( T_coeff );
    memcpy( data + end_data, *u1, len_u1 ); end_data += len_u1;

    //copy data for u2
    size_t len_u2 = I2 * R2 * sizeof( T_coeff );
    memcpy( data + end_data, *u2, len_u2 ); end_data += len_u2;

    //copy data for u3
    size_t len_u3 = I3 * R3 * sizeof( T_coeff );
    memcpy( data + end_data, *u3, len_u3 ); end_data += len_u3;

    //copy data for core
    // Core is written slice by slice: R3 frontal slices of R1 x R2 coefficients.
    size_t len_core_slice = R1 * R2 * sizeof( T_coeff );
    for (size_t r3 = 0; r3 < R3; ++r3 ) {
        memcpy( data + end_data, core.get_frontal_slice_fwd( r3 ), len_core_slice );
        end_data += len_core_slice;
    }

    // Hand the staging buffer over byte by byte.
    data_out_.clear();
    for( size_t byte = 0; byte < len_export_data; ++byte )
    {
        data_out_.push_back( data[byte] );
    }
    delete[] data;

    delete u1;
    delete u2;
    delete u3;
}
222 
223 
224 
225 
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::export_hot_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ )
{
    // Serializes a Tucker3 tensor with logarithmically quantized core and a
    // separately stored high-energy ("hottest") core value. Layout, in order:
    //   [u_min][u_max][core_max][hottest_value]   4 x T_internal header
    //   [U1][U2][U3]                              T_coeff elements, memcpy'd
    //   [core]                                    1 byte per element: sign bit | 7-bit value
    tuck3_data_.enable_quantify_hot();
    //quantize tucker3 components (u1-u3 and core)
    size_t len_t_comp = sizeof( T_internal );
    // R1*R2*R3 core bytes + factor matrices + 4 header floats (see layout above).
    size_t len_export_data = R1*R2*R3 + (R1*I1 + R2*I2 + R3*I3) * sizeof(T_coeff) + 4 * len_t_comp;
    char * data = new char[ len_export_data ];
    size_t end_data = 0; // running write offset into data

    //quantize basis matrices and copy min-max values
    T_internal u_min, u_max;
    tuck3_data_.quantize_basis_matrices( u_min, u_max);
    memcpy( data, &u_min, len_t_comp ); end_data = len_t_comp;
    memcpy( data + end_data, &u_max, len_t_comp ); end_data += len_t_comp;

    //quantize core and copy min-max values
    T_internal core_min, core_max;
    tuck3_data_.quantize_core( core_min, core_max );
    //memcpy( data + end_data, &core_min, len_t_comp ); end_data += len_t_comp; min_value is always zero in log quant
    memcpy( data + end_data, &core_max, len_t_comp ); end_data += len_t_comp;

    // Heap-allocated: the factor matrices may be too large for the stack.
    u1_type* u1 = new u1_type;
    u2_type* u2 = new u2_type;
    u3_type* u3 = new u3_type;
    t3_core_type core;
    t3_core_signs_type signs;

    tuck3_data_.get_u1( *u1 );
    tuck3_data_.get_u2( *u2 );
    tuck3_data_.get_u3( *u3 );
    tuck3_data_.get_core( core );
    tuck3_data_.get_core_signs( signs );
    T_internal hottest_value = tuck3_data_.get_hottest_value();

    //copy first value of core tensor separately as a float
    memcpy( data + end_data, &hottest_value, len_t_comp ); end_data += len_t_comp;

    //copy data for u1
    // memcpy from the matrix object itself — relies on the vmml matrix type
    // storing its elements contiguously as its first data member.
    size_t len_u1 = I1 * R1 * sizeof( T_coeff );
    memcpy( data + end_data, *u1, len_u1 ); end_data += len_u1;

    //copy data for u2
    size_t len_u2 = I2 * R2 * sizeof( T_coeff );
    memcpy( data + end_data, *u2, len_u2 ); end_data += len_u2;

    //copy data for u3
    size_t len_u3 = I3 * R3 * sizeof( T_coeff );
    memcpy( data + end_data, *u3, len_u3 ); end_data += len_u3;

    //copy data for core
    size_t len_core_el = 1; //currently 1 bit for sign and 7 bit for values

    //column-first iteration (r1 fastest, then r2, then r3 — forward cyclic)
    unsigned char core_el;
    for (size_t r3 = 0; r3 < R3; ++r3 ) {
        for (size_t r2 = 0; r2 < R2; ++r2 ) {
            for (size_t r1 = 0; r1 < R1; ++r1 ) {
                // Pack the 7-bit quantized magnitude with the sign in bit 7.
                core_el = (core.at( r1, r2, r3 ) | (signs.at( r1, r2, r3) * 0x80 ));
                /*std::cout << "value: " << int(_core.at( r1, r2, r3 )) << " bit " << int( core_el )
                << " sign: " << int(_signs.at( r1, r2, r3)) << std::endl;*/
                memcpy( data + end_data, &core_el, len_core_el );
                ++end_data;
            }
        }
    }

    // Hand the staging buffer over byte by byte.
    data_out_.clear();
    for( size_t byte = 0; byte < len_export_data; ++byte )
    {
        data_out_.push_back( data[byte] );
    }

    delete[] data;
    delete u1;
    delete u2;
    delete u3;
}
305 
306 
307 
308 
309 
310 
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::export_ttm_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ )
{
    // Serializes a log-quantized Tucker3 tensor for TTM export/import. Same
    // idea as export_hot_quantized_to but without the plain hot value, and
    // with the core written in backward-cyclic order. Layout, in order:
    //   [u_min][u_max][core_max]   3 x T_internal header
    //   [U1][U2][U3]               T_coeff elements, memcpy'd
    //   [core]                     1 byte per element: sign bit | 7-bit value
    tuck3_data_.enable_quantify_log();
    //quantize tucker3 components (u1-u3 and core)
    size_t len_t_comp = sizeof( T_internal );
    // R1*R2*R3 core bytes + factor matrices + 3 header floats (no hottest value here).
    size_t len_export_data = R1*R2*R3 + (R1*I1 + R2*I2 + R3*I3) * sizeof(T_coeff) + 3 *len_t_comp;
    char * data = new char[ len_export_data ];
    size_t end_data = 0; // running write offset into data

    //quantize basis matrices and copy min-max values
    T_internal u_min, u_max;
    tuck3_data_.quantize_basis_matrices( u_min, u_max);
    memcpy( data, &u_min, len_t_comp ); end_data = len_t_comp;
    memcpy( data + end_data, &u_max, len_t_comp ); end_data += len_t_comp;

    //quantize core and copy min-max values
    T_internal core_min, core_max;
    tuck3_data_.quantize_core( core_min, core_max );
    //memcpy( data + end_data, &core_min, len_t_comp ); end_data += len_t_comp; min_value is always zero in log quant
    memcpy( data + end_data, &core_max, len_t_comp ); end_data += len_t_comp;

    // Heap-allocated: the factor matrices may be too large for the stack.
    u1_type* u1 = new u1_type;
    u2_type* u2 = new u2_type;
    u3_type* u3 = new u3_type;
    t3_core_type core;
    t3_core_signs_type signs;

    tuck3_data_.get_u1( *u1 );
    tuck3_data_.get_u2( *u2 );
    tuck3_data_.get_u3( *u3 );
    tuck3_data_.get_core( core );
    tuck3_data_.get_core_signs( signs );

    //copy data for u1
    // memcpy from the matrix object itself — relies on the vmml matrix type
    // storing its elements contiguously as its first data member.
    size_t len_u1 = I1 * R1 * sizeof( T_coeff );
    memcpy( data + end_data, *u1, len_u1 ); end_data += len_u1;

    //copy data for u2
    size_t len_u2 = I2 * R2 * sizeof( T_coeff );
    memcpy( data + end_data, *u2, len_u2 ); end_data += len_u2;

    //copy data for u3
    size_t len_u3 = I3 * R3 * sizeof( T_coeff );
    memcpy( data + end_data, *u3, len_u3 ); end_data += len_u3;

    //copy data for core
    size_t len_core_el = 1; //currently 1 bit for sign and 7 bit for values

    //column-first iteration
    //backward cycling after Lathauwer et al. (note: r2 outermost, r3 middle, r1 innermost)
    unsigned char core_el;
    for (size_t r2 = 0; r2 < R2; ++r2 ) {
        for (size_t r3 = 0; r3 < R3; ++r3 ) {
            for (size_t r1 = 0; r1 < R1; ++r1 ) {
                // Pack the 7-bit quantized magnitude with the sign in bit 7.
                core_el = (core.at( r1, r2, r3 ) | (signs.at( r1, r2, r3) * 0x80 ));
                /*std::cout << "value: " << int(_core.at( r1, r2, r3 )) << " bit " << int( core_el )
                << " sign: " << int(_signs.at( r1, r2, r3)) << std::endl;*/
                memcpy( data + end_data, &core_el, len_core_el );
                ++end_data;
            }
        }
    }

    // Hand the staging buffer over byte by byte.
    data_out_.clear();
    for( size_t byte = 0; byte < len_export_data; ++byte )
    {
        data_out_.push_back( data[byte] );
    }

    delete[] data;
    delete u1;
    delete u2;
    delete u3;
}
387 
388 #undef VMML_TEMPLATE_STRING
389 #undef VMML_TEMPLATE_CLASSNAME
390 
391 
392 } // namespace vmml
393 
394 
395 #endif
396