tucker3_importer.hpp
/*
 * Copyright (c) 2006-2014, Visualization and Multimedia Lab,
 * University of Zurich <http://vmml.ifi.uzh.ch>,
 * Eyescale Software GmbH,
 * Blue Brain Project, EPFL
 *
 * This file is part of VMMLib <https://github.com/VMML/vmmlib/>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer. Redistributions in binary
 * form must reproduce the above copyright notice, this list of conditions and
 * the following disclaimer in the documentation and/or other materials provided
 * with the distribution. Neither the name of the Visualization and Multimedia
 * Lab, University of Zurich nor the names of its contributors may be used to
 * endorse or promote products derived from this software without specific prior
 * written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* @author Susanne Suter
 *
 * Import tool for Tucker3 tensors and quantized Tucker3 tensors
 */

#ifndef __VMML__TUCK3_IMPORTER__HPP__
#define __VMML__TUCK3_IMPORTER__HPP__

#include <vmmlib/qtucker3_tensor.hpp>

/* FIXME:
 *
 * - T_internal
 */

namespace vmml
{

template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value = float, typename T_coeff = float >
class tucker3_importer
{
public:

    typedef float T_internal; //FIXME! should match the tucker3 tensor

    typedef tucker3_tensor< R1, R2, R3, I1, I2, I3, T_value, T_coeff > tucker3_type;
    typedef qtucker3_tensor< R1, R2, R3, I1, I2, I3, T_value, T_coeff > qtucker3_type;

    typedef typename qtucker3_type::t3_core_type t3_core_type;
    typedef typename t3_core_type::iterator t3_core_iterator;
    typedef typename t3_core_type::const_iterator t3_core_const_iterator;

    typedef typename qtucker3_type::u1_type u1_type;
    typedef typename u1_type::iterator u1_iterator;
    typedef typename u1_type::const_iterator u1_const_iterator;

    typedef typename qtucker3_type::u2_type u2_type;
    typedef typename u2_type::iterator u2_iterator;
    typedef typename u2_type::const_iterator u2_const_iterator;

    typedef typename qtucker3_type::u3_type u3_type;
    typedef typename u3_type::iterator u3_iterator;
    typedef typename u3_type::const_iterator u3_const_iterator;

    typedef matrix< R1, R2, T_coeff > front_core_slice_type; //fwd: forward cycling (after Kiers et al., 2000)

    typedef typename qtucker3_type::t3_core_signs_type t3_core_signs_type;

    template< typename T >
    static void import_from( const std::vector< T >& data_, tucker3_type& tuck3_data_ );

    //previous version; works only with 16-bit quantization
    static void import_quantized_from( const std::vector<unsigned char>& data_in_, qtucker3_type& tuck3_data_ );

    //use this version: it applies a better quantization to the core tensor
    //(logarithmic quantization plus a separately stored high-energy core value)
    //and is suitable for voxel-wise reconstruction
    static void import_hot_quantized_from( const std::vector<unsigned char>& data_in_, qtucker3_type& tuck3_data_ );

    //use this version for the TTM export/import (core: backward cyclic), without the plain hot value
    static void import_ttm_quantized_from( const std::vector<unsigned char>& data_in_, qtucker3_type& tuck3_data_ );

}; //end tucker3 importer class
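
/* Usage sketch (illustrative only, not part of the library). The ranks
 * (R1, R2, R3) = (4, 4, 4) and sizes (I1, I2, I3) = (16, 16, 16) below are
 * hypothetical, and tucker3_type::SIZE is assumed to be the total number of
 * exported values, as checked by import_from():
 *
 *   typedef vmml::tucker3_importer< 4, 4, 4, 16, 16, 16 > importer_t;
 *
 *   importer_t::tucker3_type tuck3;
 *   std::vector< float > data( importer_t::tucker3_type::SIZE );
 *   // ... fill data with U1, U2, U3 and core values, in that order ...
 *   importer_t::import_from( data, tuck3 );
 */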

#define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value, typename T_coeff >
#define VMML_TEMPLATE_CLASSNAME tucker3_importer< R1, R2, R3, I1, I2, I3, T_value, T_coeff >

VMML_TEMPLATE_STRING
template< typename T >
void
VMML_TEMPLATE_CLASSNAME::import_from( const std::vector< T >& data_, tucker3_type& tuck3_data_ )
{
    size_t i = 0; //iterator over data_
    size_t data_size = (size_t) data_.size();

    if ( data_size != tuck3_data_.SIZE )
        VMMLIB_ERROR( "import_from: the input data must have the size R1xR2xR3 + R1xI1 + R2xI2 + R3xI3", VMMLIB_HERE );

    u1_type* u1 = new u1_type;
    u2_type* u2 = new u2_type;
    u3_type* u3 = new u3_type;
    t3_core_type core;

    tuck3_data_.get_u1( *u1 );
    tuck3_data_.get_u2( *u2 );
    tuck3_data_.get_u3( *u3 );
    tuck3_data_.get_core( core );

    u1_iterator it = u1->begin(),
        it_end = u1->end();
    for( ; it != it_end; ++it, ++i )
    {
        *it = static_cast< T >( data_.at( i ));
    }

    u2_iterator u2_it = u2->begin(),
        u2_it_end = u2->end();
    for( ; u2_it != u2_it_end; ++u2_it, ++i )
    {
        *u2_it = static_cast< T >( data_.at( i ));
    }

    u3_iterator u3_it = u3->begin(),
        u3_it_end = u3->end();
    for( ; u3_it != u3_it_end; ++u3_it, ++i )
    {
        *u3_it = static_cast< T >( data_.at( i ));
    }

    t3_core_iterator it_core = core.begin(),
        it_core_end = core.end();
    for( ; it_core != it_core_end; ++it_core, ++i )
    {
        *it_core = static_cast< T >( data_.at( i ));
    }

    tuck3_data_.set_u1( *u1 );
    tuck3_data_.set_u2( *u2 );
    tuck3_data_.set_u3( *u3 );
    tuck3_data_.set_core( core );

    tuck3_data_.cast_comp_members();

    delete u1;
    delete u2;
    delete u3;
}
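
/* The flat layout consumed by import_from() above, in reading order:
 *
 *   [ U1: R1*I1 values ][ U2: R2*I2 values ][ U3: R3*I3 values ]
 *   [ core: R1*R2*R3 values ]
 *
 * so data_.size() must equal R1*I1 + R2*I2 + R3*I3 + R1*R2*R3, which is the
 * tucker3_type::SIZE checked at the top of the function.
 */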


VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::import_quantized_from( const std::vector<unsigned char>& data_in_, qtucker3_type& tuck3_data_ )
{
    size_t end_data = 0;
    size_t len_t_comp = sizeof( T_internal );
    size_t len_export_data = tuck3_data_.SIZE * sizeof(T_coeff) + 8 * len_t_comp;
    unsigned char * data = new unsigned char[ len_export_data ];
    for( size_t byte = 0; byte < len_export_data; ++byte )
    {
        data[byte] = data_in_.at(byte);
    }

    //copy min and max values: u1_min, u1_max, u2_min, u2_max, u3_min, u3_max, core_min, core_max
    T_internal u_min = 0; T_internal u_max = 0;
    memcpy( &u_min, data, len_t_comp ); end_data = len_t_comp;
    memcpy( &u_max, data + end_data, len_t_comp ); end_data += len_t_comp;

    T_internal core_min = 0; T_internal core_max = 0;
    memcpy( &core_min, data + end_data, len_t_comp ); end_data += len_t_comp;
    memcpy( &core_max, data + end_data, len_t_comp ); end_data += len_t_comp;

    u1_type* u1 = new u1_type;
    u2_type* u2 = new u2_type;
    u3_type* u3 = new u3_type;
    t3_core_type core;

    tuck3_data_.get_u1( *u1 );
    tuck3_data_.get_u2( *u2 );
    tuck3_data_.get_u3( *u3 );
    tuck3_data_.get_core( core );

    //copy data to u1
    size_t len_u1 = I1 * R1 * sizeof( T_coeff );
    memcpy( *u1, data + end_data, len_u1 ); end_data += len_u1;

    //copy data to u2
    size_t len_u2 = I2 * R2 * sizeof( T_coeff );
    memcpy( *u2, data + end_data, len_u2 ); end_data += len_u2;

    //copy data to u3
    size_t len_u3 = I3 * R3 * sizeof( T_coeff );
    memcpy( *u3, data + end_data, len_u3 ); end_data += len_u3;

    //copy data to core
    size_t len_core_slice = R1 * R2 * sizeof( T_coeff );
    front_core_slice_type* slice = new front_core_slice_type();
    for (size_t r3 = 0; r3 < R3; ++r3 ) {
        memcpy( slice, data + end_data, len_core_slice );
        core.set_frontal_slice_fwd( r3, *slice );
        end_data += len_core_slice;
    }

    tuck3_data_.set_u1( *u1 );
    tuck3_data_.set_u2( *u2 );
    tuck3_data_.set_u3( *u3 );
    tuck3_data_.set_core( core );

    //dequantize tucker3 components (u1-u3 and core)
    tuck3_data_.dequantize_basis_matrices( u_min, u_max, u_min, u_max, u_min, u_max );
    tuck3_data_.dequantize_core( core_min, core_max );

    delete slice;
    delete[] data;
    delete u1;
    delete u2;
    delete u3;
}
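
/* Byte layout consumed by import_quantized_from() above; all multi-byte
 * fields are read with plain memcpy, i.e. in host byte order:
 *
 *   [ u_min : T_internal ][ u_max : T_internal ]
 *   [ core_min : T_internal ][ core_max : T_internal ]
 *   [ U1 : I1*R1 * sizeof(T_coeff) bytes ]
 *   [ U2 : I2*R2 * sizeof(T_coeff) bytes ]
 *   [ U3 : I3*R3 * sizeof(T_coeff) bytes ]
 *   [ core : R3 frontal slices of R1*R2 * sizeof(T_coeff) bytes each ]
 */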



VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::import_hot_quantized_from( const std::vector<unsigned char>& data_in_, qtucker3_type& tuck3_data_ )
{
    tuck3_data_.enable_quantify_hot();
    size_t end_data = 0;
    size_t len_t_comp = sizeof( T_internal );
    size_t len_export_data = R1*R2*R3 + (R1*I1 + R2*I2 + R3*I3) * sizeof(T_coeff) + 4 * len_t_comp;
    unsigned char * data = new unsigned char[ len_export_data ];
    for( size_t byte = 0; byte < len_export_data; ++byte )
    {
        data[byte] = data_in_.at(byte);
    }

    //copy min and max values: u_min, u_max, core_max (core_min is always 0)
    T_internal u_min = 0; T_internal u_max = 0;
    memcpy( &u_min, data, len_t_comp ); end_data = len_t_comp;
    memcpy( &u_max, data + end_data, len_t_comp ); end_data += len_t_comp;

    T_internal core_min = 0; T_internal core_max = 0; //core_min is 0
    //memcpy( &core_min, data + end_data, len_t_comp ); end_data += len_t_comp;
    memcpy( &core_max, data + end_data, len_t_comp ); end_data += len_t_comp;

    //copy the first (hottest) core value separately, as a float
    T_internal hottest_value = 0;
    memcpy( &hottest_value, data + end_data, len_t_comp ); end_data += len_t_comp;
    tuck3_data_.set_hottest_value( hottest_value );

    u1_type* u1 = new u1_type;
    u2_type* u2 = new u2_type;
    u3_type* u3 = new u3_type;
    t3_core_type core;
    t3_core_signs_type signs;

    tuck3_data_.get_u1( *u1 );
    tuck3_data_.get_u2( *u2 );
    tuck3_data_.get_u3( *u3 );
    tuck3_data_.get_core( core );
    tuck3_data_.get_core_signs( signs );

    //copy data to u1
    size_t len_u1 = I1 * R1 * sizeof( T_coeff );
    memcpy( *u1, data + end_data, len_u1 ); end_data += len_u1;

    //copy data to u2
    size_t len_u2 = I2 * R2 * sizeof( T_coeff );
    memcpy( *u2, data + end_data, len_u2 ); end_data += len_u2;

    //copy data to u3
    size_t len_u3 = I3 * R3 * sizeof( T_coeff );
    memcpy( *u3, data + end_data, len_u3 ); end_data += len_u3;

    //copy data to core
    size_t len_core_el = 1; //currently 1 bit for the sign and 7 bits for the value

    unsigned char core_el;
    for (size_t r3 = 0; r3 < R3; ++r3 ) {
        for (size_t r2 = 0; r2 < R2; ++r2 ) {
            for (size_t r1 = 0; r1 < R1; ++r1 ) {
                memcpy( &core_el, data + end_data, len_core_el );
                signs.at( r1, r2, r3 ) = ( core_el & 0x80 ) / 128;
                core.at( r1, r2, r3 ) = core_el & 0x7f;
                ++end_data;
            }
        }
    }

    tuck3_data_.set_u1( *u1 );
    tuck3_data_.set_u2( *u2 );
    tuck3_data_.set_u3( *u3 );
    tuck3_data_.set_core( core );
    tuck3_data_.set_core_signs( signs );

    //dequantize tucker3 components (u1-u3 and core)
    tuck3_data_.dequantize_basis_matrices( u_min, u_max, u_min, u_max, u_min, u_max );
    tuck3_data_.dequantize_core( core_min, core_max );

    delete[] data;
    delete u1;
    delete u2;
    delete u3;
}
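
/* Each core element above is packed into one byte: the high bit (0x80)
 * carries the sign, and the low seven bits (0x7f) carry the log-quantized
 * magnitude; both are dequantized later via dequantize_core(). A minimal
 * decode of one such byte:
 *
 *   unsigned char core_el; // one byte from the stream
 *   int sign      = ( core_el & 0x80 ) / 128; // 0 or 1
 *   int magnitude = core_el & 0x7f;           // 0..127
 */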


VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::import_ttm_quantized_from( const std::vector<unsigned char>& data_in_, qtucker3_type& tuck3_data_ )
{
    tuck3_data_.enable_quantify_log();
    size_t end_data = 0;
    size_t len_t_comp = sizeof( T_internal );
    size_t len_export_data = R1*R2*R3 + (R1*I1 + R2*I2 + R3*I3) * sizeof(T_coeff) + 3 * len_t_comp;
    unsigned char * data = new unsigned char[ len_export_data ];
    for( size_t byte = 0; byte < len_export_data; ++byte )
    {
        data[byte] = data_in_.at(byte);
    }

    //copy min and max values: u_min, u_max, core_max (core_min is always 0)
    T_internal u_min = 0; T_internal u_max = 0;
    memcpy( &u_min, data, len_t_comp ); end_data = len_t_comp;
    memcpy( &u_max, data + end_data, len_t_comp ); end_data += len_t_comp;

    T_internal core_min = 0; T_internal core_max = 0; //core_min is 0
    //memcpy( &core_min, data + end_data, len_t_comp ); end_data += len_t_comp;
    memcpy( &core_max, data + end_data, len_t_comp ); end_data += len_t_comp;

    u1_type* u1 = new u1_type;
    u2_type* u2 = new u2_type;
    u3_type* u3 = new u3_type;
    t3_core_type core;
    t3_core_signs_type signs;

    tuck3_data_.get_u1( *u1 );
    tuck3_data_.get_u2( *u2 );
    tuck3_data_.get_u3( *u3 );
    tuck3_data_.get_core( core );
    tuck3_data_.get_core_signs( signs );

    //copy data to u1
    size_t len_u1 = I1 * R1 * sizeof( T_coeff );
    memcpy( *u1, data + end_data, len_u1 ); end_data += len_u1;

    //copy data to u2
    size_t len_u2 = I2 * R2 * sizeof( T_coeff );
    memcpy( *u2, data + end_data, len_u2 ); end_data += len_u2;

    //copy data to u3
    size_t len_u3 = I3 * R3 * sizeof( T_coeff );
    memcpy( *u3, data + end_data, len_u3 ); end_data += len_u3;

    //copy data to core
    size_t len_core_el = 1; //currently 1 bit for the sign and 7 bits for the value

    //backward cyclic (after Lathauwer et al.)
    unsigned char core_el;
    for (size_t r2 = 0; r2 < R2; ++r2 ) {
        for (size_t r3 = 0; r3 < R3; ++r3 ) {
            for (size_t r1 = 0; r1 < R1; ++r1 ) {
                memcpy( &core_el, data + end_data, len_core_el );
                signs.at( r1, r2, r3 ) = ( core_el & 0x80 ) / 128;
                core.at( r1, r2, r3 ) = core_el & 0x7f;
                ++end_data;
            }
        }
    }
    //std::cout << "signs: " << signs << std::endl;
    //std::cout << "core: " << core << std::endl;

    delete[] data;

    tuck3_data_.set_u1( *u1 );
    tuck3_data_.set_u2( *u2 );
    tuck3_data_.set_u3( *u3 );
    tuck3_data_.set_core( core );
    tuck3_data_.set_core_signs( signs );

    //dequantize tucker3 components (u1-u3 and core)
    tuck3_data_.dequantize_basis_matrices( u_min, u_max, u_min, u_max, u_min, u_max );
    tuck3_data_.dequantize_core( core_min, core_max );

    delete u1;
    delete u2;
    delete u3;
}
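
/* Note on core element order: import_hot_quantized_from() above reads the
 * core in forward-cyclic order (r3 outermost; after Kiers et al., 2000),
 * whereas this TTM variant reads it in backward-cyclic order (after
 * Lathauwer et al.): r2 outermost, then r3, then r1 innermost. The two byte
 * streams are therefore not interchangeable.
 */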

#undef VMML_TEMPLATE_STRING
#undef VMML_TEMPLATE_CLASSNAME

} // namespace vmml

#endif