// Include guard + dependency for the Tucker3 exporter.
// NOTE(review): identifiers containing double underscores (__VMML__...) are
// reserved to the implementation in C++ ([lex.name]); renaming the guard would
// also require touching the matching #endif, which is outside this chunk —
// flagging only, not changing.
40 #ifndef __VMML__TUCK3_EXPORTER__HPP__
41 #define __VMML__TUCK3_EXPORTER__HPP__
43 #include <vmmlib/qtucker3_tensor.hpp>
// tucker3_exporter<R1,R2,R3, I1,I2,I3, T_value,T_coeff>
// Serializes a Tucker3 tensor decomposition (three basis/factor matrices
// U1, U2, U3 plus a core tensor) of an I1 x I2 x I3 tensor with multilinear
// ranks (R1,R2,R3) into flat output vectors. T_value is the original value
// type, T_coeff the coefficient type stored on export (both default float).
// NOTE(review): extraction gaps — the class head line, the u1/u2/u3 matrix
// typedefs, and the closing brace of the class are not visible in this chunk.
55 template<
size_t R1,
size_t R2,
size_t R3,
size_t I1,
size_t I2,
size_t I3,
typename T_value =
float,
typename T_coeff =
float >
// Internal working precision used for quantization min/max bookkeeping.
60 typedef float T_internal;
// Iterator aliases over the three factor matrices (u1_type..u3_type are
// declared on lines elided from this chunk — presumably matrix<Ii,Ri,...>).
70 typedef typename u1_type::iterator u1_iterator;
71 typedef typename u1_type::const_iterator u1_const_iterator;
74 typedef typename u2_type::iterator u2_iterator;
75 typedef typename u2_type::const_iterator u2_const_iterator;
78 typedef typename u3_type::iterator u3_iterator;
79 typedef typename u3_type::const_iterator u3_const_iterator;
// Plain export: appends every element of U1, U2, U3 and the core,
// each static_cast to T, onto data_.
83 template<
typename T >
84 static void export_to( std::vector< T >& data_,
tucker3_type& tuck3_data_ );
// Quantized export: min/max headers followed by T_coeff-quantized
// factor matrices and core, as raw bytes.
87 static void export_quantized_to( std::vector<unsigned char>& data_out_,
qtucker3_type& tuck3_data_ );
// "Hot"-value quantized export: core packed to one byte per element
// (sign bit folded into bit 7), hottest value stored separately.
92 static void export_hot_quantized_to( std::vector<unsigned char>& data_out_,
qtucker3_type& tuck3_data_ );
// Log-quantized export intended for TTM (tensor-times-matrix) consumption;
// core is written in a different slice order than the hot variant.
95 static void export_ttm_quantized_to( std::vector<unsigned char>& data_out_,
qtucker3_type& tuck3_data_ );
// Convenience macros so the out-of-line member definitions below need not
// repeat the full template header; both are #undef'd at the end of the file.
100 #define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value, typename T_coeff >
101 #define VMML_TEMPLATE_CLASSNAME tucker3_exporter< R1, R2, R3, I1, I2, I3, T_value, T_coeff >
// export_to: flattens the decomposition into data_ — all elements of U1,
// then U2, then U3, then the core tensor, each static_cast to T and appended
// in the containers' iterator order.
// NOTE(review): elided lines in this chunk hide the declarations of `core`
// and `it_end` (presumably `it_end = u1->end()`), the return type line, the
// braces, and — presumably — the matching `delete u1/u2/u3`; the raw `new`
// below therefore *looks* leaky but cannot be judged from this view.
105 template<
typename T >
107 VMML_TEMPLATE_CLASSNAME::export_to( std::vector< T >& data_, tucker3_type& tuck3_data_ )
// Heap-allocate the factor matrices (they can be large: Ii x Ri each).
112 u1_type* u1 =
new u1_type;
113 u2_type* u2 =
new u2_type;
114 u3_type* u3 =
new u3_type;
// Copy factors and core out of the tucker3 object.
117 tuck3_data_.get_u1( *u1 );
118 tuck3_data_.get_u2( *u2 );
119 tuck3_data_.get_u3( *u3 );
120 tuck3_data_.get_core( core );
// NOTE(review): cast_members() runs *after* the factors/core were fetched
// above — confirm the local copies are the intended (pre-cast) data.
122 tuck3_data_.cast_members();
// U1 elements first...
124 u1_const_iterator it = u1->begin(),
126 for( ; it != it_end; ++it )
128 data_.push_back( static_cast< T >( *it) );
// ...then U2...
131 u2_const_iterator u2_it = u2->begin(),
132 u2_it_end = u2->end();
133 for( ; u2_it != u2_it_end; ++u2_it )
135 data_.push_back(static_cast< T >(*u2_it) );
// ...then U3...
138 u3_const_iterator u3_it = u3->begin(),
139 u3_it_end = u3->end();
140 for( ; u3_it != u3_it_end; ++u3_it )
142 data_.push_back(static_cast< T >( *u3_it) );
// ...and finally the core tensor.
145 t3_core_iterator it_core = core.begin(),
146 it_core_end = core.end();
147 for( ; it_core != it_core_end; ++it_core )
149 data_.push_back(static_cast< T >( *it_core) );
// export_quantized_to: quantizes the decomposition and emits it as raw bytes:
//   [u_min][u_max][core_min][core_max]  (T_internal each)
//   [U1 bytes][U2 bytes][U3 bytes][core, frontal slice 0..R3-1]  (T_coeff each)
// NOTE(review): `end_data`'s declaration, `core`'s declaration, the closing
// brace, and (presumably) `delete u1/u2/u3; delete[] data;` are on lines
// elided from this chunk. The buffer reserves 8 * sizeof(T_internal) header
// bytes but only 4 min/max values are visibly written — the remainder may be
// written on elided lines; confirm against the importer's layout.
162 VMML_TEMPLATE_CLASSNAME::export_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ )
165 size_t len_t_comp =
sizeof( T_internal );
// Total byte budget: every coefficient plus the T_internal header fields.
166 size_t len_export_data = tuck3_data_.SIZE *
sizeof(T_coeff) + 8 * len_t_comp;
167 char * data =
new char[ len_export_data ];
// Quantize basis matrices; their min/max lead the stream so the importer
// can de-quantize.
171 T_internal u_min, u_max;
172 tuck3_data_.quantize_basis_matrices( u_min, u_max);
173 memcpy( data, &u_min, len_t_comp ); end_data = len_t_comp;
174 memcpy( data + end_data, &u_max, len_t_comp ); end_data += len_t_comp;
176 u1_type* u1 =
new u1_type;
177 u2_type* u2 =
new u2_type;
178 u3_type* u3 =
new u3_type;
// Fetch the (now quantized) factors and core.
181 tuck3_data_.get_u1( *u1 );
182 tuck3_data_.get_u2( *u2 );
183 tuck3_data_.get_u3( *u3 );
184 tuck3_data_.get_core( core );
// Core quantization range follows the basis range in the header.
187 T_internal core_min, core_max;
188 tuck3_data_.quantize_core( core_min, core_max );
189 memcpy( data + end_data, &core_min, len_t_comp ); end_data += len_t_comp;
190 memcpy( data + end_data, &core_max, len_t_comp ); end_data += len_t_comp;
// Raw-copy each factor matrix. NOTE(review): `*u1` as a memcpy source
// presumably relies on an implicit conversion of the matrix object to a data
// pointer (or on it being standard-layout) — confirm in the matrix class.
193 size_t len_u1 = I1 * R1 *
sizeof( T_coeff );
194 memcpy( data + end_data, *u1, len_u1 ); end_data += len_u1;
197 size_t len_u2 = I2 * R2 *
sizeof( T_coeff );
198 memcpy( data + end_data, *u2, len_u2 ); end_data += len_u2;
201 size_t len_u3 = I3 * R3 *
sizeof( T_coeff );
202 memcpy( data + end_data, *u3, len_u3 ); end_data += len_u3;
// Core is written slice-by-slice, front-to-back (R1 x R2 values per slice).
205 size_t len_core_slice = R1 * R2 *
sizeof( T_coeff );
206 for (
size_t r3 = 0; r3 < R3; ++r3 ) {
207 memcpy( data + end_data, core.get_frontal_slice_fwd( r3 ), len_core_slice );
208 end_data += len_core_slice;
// Hand the staging buffer to the caller byte-by-byte.
212 for(
size_t byte = 0; byte < len_export_data; ++byte )
214 data_out_.push_back( data[byte] );
// export_hot_quantized_to: quantized export with "hot value" core encoding.
// Stream layout (as visible here):
//   [u_min][u_max][core_max][hottest_value]  (T_internal each)
//   [U1][U2][U3]  (T_coeff each)
//   [core: 1 byte per element, sign folded into bit 7]
// NOTE(review): core_min is computed on line "245" but its memcpy is not
// visible — either intentionally omitted (hot scheme may need only core_max)
// or on an elided line; confirm against the matching importer.
// NOTE(review): `end_data`/`core` declarations, loop closing braces, the
// post-memcpy `end_data += len_core_el`, and any delete/delete[] cleanup are
// on lines elided from this chunk.
228 VMML_TEMPLATE_CLASSNAME::export_hot_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ )
// Switch the tensor into hot-value quantization mode before quantizing.
230 tuck3_data_.enable_quantify_hot();
232 size_t len_t_comp =
sizeof( T_internal );
// One byte per core element + T_coeff per factor element + header fields.
233 size_t len_export_data = R1*R2*R3 + (R1*I1 + R2*I2 + R3*I3) *
sizeof(T_coeff) + 4 * len_t_comp;
234 char * data =
new char[ len_export_data ];
// Basis range header.
238 T_internal u_min, u_max;
239 tuck3_data_.quantize_basis_matrices( u_min, u_max);
240 memcpy( data, &u_min, len_t_comp ); end_data = len_t_comp;
241 memcpy( data + end_data, &u_max, len_t_comp ); end_data += len_t_comp;
// Core range: only core_max is visibly written (see NOTE above).
244 T_internal core_min, core_max;
245 tuck3_data_.quantize_core( core_min, core_max );
247 memcpy( data + end_data, &core_max, len_t_comp ); end_data += len_t_comp;
249 u1_type* u1 =
new u1_type;
250 u2_type* u2 =
new u2_type;
251 u3_type* u3 =
new u3_type;
// Per-element sign bits of the core, stored separately from magnitudes.
253 t3_core_signs_type signs;
255 tuck3_data_.get_u1( *u1 );
256 tuck3_data_.get_u2( *u2 );
257 tuck3_data_.get_u3( *u3 );
258 tuck3_data_.get_core( core );
259 tuck3_data_.get_core_signs( signs );
260 T_internal hottest_value = tuck3_data_.get_hottest_value();
// Hottest core value travels in the header so it can be restored losslessly.
263 memcpy( data + end_data, &hottest_value, len_t_comp ); end_data += len_t_comp;
// Raw-copy the three factor matrices.
266 size_t len_u1 = I1 * R1 *
sizeof( T_coeff );
267 memcpy( data + end_data, *u1, len_u1 ); end_data += len_u1;
270 size_t len_u2 = I2 * R2 *
sizeof( T_coeff );
271 memcpy( data + end_data, *u2, len_u2 ); end_data += len_u2;
274 size_t len_u3 = I3 * R3 *
sizeof( T_coeff );
275 memcpy( data + end_data, *u3, len_u3 ); end_data += len_u3;
// Core: one byte per element.
278 size_t len_core_el = 1;
// Pack |value| (low 7 bits, assuming the quantized magnitude fits — TODO
// confirm) with the sign flag in bit 7 (signs.at(...) * 0x80).
281 unsigned char core_el;
282 for (
size_t r3 = 0; r3 < R3; ++r3 ) {
283 for (
size_t r2 = 0; r2 < R2; ++r2 ) {
284 for (
size_t r1 = 0; r1 < R1; ++r1 ) {
285 core_el = (core.at( r1, r2, r3 ) | (signs.at( r1, r2, r3) * 0x80 ));
288 memcpy( data + end_data, &core_el, len_core_el );
// Flush the staging buffer into the output vector.
295 for(
size_t byte = 0; byte < len_export_data; ++byte )
297 data_out_.push_back( data[byte] );
// export_ttm_quantized_to: log-quantized export for TTM-style consumption.
// Same general shape as export_hot_quantized_to but with a 3-field T_internal
// header (u_min, u_max, core_max — no hottest value) and the packed core
// traversed r2-outermost / r3 / r1-innermost, i.e. a DIFFERENT element order
// than the hot variant's r3/r2/r1 nest — presumably deliberate for the TTM
// memory layout; confirm against the matching importer.
// NOTE(review): as in the other exporters, `end_data`/`core` declarations,
// the core_min write (computed but not visibly stored), loop closers,
// `end_data += len_core_el`, and delete/delete[] cleanup sit on lines elided
// from this chunk.
313 VMML_TEMPLATE_CLASSNAME::export_ttm_quantized_to( std::vector<unsigned char>& data_out_, qtucker3_type& tuck3_data_ )
// Switch the tensor into logarithmic quantization mode.
315 tuck3_data_.enable_quantify_log();
317 size_t len_t_comp =
sizeof( T_internal );
// One byte per core element + T_coeff per factor element + 3 header fields.
318 size_t len_export_data = R1*R2*R3 + (R1*I1 + R2*I2 + R3*I3) *
sizeof(T_coeff) + 3 *len_t_comp;
319 char * data =
new char[ len_export_data ];
// Basis range header.
323 T_internal u_min, u_max;
324 tuck3_data_.quantize_basis_matrices( u_min, u_max);
325 memcpy( data, &u_min, len_t_comp ); end_data = len_t_comp;
326 memcpy( data + end_data, &u_max, len_t_comp ); end_data += len_t_comp;
// Core range: only core_max is visibly written (see NOTE above).
329 T_internal core_min, core_max;
330 tuck3_data_.quantize_core( core_min, core_max );
332 memcpy( data + end_data, &core_max, len_t_comp ); end_data += len_t_comp;
334 u1_type* u1 =
new u1_type;
335 u2_type* u2 =
new u2_type;
336 u3_type* u3 =
new u3_type;
// Per-element sign bits of the core, stored separately from magnitudes.
338 t3_core_signs_type signs;
340 tuck3_data_.get_u1( *u1 );
341 tuck3_data_.get_u2( *u2 );
342 tuck3_data_.get_u3( *u3 );
343 tuck3_data_.get_core( core );
344 tuck3_data_.get_core_signs( signs );
// Raw-copy the three factor matrices.
347 size_t len_u1 = I1 * R1 *
sizeof( T_coeff );
348 memcpy( data + end_data, *u1, len_u1 ); end_data += len_u1;
351 size_t len_u2 = I2 * R2 *
sizeof( T_coeff );
352 memcpy( data + end_data, *u2, len_u2 ); end_data += len_u2;
355 size_t len_u3 = I3 * R3 *
sizeof( T_coeff );
356 memcpy( data + end_data, *u3, len_u3 ); end_data += len_u3;
// Core: one byte per element, sign folded into bit 7 as in the hot variant.
359 size_t len_core_el = 1;
363 unsigned char core_el;
364 for (
size_t r2 = 0; r2 < R2; ++r2 ) {
365 for (
size_t r3 = 0; r3 < R3; ++r3 ) {
366 for (
size_t r1 = 0; r1 < R1; ++r1 ) {
367 core_el = (core.at( r1, r2, r3 ) | (signs.at( r1, r2, r3) * 0x80 ));
370 memcpy( data + end_data, &core_el, len_core_el );
// Flush the staging buffer into the output vector.
377 for(
size_t byte = 0; byte < len_export_data; ++byte )
379 data_out_.push_back( data[byte] );
// Scrub the helper macros so they do not leak into including translation units.
388 #undef VMML_TEMPLATE_STRING
389 #undef VMML_TEMPLATE_CLASSNAME