#ifndef __VMML__TUCKER3_TENSOR__HPP__
#define __VMML__TUCKER3_TENSOR__HPP__

#include <vmmlib/t3_hooi.hpp>
#include <vmmlib/t3_ihooi.hpp>
template< size_t R1, size_t R2, size_t R3,
          size_t I1, size_t I2, size_t I3,
          typename T_value = float,
          typename T_coeff = double >

typedef float T_internal;
static const size_t SIZE = R1 * R2 * R3 + I1 * R1 + I2 * R2 + I3 * R3;
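// SIZE appears to count the coefficients stored by the decomposition: the
// R1 x R2 x R3 core plus the three factor matrices of sizes I1 x R1, I2 x R2
// and I3 x R3. For example, with R1 = R2 = R3 = 16 and I1 = I2 = I3 = 256
// this is 16*16*16 + 3 * (256*16) = 4096 + 12288 = 16384 coefficients,
// versus 256*256*256 = 16777216 entries of the dense tensor.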
_core_comp.cast_from(core);

_u1_comp->cast_from(U1);

_u2_comp->cast_from(U2);

_u3_comp->cast_from(U3);
void get_u1(u1_type& U1) const { U1 = *_u1; }

void get_u2(u2_type& U2) const { U2 = *_u2; }

void get_u3(u3_type& U3) const { U3 = *_u3; }
_core.cast_from(_core_comp);
size_t nnz(const T_value& threshold) const;
size_t nnz_core() const;
size_t size_core() const;
size_t size() const { return SIZE; }
void threshold_core(const size_t& nnz_core_, size_t& nnz_core_is_);
void threshold_core(const T_coeff& threshold_value_, size_t& nnz_core_);
template< size_t J1, size_t J2, size_t J3 >
typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type*
set_sub_core(const tensor3< J1, J2, J3, T_coeff >& sub_data_,
    size_t row_offset, size_t col_offset, size_t slice_offset);
void reconstruct(t3_type& data_);
double error(t3_type& original) const;
template< typename T_init >
tensor_stats decompose(const t3_type& data_, T_init init,
    const size_t max_iterations = 10, const float tolerance = 1e-04);
template< typename T_init >
tensor_stats tucker_als(const t3_type& data_, T_init init,
    const size_t max_iterations = 10, const float tolerance = 1e-04);
template< size_t NBLOCKS, typename T_init >
tensor_stats i_tucker_als(const t3_type& data_, T_init init,
    const size_t max_iterations = 10, const float tolerance = 1e-04);
template< size_t R, size_t NBLOCKS, typename T_init >
tensor_stats i_cp_tucker_als(const t3_type& data_, T_init init,
    const size_t max_iterations = 10, const float tolerance = 1e-04);
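// The four entry points above differ only in the optimizer they forward to:
// decompose() and tucker_als() run plain HOOI via t3_hooi, while
// i_tucker_als() and i_cp_tucker_als() use the incremental t3_ihooi variants
// (the latter presumably combining a rank-R CP stage with the NBLOCKS-wise
// Tucker update). Each takes an initializer functor such as
// t3_hooi< ... >::init_random().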
void als_rand(const t3_type& data_);
template< size_t K1, size_t K2, size_t K3 >
void reduce_ranks(const tucker3_tensor< K1, K2, K3, I1, I2, I3, T_value, T_coeff >& other);

template< size_t K1, size_t K2, size_t K3 >
void subsampling(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor);

template< size_t K1, size_t K2, size_t K3 >
void subsampling_on_average(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor);

template< size_t K1, size_t K2, size_t K3 >
void region_of_interest(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other,
    const size_t& start_index1, const size_t& end_index1,
    const size_t& start_index2, const size_t& end_index2,
    const size_t& start_index3, const size_t& end_index3);
friend std::ostream& operator <<(std::ostream& os, const tucker3_type& t3) {

    os << "U1: " << std::endl << *u1 << std::endl
       << "U2: " << std::endl << *u2 << std::endl
       << "U3: " << std::endl << *u3 << std::endl
       << "core: " << std::endl << core << std::endl;
void cast_comp_members();
#define VMML_TEMPLATE_STRING    template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value, typename T_coeff >
#define VMML_TEMPLATE_CLASSNAME tucker3_tensor< R1, R2, R3, I1, I2, I3, T_value, T_coeff >
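// A minimal usage sketch, assuming the vmml namespace and a 256^3 float
// volume reduced to a 16^3 core (sizes and the data source are illustrative,
// not part of this header):
//
//     typedef vmml::tensor3< 256, 256, 256, float >                            volume_t;
//     typedef vmml::tucker3_tensor< 16, 16, 16, 256, 256, 256, float, double > tucker_t;
//
//     volume_t data;              // fill with the input volume
//     tucker_t tuck3;
//     tuck3.als_rand( data );     // HOOI ALS with random initialization
//
//     volume_t approx;
//     tuck3.reconstruct( approx );                     // core x1 U1 x2 U2 x3 U3
//     double rel_err_percent = tuck3.error( data );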
VMML_TEMPLATE_CLASSNAME::tucker3_tensor() {

    _u1_comp = new u1_comp_type();
    _u2_comp = new u2_comp_type();
    _u3_comp = new u3_comp_type();
VMML_TEMPLATE_CLASSNAME::tucker3_tensor(t3_core_type& core) {

    _u1_comp = new u1_comp_type();
    _u2_comp = new u2_comp_type();
    _u3_comp = new u3_comp_type();
    _core_comp.cast_from(core);
VMML_TEMPLATE_CLASSNAME::tucker3_tensor(t3_core_type& core, u1_type& U1, u2_type& U2, u3_type& U3) {

    _u1 = new u1_type(U1);
    _u2 = new u2_type(U2);
    _u3 = new u3_type(U3);
    _u1_comp = new u1_comp_type();
    _u2_comp = new u2_comp_type();
    _u3_comp = new u3_comp_type();
VMML_TEMPLATE_CLASSNAME::tucker3_tensor(const t3_type& data_, u1_type& U1, u2_type& U2, u3_type& U3) {

    _u1 = new u1_type(U1);
    _u2 = new u2_type(U2);
    _u3 = new u3_type(U3);
    _u1_comp = new u1_comp_type();
    _u2_comp = new u2_comp_type();
    _u3_comp = new u3_comp_type();

    t3_hooi_type::derive_core_orthogonal_bases(data_, *_u1, *_u2, *_u3, _core);
VMML_TEMPLATE_CLASSNAME::tucker3_tensor(const tucker3_type& other) {

    _u1_comp = new u1_comp_type();
    _u2_comp = new u2_comp_type();
    _u3_comp = new u3_comp_type();

    other.get_core(_core);
VMML_TEMPLATE_CLASSNAME::cast_members() {
    _u1->cast_from(*_u1_comp);
    _u2->cast_from(*_u2_comp);
    _u3->cast_from(*_u3_comp);
    _core.cast_from(_core_comp);
VMML_TEMPLATE_CLASSNAME::cast_comp_members() {
    _u1_comp->cast_from(*_u1);
    _u2_comp->cast_from(*_u2);
    _u3_comp->cast_from(*_u3);
    _core_comp.cast_from(_core);
VMML_TEMPLATE_CLASSNAME::nnz_core() const {
    return _core_comp.nnz();

VMML_TEMPLATE_CLASSNAME::size_core() const {
    return _core_comp.size();
VMML_TEMPLATE_CLASSNAME::~tucker3_tensor() {
VMML_TEMPLATE_CLASSNAME::reconstruct(t3_type& data_) {

    data.cast_from(data_);
    t3_ttm::full_tensor3_matrix_multiplication(_core_comp, *_u1_comp, *_u2_comp, *_u3_comp, data);

    if ((sizeof (T_value) == 1) || (sizeof (T_value) == 2)) {
        data_.float_t_to_uint_t(data);
    } else {
        data_.cast_from(data);
VMML_TEMPLATE_CLASSNAME::error(t3_type& original) const {

    t3_ttm::full_tensor3_matrix_multiplication(_core_comp, *_u1_comp, *_u2_comp, *_u3_comp, data);

    double err = data.frobenius_norm(original) / original.frobenius_norm() * 100;
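    // Assuming frobenius_norm(original) evaluates || data - original ||_F,
    // err is the relative Frobenius error of the reconstruction in percent:
    // err = || T_approx - T ||_F / || T ||_F * 100.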
VMML_TEMPLATE_CLASSNAME::threshold_core(const size_t& nnz_core_, size_t& nnz_core_is_) {
    nnz_core_is_ = _core_comp.nnz();
    T_coeff threshold_value = 0.00001;
    while (nnz_core_is_ > nnz_core_) {
        _core_comp.threshold(threshold_value);
        nnz_core_is_ = _core_comp.nnz();

        if (threshold_value < 0.01) {
            threshold_value *= 10;
        } else if (threshold_value < 0.2) {
            threshold_value += 0.05;
        } else if (threshold_value < 1) {
            threshold_value += 0.25;
        } else if (threshold_value < 10) {
            threshold_value += 1;
        } else if (threshold_value < 50) {
            threshold_value += 10;
        } else if (threshold_value < 200) {
            threshold_value += 50;
        } else if (threshold_value < 500) {
            threshold_value += 100;
        } else if (threshold_value < 2000) {
            threshold_value += 500;
        } else if (threshold_value < 5000) {
            threshold_value += 3000;
        } else if (threshold_value >= 5000) {
            threshold_value += 5000;
        }
    }
    _core.cast_from(_core_comp);
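    // The while loop above raises the threshold on a coarse-to-fine schedule
    // (x10 while below 0.01, then increasingly large additive steps) and
    // re-thresholds the core until it holds at most nnz_core_ nonzeros;
    // nnz_core_is_ reports the count that was actually reached.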
VMML_TEMPLATE_CLASSNAME::threshold_core(const T_coeff& threshold_value_, size_t& nnz_core_) {
    _core_comp.threshold(threshold_value_);
    nnz_core_ = _core_comp.nnz();
    _core.cast_from(_core_comp);
template< size_t J1, size_t J2, size_t J3 >
typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type*
VMML_TEMPLATE_CLASSNAME::
set_sub_core(const tensor3< J1, J2, J3, T_coeff >& sub_data_,
        size_t row_offset, size_t col_offset, size_t slice_offset) {
    _core_comp.set_sub_tensor3(sub_data_, row_offset, col_offset, slice_offset);
    _core.cast_from(_core_comp);
VMML_TEMPLATE_CLASSNAME::als_rand(const t3_type& data_) {
    typedef t3_hooi< R1, R2, R3, I1, I2, I3, T_internal > hooi_type;
    tucker_als(data_, typename hooi_type::init_random());
template< typename T_init >
VMML_TEMPLATE_CLASSNAME::decompose(const t3_type& data_, T_init init,
        const size_t max_iterations, const float tolerance) {
    return tucker_als(data_, init, max_iterations, tolerance);
template< typename T_init >
VMML_TEMPLATE_CLASSNAME::tucker_als(const t3_type& data_, T_init init,
        const size_t max_iterations, const float tolerance) {

    data.cast_from(data_);

    typedef t3_hooi< R1, R2, R3, I1, I2, I3, T_internal > hooi_type;
    result += hooi_type::als(data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance);
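    // hooi_type::als runs higher-order orthogonal iteration (HOOI) on the
    // working-precision copy of the data, presumably updating the three
    // factor matrices and the core in place and returning per-run
    // tensor_stats, which are accumulated into result.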
template< size_t NBLOCKS, typename T_init >
VMML_TEMPLATE_CLASSNAME::i_tucker_als(const t3_type& data_, T_init init,
        const size_t max_iterations, const float tolerance) {

    data.cast_from(data_);

    typedef t3_ihooi< R1, R2, R3, NBLOCKS, I1, I2, I3, T_internal > ihooi_type;
    result += ihooi_type::i_als(data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance);
template< size_t R, size_t NBLOCKS, typename T_init >
VMML_TEMPLATE_CLASSNAME::i_cp_tucker_als(const t3_type& data_, T_init init,
        const size_t max_iterations, const float tolerance) {

    data.cast_from(data_);

    typedef t3_ihooi< R1, R2, R3, NBLOCKS, I1, I2, I3, T_internal > ihooi_type;
    result += ihooi_type::template i_cp_als< R >(data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance);
template< size_t K1, size_t K2, size_t K3 >
VMML_TEMPLATE_CLASSNAME::reduce_ranks(const tucker3_tensor< K1, K2, K3, I1, I2, I3, T_value, T_coeff >& other) {

    matrix< I1, K1, T_coeff >* u1 = new matrix< I1, K1, T_coeff >();

    for (size_t r1 = 0; r1 < R1; ++r1) {
        _u1->set_column(r1, u1->get_column(r1));

    matrix< I2, K2, T_coeff >* u2 = new matrix< I2, K2, T_coeff >();

    for (size_t r2 = 0; r2 < R2; ++r2) {
        _u2->set_column(r2, u2->get_column(r2));

    matrix< I3, K3, T_coeff >* u3 = new matrix< I3, K3, T_coeff >();

    for (size_t r3 = 0; r3 < R3; ++r3) {
        _u3->set_column(r3, u3->get_column(r3));

    tensor3< K1, K2, K3, T_coeff > other_core;
    other.get_core(other_core);

    for (size_t r3 = 0; r3 < R3; ++r3) {
        for (size_t r1 = 0; r1 < R1; ++r1) {
            for (size_t r2 = 0; r2 < R2; ++r2) {
                _core.at(r1, r2, r3) = other_core.at(r1, r2, r3);
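    // Rank reduction thus copies the leading R1/R2/R3 columns of the other
    // (rank K1/K2/K3) factor matrices and the corresponding leading sub-block
    // of its core, which implicitly requires R1 <= K1, R2 <= K2 and R3 <= K3.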
template< size_t K1, size_t K2, size_t K3 >
VMML_TEMPLATE_CLASSNAME::subsampling(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor) {

    matrix< K1, R1, T_coeff >* u1 = new matrix< K1, R1, T_coeff >();

    for (size_t i1 = 0, i = 0; i1 < K1; i1 += factor, ++i) {
        _u1->set_row(i, u1->get_row(i1));

    matrix< K2, R2, T_coeff >* u2 = new matrix< K2, R2, T_coeff >();

    for (size_t i2 = 0, i = 0; i2 < K2; i2 += factor, ++i) {
        _u2->set_row(i, u2->get_row(i2));

    matrix< K3, R3, T_coeff >* u3 = new matrix< K3, R3, T_coeff >();

    for (size_t i3 = 0, i = 0; i3 < K3; i3 += factor, ++i) {
        _u3->set_row(i, u3->get_row(i3));

    other.get_core(_core);
template< size_t K1, size_t K2, size_t K3 >
VMML_TEMPLATE_CLASSNAME::subsampling_on_average(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor) {

    matrix< K1, R1, T_coeff >* u1 = new matrix< K1, R1, T_coeff >();

    for (size_t i1 = 0, i = 0; i1 < K1; i1 += factor, ++i) {
        vector< R1, T_internal > tmp_row = u1->get_row(i1);
        T_internal num_items_averaged = 1;
        for (size_t j = i1 + 1; (j < (factor + i1)) && (j < K1); ++j, ++num_items_averaged)
            tmp_row += u1->get_row(j);

        tmp_row /= num_items_averaged;
        _u1->set_row(i, tmp_row);

    matrix< K2, R2, T_coeff >* u2 = new matrix< K2, R2, T_coeff >();

    for (size_t i2 = 0, i = 0; i2 < K2; i2 += factor, ++i) {
        vector< R2, T_internal > tmp_row = u2->get_row(i2);
        T_internal num_items_averaged = 1;
        for (size_t j = i2 + 1; (j < (factor + i2)) && (j < K2); ++j, ++num_items_averaged)
            tmp_row += u2->get_row(j);

        tmp_row /= num_items_averaged;
        _u2->set_row(i, tmp_row);    // write back the averaged row

    matrix< K3, R3, T_coeff >* u3 = new matrix< K3, R3, T_coeff >();

    for (size_t i3 = 0, i = 0; i3 < K3; i3 += factor, ++i) {
        vector< R3, T_internal > tmp_row = u3->get_row(i3);
        T_internal num_items_averaged = 1;
        for (size_t j = i3 + 1; (j < (factor + i3)) && (j < K3); ++j, ++num_items_averaged)
            tmp_row += u3->get_row(j);

        tmp_row /= num_items_averaged;
        _u3->set_row(i, tmp_row);    // write back the averaged row

    other.get_core(_core);
template< size_t K1, size_t K2, size_t K3 >
VMML_TEMPLATE_CLASSNAME::region_of_interest(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other,
        const size_t& start_index1, const size_t& end_index1,
        const size_t& start_index2, const size_t& end_index2,
        const size_t& start_index3, const size_t& end_index3) {

    assert(start_index1 < end_index1);
    assert(start_index2 < end_index2);
    assert(start_index3 < end_index3);
    assert(end_index1 < K1);
    assert(end_index2 < K2);
    assert(end_index3 < K3);

    matrix< K1, R1, T_internal >* u1 = new matrix< K1, R1, T_internal >();
    other.get_u1_comp(*u1);
    for (size_t i1 = start_index1, i = 0; i1 < end_index1; ++i1, ++i) {
        _u1_comp->set_row(i, u1->get_row(i1));

    matrix< K2, R2, T_internal >* u2 = new matrix< K2, R2, T_internal >();
    other.get_u2_comp(*u2);
    for (size_t i2 = start_index2, i = 0; i2 < end_index2; ++i2, ++i) {
        _u2_comp->set_row(i, u2->get_row(i2));

    matrix< K3, R3, T_internal >* u3 = new matrix< K3, R3, T_internal >();
    other.get_u3_comp(*u3);
    for (size_t i3 = start_index3, i = 0; i3 < end_index3; ++i3, ++i) {
        _u3_comp->set_row(i, u3->get_row(i3));

    other.get_core_comp(_core_comp);
VMML_TEMPLATE_CLASSNAME::nnz() const {

    counter += _u1_comp->nnz();
    counter += _u2_comp->nnz();
    counter += _u3_comp->nnz();
    counter += _core_comp.nnz();
VMML_TEMPLATE_CLASSNAME::nnz(const T_value& threshold) const {

    counter += _u1_comp->nnz(threshold);
    counter += _u2_comp->nnz(threshold);
    counter += _u3_comp->nnz(threshold);
    counter += _core_comp.nnz(threshold);
#undef VMML_TEMPLATE_STRING
#undef VMML_TEMPLATE_CLASSNAME