#ifndef __VMML__T3_HOOI__HPP__
#define __VMML__T3_HOOI__HPP__

#include <cmath>
#include <cstddef>
#include <iostream>

#include <vmmlib/matrix_pseudoinverse.hpp>
#include <vmmlib/t3_hosvd.hpp>
#include <vmmlib/t3_ttm.hpp>
#include <vmmlib/tensor_stats.hpp>
// NOTE(review): garbled extraction — the "class t3_hooi {" head, its
// public typedefs (t3_type, t3_core_type, u1_type..u3_t_type) and the
// class-closing brace are not visible in this chunk; only fragments of
// the declaration survive, still carrying stray line-number prefixes.
// Kept byte-identical; the missing lines must be recovered before this
// header can compile.
//
// Template parameters: R1,R2,R3 are presumably the Tucker core ranks and
// I1,I2,I3 the input tensor3 dimensions (T = value type, default float)
// — TODO confirm against the upstream header.
58 template<
size_t R1,
size_t R2,
size_t R3,
size_t I1,
size_t I2,
size_t I3,
typename T =
float >
// Fragment: template head of another member (presumably the als overload
// that also returns the core tensor); its declaration lines are missing.
81 template<
typename T_init>
// als(): alternating-least-squares Tucker/HOOI driver. The T_init functor
// initializes the factor matrices; iteration stops after max_iterations
// or when the fit improves by less than tolerance (see the out-of-line
// definition below). Returns per-run statistics.
85 template<
typename T_init>
86 static tensor_stats als(
const t3_type& data_,
u1_type& u1_,
u2_type& u2_,
u3_type& u3_, T_init init,
const double& max_f_norm_ = 0.0,
const size_t max_iterations = 10,
const float tolerance = 1e-04);
// Fragment of an initialization-functor body: normalizes u2_/u3_ to unit
// Frobenius norm. The enclosing operator() (and any u1_ handling) is on
// lines missing from this chunk.
114 u2_ /= u2_.frobenius_norm();
115 u3_ /= u3_.frobenius_norm();
// Convenience macros so each out-of-line member definition below does not
// have to repeat the full template parameter list. (The stray numeric
// prefixes left by the extraction made these directives invalid.)
#define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T >
#define VMML_TEMPLATE_CLASSNAME t3_hooi< R1, R2, R3, I1, I2, I3, T >
147 template<
typename T_init>
149 VMML_TEMPLATE_CLASSNAME::als(
const t3_type& data_,
150 u1_type& u1_, u2_type& u2_, u3_type& u3_,
151 T_init init,
const double& max_f_norm_,
const size_t max_iterations,
const float tolerance) {
154 return als(data_, u1_, u2_, u3_, core, init, max_f_norm_, max_iterations, tolerance);
158 template<
typename T_init>
160 VMML_TEMPLATE_CLASSNAME::als(
const t3_type& data_,
161 u1_type& u1_, u2_type& u2_, u3_type& u3_,
164 const double& max_f_norm_,
const size_t max_iterations_,
const float tolerance_) {
168 init(data_, u2_, u3_);
172 T f_norm, fit, fitchange, fitold, normresidual;
173 if (tolerance_ > 0) {
174 max_f_norm = max_f_norm_;
176 if (max_f_norm <= 0.0) {
177 max_f_norm = data_.frobenius_norm();
192 tensor3< I1, R2, R3, T > projection1;
193 tensor3< R1, I2, R3, T > projection2;
194 tensor3< R1, R2, I3, T > projection3;
196 tensor3< I1, R2, I3, T > tmp1;
197 tensor3< R1, I2, I3, T > tmp2;
200 std::cout <<
"HOOI ALS (for tensor3) " << std::endl
201 <<
"initial fit: " << fit <<
", "
202 <<
"frobenius norm original: " << max_f_norm << std::endl;
205 while (i < max_iterations_ && (tolerance_ < 0 || fitchange >= tolerance_)) {
209 optimize_mode1(data_, u2_, u3_, projection1, tmp1);
210 t3_hosvd< R1, R2, R3, I1, R2, R3, T >::apply_mode1(projection1, u1_);
212 optimize_mode2(data_, u1_, u3_, projection2, tmp2);
213 t3_hosvd< R1, R2, R3, R1, I2, R3, T >::apply_mode2(projection2, u2_);
215 optimize_mode3(data_, u1_, u2_, projection3, tmp2);
216 t3_hosvd< R1, R2, R3, R1, R2, I3, T >::apply_mode3(projection3, u3_);
218 t3_ttm::multiply_horizontal_bwd(projection3, transpose(u3_), core_);
220 if (tolerance_ > 0) {
221 f_norm = core_.frobenius_norm();
222 normresidual = sqrt( fabs(max_f_norm * max_f_norm - f_norm * f_norm) );
223 fit = 1 - (normresidual / max_f_norm);
224 fitchange = fabs(fitold - fit);
226 std::cout <<
"iteration '" << i <<
"', fit: " << fit
227 <<
", fitdelta: " << fitchange
228 <<
", frobenius norm of core: " << f_norm << std::endl;
233 result.set_n_iterations(i);
239 VMML_TEMPLATE_CLASSNAME::optimize_mode1(
const t3_type& data_,
const u2_type& u2_,
const u3_type& u3_,
240 tensor3< I1, R2, R3, T >& projection_,
241 tensor3< I1, R2, I3, T >& tmp_) {
242 u2_t_type* u2_inv =
new u2_t_type;
243 u3_t_type* u3_inv =
new u3_t_type;
244 u2_.transpose_to(*u2_inv);
245 u3_.transpose_to(*u3_inv);
249 t3_ttm::multiply_frontal_bwd(data_, *u2_inv, tmp_);
250 t3_ttm::multiply_horizontal_bwd(tmp_, *u3_inv, projection_);
253 t3_ttm::multiply_horizontal_fwd(data_, *u2_inv, tmp_);
254 t3_ttm::multiply_lateral_fwd(tmp_, *u3_inv, projection_);
263 VMML_TEMPLATE_CLASSNAME::optimize_mode2(
const t3_type& data_,
const u1_type& u1_,
const u3_type& u3_,
264 tensor3< R1, I2, R3, T >& projection_,
265 tensor3< R1, I2, I3, T >& tmp_) {
266 u1_t_type* u1_inv =
new u1_t_type();
267 u3_t_type* u3_inv =
new u3_t_type();
268 u1_.transpose_to(*u1_inv);
269 u3_.transpose_to(*u3_inv);
274 t3_ttm::multiply_lateral_bwd(data_, *u1_inv, tmp_);
275 t3_ttm::multiply_horizontal_bwd(tmp_, *u3_inv, projection_);
278 t3_ttm::multiply_frontal_fwd(data_, *u1_inv, tmp_);
279 t3_ttm::multiply_lateral_fwd(tmp_, *u3_inv, projection_);
288 VMML_TEMPLATE_CLASSNAME::optimize_mode3(
const t3_type& data_,
const u1_type& u1_,
const u2_type& u2_,
289 tensor3< R1, R2, I3, T >& projection_,
290 tensor3< R1, I2, I3, T >& tmp_) {
291 u1_t_type* u1_inv =
new u1_t_type;
292 u2_t_type* u2_inv =
new u2_t_type;
293 u1_.transpose_to(*u1_inv);
294 u2_.transpose_to(*u2_inv);
298 t3_ttm::multiply_lateral_bwd(data_, *u1_inv, tmp_);
299 t3_ttm::multiply_frontal_bwd(tmp_, *u2_inv, projection_);
302 t3_ttm::multiply_frontal_fwd(data_, *u1_inv, tmp_);
303 t3_ttm::multiply_horizontal_fwd(tmp_, *u2_inv, projection_);
312 VMML_TEMPLATE_CLASSNAME::derive_core_orthogonal_bases(
const t3_type& data_,
const u1_type& u1_,
const u2_type& u2_,
const u3_type& u3_, t3_core_type& core_) {
313 u1_t_type* u1_inv =
new u1_t_type;
314 u2_t_type* u2_inv =
new u2_t_type;
315 u3_t_type* u3_inv =
new u3_t_type;
317 u1_.transpose_to(*u1_inv);
318 u2_.transpose_to(*u2_inv);
319 u3_.transpose_to(*u3_inv);
321 t3_ttm::full_tensor3_matrix_multiplication(data_, *u1_inv, *u2_inv, *u3_inv, core_);
330 VMML_TEMPLATE_CLASSNAME::derive_core(
const t3_type& data_,
const u1_type& u1_,
const u2_type& u2_,
const u3_type& u3_, t3_core_type& core_) {
335 u1_type* u1_pinv_t =
new u1_type;
336 u2_type* u2_pinv_t =
new u2_type;
337 u3_type* u3_pinv_t =
new u3_type;
339 compute_pseudoinverse< u1_type > compute_pinv_u1;
340 compute_pinv_u1(u1_, *u1_pinv_t);
341 compute_pseudoinverse< u2_type > compute_pinv_u2;
342 compute_pinv_u2(u2_, *u2_pinv_t);
343 compute_pseudoinverse< u3_type > compute_pinv_u3;
344 compute_pinv_u3(u3_, *u3_pinv_t);
346 u1_t_type* u1_pinv =
new u1_t_type;
347 u2_t_type* u2_pinv =
new u2_t_type;
348 u3_t_type* u3_pinv =
new u3_t_type;
350 u1_pinv_t->transpose_to(*u1_pinv);
351 u2_pinv_t->transpose_to(*u2_pinv);
352 u3_pinv_t->transpose_to(*u3_pinv);
354 t3_ttm::full_tensor3_matrix_multiplication(data_, *u1_pinv, *u2_pinv, *u3_pinv, core_);
365 for (
size_t r3 = 0; r3 < R3; ++r3) {
366 for (
size_t r1 = 0; r1 < R1; ++r1) {
367 for (
size_t r2 = 0; r2 < R2; ++r2) {
368 float_t sum_i1_i2_i3 = 0.0;
369 for (
size_t i3 = 0; i3 < I3; ++i3) {
370 for (
size_t i1 = 0; i1 < I1; ++i1) {
371 for (
size_t i2 = 0; i2 < I2; ++i2) {
372 sum_i1_i2_i3 += u1_.at(i1, r1) * u2_.at(i2, r2) * u3_.at(i3, r3) * T(data_.at(i1, i2, i3));
376 core_.at(r1, r2, r3) = sum_i1_i2_i3;
// Keep the helper macros file-local: undefine them after the last
// out-of-line definition. (Stray numeric prefixes removed — they made
// the directives invalid.)
#undef VMML_TEMPLATE_STRING
#undef VMML_TEMPLATE_CLASSNAME