#ifndef __VMML__T4_HOOI__HPP__
#define __VMML__T4_HOOI__HPP__

#include <vmmlib/t4_hosvd.hpp>
#include <vmmlib/t3_ttm.hpp>
#include <vmmlib/t4_ttm.hpp>
#include <vmmlib/matrix_pseudoinverse.hpp>
#include <vmmlib/tensor_stats.hpp>
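
// Tucker decomposition of a 4th-order tensor by higher-order orthogonal
// iteration (HOOI): an alternating least squares (ALS) scheme in which each
// factor matrix is optimized in turn, holding the other three fixed, using a
// truncated HOSVD of the partially projected data tensor.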
namespace vmml {

template< size_t R1, size_t R2, size_t R3, size_t R4,
          size_t I1, size_t I2, size_t I3, size_t I4,
          typename T = float >
class t4_hooi {
public:
    template< typename T_init >
    static tensor_stats als(const t4_type& data_,
            u1_type& u1_, u2_type& u2_, u3_type& u3_, u4_type& u4_,
            T_init init, const double& max_f_norm_,
            const size_t max_iterations_, const float tolerance_);

    template< typename T_init >
    static tensor_stats als(const t4_type& data_,
            u1_type& u1_, u2_type& u2_, u3_type& u3_, u4_type& u4_,
            t4_core_type& core_, T_init init, const double& max_f_norm_,
            const size_t max_iterations_, const float tolerance_);
            // normalize each initial factor matrix to unit Frobenius norm
            u2_ /= u2_.frobenius_norm();
            u3_ /= u3_.frobenius_norm();
            u4_ /= u4_.frobenius_norm();
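
    // Example usage (a sketch; the concrete dimensions, the init_random
    // functor name, and the matrix< I, R, T > factor types are assumptions
    // for illustration, not taken verbatim from this header):
    //
    //     tensor4< 16, 16, 16, 16, float > data;   // input, I1..I4 = 16
    //     matrix< 16, 4, float > u1, u2, u3, u4;   // factors, R1..R4 = 4
    //     tensor4< 4, 4, 4, 4, float > core;       // Tucker core
    //     typedef t4_hooi< 4, 4, 4, 4, 16, 16, 16, 16, float > hooi_t;
    //     tensor_stats stats = hooi_t::als(data, u1, u2, u3, u4, core,
    //             hooi_t::init_random(), 0.0, 10, 1e-4f);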
}; // class t4_hooi

#define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t R4, size_t I1, size_t I2, size_t I3, size_t I4, typename T >
#define VMML_TEMPLATE_CLASSNAME t4_hooi< R1, R2, R3, R4, I1, I2, I3, I4, T >
VMML_TEMPLATE_STRING
template< typename T_init >
tensor_stats
VMML_TEMPLATE_CLASSNAME::als(const t4_type& data_,
        u1_type& u1_, u2_type& u2_, u3_type& u3_, u4_type& u4_,
        T_init init,
        const double& max_f_norm_, const size_t max_iterations_, const float tolerance_) {
    // convenience overload: callers that do not need the core tensor get a
    // scratch core allocated here and discarded on return
    t4_core_type core;
    core.zero();
    return als(data_, u1_, u2_, u3_, u4_, core, init, max_f_norm_, max_iterations_, tolerance_);
}
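
// The full ALS routine: initialize the mode-2..4 factors with the supplied
// functor, then sweep over the four modes until the change in fit drops
// below tolerance_ or max_iterations_ is reached; a negative tolerance_
// disables the convergence check so exactly max_iterations_ sweeps run.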
VMML_TEMPLATE_STRING
template< typename T_init >
tensor_stats
VMML_TEMPLATE_CLASSNAME::als(const t4_type& data_,
        u1_type& u1_, u2_type& u2_, u3_type& u3_, u4_type& u4_,
        t4_core_type& core_, T_init init,
        const double& max_f_norm_, const size_t max_iterations_, const float tolerance_) {
    tensor_stats result;
    // initialize the mode-2..4 factor matrices; mode 1 is computed first
    // inside the iteration loop, so it needs no initial guess
    init(data_, u2_, u3_, u4_);

    core_.zero();
    T max_f_norm = 0.0;
    T f_norm, fit, fitchange, fitold, normresidual;
    if (tolerance_ > 0) {
        max_f_norm = max_f_norm_;

        // fall back to the data norm when no precomputed norm was passed in
        if (max_f_norm <= 0.0) {
            max_f_norm = data_.frobenius_norm();
        }
        // seed the fit tracking so the first iteration always runs
        fit = 0;
        fitchange = 1;
        fitold = fit;
        normresidual = 0;
    }

    size_t i = 0;
    // per-mode projections of the data onto the other three factor matrices
    tensor4< I1, R2, R3, R4, T > projection1;
    tensor4< R1, I2, R3, R4, T > projection2;
    tensor4< R1, R2, I3, R4, T > projection3;
    tensor4< R1, R2, R3, I4, T > projection4;
    std::cout << "HOOI ALS (for tensor4)" << std::endl
              << "initial fit: " << fit << ", "
              << "frobenius norm original: " << max_f_norm << std::endl;
    while (i < max_iterations_ && (tolerance_ < 0 || fitchange >= tolerance_)) {
        ++i;
        fitold = fit;

        // optimize each mode in turn, holding the other three factors fixed
        optimize_mode1(data_, u2_, u3_, u4_, projection1);
        t4_hosvd< R1, R2, R3, R4, I1, R2, R3, R4, T >::apply_mode1(projection1, u1_);

        optimize_mode2(data_, u1_, u3_, u4_, projection2);
        t4_hosvd< R1, R2, R3, R4, R1, I2, R3, R4, T >::apply_mode2(projection2, u2_);

        optimize_mode3(data_, u1_, u2_, u4_, projection3);
        t4_hosvd< R1, R2, R3, R4, R1, R2, I3, R4, T >::apply_mode3(projection3, u3_);

        optimize_mode4(data_, u1_, u2_, u3_, projection4);
        t4_hosvd< R1, R2, R3, R4, R1, R2, R3, I4, T >::apply_mode4(projection4, u4_);

        // form the core: the mode-4 projection multiplied with the transpose
        // of the freshly updated u4_
        t4_ttm::mode4_multiply_fwd(projection4, transpose(u4_), core_);
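
        // For orthonormal factor matrices the Tucker approximation error
        // satisfies ||X - X_hat||^2 = ||X||^2 - ||G||^2 (G = core tensor),
        // so the residual follows from the two Frobenius norms alone, with
        // no explicit reconstruction; fabs() below only guards against a
        // slightly negative argument due to floating-point round-off.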
        if (tolerance_ > 0) {
            f_norm = core_.frobenius_norm();
            normresidual = sqrt(fabs(max_f_norm * max_f_norm - f_norm * f_norm));
            fit = 1 - (normresidual / max_f_norm);
            fitchange = fabs(fitold - fit);

            std::cout << "iteration '" << i << "', fit: " << fit
                      << ", fitdelta: " << fitchange
                      << ", frobenius norm of core: " << f_norm << std::endl;
        }
    }
    result.set_n_iterations(i);
    return result;
}
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::optimize_mode1(const t4_type& data_,
        const u2_type& u2_, const u3_type& u3_, const u4_type& u4_,
        tensor4< I1, R2, R3, R4, T >& projection_) {
    // the factor matrices are orthonormal, so their transposes act as
    // pseudo-inverses (hence the *_inv names)
    u2_t_type* u2_inv = new u2_t_type;
    u3_t_type* u3_inv = new u3_t_type;
    u4_t_type* u4_inv = new u4_t_type;
    u2_.transpose_to(*u2_inv);
    u3_.transpose_to(*u3_inv);
    u4_.transpose_to(*u4_inv);

    // contract the data along modes 2, 3, and 4 (one TTM product per mode)
    tensor4< I1, R2, I3, I4, T > tmp1_;
    tensor4< I1, R2, R3, I4, T > tmp2_;
    t4_ttm::mode2_multiply_fwd(data_, *u2_inv, tmp1_);
    t4_ttm::mode3_multiply_fwd(tmp1_, *u3_inv, tmp2_);
    t4_ttm::mode4_multiply_fwd(tmp2_, *u4_inv, projection_);

    delete u2_inv;
    delete u3_inv;
    delete u4_inv;
}
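
// The remaining optimize_mode*() functions follow the same pattern: transpose
// the three fixed factors, contract the data tensor with them via per-mode
// TTM products, and return the projection whose unfolding t4_hosvd then
// reduces to the new factor matrix.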
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::optimize_mode2(const t4_type& data_,
        const u1_type& u1_, const u3_type& u3_, const u4_type& u4_,
        tensor4< R1, I2, R3, R4, T >& projection_) {
    u1_t_type* u1_inv = new u1_t_type;
    u3_t_type* u3_inv = new u3_t_type;
    u4_t_type* u4_inv = new u4_t_type;
    u1_.transpose_to(*u1_inv);
    u3_.transpose_to(*u3_inv);
    u4_.transpose_to(*u4_inv);

    // contract the data along modes 1, 3, and 4
    tensor4< R1, I2, I3, I4, T > tmp1_;
    tensor4< R1, I2, R3, I4, T > tmp2_;
    t4_ttm::mode1_multiply_fwd(data_, *u1_inv, tmp1_);
    t4_ttm::mode3_multiply_fwd(tmp1_, *u3_inv, tmp2_);
    t4_ttm::mode4_multiply_fwd(tmp2_, *u4_inv, projection_);

    delete u1_inv;
    delete u3_inv;
    delete u4_inv;
}
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::optimize_mode3(const t4_type& data_,
        const u1_type& u1_, const u2_type& u2_, const u4_type& u4_,
        tensor4< R1, R2, I3, R4, T >& projection_) {
    u1_t_type* u1_inv = new u1_t_type;
    u2_t_type* u2_inv = new u2_t_type;
    u4_t_type* u4_inv = new u4_t_type;
    u1_.transpose_to(*u1_inv);
    u2_.transpose_to(*u2_inv);
    u4_.transpose_to(*u4_inv);

    // contract the data along modes 1, 2, and 4
    tensor4< R1, I2, I3, I4, T > tmp1_;
    tensor4< R1, R2, I3, I4, T > tmp2_;
    t4_ttm::mode1_multiply_fwd(data_, *u1_inv, tmp1_);
    t4_ttm::mode2_multiply_fwd(tmp1_, *u2_inv, tmp2_);
    t4_ttm::mode4_multiply_fwd(tmp2_, *u4_inv, projection_);

    delete u1_inv;
    delete u2_inv;
    delete u4_inv;
}
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::optimize_mode4(const t4_type& data_,
        const u1_type& u1_, const u2_type& u2_, const u3_type& u3_,
        tensor4< R1, R2, R3, I4, T >& projection_) {
    u1_t_type* u1_inv = new u1_t_type;
    u2_t_type* u2_inv = new u2_t_type;
    u3_t_type* u3_inv = new u3_t_type;
    u1_.transpose_to(*u1_inv);
    u2_.transpose_to(*u2_inv);
    u3_.transpose_to(*u3_inv);

    // contract the data along modes 1, 2, and 3
    tensor4< R1, I2, I3, I4, T > tmp1_;
    tensor4< R1, R2, I3, I4, T > tmp2_;
    t4_ttm::mode1_multiply_fwd(data_, *u1_inv, tmp1_);
    t4_ttm::mode2_multiply_fwd(tmp1_, *u2_inv, tmp2_);
    t4_ttm::mode3_multiply_fwd(tmp2_, *u3_inv, projection_);

    delete u1_inv;
    delete u2_inv;
    delete u3_inv;
}
#undef VMML_TEMPLATE_STRING
#undef VMML_TEMPLATE_CLASSNAME

} // namespace vmml

#endif // __VMML__T4_HOOI__HPP__