t4_hooi.hpp
/*
 * Copyright (c) 2006-2014, Visualization and Multimedia Lab,
 * University of Zurich <http://vmml.ifi.uzh.ch>,
 * Eyescale Software GmbH,
 * Blue Brain Project, EPFL
 *
 * This file is part of VMMLib <https://github.com/VMML/vmmlib/>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer. Redistributions in binary
 * form must reproduce the above copyright notice, this list of conditions and
 * the following disclaimer in the documentation and/or other materials provided
 * with the distribution. Neither the name of the Visualization and Multimedia
 * Lab, University of Zurich nor the names of its contributors may be used to
 * endorse or promote products derived from this software without specific prior
 * written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __VMML__T4_HOOI__HPP__
#define __VMML__T4_HOOI__HPP__


#include <vmmlib/t4_hosvd.hpp>
#include <vmmlib/t3_ttm.hpp>
#include <vmmlib/t4_ttm.hpp>
#include <vmmlib/matrix_pseudoinverse.hpp>
#include <vmmlib/tensor_stats.hpp>

#include <cmath>    // sqrt, fabs
#include <cstdlib>  // srand, rand
#include <ctime>    // time
#include <iostream> // std::cout (only used when TUCKER_LOG is enabled)

namespace vmml {
    template< size_t R1, size_t R2, size_t R3, size_t R4, size_t I1, size_t I2, size_t I3, size_t I4, typename T = float >
    class t4_hooi {
    public:

        // types of the input tensor, the core tensor, the factor matrices and
        // their transposes (deduced from their uses below)
        typedef tensor4< I1, I2, I3, I4, T > t4_type;
        typedef tensor4< R1, R2, R3, R4, T > t4_core_type;

        typedef matrix< I1, R1, T > u1_type;
        typedef matrix< I2, R2, T > u2_type;
        typedef matrix< I3, R3, T > u3_type;
        typedef matrix< I4, R4, T > u4_type;

        typedef matrix< R1, I1, T > u1_t_type;
        typedef matrix< R2, I2, T > u2_t_type;
        typedef matrix< R3, I3, T > u3_t_type;
        typedef matrix< R4, I4, T > u4_t_type;

        template< typename T_init >
        static tensor_stats als(const t4_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, u4_type& u4_,
                T_init init, const double& max_f_norm_ = 0.0, const size_t max_iterations = 10, const float tolerance = 1e-04);

        template< typename T_init >
        static tensor_stats als(const t4_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, u4_type& u4_, t4_core_type& core_,
                T_init init, const double& max_f_norm_ = 0.0, const size_t max_iterations = 10, const float tolerance = 1e-04);

        // init functors

        struct init_hosvd {

            inline void operator()(const t4_type& data_, u2_type& u2_, u3_type& u3_, u4_type& u4_) {
                // HOSVD initialization (assumed reconstruction of the collapsed body):
                // set the factor matrices of modes 2-4 to the leading left singular
                // vectors of the corresponding unfoldings of the data tensor.
                t4_hosvd< R1, R2, R3, R4, I1, I2, I3, I4, T >::apply_mode2(data_, u2_);
                t4_hosvd< R1, R2, R3, R4, I1, I2, I3, I4, T >::apply_mode3(data_, u3_);
                t4_hosvd< R1, R2, R3, R4, I1, I2, I3, I4, T >::apply_mode4(data_, u4_);
            }
        };

        struct init_random {

            inline void operator()(const t4_type&, u2_type& u2_, u3_type& u3_, u4_type& u4_) {
                srand(time(NULL));
                u2_.set_random();
                u3_.set_random();
                u4_.set_random();

                u2_ /= u2_.frobenius_norm();
                u3_ /= u3_.frobenius_norm();
                u4_ /= u4_.frobenius_norm();
            }
        };

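        // Usage sketch (illustrative only; the sizes 8/16 and the alias hooi_t
        // below are hypothetical, not part of this header). A rank-(8,8,8,8)
        // Tucker approximation of a 16x16x16x16 tensor could look like this:
        //
        //   typedef t4_hooi< 8, 8, 8, 8, 16, 16, 16, 16, float > hooi_t;
        //   hooi_t::t4_type      data;              // input tensor, filled elsewhere
        //   hooi_t::u1_type u1;  hooi_t::u2_type u2; // factor matrices (outputs)
        //   hooi_t::u3_type u3;  hooi_t::u4_type u4;
        //   hooi_t::t4_core_type core;               // core tensor (output)
        //   tensor_stats stats = hooi_t::als( data, u1, u2, u3, u4, core,
        //                                     hooi_t::init_hosvd() );
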
    protected:

        static void optimize_mode1(const t4_type& data_, const u2_type& u2_, const u3_type& u3_, const u4_type& u4_,
                tensor4< I1, R2, R3, R4, T >& projection_);
        static void optimize_mode2(const t4_type& data_, const u1_type& u1_, const u3_type& u3_, const u4_type& u4_,
                tensor4< R1, I2, R3, R4, T >& projection_);
        static void optimize_mode3(const t4_type& data_, const u1_type& u1_, const u2_type& u2_, const u4_type& u4_,
                tensor4< R1, R2, I3, R4, T >& projection_);
        static void optimize_mode4(const t4_type& data_, const u1_type& u1_, const u2_type& u2_, const u3_type& u3_,
                tensor4< R1, R2, R3, I4, T >& projection_);

    }; //end class t4_hooi

#define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t R4, size_t I1, size_t I2, size_t I3, size_t I4, typename T >
#define VMML_TEMPLATE_CLASSNAME t4_hooi< R1, R2, R3, R4, I1, I2, I3, I4, T >

    VMML_TEMPLATE_STRING
    template< typename T_init >
    tensor_stats
    VMML_TEMPLATE_CLASSNAME::als(const t4_type& data_,
            u1_type& u1_, u2_type& u2_, u3_type& u3_, u4_type& u4_,
            T_init init,
            const double& max_f_norm_, const size_t max_iterations_, const float tolerance_) {
        t4_core_type core;
        core.zero();
        return als(data_, u1_, u2_, u3_, u4_, core, init, max_f_norm_, max_iterations_, tolerance_);
    }

    VMML_TEMPLATE_STRING
    template< typename T_init >
    tensor_stats
    VMML_TEMPLATE_CLASSNAME::als(const t4_type& data_,
            u1_type& u1_, u2_type& u2_, u3_type& u3_, u4_type& u4_,
            t4_core_type& core_,
            T_init init,
            const double& max_f_norm_, const size_t max_iterations_, const float tolerance_) {
        tensor_stats result;

        //initialize basis matrices
        init(data_, u2_, u3_, u4_);

        core_.zero();
        T max_f_norm = 0.0;
        T f_norm, fit, fitchange, fitold, normresidual;
        if (tolerance_ > 0) {
            max_f_norm = max_f_norm_;

            if (max_f_norm <= 0.0) {
                max_f_norm = data_.frobenius_norm();
            }
            fit = 0;
            //removed to save computation
            /*if ( (max_f_norm != 0) && (max_f_norm > f_norm) )
            {
                fit = 1 - (normresidual / max_f_norm);
            } else {
                fit = 1;
            }*/
            fitchange = 1;
            fitold = fit;
            normresidual = 0;
        }

        tensor4< I1, R2, R3, R4, T > projection1;
        tensor4< R1, I2, R3, R4, T > projection2;
        tensor4< R1, R2, I3, R4, T > projection3;
        tensor4< R1, R2, R3, I4, T > projection4;

#if TUCKER_LOG
        std::cout << "HOOI ALS (for tensor4) " << std::endl
                << "initial fit: " << fit << ", "
                << "frobenius norm original: " << max_f_norm << std::endl;
#endif
        size_t i = 0;
        while (i < max_iterations_ && (tolerance_ < 0 || fitchange >= tolerance_)) { //iterate until convergence
            fitold = fit;

            //optimize modes
            optimize_mode1(data_, u2_, u3_, u4_, projection1);
            t4_hosvd< R1, R2, R3, R4, I1, R2, R3, R4, T >::apply_mode1(projection1, u1_);

            optimize_mode2(data_, u1_, u3_, u4_, projection2);
            t4_hosvd< R1, R2, R3, R4, R1, I2, R3, R4, T >::apply_mode2(projection2, u2_);

            optimize_mode3(data_, u1_, u2_, u4_, projection3);
            t4_hosvd< R1, R2, R3, R4, R1, R2, I3, R4, T >::apply_mode3(projection3, u3_);

            optimize_mode4(data_, u1_, u2_, u3_, projection4);
            t4_hosvd< R1, R2, R3, R4, R1, R2, R3, I4, T >::apply_mode4(projection4, u4_);

            t4_ttm::mode4_multiply_fwd(projection4, transpose(u4_), core_);

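            // Convergence check: with column-orthonormal factor matrices the Tucker
            // residual satisfies ||data - approx||^2 = ||data||^2 - ||core||^2, so the
            // relative fit can be tracked from the core norm alone.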
            if (tolerance_ > 0) {
                f_norm = core_.frobenius_norm();
                normresidual = sqrt( fabs(max_f_norm * max_f_norm - f_norm * f_norm) );
                fit = 1 - (normresidual / max_f_norm);
                fitchange = fabs(fitold - fit);
#if TUCKER_LOG
                std::cout << "iteration '" << i << "', fit: " << fit
                        << ", fitdelta: " << fitchange
                        << ", frobenius norm of core: " << f_norm << std::endl;
#endif
            }
            ++i;
        }
        result.set_n_iterations(i);
        return result;
    }

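    // The optimize_modeN helpers compute the mode-n projection of the data tensor:
    // the data is multiplied (tensor-times-matrix) by the transposes of the factor
    // matrices of all modes except n, leaving mode n at its full size In.
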
    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::optimize_mode1(const t4_type& data_, const u2_type& u2_, const u3_type& u3_, const u4_type& u4_,
            tensor4< I1, R2, R3, R4, T >& projection_) {
        u2_t_type* u2_inv = new u2_t_type;
        u3_t_type* u3_inv = new u3_t_type;
        u4_t_type* u4_inv = new u4_t_type;
        u2_.transpose_to(*u2_inv);
        u3_.transpose_to(*u3_inv);
        u4_.transpose_to(*u4_inv);

        //forward cyclic matricization/unfolding (after Kiers, 2000) -> memory optimized
        tensor4< I1, R2, I3, I4, T > tmp1_;
        tensor4< I1, R2, R3, I4, T > tmp2_;
        t4_ttm::mode2_multiply_fwd(data_, *u2_inv, tmp1_);
        t4_ttm::mode3_multiply_fwd(tmp1_, *u3_inv, tmp2_);
        t4_ttm::mode4_multiply_fwd(tmp2_, *u4_inv, projection_);

        delete u2_inv;
        delete u3_inv;
        delete u4_inv;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::optimize_mode2(const t4_type& data_, const u1_type& u1_, const u3_type& u3_, const u4_type& u4_,
            tensor4< R1, I2, R3, R4, T >& projection_) {
        u1_t_type* u1_inv = new u1_t_type;
        u3_t_type* u3_inv = new u3_t_type;
        u4_t_type* u4_inv = new u4_t_type;
        u1_.transpose_to(*u1_inv);
        u3_.transpose_to(*u3_inv);
        u4_.transpose_to(*u4_inv);

        //forward cyclic matricization/unfolding (after Kiers, 2000) -> memory optimized
        tensor4< R1, I2, I3, I4, T > tmp1_;
        tensor4< R1, I2, R3, I4, T > tmp2_;
        t4_ttm::mode1_multiply_fwd(data_, *u1_inv, tmp1_);
        t4_ttm::mode3_multiply_fwd(tmp1_, *u3_inv, tmp2_);
        t4_ttm::mode4_multiply_fwd(tmp2_, *u4_inv, projection_);

        delete u1_inv;
        delete u3_inv;
        delete u4_inv;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::optimize_mode3(const t4_type& data_, const u1_type& u1_, const u2_type& u2_, const u4_type& u4_,
            tensor4< R1, R2, I3, R4, T >& projection_) {
        u1_t_type* u1_inv = new u1_t_type;
        u2_t_type* u2_inv = new u2_t_type;
        u4_t_type* u4_inv = new u4_t_type;
        u1_.transpose_to(*u1_inv);
        u2_.transpose_to(*u2_inv);
        u4_.transpose_to(*u4_inv);

        //forward cyclic matricization/unfolding (after Kiers, 2000) -> memory optimized
        tensor4< R1, I2, I3, I4, T > tmp1_;
        tensor4< R1, R2, I3, I4, T > tmp2_;
        t4_ttm::mode1_multiply_fwd(data_, *u1_inv, tmp1_);
        t4_ttm::mode2_multiply_fwd(tmp1_, *u2_inv, tmp2_);
        t4_ttm::mode4_multiply_fwd(tmp2_, *u4_inv, projection_);

        delete u1_inv;
        delete u2_inv;
        delete u4_inv;
    }

    VMML_TEMPLATE_STRING
    void
    VMML_TEMPLATE_CLASSNAME::optimize_mode4(const t4_type& data_, const u1_type& u1_, const u2_type& u2_, const u3_type& u3_,
            tensor4< R1, R2, R3, I4, T >& projection_) {
        u1_t_type* u1_inv = new u1_t_type;
        u2_t_type* u2_inv = new u2_t_type;
        u3_t_type* u3_inv = new u3_t_type;
        u1_.transpose_to(*u1_inv);
        u2_.transpose_to(*u2_inv);
        u3_.transpose_to(*u3_inv);

        //forward cyclic matricization/unfolding (after Kiers, 2000) -> memory optimized
        tensor4< R1, I2, I3, I4, T > tmp1_;
        tensor4< R1, R2, I3, I4, T > tmp2_;
        t4_ttm::mode1_multiply_fwd(data_, *u1_inv, tmp1_);
        t4_ttm::mode2_multiply_fwd(tmp1_, *u2_inv, tmp2_);
        t4_ttm::mode3_multiply_fwd(tmp2_, *u3_inv, projection_);

        delete u1_inv;
        delete u2_inv;
        delete u3_inv;
    }


#undef VMML_TEMPLATE_STRING
#undef VMML_TEMPLATE_CLASSNAME


}//end vmml namespace

#endif // __VMML__T4_HOOI__HPP__