vmmlib 1.7.0
t3_ihopm.hpp
/*
 * Copyright (c) 2006-2014, Visualization and Multimedia Lab,
 * University of Zurich <http://vmml.ifi.uzh.ch>,
 * Eyescale Software GmbH,
 * Blue Brain Project, EPFL
 *
 * This file is part of VMMLib <https://github.com/VMML/vmmlib/>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer. Redistributions in binary
 * form must reproduce the above copyright notice, this list of conditions and
 * the following disclaimer in the documentation and/or other materials provided
 * with the distribution. Neither the name of the Visualization and Multimedia
 * Lab, University of Zurich nor the names of its contributors may be used to
 * endorse or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* @author Susanne Suter
 * @author Rafa Ballester
 *
 * iHOPM stands for incremental higher-order power method.
 * In other words, it is an incremental rank-r CP-ALS.
 *
 * CP stands for Candecomp/Parafac (1970); ALS for the alternating least squares algorithm.
 * References:
 * - Carroll & Chang, 1970: Analysis of Individual Differences in Multidimensional Scaling via an N-way Generalization of "Eckart-Young" Decomposition, Psychometrika.
 * - Harshman, 1970: Foundations of the PARAFAC procedure: Models and conditions for an 'explanatory' multi-modal factor analysis, UCLA Working Papers in Phonetics.
 * - De Lathauwer, De Moor, Vandewalle, 2000: A multilinear singular value decomposition, SIAM J. Matrix Anal. Appl.
 * - Kolda & Bader, 2009: Tensor Decompositions and Applications, SIAM Review.
 *
 * Incremental rank-r approximation:
 * - Zhang & Golub, 2001: Rank-one approximation to high order tensors, SIAM J. Matrix Anal. Appl.
 */
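/* Usage sketch (illustrative values only): approximate a 16 x 16 x 16 tensor by a
 * rank-8 CP model built incrementally from 4 blocks of rank 2, i.e. R = 8 and
 * NBLOCKS = 4 (R must be divisible by NBLOCKS). The input tensor is assumed to be
 * filled by the caller.
 *
 *   typedef vmml::t3_ihopm< 8, 4, 16, 16, 16, float, double > ihopm_t;
 *
 *   ihopm_t::t3_type     data;      // input tensor, filled elsewhere
 *   ihopm_t::u1_type     u1;        // factor matrices, one per mode
 *   ihopm_t::u2_type     u2;
 *   ihopm_t::u3_type     u3;
 *   ihopm_t::lambda_type lambdas;   // per-rank scaling coefficients
 *
 *   ihopm_t::incremental_als( data, u1, u2, u3, lambdas, 50, 1e-4f );
 *
 *   ihopm_t::t3_type approx;        // rank-8 reconstruction from the factors
 *   ihopm_t::reconstruct( approx, u1, u2, u3, lambdas );
 */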

#ifndef __VMML__T3_IHOPM__HPP__
#define __VMML__T3_IHOPM__HPP__

#include <vmmlib/t3_hopm.hpp>

namespace vmml {

template< size_t R, size_t NBLOCKS, size_t I1, size_t I2, size_t I3, typename T_val = float, typename T_coeff = double >
class t3_ihopm {
public:

    // Earlier typedefs, in which R was the per-block rank and R*NBLOCKS the total rank:
    //
    //    typedef tensor3< I1, I2, I3, T_val > t3_type;
    //    typedef tensor3< I1, I2, I3, T_coeff > t3_coeff_type;
    //
    //    typedef vector< R*NBLOCKS, T_val > lambda_type;
    //    typedef vector< R*NBLOCKS, T_coeff > lambda_incr_type;
    //    typedef vector< R, T_coeff > lambda_tmp_type;
    //
    //    typedef matrix< I1, R*NBLOCKS, T_val > u1_type;
    //    typedef matrix< I2, R*NBLOCKS, T_val > u2_type;
    //    typedef matrix< I3, R*NBLOCKS, T_val > u3_type;
    //
    //    typedef matrix< R*NBLOCKS, I1, T_val > u1_inv_type;
    //    typedef matrix< R*NBLOCKS, I2, T_val > u2_inv_type;
    //    typedef matrix< R*NBLOCKS, I3, T_val > u3_inv_type;
    //
    //    typedef matrix< I1, R*NBLOCKS, T_coeff > u1_incr_type;
    //    typedef matrix< I2, R*NBLOCKS, T_coeff > u2_incr_type;
    //    typedef matrix< I3, R*NBLOCKS, T_coeff > u3_incr_type;
    //
    //    typedef matrix< I1, R, T_coeff > u1_tmp_type;
    //    typedef matrix< I2, R, T_coeff > u2_tmp_type;
    //    typedef matrix< I3, R, T_coeff > u3_tmp_type;
    //
    //    typedef matrix< I1, 1, T_coeff > u1_1col_type;
    //    typedef matrix< I2, 1, T_coeff > u2_1col_type;
    //    typedef matrix< I3, 1, T_coeff > u3_1col_type;

    // Current typedefs: R is the total rank, decomposed into NBLOCKS blocks of rank R/NBLOCKS.
    typedef tensor3< I1, I2, I3, T_val > t3_type;
    typedef tensor3< I1, I2, I3, T_coeff > t3_coeff_type;

    typedef vector< R, T_val > lambda_type;
    typedef vector< R, T_coeff > lambda_incr_type;
    typedef vector< R / NBLOCKS, T_coeff > lambda_tmp_type;

    typedef matrix< I1, R, T_val > u1_type;
    typedef matrix< I2, R, T_val > u2_type;
    typedef matrix< I3, R, T_val > u3_type;

    typedef matrix< R, I1, T_val > u1_inv_type;
    typedef matrix< R, I2, T_val > u2_inv_type;
    typedef matrix< R, I3, T_val > u3_inv_type;

    typedef matrix< I1, R, T_coeff > u1_incr_type;
    typedef matrix< I2, R, T_coeff > u2_incr_type;
    typedef matrix< I3, R, T_coeff > u3_incr_type;

    typedef matrix< I1, R / NBLOCKS, T_coeff > u1_tmp_type;
    typedef matrix< I2, R / NBLOCKS, T_coeff > u2_tmp_type;
    typedef matrix< I3, R / NBLOCKS, T_coeff > u3_tmp_type;

    typedef matrix< I1, 1, T_coeff > u1_1col_type;
    typedef matrix< I2, 1, T_coeff > u2_1col_type;
    typedef matrix< I3, 1, T_coeff > u3_1col_type;

    // incremental CP-ALS (Zhang & Golub, 2001)
    // template< typename T_init >
    static tensor_stats incremental_als(const t3_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, lambda_type& lambdas_, const size_t max_iterations_ = 50, const float tolerance_ = 1e-04);

    static void reconstruct(t3_type& data_, const u1_type& u1_, const u2_type& u2_, const u3_type& u3_, const lambda_type& lambdas_);
};


#define VMML_TEMPLATE_STRING template< size_t R, size_t NBLOCKS, size_t I1, size_t I2, size_t I3, typename T_val, typename T_coeff >
#define VMML_TEMPLATE_CLASSNAME t3_ihopm< R, NBLOCKS, I1, I2, I3, T_val, T_coeff >

VMML_TEMPLATE_STRING
// template< typename T_init >
tensor_stats
VMML_TEMPLATE_CLASSNAME::incremental_als(const t3_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, lambda_type& lambdas_, const size_t max_iterations_, const float tolerance_) {
    tensor_stats result;

    if (R % NBLOCKS != 0) {
        std::ostringstream convert1, convert2;
        convert1 << R;
        convert2 << NBLOCKS;
        VMMLIB_ERROR("In incremental CP, R = " + convert1.str() + ", NBLOCKS = " + convert2.str() + " (must be divisible)", VMMLIB_HERE);
    }

    t3_coeff_type* approx_data = new t3_coeff_type;
    approx_data->zero();
    t3_coeff_type* residual_data = new t3_coeff_type;
    residual_data->cast_from(data_);

    lambda_tmp_type* lambdas_tmp = new lambda_tmp_type;
    lambdas_tmp->set(0);
    u1_tmp_type* u1_tmp = new u1_tmp_type;
    u2_tmp_type* u2_tmp = new u2_tmp_type;
    u3_tmp_type* u3_tmp = new u3_tmp_type;

    lambda_incr_type* lambdas_incr = new lambda_incr_type;
    lambdas_incr->set(0);
    u1_incr_type* u1_incr = new u1_incr_type;
    u1_incr->zero();
    u2_incr_type* u2_incr = new u2_incr_type;
    u2_incr->zero();
    u3_incr_type* u3_incr = new u3_incr_type;
    u3_incr->zero();

    u1_1col_type* u1_1col = new u1_1col_type;
    u2_1col_type* u2_1col = new u2_1col_type;
    u3_1col_type* u3_1col = new u3_1col_type;

    typedef t3_hopm< R / NBLOCKS, I1, I2, I3, T_coeff > hopm_type;

    for (size_t i = 0; i < NBLOCKS; ++i) {
#ifdef CP_LOG
        std::cout << "Incremental CP: block number '" << i << "'" << std::endl;
#endif
        // init all values to zero
        u1_tmp->zero();
        u2_tmp->zero();
        u3_tmp->zero();
        *lambdas_tmp = 0.0;
        approx_data->zero();

        // fit a rank-(R/NBLOCKS) CP model to the current residual
        result += hopm_type::als(*residual_data, *u1_tmp, *u2_tmp, *u3_tmp, *lambdas_tmp, typename hopm_type::init_hosvd(), max_iterations_, tolerance_);

        // copy the lambdas and factor columns of this block to the appropriate position
        size_t r_incr = 0;
        T_coeff lambda_r = 0;
        for (size_t r = 0; r < R / NBLOCKS; ++r) {
            r_incr = i * R / NBLOCKS + r;
            u1_tmp->get_column(r, *u1_1col);
            u1_incr->set_column(r_incr, *u1_1col);
            u2_tmp->get_column(r, *u2_1col);
            u2_incr->set_column(r_incr, *u2_1col);
            u3_tmp->get_column(r, *u3_1col);
            u3_incr->set_column(r_incr, *u3_1col);

            // set lambda
            lambda_r = lambdas_tmp->at(r);
            lambdas_incr->at(r_incr) = lambda_r;
        }

        // subtract this block's reconstruction from the residual (deflation)
        hopm_type::reconstruct(*approx_data, *u1_tmp, *u2_tmp, *u3_tmp, *lambdas_tmp);
        *residual_data = *residual_data - *approx_data;
    }

    u1_.cast_from(*u1_incr);
    u2_.cast_from(*u2_incr);
    u3_.cast_from(*u3_incr);
    lambdas_.cast_from(*lambdas_incr);

    delete u1_1col;
    delete u2_1col;
    delete u3_1col;
    delete u1_tmp;
    delete u2_tmp;
    delete u3_tmp;
    delete lambdas_tmp;
    delete u1_incr;
    delete u2_incr;
    delete u3_incr;
    delete lambdas_incr;
    delete residual_data;
    delete approx_data;

    return result;
}

VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::reconstruct(t3_type& data_, const u1_type& u1_, const u2_type& u2_, const u3_type& u3_, const lambda_type& lambdas_) {
    u1_inv_type* u1_t = new u1_inv_type;
    u2_inv_type* u2_t = new u2_inv_type;
    u3_inv_type* u3_t = new u3_inv_type;
    typedef matrix< R, I2 * I3, T_val > m_temp_type;
    m_temp_type* temp = new m_temp_type;

    u1_.transpose_to(*u1_t);
    u2_.transpose_to(*u2_t);
    u3_.transpose_to(*u3_t);

    data_.reconstruct_CP(lambdas_, *u1_t, *u2_t, *u3_t, *temp);

    delete temp;
    delete u1_t;
    delete u2_t;
    delete u3_t;
}

#undef VMML_TEMPLATE_STRING
#undef VMML_TEMPLATE_CLASSNAME

} // end vmml namespace

#endif