vmmlib  1.7.0
 All Classes Namespaces Functions Pages
t3_ihooi.hpp
1 /*
2  * Copyright (c) 2006-2014, Visualization and Multimedia Lab,
3  * University of Zurich <http://vmml.ifi.uzh.ch>,
4  * Eyescale Software GmbH,
5  * Blue Brain Project, EPFL
6  *
7  * This file is part of VMMLib <https://github.com/VMML/vmmlib/>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  *
12  * Redistributions of source code must retain the above copyright notice, this
13  * list of conditions and the following disclaimer. Redistributions in binary
14  * form must reproduce the above copyright notice, this list of conditions and
15  * the following disclaimer in the documentation and/or other materials provided
16  * with the distribution. Neither the name of the Visualization and Multimedia
17  * Lab, University of Zurich nor the names of its contributors may be used to
18  * endorse or promote products derived from this software without specific prior
19  * written permission.
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* @author Rafael Ballester
34  *
35  * incremental version of higher-order orthogonal iteration (HOOI), see t3_hooi.hpp
36  *
37  */
38 
39 
40 #ifndef __VMML__T3_IHOOI_HPP__
41 #define __VMML__T3_IHOOI_HPP__
42 
43 #include <vmmlib/t3_hopm.hpp>
44 #include <vmmlib/t3_hooi.hpp>
45 
46 namespace vmml {
47 
    // Incremental higher-order orthogonal iteration (iHOOI).
    //
    // Template parameters:
    //   R1, R2, R3 : target multilinear (Tucker) ranks of the decomposition
    //   NBLOCKS    : number of incremental blocks; each iteration computes a
    //                rank-(R1/NBLOCKS, R2/NBLOCKS, R3/NBLOCKS) piece, so
    //                R1, R2 and R3 must each be divisible by NBLOCKS
    //   I1, I2, I3 : dimensions of the input tensor
    //   T_val      : value type of the input/output data (default float)
    //   T_coeff    : working precision used internally (default double)
    template< size_t R1, size_t R2, size_t R3, size_t NBLOCKS, size_t I1, size_t I2, size_t I3, typename T_val = float, typename T_coeff = double >
    class t3_ihooi {
    public:

        // The output data
        // NOTE(review): the typedefs for the input tensor (t3_type), working
        // tensor (t3_coeff_type), output core (t3_core_type) and output factor
        // matrices (u1_type, u2_type, u3_type) were elided in this extracted
        // view -- confirm against the full header.

        // The incremental data, it is iteratively filled and later cast into the output
        // NOTE(review): the t3_core_incr_type and u1/u2/u3_incr_type typedefs
        // were elided in this extracted view -- confirm against the full header.

        // The pieces that are used at each iteration (one block's worth of
        // core and factor-matrix columns, in working precision T_coeff).
        typedef tensor3< R1 / NBLOCKS, R2 / NBLOCKS, R3 / NBLOCKS, T_coeff > t3_core_tmp_type;
        typedef matrix< I1, R1 / NBLOCKS, T_coeff > u1_tmp_type;
        typedef matrix< I2, R2 / NBLOCKS, T_coeff > u2_tmp_type;
        typedef matrix< I3, R3 / NBLOCKS, T_coeff > u3_tmp_type;

        // Used to "transport" individual columns between the tmp_type's and the incr_type's
        // NOTE(review): the u1/u2/u3_1col_type typedefs were elided in this
        // extracted view -- presumably vector< I*, T_coeff >; verify.

        // Incremental Tucker-ALS: runs NBLOCKS Tucker-ALS passes on the
        // successive residuals and accumulates the blocks into the output.
        static tensor_stats i_als(const t3_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, t3_core_type& core_, const float tolerance = 1e-04);

        // Incremental CP-Tucker-ALS: at each iteration, R-rank CP is performed, but a (R1,R2,R3)-Tucker core (R1,R2,R3 <= R) is computed from the resulting matrices.
        template< size_t R >
        static tensor_stats i_cp_als(const t3_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, t3_core_type& core_, const size_t max_iterations_ = 50, const float tolerance = 1e-04);
    };
85 
86 
87 
88 #define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t NBLOCKS, size_t I1, size_t I2, size_t I3, typename T_val, typename T_coeff >
89 #define VMML_TEMPLATE_CLASSNAME t3_ihooi< R1, R2, R3, NBLOCKS, I1, I2, I3, T_val, T_coeff >
90 
91  VMML_TEMPLATE_STRING
93  VMML_TEMPLATE_CLASSNAME::i_als(const t3_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, t3_core_type& core_, const float tolerance) {
94  tensor_stats result;
95 
96  if ((R1 % NBLOCKS != 0) or (R2 % NBLOCKS != 0) or (R3 % NBLOCKS != 0)) {
97  std::ostringstream convert1, convert2, convert3, convert4;
98  convert1 << R1;
99  convert2 << R2;
100  convert3 << R3;
101  convert4 << NBLOCKS;
102  VMMLIB_ERROR("In incremental Tucker, (R1,R2,R3) = (" + convert1.str() + "," + convert2.str() + "," + convert3.str() + "), NBLOCKS = " + convert2.str() + " (must be divisible)", VMMLIB_HERE);
103  }
104  t3_coeff_type* approx_data = new t3_coeff_type;
105  approx_data->zero();
106  t3_coeff_type* residual_data = new t3_coeff_type;
107  residual_data->cast_from(data_);
108 
109  t3_core_tmp_type* t3_core_tmp = new t3_core_tmp_type;
110  t3_core_tmp->zero();
111  u1_tmp_type* u1_tmp = new u1_tmp_type;
112  u2_tmp_type* u2_tmp = new u2_tmp_type;
113  u3_tmp_type* u3_tmp = new u3_tmp_type;
114 
115  t3_core_incr_type* t3_core_incr = new t3_core_incr_type;
116  t3_core_incr->zero();
117  u1_incr_type* u1_incr = new u1_incr_type;
118  u1_incr->zero();
119  u2_incr_type* u2_incr = new u2_incr_type;
120  u2_incr->zero();
121  u3_incr_type* u3_incr = new u3_incr_type;
122  u3_incr->zero();
123 
124  u1_1col_type* u1_1col = new u1_1col_type;
125  u2_1col_type* u2_1col = new u2_1col_type;
126  u3_1col_type* u3_1col = new u3_1col_type;
127 
128  typedef t3_hooi < R1 / NBLOCKS, R2 / NBLOCKS, R3 / NBLOCKS, I1, I2, I3, T_coeff > hooi_type;
129 
130  for (size_t i = 0; i < NBLOCKS; ++i) {
131 #ifdef TUCKER_LOG
132  std::cout << "Incremental Tucker: block number '" << i << "'" << std::endl;
133 #endif
134 
135  // Do Tucker-ALS for this block
136  result += hooi_type::als(*residual_data, *u1_tmp, *u2_tmp, *u3_tmp, *t3_core_tmp, typename hooi_type::init_hosvd(), tolerance);
137 
138  // Copy the newly obtained columns into ui_incr
139  for (size_t r = 0; r < R1 / NBLOCKS; ++r) {
140  u1_tmp->get_column(r, *u1_1col);
141  u1_incr->set_column(i * R1 / NBLOCKS + r, *u1_1col);
142  }
143  for (size_t r = 0; r < R2 / NBLOCKS; ++r) {
144  u2_tmp->get_column(r, *u2_1col);
145  u2_incr->set_column(i * R2 / NBLOCKS + r, *u2_1col);
146  }
147  for (size_t r = 0; r < R3 / NBLOCKS; ++r) {
148  u3_tmp->get_column(r, *u3_1col);
149  u3_incr->set_column(i * R3 / NBLOCKS + r, *u3_1col);
150  }
151 
152  // Copy the newly obtained block core into t3_core_incr
153  t3_core_incr->set_sub_tensor3(*t3_core_tmp, i * R1 / NBLOCKS, i * R2 / NBLOCKS, i * R3 / NBLOCKS);
154 
155  // Reconstruct this t3_core_tmp to set a new residual value.
156  t3_ttm::full_tensor3_matrix_multiplication(*t3_core_tmp, *u1_tmp, *u2_tmp, *u3_tmp, *approx_data);
157  *residual_data = *residual_data - *approx_data;
158  }
159 
160  u1_.cast_from(*u1_incr);
161  u2_.cast_from(*u2_incr);
162  u3_.cast_from(*u3_incr);
163 
164  core_.cast_from(*t3_core_incr);
165 
166  delete u1_1col;
167  delete u2_1col;
168  delete u3_1col;
169  delete t3_core_tmp;
170  delete u1_tmp;
171  delete u2_tmp;
172  delete u3_tmp;
173  delete t3_core_incr;
174  delete u1_incr;
175  delete u2_incr;
176  delete u3_incr;
177  delete residual_data;
178  delete approx_data;
179 
180  return result;
181  }
182 
183  VMML_TEMPLATE_STRING
184  template< size_t R >
185  tensor_stats
186  VMML_TEMPLATE_CLASSNAME::i_cp_als(const t3_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, t3_core_type& core_, const size_t max_iterations_, const float tolerance) {
187  tensor_stats result;
188 
189  if ((R1 % NBLOCKS != 0) or (R2 % NBLOCKS != 0) or (R3 % NBLOCKS != 0)) {
190  std::ostringstream convert1, convert2, convert3, convert4, convert5;
191  convert1 << R;
192  convert2 << R1;
193  convert3 << R2;
194  convert4 << R3;
195  convert5 << NBLOCKS;
196  VMMLIB_ERROR("In incremental CP-Tucker, R = " + convert1.str() + ", (R1,R2,R3) = (" + convert2.str() + "," + convert3.str() + "," + convert4.str() + "), NBLOCKS = " + convert5.str() + " (must be divisible, and R1,R2,R3 <= R)", VMMLIB_HERE);
197  }
198 
199  t3_coeff_type* approx_data = new t3_coeff_type;
200  approx_data->zero();
201  t3_coeff_type* residual_data = new t3_coeff_type;
202  residual_data->cast_from(data_);
203 
204  t3_core_tmp_type* t3_core_tmp = new t3_core_tmp_type;
205  t3_core_tmp->zero();
206  u1_tmp_type* u1_tmp = new u1_tmp_type;
207  u1_tmp_type* u1_tmp2 = new u1_tmp_type;
208  u2_tmp_type* u2_tmp = new u2_tmp_type;
209  u2_tmp_type* u2_tmp2 = new u2_tmp_type;
210  u3_tmp_type* u3_tmp = new u3_tmp_type;
211  u3_tmp_type* u3_tmp2 = new u3_tmp_type;
212 
213  t3_core_incr_type* t3_core_incr = new t3_core_incr_type;
214  t3_core_incr->zero();
215  u1_incr_type* u1_incr = new u1_incr_type;
216  u1_incr->zero();
217  u2_incr_type* u2_incr = new u2_incr_type;
218  u2_incr->zero();
219  u3_incr_type* u3_incr = new u3_incr_type;
220  u3_incr->zero();
221 
222  u1_1col_type* u1_1col = new u1_1col_type;
223  u2_1col_type* u2_1col = new u2_1col_type;
224  u3_1col_type* u3_1col = new u3_1col_type;
225 
226  typedef t3_hopm < R / NBLOCKS, I1, I2, I3, T_coeff > hopm_type;
227  typedef vector < R / NBLOCKS, T_coeff > lambda_tmp_type;
228  lambda_tmp_type* lambdas_tmp = new lambda_tmp_type;
229  typedef matrix < I1, R / NBLOCKS, T_coeff > u1_cp_tmp_type;
230  u1_cp_tmp_type* u1_cp_tmp = new u1_cp_tmp_type;
231  typedef matrix < I2, R / NBLOCKS, T_coeff > u2_cp_tmp_type;
232  u2_cp_tmp_type* u2_cp_tmp = new u2_cp_tmp_type;
233  typedef matrix < I3, R / NBLOCKS, T_coeff > u3_cp_tmp_type;
234  u3_cp_tmp_type* u3_cp_tmp = new u3_cp_tmp_type;
235 
236  typedef matrix < R1 / NBLOCKS, I1, T_coeff > u1_inv_tmp_type;
237  u1_inv_tmp_type* u1_inv_tmp = new u1_inv_tmp_type;
238  typedef matrix < R2 / NBLOCKS, I2, T_coeff > u2_inv_tmp_type;
239  u2_inv_tmp_type* u2_inv_tmp = new u2_inv_tmp_type;
240  typedef matrix < R3 / NBLOCKS, I3, T_coeff > u3_inv_tmp_type;
241  u3_inv_tmp_type* u3_inv_tmp = new u3_inv_tmp_type;
242 
243  for (size_t i = 0; i < NBLOCKS; ++i) {
244 #ifdef TUCKER_LOG
245  std::cout << "Incremental CP-Tucker: block number '" << i << "'" << std::endl;
246 #endif
247 
248  // Do CP-ALS for this block
249  result += hopm_type::als(*residual_data, *u1_cp_tmp, *u2_cp_tmp, *u3_cp_tmp, *lambdas_tmp, typename hopm_type::init_hosvd(), max_iterations_, tolerance);
250 
251  // Compute the pseudoinverses
252  u1_cp_tmp->get_sub_matrix(*u1_tmp, 0, 0);
253  compute_pseudoinverse< u1_tmp_type > compute_pinv1;
254  compute_pinv1(*u1_tmp, *u1_tmp2);
255  u1_tmp2->transpose_to(*u1_inv_tmp);
256 
257  u2_cp_tmp->get_sub_matrix(*u2_tmp, 0, 0);
258  compute_pseudoinverse< u2_tmp_type > compute_pinv2;
259  compute_pinv2(*u2_tmp, *u2_tmp2);
260  u2_tmp2->transpose_to(*u2_inv_tmp);
261 
262  u3_cp_tmp->get_sub_matrix(*u3_tmp, 0, 0);
263  compute_pseudoinverse< u3_tmp_type > compute_pinv3;
264  compute_pinv3(*u3_tmp, *u3_tmp2);
265  u3_tmp2->transpose_to(*u3_inv_tmp);
266  // hooi_type::als(*residual_data, *u1_tmp, *u2_tmp, *u3_tmp, *t3_core_tmp, typename hooi_type::init_hosvd());
267 
268  // Project the initial data onto the pseudoinverses to get the core
269  t3_ttm::full_tensor3_matrix_multiplication(*residual_data, *u1_inv_tmp, *u2_inv_tmp, *u3_inv_tmp, *t3_core_tmp);
270 
271  // Copy the newly obtained columns into ui_incr
272  for (size_t r = 0; r < R1 / NBLOCKS; ++r) {
273  u1_tmp->get_column(r, *u1_1col);
274  u1_incr->set_column(i * R1 / NBLOCKS + r, *u1_1col);
275  }
276  for (size_t r = 0; r < R2 / NBLOCKS; ++r) {
277  u2_tmp->get_column(r, *u2_1col);
278  u2_incr->set_column(i * R2 / NBLOCKS + r, *u2_1col);
279  }
280  for (size_t r = 0; r < R3 / NBLOCKS; ++r) {
281  u3_tmp->get_column(r, *u3_1col);
282  u3_incr->set_column(i * R3 / NBLOCKS + r, *u3_1col);
283  }
284 
285  // Copy the newly obtained block core into t3_core_incr
286  t3_core_incr->set_sub_tensor3(*t3_core_tmp, i * R1 / NBLOCKS, i * R2 / NBLOCKS, i * R3 / NBLOCKS);
287 
288  // Reconstruct this t3_core_tmp to set a new residual value.
289  t3_ttm::full_tensor3_matrix_multiplication(*t3_core_tmp, *u1_tmp, *u2_tmp, *u3_tmp, *approx_data);
290  *residual_data = *residual_data - *approx_data;
291  }
292 
293  u1_.cast_from(*u1_incr);
294  u2_.cast_from(*u2_incr);
295  u3_.cast_from(*u3_incr);
296 
297  core_.cast_from(*t3_core_incr);
298 
299  delete u1_1col;
300  delete u2_1col;
301  delete u3_1col;
302  delete t3_core_tmp;
303  delete u1_tmp;
304  delete u2_tmp;
305  delete u3_tmp;
306  delete t3_core_incr;
307  delete u1_incr;
308  delete u2_incr;
309  delete u3_incr;
310  delete residual_data;
311  delete approx_data;
312 
313  return result;
314  }
315 
316 #undef VMML_TEMPLATE_STRING
317 #undef VMML_TEMPLATE_CLASSNAME
318 
319 }//end vmml namespace
320 
#endif /* __VMML__T3_IHOOI_HPP__ */