ring.ipp
/***************************************************************************
 *                                                                          *
 *   Copyright (C) 2018 by David B. Blumenthal                              *
 *                                                                          *
 *   This file is part of GEDLIB.                                           *
 *                                                                          *
 *   GEDLIB is free software: you can redistribute it and/or modify it      *
 *   under the terms of the GNU Lesser General Public License as published  *
 *   by the Free Software Foundation, either version 3 of the License, or   *
 *   (at your option) any later version.                                    *
 *                                                                          *
 *   GEDLIB is distributed in the hope that it will be useful,              *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of         *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           *
 *   GNU Lesser General Public License for more details.                    *
 *                                                                          *
 *   You should have received a copy of the GNU Lesser General Public       *
 *   License along with GEDLIB. If not, see <http://www.gnu.org/licenses/>. *
 *                                                                          *
 ***************************************************************************/

/*!
 * @file ring.ipp
 * @brief ged::Ring class definition.
 */

#ifndef SRC_METHODS_RING_IPP_
#define SRC_METHODS_RING_IPP_

namespace ged {

// === Definitions of destructor and constructor. ===
template<class UserNodeLabel, class UserEdgeLabel>
Ring<UserNodeLabel, UserEdgeLabel>::
~Ring() {}

template<class UserNodeLabel, class UserEdgeLabel>
Ring<UserNodeLabel, UserEdgeLabel>::
Ring(const GEDData<UserNodeLabel, UserEdgeLabel> & ged_data) :
LSAPEBasedMethod<UserNodeLabel, UserEdgeLabel>(ged_data),
rings_(),
led_method_{LSAPE_OPTIMAL},
sort_method_{COUNTING},
num_layers_{undefined()},
alpha_(),
lambda_(),
mu_{1.0},
num_evals_{50},
num_x0s_{100},
infile_(""),
outfile_("") {
    this->compute_lower_bound_ = false;
}

// === Definitions of member functions inherited from LSAPEBasedMethod. ===
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_init_graph_(const GEDGraph & graph) {
    build_rings_(graph);
}

template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_set_default_options_() {
    led_method_ = LSAPE_OPTIMAL;
    sort_method_ = COUNTING;
    mu_ = 1;
    num_evals_ = 50;
    num_x0s_ = 100;
}

template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_pre_graph_init_(bool called_at_runtime) {
    if (load_config_file_()) {
        read_params_from_file_();
    }
}

template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_default_post_graph_init_() {
    if (not load_config_file_()) {
        set_num_layers_();
        for (std::size_t level{0}; level < num_layers_; level++) {
            lambda_.push_back(1.0 / static_cast<double>(num_layers_));
        }
        for (std::size_t i{0}; i < 3; i++) {
            alpha_.push_back(1.0 / 3.0);
        }
    }
}

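// lsape_init_() learns the weights alpha_ and lambda_ by minimizing the mean induced
// upper bound over all graph pairs in the instance. The minimization is delegated to
// NOMAD (derivative-free mesh adaptive direct search): every starting point produced by
// init_x0s_() is optimized independently, in parallel if OpenMP is available, and the
// best feasible solution found across all runs is kept.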
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_init_() {

    // Return if the method is initialized from a file.
    if (load_config_file_()) {
        return;
    }

    // Set the number of layers to the maximum diameter of the graphs in the instance.
    set_num_layers_();

    // Initialize the starting points employed by NOMAD.
    std::vector<NOMAD::Point> x0s;
    init_x0s_(x0s);

    // Run NOMAD for all starting points.
    std::vector<NOMAD::Point> solutions;
    std::vector<NOMAD::Double> objectives;
    ProgressBar progress_bar(num_x0s_);
    std::cout << "\rNOMAD: " << progress_bar << std::flush;

#ifdef _OPENMP
    omp_set_num_threads(this->num_threads_ - 1);
#pragma omp parallel for if(this->num_threads_ > 1)
#endif
    for (std::size_t i = 0; i < num_x0s_; i++) {
        NOMAD::Display out(std::cout);
        out.precision(NOMAD::DISPLAY_PRECISION_STD);
        try {
            // Set up NOMAD.
            NOMAD::begin(0, nullptr);
            NOMAD::Parameters p(out);
            std::vector<NOMAD::bb_output_type> bbot(3);
            bbot[0] = NOMAD::OBJ; // objective function
            bbot[1] = NOMAD::PB;  // sum over alpha > 0
            bbot[2] = NOMAD::PB;  // sum over lambda > 0
            p.set_BB_OUTPUT_TYPE(bbot);
            p.set_DISPLAY_STATS("bbe ( sol ) obj");
            p.set_DIMENSION(3 + num_layers_);
            p.set_LOWER_BOUND(NOMAD::Point(3 + num_layers_, 0.0));
            p.set_UPPER_BOUND(NOMAD::Point(3 + num_layers_, 1.0));
            p.set_MAX_BB_EVAL(num_evals_);
            p.set_DISPLAY_DEGREE("NO_DISPLAY");

            // Set up the evaluator and Mads.
            p.set_X0(x0s.at(i));
            p.check();
            Evaluator_ ev(p, this);
            NOMAD::Mads mads(p, &ev);

            // Run Mads.
            mads.run();
#ifdef _OPENMP
#pragma omp critical
#endif
            {
                solutions.push_back(*mads.get_best_feasible());
                objectives.push_back(mads.get_best_feasible()->get_f());
            }
        }
        catch (NOMAD::Exception & e) {
            throw Error(std::string("NOMAD error: ") + e.what());
        }
        NOMAD::Slave::stop_slaves(out);
        NOMAD::end();
#ifdef _OPENMP
#pragma omp critical
#endif
        {
            progress_bar.increment();
            std::cout << "\rNOMAD: " << progress_bar << std::flush;
        }
    }
    std::cout << "\n";

    // Select the best solution across all NOMAD runs.
    NOMAD::Double best_objective(objectives.at(0));
    std::size_t pos_best_solution{0};
    for (std::size_t pos{1}; pos < objectives.size(); pos++) {
        if (objectives.at(pos) < best_objective) {
            best_objective = objectives.at(pos);
            pos_best_solution = pos;
        }
    }
    NOMAD::Point best_solution(solutions.at(pos_best_solution));

    // Write the best parameters into alpha_ and lambda_ and, if requested, save them to the output file.
    nomad_point_to_params_(best_solution, alpha_, lambda_);
    normalize_params_();

    if (outfile_ != "") {
        write_params_to_file_();
    }
}

template<class UserNodeLabel, class UserEdgeLabel>
std::string
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_valid_options_string_() const {
    return "[--led-method <arg>] [--sort-method <arg>] [--load <arg>] [--save <arg>] [--init-evaluations <arg>] [--init-initial-solutions <arg>] [--init-mu <arg>]";
}

template<class UserNodeLabel, class UserEdgeLabel>
bool
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_parse_option_(const std::string & option, const std::string & arg) {
    if (option == "led-method") {
        if (arg == "LSAPE_OPTIMAL") {
            led_method_ = LSAPE_OPTIMAL;
        }
        else if (arg == "LSAPE_GREEDY") {
            led_method_ = LSAPE_GREEDY;
        }
        else if (arg == "GAMMA") {
            led_method_ = GAMMA;
        }
        else {
            throw Error(std::string("Invalid argument \"") + arg + "\" for option led-method. Usage: options = \"[--led-method LSAPE_OPTIMAL|LSAPE_GREEDY|GAMMA] [...]\"");
        }
        return true;
    }
    else if (option == "sort-method") {
        if (arg == "COUNTING") {
            sort_method_ = COUNTING;
        }
        else if (arg == "STD") {
            sort_method_ = STD;
        }
        else {
            throw Error(std::string("Invalid argument \"") + arg + "\" for option sort-method. Usage: options = \"[--sort-method COUNTING|STD] [...]\"");
        }
        return true;
    }
    else if (option == "load") {
        infile_ = arg;
        return true;
    }
    else if (option == "save") {
        outfile_ = arg;
        return true;
    }
    else if (option == "init-evaluations") {
        try {
            num_evals_ = std::stoi(arg);
        }
        catch (...) {
            throw Error(std::string("Invalid argument \"") + arg + "\" for option init-evaluations. Usage: options = \"[--init-evaluations <convertible to int greater than 0>] [...]\"");
        }
        if (num_evals_ <= 0) {
            throw Error(std::string("Invalid argument \"") + arg + "\" for option init-evaluations. Usage: options = \"[--init-evaluations <convertible to int greater than 0>] [...]\"");
        }
        return true;
    }
    else if (option == "init-initial-solutions") {
        try {
            num_x0s_ = std::stoi(arg);
        }
        catch (...) {
            throw Error(std::string("Invalid argument \"") + arg + "\" for option init-initial-solutions. Usage: options = \"[--init-initial-solutions <convertible to int greater than 0>] [...]\"");
        }
        if (num_x0s_ <= 0) {
            throw Error(std::string("Invalid argument \"") + arg + "\" for option init-initial-solutions. Usage: options = \"[--init-initial-solutions <convertible to int greater than 0>] [...]\"");
        }
        return true;
    }
    else if (option == "init-mu") {
        try {
            mu_ = std::stod(arg);
        }
        catch (...) {
            throw Error(std::string("Invalid argument \"") + arg + "\" for option init-mu. Usage: options = \"[--init-mu <convertible to double between 0 and 1>] [...]\"");
        }
        if (mu_ < 0 or mu_ > 1) {
            throw Error(std::string("Invalid argument \"") + arg + "\" for option init-mu. Usage: options = \"[--init-mu <convertible to double between 0 and 1>] [...]\"");
        }
        return true;
    }
    return false;
}

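// lsape_populate_instance_() fills the LSAPE master problem: cell (i, k) holds the ring
// distance between the ring rooted at node i of g and the ring rooted at node k of h,
// computed with the learned weights alpha_ and lambda_. Rows are processed in parallel
// if OpenMP is available.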
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_populate_instance_(const GEDGraph & g, const GEDGraph & h, DMatrix & master_problem) {
    const NodeRingMap_ & rings_g = rings_.at(g.id());
    const NodeRingMap_ & rings_h = rings_.at(h.id());
#ifdef _OPENMP
    omp_set_num_threads(this->num_threads_ - 1);
#pragma omp parallel for if(this->num_threads_ > 1)
#endif
    for (std::size_t row_in_master = 0; row_in_master < master_problem.num_rows(); row_in_master++) {
        for (std::size_t col_in_master = 0; col_in_master < master_problem.num_cols(); col_in_master++) {
            master_problem(row_in_master, col_in_master) = compute_ring_distance_(g, h, rings_g, rings_h, alpha_, lambda_, row_in_master, col_in_master);
        }
    }
}

// === Definition of private class Ring<UserNodeLabel, UserEdgeLabel>::Layer_. ===
template<class UserNodeLabel, class UserEdgeLabel>
Ring<UserNodeLabel, UserEdgeLabel>::
Layer_ ::
Layer_(std::size_t level) :
level{level},
node_labels(),
inner_edge_labels(),
outer_edge_labels() {}

// === Definition of private class Ring<UserNodeLabel, UserEdgeLabel>::Ring_. ===
template<class UserNodeLabel, class UserEdgeLabel>
Ring<UserNodeLabel, UserEdgeLabel>::
Ring_ ::
Ring_() :
layers() {}

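// Evaluator_ is the glue between NOMAD and Ring: NOMAD calls eval_x(), which forwards
// the candidate point to Ring::eval_x_() to set the blackbox outputs (the objective
// value and the two progressive-barrier constraints).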
// === Definition of private class Ring<UserNodeLabel, UserEdgeLabel>::Evaluator_. ===
template<class UserNodeLabel, class UserEdgeLabel>
Ring<UserNodeLabel, UserEdgeLabel>::
Evaluator_ ::
Evaluator_(const NOMAD::Parameters & param, Ring<UserNodeLabel, UserEdgeLabel> * ring) :
NOMAD::Evaluator(param),
ring_{ring} {}

template<class UserNodeLabel, class UserEdgeLabel>
Ring<UserNodeLabel, UserEdgeLabel>::
Evaluator_ ::
~Evaluator_() {}

template<class UserNodeLabel, class UserEdgeLabel>
bool
Ring<UserNodeLabel, UserEdgeLabel>::
Evaluator_ ::
eval_x(NOMAD::Eval_Point & x, const NOMAD::Double & h_max, bool & count_eval) const {
    count_eval = true;
    ring_->eval_x_(x);
    return true;
}

// === Definitions of helper member functions. ===
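// nomad_point_to_params_() unpacks a NOMAD point: the first three coordinates become
// alpha (weights of node, inner edge, and outer edge costs), the remaining coordinates
// become lambda (layer weights), truncated after the last strictly positive entry.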
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
nomad_point_to_params_(const NOMAD::Point & x, std::vector<double> & alpha, std::vector<double> & lambda) const {
    alpha.clear();
    for (std::size_t i{0}; i < 3; i++) {
        alpha.push_back(x.get_coord(i).value());
    }
    std::size_t max_non_zero_level{0};
    for (std::size_t i{0}; i < num_layers_; i++) {
        if (x.get_coord(i + 3).value() > 0) {
            max_non_zero_level = i;
        }
    }
    lambda.clear();
    for (std::size_t i{0}; i <= max_non_zero_level; i++) {
        lambda.push_back(x.get_coord(i + 3).value());
    }
}

template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
normalize_params_() {
    double sum_alpha{0.0};
    for (auto alpha = alpha_.begin(); alpha != alpha_.end(); alpha++) {
        sum_alpha += *alpha;
    }
    for (auto alpha = alpha_.begin(); alpha != alpha_.end(); alpha++) {
        *alpha /= sum_alpha;
    }
    double sum_lambda{0.0};
    for (auto lambda = lambda_.begin(); lambda != lambda_.end(); lambda++) {
        sum_lambda += *lambda;
    }
    for (auto lambda = lambda_.begin(); lambda != lambda_.end(); lambda++) {
        *lambda /= sum_lambda;
    }
}

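// eval_x_() is the blackbox evaluated by NOMAD: for a candidate (alpha, lambda), it
// solves an LSAPE instance for every ordered pair of graphs (using a shuffled copy when
// a graph is compared with itself), averages the induced edit costs, and, if mu_ < 1,
// scales the average by a factor that penalizes using many layers. The two constraint
// outputs require the sums of alpha and of the positive lambda entries to be at least
// epsilon.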
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
eval_x_(NOMAD::Eval_Point & x) const {
    std::vector<double> alpha;
    std::vector<double> lambda;
    nomad_point_to_params_(x, alpha, lambda);
    double val{0.0};
    LSAPESolver lsape_solver;
    lsape_solver.set_model(this->lsape_model_);
    for (auto g = this->ged_data_.begin(); g != this->ged_data_.end(); g++) {
        if (this->ged_data_.is_shuffled_graph_copy(g->id())) {
            continue;
        }
        for (auto h = this->ged_data_.begin(); h != this->ged_data_.end(); h++) {
            if (this->ged_data_.is_shuffled_graph_copy(h->id())) {
                continue;
            }
            NodeMap matching(g->num_nodes(), h->num_nodes());
            DMatrix lsape_instance(g->num_nodes() + 1, h->num_nodes() + 1, 0.0);
            lsape_solver.set_problem(&lsape_instance);
            if (this->ged_data_.shuffled_graph_copies_available() and (g->id() == h->id())) {
                GEDGraph::GraphID id_shuffled_graph_copy{this->ged_data_.id_shuffled_graph_copy(h->id())};
                populate_instance_with_params_(*g, this->ged_data_.graph(id_shuffled_graph_copy), alpha, lambda, lsape_instance);
                lsape_solver.solve();
                util::construct_node_map_from_solver(lsape_solver, matching);
                this->ged_data_.compute_induced_cost(*g, this->ged_data_.graph(id_shuffled_graph_copy), matching);
            }
            else {
                populate_instance_with_params_(*g, *h, alpha, lambda, lsape_instance);
                lsape_solver.solve();
                util::construct_node_map_from_solver(lsape_solver, matching);
                this->ged_data_.compute_induced_cost(*g, *h, matching);
            }
            val += matching.induced_cost();
        }
    }
    val /= static_cast<double>(this->ged_data_.num_graphs() * this->ged_data_.num_graphs());
    std::size_t supp_lambda{0};
    double sum_lambda{0.0};
    for (auto lambda_l = lambda.begin(); lambda_l != lambda.end(); lambda_l++) {
        if (*lambda_l > 0) {
            supp_lambda++;
            sum_lambda += *lambda_l;
        }
    }
    if (num_layers_ > 1 and mu_ < 1) {
        val *= (mu_ + ((1 - mu_) * static_cast<double>(supp_lambda - 1) / static_cast<double>(num_layers_ - 1)));
    }
    double sum_alpha{0.0};
    for (auto alpha_i = alpha.begin(); alpha_i != alpha.end(); alpha_i++) {
        sum_alpha += *alpha_i;
    }
    double epsilon{0.00001};
    x.set_bb_output(0, val);
    x.set_bb_output(1, epsilon - sum_alpha);
    x.set_bb_output(2, epsilon - sum_lambda);
}

template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
build_rings_(const GEDGraph & graph) {
    rings_[graph.id()] = NodeRingMap_();
    for (auto node = graph.nodes().first; node != graph.nodes().second; node++) {
        build_ring_(graph, *node, rings_.at(graph.id()));
    }
}

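// build_ring_() grows the ring rooted at a node via breadth-first search: the nodes at
// BFS distance l from the root form layer l. Each layer stores the node labels, the
// labels of edges joining two nodes of the layer (inner edges), and the labels of edges
// leading towards the next layer (outer edges). Completed layers are sorted with
// counting sort or std::sort, depending on --sort-method; gamma_multiset_cost_() relies
// on this order to compute multiset intersections by merging.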
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
build_ring_(const GEDGraph & graph, GEDGraph::NodeID root, NodeRingMap_ & rings) {
    std::map<GEDGraph::NodeID, int> distance_to_root;
    for (auto node = graph.nodes().first; node != graph.nodes().second; node++) {
        distance_to_root[*node] = -1;
    }
    distance_to_root[root] = 0;

    std::map<GEDGraph::EdgeID, bool> discovered_edge;
    for (auto edge = graph.edges().first; edge != graph.edges().second; edge++) {
        discovered_edge[*edge] = false;
    }

    Layer_ current_layer(0);
    std::queue<GEDGraph::NodeID> queue;
    queue.push(root);
    while (not queue.empty()) {
        GEDGraph::NodeID current_node{queue.front()};
        queue.pop();
        if (static_cast<int>(current_layer.level) < distance_to_root.at(current_node)) {
            if (sort_method_ == COUNTING) {
                util::counting_sort(current_layer.node_labels.begin(), current_layer.node_labels.end());
                util::counting_sort(current_layer.inner_edge_labels.begin(), current_layer.inner_edge_labels.end());
                util::counting_sort(current_layer.outer_edge_labels.begin(), current_layer.outer_edge_labels.end());
            }
            else {
                std::sort(current_layer.node_labels.begin(), current_layer.node_labels.end());
                std::sort(current_layer.inner_edge_labels.begin(), current_layer.inner_edge_labels.end());
                std::sort(current_layer.outer_edge_labels.begin(), current_layer.outer_edge_labels.end());
            }
            rings[root].layers.push_back(current_layer);
            current_layer = Layer_(current_layer.level + 1);
        }
        current_layer.node_labels.push_back(graph.get_node_label(current_node));
        for (auto edge = graph.incident_edges(current_node).first; edge != graph.incident_edges(current_node).second; edge++) {
            GEDGraph::NodeID next_node{graph.head(*edge)};
            if (distance_to_root.at(next_node) == -1) {
                distance_to_root[next_node] = current_layer.level + 1;
                if (current_layer.level < num_layers_) {
                    queue.push(next_node);
                }
            }
            if (not discovered_edge.at(*edge)) {
                discovered_edge[*edge] = true;
                if (distance_to_root.at(current_node) == distance_to_root.at(next_node)) {
                    current_layer.inner_edge_labels.push_back(graph.get_edge_label(*edge));
                }
                else if (distance_to_root.at(current_node) < distance_to_root.at(next_node)) {
                    current_layer.outer_edge_labels.push_back(graph.get_edge_label(*edge));
                }
                else {
                    throw Error(std::string("Error when building ring rooted at ") + std::to_string(root) +
                            " for graph " + std::to_string(graph.id()) + ": dist(" +
                            std::to_string(current_node) + ") = " + std::to_string(distance_to_root.at(current_node)) +
                            " > dist(" + std::to_string(next_node) + ") = " + std::to_string(distance_to_root.at(next_node)));
                }
            }
        }
    }
    if (led_method_ == GAMMA) {
        if (sort_method_ == COUNTING) {
            util::counting_sort(current_layer.node_labels.begin(), current_layer.node_labels.end());
            util::counting_sort(current_layer.inner_edge_labels.begin(), current_layer.inner_edge_labels.end());
            util::counting_sort(current_layer.outer_edge_labels.begin(), current_layer.outer_edge_labels.end());
        }
        else {
            std::sort(current_layer.node_labels.begin(), current_layer.node_labels.end());
            std::sort(current_layer.inner_edge_labels.begin(), current_layer.inner_edge_labels.end());
            std::sort(current_layer.outer_edge_labels.begin(), current_layer.outer_edge_labels.end());
        }
    }
    rings[root].layers.push_back(current_layer);
}

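// set_num_layers_() caps num_layers_ at the largest number of layers observed in any
// ring built so far; load_config_file_() reports whether the weights are read from a
// configuration file instead of being trained.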
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
set_num_layers_() {
    std::size_t max_num_layers{0};
    for (auto graph_rings_pair = rings_.begin(); graph_rings_pair != rings_.end(); graph_rings_pair++) {
        for (auto ring = graph_rings_pair->second.begin(); ring != graph_rings_pair->second.end(); ring++) {
            max_num_layers = std::max(max_num_layers, ring->second.layers.size());
        }
    }
    num_layers_ = std::min(num_layers_, max_num_layers);
}

template<class UserNodeLabel, class UserEdgeLabel>
bool
Ring<UserNodeLabel, UserEdgeLabel>::
load_config_file_() const {
    return (infile_ != "");
}

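// init_x0s_() draws num_x0s_ pairwise distinct starting points for NOMAD. For each
// point, alpha is sampled uniformly from the 3-simplex and lambda from the
// num_layers_-simplex by taking the gaps between sorted uniform variates, so all
// entries are nonnegative and sum to one; duplicate points are rejected.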
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
init_x0s_(std::vector<NOMAD::Point> & x0s) const {
    std::random_device rd;
    std::mt19937 rng(rd());
    std::uniform_real_distribution<double> uni(0.0, 1.0);
    uni(rng);
    NOMAD::Point x0(3 + num_layers_, 0.0);
    bool found_new_x0{true};
    std::vector<double> alpha_gen;
    std::vector<double> lambda_gen;
    while (x0s.size() < num_x0s_) {
        alpha_gen.clear();
        alpha_gen.push_back(0.0);
        alpha_gen.push_back(1.0);
        for (std::size_t i{0}; i < 2; i++) {
            alpha_gen.push_back(uni(rng));
        }
        std::sort(alpha_gen.begin(), alpha_gen.end());
        for (std::size_t i{0}; i < 3; i++) {
            x0[i] = alpha_gen.at(i + 1) - alpha_gen.at(i);
        }
        lambda_gen.clear();
        lambda_gen.push_back(0.0);
        lambda_gen.push_back(1.0);
        for (std::size_t level{0}; level < num_layers_ - 1; level++) {
            lambda_gen.push_back(uni(rng));
        }
        std::sort(lambda_gen.begin(), lambda_gen.end());
        for (std::size_t level{0}; level < num_layers_; level++) {
            x0[level + 3] = lambda_gen.at(level + 1) - lambda_gen.at(level);
        }
        for (auto old_x0 = x0s.begin(); old_x0 != x0s.end(); old_x0++) {
            if (*old_x0 == x0) {
                found_new_x0 = false;
                break;
            }
        }
        if (found_new_x0) {
            x0s.push_back(x0);
        }
        found_new_x0 = true;
    }
}

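// compute_ring_distance_() returns one cell of the LSAPE instance: a substitution cost
// if row and column both address real nodes, a deletion cost if only the row does, and
// an insertion cost if only the column does. Each cost is the lambda-weighted sum of
// layer-wise distances; a missing layer is compared against an empty layer.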
template<class UserNodeLabel, class UserEdgeLabel>
double
Ring<UserNodeLabel, UserEdgeLabel>::
compute_ring_distance_(const GEDGraph & g, const GEDGraph & h, const NodeRingMap_ & rings_g, const NodeRingMap_ & rings_h,
        const std::vector<double> & alpha, const std::vector<double> & lambda, std::size_t row_in_master, std::size_t col_in_master) const {
    double red{0.0};
    if ((row_in_master < g.num_nodes()) and (col_in_master < h.num_nodes())) { // compute substitution cost
        const Ring_ & ring_i = rings_g.at(row_in_master);
        const Ring_ & ring_k = rings_h.at(col_in_master);
        for (std::size_t level{0}; level < lambda.size(); level++) {
            red += lambda.at(level) * compute_substitution_cost_(ring_i, ring_k, alpha, level);
        }
    }
    else if (row_in_master < g.num_nodes()) { // compute deletion cost
        const Ring_ & ring_i = rings_g.at(row_in_master);
        for (std::size_t level{0}; level < lambda.size(); level++) {
            red += lambda.at(level) * compute_deletion_cost_(ring_i, alpha, level);
        }
    }
    else if (col_in_master < h.num_nodes()) { // compute insertion cost
        const Ring_ & ring_k = rings_h.at(col_in_master);
        for (std::size_t level{0}; level < lambda.size(); level++) {
            red += lambda.at(level) * compute_insertion_cost_(ring_k, alpha, level);
        }
    }
    return red;
}

template<class UserNodeLabel, class UserEdgeLabel>
double
Ring<UserNodeLabel, UserEdgeLabel>::
compute_substitution_cost_(const Ring_ & ring_i, const Ring_ & ring_k, const std::vector<double> & alpha, std::size_t level) const {
    if ((ring_i.layers.size() > level) and (ring_k.layers.size() > level)) {
        return compute_layer_distance_(ring_i.layers.at(level), ring_k.layers.at(level), alpha);
    }
    if (ring_i.layers.size() > level) {
        return compute_layer_distance_(ring_i.layers.at(level), Layer_(0), alpha);
    }
    if (ring_k.layers.size() > level) {
        return compute_layer_distance_(Layer_(0), ring_k.layers.at(level), alpha);
    }
    return 0.0;
}

template<class UserNodeLabel, class UserEdgeLabel>
double
Ring<UserNodeLabel, UserEdgeLabel>::
compute_deletion_cost_(const Ring_ & ring, const std::vector<double> & alpha, std::size_t level) const {
    if (ring.layers.size() > level) {
        return compute_layer_distance_(ring.layers.at(level), Layer_(0), alpha);
    }
    return 0.0;
}

template<class UserNodeLabel, class UserEdgeLabel>
double
Ring<UserNodeLabel, UserEdgeLabel>::
compute_insertion_cost_(const Ring_ & ring, const std::vector<double> & alpha, std::size_t level) const {
    if (ring.layers.size() > level) {
        return compute_layer_distance_(Layer_(0), ring.layers.at(level), alpha);
    }
    return 0.0;
}

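// compute_layer_distance_() compares two layers as the alpha-weighted sum of three
// multiset distances (node labels, inner edge labels, outer edge labels). Each multiset
// distance is computed with LSAPE or with the GAMMA heuristic, depending on
// --led-method, and is normalized by the size of the larger multiset.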
template<class UserNodeLabel, class UserEdgeLabel>
double
Ring<UserNodeLabel, UserEdgeLabel>::
compute_layer_distance_(const Layer_ & lhs, const Layer_ & rhs, const std::vector<double> & alpha) const {
    double node_cost{0.0};
    double inner_edge_cost{0.0};
    double outer_edge_cost{0.0};

    switch (led_method_) {
    case GAMMA:
        node_cost = gamma_multiset_cost_(lhs.node_labels, rhs.node_labels, true);
        inner_edge_cost = gamma_multiset_cost_(lhs.inner_edge_labels, rhs.inner_edge_labels, false);
        outer_edge_cost = gamma_multiset_cost_(lhs.outer_edge_labels, rhs.outer_edge_labels, false);
        break;
    default:
        node_cost = lsape_multiset_cost_(lhs.node_labels, rhs.node_labels, true);
        inner_edge_cost = lsape_multiset_cost_(lhs.inner_edge_labels, rhs.inner_edge_labels, false);
        outer_edge_cost = lsape_multiset_cost_(lhs.outer_edge_labels, rhs.outer_edge_labels, false);
        break;
    }

    std::size_t max_num_node_labels{std::max(lhs.node_labels.size(), rhs.node_labels.size())};
    if (max_num_node_labels > 0) {
        node_cost /= static_cast<double>(max_num_node_labels);
    }
    std::size_t max_num_inner_edge_labels{std::max(lhs.inner_edge_labels.size(), rhs.inner_edge_labels.size())};
    if (max_num_inner_edge_labels > 0) {
        inner_edge_cost /= static_cast<double>(max_num_inner_edge_labels);
    }
    std::size_t max_num_outer_edge_labels{std::max(lhs.outer_edge_labels.size(), rhs.outer_edge_labels.size())};
    if (max_num_outer_edge_labels > 0) {
        outer_edge_cost /= static_cast<double>(max_num_outer_edge_labels);
    }

    return alpha.at(0) * node_cost + alpha.at(1) * inner_edge_cost + alpha.at(2) * outer_edge_cost;
}

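// lsape_multiset_cost_() measures the distance between two label multisets by setting
// up a small LSAPE instance from the substitution, deletion, and insertion costs of the
// underlying node or edge labels and solving it optimally (LSAPE_OPTIMAL) or greedily
// (LSAPE_GREEDY). If one multiset is empty, the cost reduces to the sum of the deletion
// or insertion costs.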
template<class UserNodeLabel, class UserEdgeLabel>
double
Ring<UserNodeLabel, UserEdgeLabel>::
lsape_multiset_cost_(const std::vector<LabelID> & lhs, const std::vector<LabelID> & rhs, bool node_labels) const {

    if ((lhs.size() == 0) and (rhs.size() == 0)) {
        return 0.0;
    }

    if ((lhs.size() > 0) and (rhs.size() == 0)) {
        double cost{0.0};
        for (std::size_t row{0}; row < lhs.size(); row++) {
            if (node_labels) {
                cost += this->ged_data_.node_cost(lhs.at(row), dummy_label());
            }
            else {
                cost += this->ged_data_.edge_cost(lhs.at(row), dummy_label());
            }
        }
        return cost;
    }

    if ((lhs.size() == 0) and (rhs.size() > 0)) {
        double cost{0.0};
        for (std::size_t col{0}; col < rhs.size(); col++) {
            if (node_labels) {
                cost += this->ged_data_.node_cost(dummy_label(), rhs.at(col));
            }
            else {
                cost += this->ged_data_.edge_cost(dummy_label(), rhs.at(col));
            }
        }
        return cost;
    }

    DMatrix problem(lhs.size() + 1, rhs.size() + 1, 0.0);
    // Collect deletion costs.
    for (std::size_t row{0}; row < lhs.size(); row++) {
        if (node_labels) {
            problem(row, rhs.size()) = this->ged_data_.node_cost(lhs.at(row), dummy_label());
        }
        else {
            problem(row, rhs.size()) = this->ged_data_.edge_cost(lhs.at(row), dummy_label());
        }
    }

    // Collect insertion costs.
    for (std::size_t col{0}; col < rhs.size(); col++) {
        if (node_labels) {
            problem(lhs.size(), col) = this->ged_data_.node_cost(dummy_label(), rhs.at(col));
        }
        else {
            problem(lhs.size(), col) = this->ged_data_.edge_cost(dummy_label(), rhs.at(col));
        }
    }

    // Collect substitution costs.
    for (std::size_t row{0}; row < lhs.size(); row++) {
        for (std::size_t col{0}; col < rhs.size(); col++) {
            if (node_labels) {
                problem(row, col) = this->ged_data_.node_cost(lhs.at(row), rhs.at(col));
            }
            else {
                problem(row, col) = this->ged_data_.edge_cost(lhs.at(row), rhs.at(col));
            }
        }
    }

    LSAPESolver problem_solver(&problem);
    if (led_method_ == LSAPE_OPTIMAL) {
        problem_solver.set_model(this->lsape_model_);
    }
    else {
        problem_solver.set_greedy_method(this->greedy_method_);
    }
    problem_solver.solve();

    return problem_solver.minimal_cost();
}

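// gamma_multiset_cost_() is the cheaper estimate used with --led-method GAMMA: the size
// difference between the multisets is charged at the average insertion or deletion
// cost, and the remaining mismatches (minimum size minus the size of the multiset
// intersection) are charged at the average cost of relabelling between distinct labels.
// Both input vectors must be sorted.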
template<class UserNodeLabel, class UserEdgeLabel>
double
Ring<UserNodeLabel, UserEdgeLabel>::
gamma_multiset_cost_(const std::vector<LabelID> & lhs, const std::vector<LabelID> & rhs, bool node_labels) const {
    double cost{0.0};

    // Compute and add minimal label insertion costs.
    if (lhs.size() < rhs.size()) {
        double avg_ins_cost{0.0};
        for (auto label_rhs = rhs.begin(); label_rhs != rhs.end(); label_rhs++) {
            if (node_labels) {
                avg_ins_cost += this->ged_data_.node_cost(dummy_label(), *label_rhs);
            }
            else {
                avg_ins_cost += this->ged_data_.edge_cost(dummy_label(), *label_rhs);
            }
        }
        avg_ins_cost /= static_cast<double>(rhs.size());
        cost += static_cast<double>(rhs.size() - lhs.size()) * avg_ins_cost;
    }

    // Compute and add minimal label deletion costs.
    if (lhs.size() > rhs.size()) {
        double avg_del_cost{0.0};
        for (auto label_lhs = lhs.begin(); label_lhs != lhs.end(); label_lhs++) {
            if (node_labels) {
                avg_del_cost += this->ged_data_.node_cost(*label_lhs, dummy_label());
            }
            else {
                avg_del_cost += this->ged_data_.edge_cost(*label_lhs, dummy_label());
            }
        }
        avg_del_cost /= static_cast<double>(lhs.size());
        cost += static_cast<double>(lhs.size() - rhs.size()) * avg_del_cost;
    }

    // Compute the average relabelling cost between distinct labels.
    double avg_rel_cost{0.0};
    std::size_t count_diff_labels{0};
    for (auto label_lhs = lhs.begin(); label_lhs != lhs.end(); label_lhs++) {
        for (auto label_rhs = rhs.begin(); label_rhs != rhs.end(); label_rhs++) {
            if (*label_lhs != *label_rhs) {
                count_diff_labels++;
                if (node_labels) {
                    avg_rel_cost += this->ged_data_.node_cost(*label_lhs, *label_rhs);
                }
                else {
                    avg_rel_cost += this->ged_data_.edge_cost(*label_lhs, *label_rhs);
                }
            }
        }
    }
    avg_rel_cost /= static_cast<double>(count_diff_labels);

    // Compute the multiset intersection size (both vectors are sorted).
    std::size_t intersection_size{0};
    auto label_lhs = lhs.begin();
    auto label_rhs = rhs.begin();
    while ((label_lhs != lhs.end()) and (label_rhs != rhs.end())) {
        if (*label_lhs == *label_rhs) {
            intersection_size++;
            label_lhs++;
            label_rhs++;
        }
        else if (*label_lhs < *label_rhs) {
            label_lhs++;
        }
        else {
            label_rhs++;
        }
    }

    std::size_t gamma(std::min(lhs.size(), rhs.size()) - intersection_size);
    if (gamma > 0) {
        cost += static_cast<double>(gamma) * avg_rel_cost;
    }

    return cost;
}

template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
populate_instance_with_params_(const GEDGraph & g, const GEDGraph & h, const std::vector<double> & alpha, const std::vector<double> & lambda, DMatrix & lsape_instance) const {
    const NodeRingMap_ & rings_g = rings_.at(g.id());
    const NodeRingMap_ & rings_h = rings_.at(h.id());

    for (std::size_t row_in_master = 0; row_in_master < lsape_instance.num_rows(); row_in_master++) {
        for (std::size_t col_in_master = 0; col_in_master < lsape_instance.num_cols(); col_in_master++) {
            lsape_instance(row_in_master, col_in_master) = compute_ring_distance_(g, h, rings_g, rings_h, alpha, lambda, row_in_master, col_in_master);
        }
    }
}

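// write_params_to_file_() and read_params_from_file_() serialize the learned weights as
// a plain key-value configuration file (keys num_layers, alpha_0..alpha_2, and
// lambda_0, lambda_1, ...) via the helpers in ged::util, so that weights trained with
// --save can later be reused with --load.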
template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
write_params_to_file_() const {
    std::map<std::string, std::string> options;
    options["num_layers"] = std::to_string(lambda_.size());
    for (std::size_t i{0}; i < alpha_.size(); i++) {
        double val{alpha_.at(i)};
        options["alpha_" + std::to_string(i)] = std::to_string(val);
    }
    for (std::size_t i{0}; i < lambda_.size(); i++) {
        double val{lambda_.at(i)};
        options["lambda_" + std::to_string(i)] = std::to_string(val);
    }
    util::save_as_config_file(outfile_, options);
}

template<class UserNodeLabel, class UserEdgeLabel>
void
Ring<UserNodeLabel, UserEdgeLabel>::
read_params_from_file_() {
    std::map<std::string, std::string> options;
    util::parse_config_file(infile_, options);
    num_layers_ = std::stoul(options.at("num_layers"));
    alpha_.clear();
    for (std::size_t i{0}; i < 3; i++) {
        alpha_.push_back(std::stod(options.at("alpha_" + std::to_string(i))));
    }
    lambda_.clear();
    for (std::size_t i{0}; i < num_layers_; i++) {
        lambda_.push_back(std::stod(options.at("lambda_" + std::to_string(i))));
    }
}

}

#endif /* SRC_METHODS_RING_IPP_ */