GraphLab: Distributed Graph-Parallel API 2.1
rpc_example9.cpp
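A short RPC example: process 0 issues pod_call()s to explicit subsets of processes, then every process serializes its own process ID into an archive with split_call_begin() / split_call_end() and ships the raw bytes to process 1, which receives them in test_blob(). The program expects to be launched with exactly 4 MPI processes; see the run note after the listing.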
/*
 * Copyright (c) 2009 Carnegie Mellon University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS
 * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 *
 * For more about this software visit:
 *
 * http://www.graphlab.ml.cmu.edu
 *
 */

#include <vector>
#include <iostream>
#include <cassert>                                // for assert() in test_blob()
#include <graphlab/rpc/dc.hpp>
#include <graphlab/serialization/iarchive.hpp>
#include <graphlab/serialization/oarchive.hpp>    // split_call_begin() returns an oarchive*
using namespace graphlab;


struct test_struct {
  // RMI object giving this instance a distributed identity so its member
  // functions can be invoked remotely. (This declaration was elided from
  // the listing; it is inferred from the constructor's rmi(dc, this).)
  dc_dist_object<test_struct> rmi;

  test_struct(distributed_control &dc):rmi(dc, this) {
    dc.barrier();
  }

  // Target of the split call: receives the raw serialized bytes.
  void test_blob(size_t len, wild_pointer w) {
    assert(len == sizeof(procid_t));
    std::cout << "split call from : "
              << *reinterpret_cast<const procid_t*>(w.ptr) << "\n";
  }

  // Target of the pod_call()s issued from process 0.
  void print(int val) {
    std::cout << rmi.procid() << ": Receiving print with value : " << val << std::endl;
  }

  void test() {
    // Process 0 calls print(1) on each process in the iterator range.
    if (rmi.procid() == 0) {
      std::cout << "First set of calls... Proc 1 and 3 should receive" << std::endl;
      std::vector<procid_t> s;
      s.push_back(1); s.push_back(3);
      rmi.pod_call(s.begin(), s.end(), &test_struct::print, 1);
    }
    rmi.full_barrier();

    if (rmi.procid() == 0) {
      std::cout << "Second set of calls... Proc 0 and 2 should receive" << std::endl;
      std::vector<procid_t> s;
      s.push_back(2); s.push_back(0);
      rmi.pod_call(s.begin(), s.end(), &test_struct::print, 1);
    }
    rmi.full_barrier();

    // Split call: every process serializes its own ID into an archive,
    // then ships the bytes to process 1, which receives them in test_blob().
    oarchive* oarc = rmi.split_call_begin(&test_struct::test_blob);
    (*oarc) << rmi.procid();
    rmi.split_call_end(1, oarc);
    rmi.full_barrier();
  }
};

int main(int argc, char ** argv) {
  mpi_tools::init(argc, argv);
  // Construct the distributed_control object. (This line was elided from
  // the listing; it is required since dc is used immediately below.)
  distributed_control dc;

  if (dc.numprocs() != 4) {
    std::cout << "RPC Example 9: Basic Broadcast Test\n";
    std::cout << "Run with exactly 4 MPI nodes.\n";
    return 0;
  }
  test_struct ts(dc);
  ts.test();

  // terminate MPI
  mpi_tools::finalize();
}
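
A typical invocation (binary name assumed from the file name) launches exactly four MPI processes:

  mpiexec -n 4 ./rpc_example9

Processes 1 and 3 should print the first message, processes 0 and 2 the second, and process 1 should print four "split call from" lines, one per process.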