26 #include <graphlab/serialization/serialization_includes.hpp>
27 #include <graphlab/parallel/pthread_tools.hpp>
28 #include <graphlab/rpc/dc.hpp>
29 #include <graphlab/rpc/dc_dist_object.hpp>
30 using namespace graphlab;
33 class distributed_vector {
// A vector of T sharded across all machines in the RPC group: element i
// lives on machine (i % numprocs). Local elements are served from the
// in-process map; remote elements are routed through rmi.remote_request.
// NOTE(review): this is a sampled fragment -- the constructor, the `rmi`
// member (presumably a dc_dist_object, per the dc_dist_object.hpp include),
// the get() signature, and the class's closing brace are on lines not
// visible here; code is left byte-identical and only comments are added.
// Backing store: holds only the elements this machine owns (keys where
// key % numprocs == this procid).
36 std::map<size_t, T> data;
// --- get(i) fragment: compute the owner, then dispatch local/remote ---
// Ownership rule: element i belongs to machine (i % number of processes).
44 procid_t owningmachine = i % rmi.dc().numprocs();
// Local read: we own the key, answer from the in-process map.
46 if (owningmachine == rmi.dc().procid()) {
// Remote read: blocking RPC that invokes get() on the owner's instance
// of this same distributed object and returns its result.
56 return rmi.remote_request(owningmachine,
57 &distributed_vector<T>::get,
// Store val as element i, forwarding to the owning machine if remote.
63 void set(
size_t i,
const T& val) {
// Same ownership rule as get(): owner is (i % numprocs).
65 procid_t owningmachine = i % rmi.dc().numprocs();
// Local write: we own the key, update the in-process map directly.
67 if (owningmachine == rmi.dc().procid()) {
// Remote write: remote_request (rather than a fire-and-forget call),
// so the caller presumably blocks until the owner applies the update --
// NOTE(review): inferred from the "request" name; confirm against the
// graphlab RPC documentation.
75 rmi.remote_request(owningmachine,
76 &distributed_vector<T>::set,
83 int main(
int argc,
char ** argv) {
// Bootstrap MPI; the GraphLab RPC layer below runs on top of it.
84 mpi_tools::init(argc, argv);
88 std::cout<<
"RPC Example 7: Distributed Object\n";
// The example assumes a fixed 2-process layout: the set() calls below
// are split between "machine 0" and "machine 1".
89 std::cout <<
"Run with exactly 2 MPI nodes.\n";
95 std::cout << i <<
"\n";
// Construct the distributed vector over the RPC group. NOTE(review):
// `dc` (presumably a distributed_control instance) is created on lines
// not visible in this fragment -- confirm against the full example.
97 distributed_vector<std::string> vec(dc);
// Writes issued from machine 0 (the guarding procid check is on a line
// missing from this fragment -- presumably `if (dc.procid() == 0)`).
100 vec.set(10,
"set from 0");
101 vec.set(11,
"set from 0");
// Writes issued from machine 1 (guard likewise not visible here).
104 vec.set(1,
"set from 1");
105 vec.set(2,
"set from 1");
// Read back all four elements; reads of remotely-owned keys travel
// through the RPC path exercised by distributed_vector::get.
109 std::cout << vec.get(1) <<
"\n";
110 std::cout << vec.get(2) <<
"\n";
111 std::cout << vec.get(10) <<
"\n";
112 std::cout << vec.get(11) << std::endl;
// Second read pass (the code between the two passes -- presumably a
// procid guard or barrier -- is on lines not visible here; confirm).
116 std::cout << vec.get(1) <<
"\n";
117 std::cout << vec.get(2) <<
"\n";
118 std::cout << vec.get(10) <<
"\n";
119 std::cout << vec.get(11) << std::endl;
// Tear down MPI before process exit.
123 mpi_tools::finalize();