GraphLab: Distributed Graph-Parallel API  2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
dc_services.hpp
1 /*
2  * Copyright (c) 2009 Carnegie Mellon University.
3  * All rights reserved.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing,
12  * software distributed under the License is distributed on an "AS
13  * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
14  * express or implied. See the License for the specific language
15  * governing permissions and limitations under the License.
16  *
17  * For more about this software visit:
18  *
19  * http://www.graphlab.ml.cmu.edu
20  *
21  */
22 
23 
24 #include <graphlab/rpc/dc_dist_object.hpp>
25 #ifndef GRAPHLAB_DC_SERVICES_HPP
26 #define GRAPHLAB_DC_SERVICES_HPP
27 #include <graphlab/parallel/pthread_tools.hpp>
28 
29 
30 
31 #include <graphlab/macros_def.hpp>
32 namespace graphlab {
33 
34  /**
35  \internal
36  \ingroup rpc
 37  Creates a new context for MPI-like global operations.
38  Where all machines create an instance of dc_services at the same time,
39  operations performed by the new dc_services instance will not interfere
40  and will run in parallel with other contexts. i.e. If I have two
41  distributed dc_services instances, one instance can
42  perform a barrier while another instance performs a broadcast() at the same
43  time.
44  */
45  class dc_services {
46  private:
47  dc_dist_object<dc_services> rmi;
48 
49  public:
50  dc_services(distributed_control &dc):rmi(dc, this) { }
51 
52  /// Returns the underlying dc_dist_object
53  dc_dist_object<dc_services>& rmi_instance() {
54  return rmi;
55  }
56 
57  /// Returns the underlying dc_dist_object
58  const dc_dist_object<dc_services>& rmi_instance() const {
59  return rmi;
60  }
61 
62  /**
63  \copydoc distributed_control::send_to()
64  */
65  template <typename U>
66  inline void send_to(procid_t target, U& t, bool control = false) {
67  rmi.send_to(target, t, control);
68  }
69 
70  /**
71  \copydoc distributed_control::recv_from()
72  */
73  template <typename U>
74  inline void recv_from(procid_t source, U& t, bool control = false) {
75  rmi.recv_from(source, t, control);
76  }
77 
78  /**
79  \copydoc distributed_control::broadcast()
80  */
81  template <typename U>
82  inline void broadcast(U& data, bool originator, bool control = false) {
83  rmi.broadcast(data, originator, control);
84  }
85 
86  /**
87  \copydoc distributed_control::gather()
88  */
89  template <typename U>
90  inline void gather(std::vector<U>& data, procid_t sendto, bool control = false) {
91  rmi.gather(data, sendto, control);
92  }
93 
94  /**
95  \copydoc distributed_control::all_gather()
96  */
97  template <typename U>
98  inline void all_gather(std::vector<U>& data, bool control = false) {
99  rmi.all_gather(data, control);
100  }
101 
102  /**
103  \copydoc distributed_control::all_reduce()
104  */
105  template <typename U>
106  inline void all_reduce(U& data, bool control = false) {
107  rmi.all_reduce(data, control);
108  }
109 
110  /// \copydoc distributed_control::all_reduce2()
111  template <typename U, typename PlusEqual>
112  void all_reduce2(U& data, PlusEqual plusequal, bool control = false) {
113  rmi.all_reduce2(data, plusequal, control);
114  }
115 
116  /// \copydoc distributed_control::barrier()
117  inline void barrier() {
118  rmi.barrier();
119  }
120 
121 
122  /// \copydoc distributed_control::full_barrier()
123  inline void full_barrier() {
124  rmi.full_barrier();
125  }
126 
127 
128 
129  };
130 
131 
132 } // end of namespace graphlab
133 
134 
135 #include <graphlab/macros_undef.hpp>
136 #endif
137