// rpc_example3.cpp
/*
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
#include <iostream>
#include <string>
#include <map>
#include <graphlab/serialization/serialization_includes.hpp>
#include <graphlab/rpc/dc.hpp>
using namespace graphlab;
// Plain-old-data payload sent over RPC. Inheriting from graphlab's
// IS_POD_TYPE tag opts this struct into bitwise (memcpy-style)
// serialization, so no save/load methods are needed.
// NOTE(review): members are intentionally left uninitialized here —
// default map insertion (operator[]) yields indeterminate values.
struct teststruct: public IS_POD_TYPE{
int a;
double b;
};
void print(std::map<int, teststruct> &data1,
std::string data2) {
std::cout << "1.a = " << data1[1].a << std::endl;
std::cout << "10.b = " << data1[10].b << std::endl;
std::cout << "string = " << data2 << std::endl;
}
// Entry point: demonstrates an asynchronous remote_call carrying a
// std::map of POD structs plus a std::string from process 0 to process 1.
// Requires exactly 2 MPI processes; exits gracefully otherwise.
int main(int argc, char ** argv) {
  // init MPI
  mpi_tools::init(argc, argv);
  distributed_control dc;
  if (dc.numprocs() != 2) {
    std::cout<< "RPC Example 3: Asynchronous RPC with Struct POD Serialization\n";
    std::cout << "Run with exactly 2 MPI nodes.\n";
    return 0;
  }
  if (dc.procid() == 0) {
    std::map<int, teststruct> data;
    data[1].a = 10;
    // Bug fix: print() reads data1[10].b (and labels it "10.b"), but the
    // original populated data[2].b, leaving key 10 to be default-inserted
    // with an uninitialized b on the receiver — garbage output. Populate
    // key 10 so the printed value is the intended 15.0.
    data[10].b = 15.0;
    // Asynchronous RPC: serializes the arguments and returns immediately.
    dc.remote_call(1, print, data, "hello world!");
  }
  // Synchronize both processes so the call completes before shutdown.
  dc.barrier();
  mpi_tools::finalize();
}