-
Notifications
You must be signed in to change notification settings - Fork 8
/
Copy pathmargo-test-client.c
186 lines (155 loc) · 5.27 KB
/
margo-test-client.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/*
* (C) 2015 The University of Chicago
*
* See COPYRIGHT in top-level directory.
*/
#include <stdio.h>
#include <assert.h>
#include <unistd.h>
#include <stdlib.h>
#include <mercury.h>
#include <abt.h>
#include <margo.h>
#include "my-rpc.h"
/* This is an example client program that issues 4 concurrent RPCs, each of
* which includes a bulk transfer driven by the server.
*
* Each client operation executes as an independent ULT in Argobots.
* The HG forward call is executed using asynchronous operations.
*/
/* Per-ULT argument bundle: one element of the args[] array in main() is
 * handed to each ULT executing run_my_rpc().
 */
struct run_my_rpc_args {
int val;               /* ULT index; sent as the rpc input value and used in log messages */
margo_instance_id mid; /* margo instance used to create handles and forward the rpc */
hg_addr_t svr_addr;    /* resolved Mercury address of the target server */
};
/* ULT entry point: issues a single rpc (with a bulk transfer) to the server. */
static void run_my_rpc(void* _arg);
/* rpc id obtained from MARGO_REGISTER() in main(); read by run_my_rpc(). */
static hg_id_t my_rpc_id;
/* Client driver: starts margo in client mode, spawns 4 ULTs that each issue
 * one rpc to the server named by argv[1], waits for them to finish, asks the
 * server to shut down, and tears everything down.
 *
 * Returns 0 on success, -1 on any initialization or Argobots failure.
 */
int main(int argc, char** argv)
{
    struct run_my_rpc_args args[4];
    ABT_thread            threads[4];
    int                   i;
    int                   ret;
    hg_return_t           hret;
    ABT_xstream           xstream;
    ABT_pool              pool;
    margo_instance_id     mid;
    hg_addr_t             svr_addr = HG_ADDR_NULL;
    char                  proto[12] = {0};

    if (argc != 2) {
        fprintf(stderr, "Usage: ./client <server_addr>\n");
        return (-1);
    }

    /* initialize Mercury using the transport portion of the destination
     * address (i.e., the part before the first : character if present)
     */
    for (i = 0; i < 11 && argv[1][i] != '\0' && argv[1][i] != ':'; i++)
        proto[i] = argv[1][i];

    /* actually start margo -- margo_init() encapsulates the Mercury &
     * Argobots initialization, so this step must precede their use. */
    /* Use main process to drive progress (it will relinquish control to
     * Mercury during blocking communication calls). No RPC threads are
     * used because this is a pure client that will not be servicing
     * rpc requests.
     */
    /***************************************/
    mid = margo_init(proto, MARGO_CLIENT_MODE, 0, 0);
    if (mid == MARGO_INSTANCE_NULL) {
        fprintf(stderr, "Error: margo_init()\n");
        return (-1);
    }

    /* retrieve current pool to use for ULT creation */
    ret = ABT_xstream_self(&xstream);
    if (ret != 0) {
        fprintf(stderr, "Error: ABT_xstream_self()\n");
        return (-1);
    }
    ret = ABT_xstream_get_main_pools(xstream, 1, &pool);
    if (ret != 0) {
        fprintf(stderr, "Error: ABT_xstream_get_main_pools()\n");
        return (-1);
    }

    /* register RPCs */
    my_rpc_id = MARGO_REGISTER(mid, "my_rpc", my_rpc_in_t, my_rpc_out_t, NULL);

    /* find addr for server */
    hret = margo_addr_lookup(mid, argv[1], &svr_addr);
    assert(hret == HG_SUCCESS);

    for (i = 0; i < 4; i++) {
        args[i].val      = i;
        args[i].mid      = mid;
        args[i].svr_addr = svr_addr;
        /* Each ult gets a pointer to an element of the array to use
         * as input for the run_my_rpc() function.
         */
        ret = ABT_thread_create(pool, run_my_rpc, &args[i],
                                ABT_THREAD_ATTR_NULL, &threads[i]);
        if (ret != 0) {
            fprintf(stderr, "Error: ABT_thread_create()\n");
            return (-1);
        }
    }

    /* yield to one of the threads */
    ABT_thread_yield_to(threads[0]);

    for (i = 0; i < 4; i++) {
        ret = ABT_thread_join(threads[i]);
        if (ret != 0) {
            fprintf(stderr, "Error: ABT_thread_join()\n");
            return (-1);
        }
        ret = ABT_thread_free(&threads[i]);
        if (ret != 0) {
            /* BUGFIX: previously misreported this failure as
             * ABT_thread_join() */
            fprintf(stderr, "Error: ABT_thread_free()\n");
            return (-1);
        }
    }

    /* send one rpc to server to shut it down */
    ret = margo_shutdown_remote_instance(mid, svr_addr);
    if (ret != 0) {
        fprintf(stderr, "Error: margo_shutdown_remote_instance()\n");
    }

    margo_addr_free(mid, svr_addr);

    /* shut down everything */
    margo_finalize(mid);

    return (0);
}
/* ULT body executed once per element of args[] in main().
 *
 * Allocates a 512-byte buffer, registers it for bulk (rdma) access by the
 * server, forwards one "my_rpc" request carrying the bulk handle and this
 * ULT's index, waits for the response, and releases all resources.
 *
 * _arg: pointer to a struct run_my_rpc_args owned by main(); not freed here.
 */
static void run_my_rpc(void* _arg)
{
    struct run_my_rpc_args* arg = _arg;
    hg_handle_t             handle;
    my_rpc_in_t             in;
    my_rpc_out_t            out;
    hg_return_t             hret;
    hg_size_t               size;
    void*                   buffer;

    printf("ULT [%d] running.\n", arg->val);

    /* allocate buffer for bulk transfer */
    size = 512;
    /* BUGFIX: allocate 'size' bytes rather than repeating the literal 512,
     * so the allocation can never drift out of sync with the size that is
     * registered for bulk access below. */
    buffer = calloc(1, size);
    assert(buffer);
    /* BUGFIX: bounded snprintf instead of sprintf -- cannot overflow the
     * buffer even if the message ever grows. */
    snprintf((char*)buffer, size, "Hello world!\n");

    /* create handle */
    hret = margo_create(arg->mid, arg->svr_addr, my_rpc_id, &handle);
    assert(hret == HG_SUCCESS);

    /* register buffer for rdma/bulk access by server */
    hret = margo_bulk_create(arg->mid, 1, &buffer, &size, HG_BULK_READ_ONLY,
                             &in.bulk_handle);
    assert(hret == HG_SUCCESS);

    /* Send rpc. Note that we are also transmitting the bulk handle in the
     * input struct. It was set above.
     */
    in.input_val  = arg->val;
    in.dump_state = 0;
    hret = margo_forward(handle, &in);
    assert(hret == HG_SUCCESS);

    /* decode response */
    hret = margo_get_output(handle, &out);
    assert(hret == HG_SUCCESS);
    printf("Got response ret: %d\n", out.ret);

    /* clean up resources consumed by this rpc */
    margo_bulk_free(in.bulk_handle);
    margo_free_output(handle, &out);
    margo_destroy(handle);
    free(buffer);

    printf("ULT [%d] done.\n", arg->val);
    return;
}