 //
 // Authors: Philipp Dumitrescu, Olivier Parcollet, Nils Wentzell
 
-#include <mpi/mpi.hpp>
-#include <mpi/vector.hpp>
 #include <gtest/gtest.h>
+#include <mpi/mpi.hpp>
+
+#include <array>
+#include <cstddef>
+#include <iterator>
 #include <numeric>
+#include <span>
+#include <utility>
+#include <vector>
 
 // Test cases are adapted from slides and exercises of the HLRS course:
 // Introduction to the Message Passing Interface (MPI)
 // https://fs.hlrs.de/projects/par/par_prog_ws/pdf/mpi_3.1_rab.pdf
 // https://fs.hlrs.de/projects/par/par_prog_ws/practical/MPI31single.tar.gz
 
-TEST(MPI_Window, CommunicatorMember) {
+TEST(MPI, WindowCommunicatorMember) {
   mpi::communicator world;
 
   int data = world.rank();
@@ -39,7 +45,7 @@ TEST(MPI_Window, CommunicatorMember) {
   EXPECT_EQ(win_comm.size(), world.size());
 }
 
-TEST(MPI_Window, SharedCommMember) {
+TEST(MPI, WindowSharedCommunicatorMember) {
   auto shm = mpi::communicator{}.split_shared();
 
   mpi::shared_window<int> win{shm, 1};
@@ -50,39 +56,33 @@ TEST(MPI_Window, SharedCommMember) {
   EXPECT_EQ(sh_win_comm.size(), shm.size());
 }
 
-TEST(MPI_Window, SharedCommunicator) {
+TEST(MPI, WindowGetAttrBase) {
   mpi::communicator world;
-  [[maybe_unused]] auto shm = world.split_shared();
-}
-
-TEST(MPI_Window, GetAttrBase) {
-  mpi::communicator world;
-  int rank = world.rank();
 
-  int buffer = rank;
+  int buffer = world.rank();
   mpi::window<int> win{world, &buffer, 1};
 
   void *base_ptr = win.base();
   EXPECT_NE(base_ptr, nullptr);
   EXPECT_EQ(base_ptr, &buffer);
 }
 
-TEST(MPI_Window, WindowAllocate) {
+TEST(MPI, WindowAllocate) {
   mpi::communicator world;
   int rank = world.rank();
 
   mpi::window<int> win{world, 1};
   *(win.base()) = rank;
 
   win.fence();
-  int rcv;
+  int rcv{};
   win.get(&rcv, 1, rank);
   win.fence();
 
   EXPECT_EQ(rcv, rank);
 }
 
-TEST(MPI_Window, PassiveTargetCommunication) {
+TEST(MPI, WindowPassiveTargetCommunication) {
   mpi::communicator world;
   if (world.size() < 2) { GTEST_SKIP() << "Test requires at least 2 processes\n"; }
   int rank = world.rank();
@@ -104,7 +104,7 @@ TEST(MPI_Window, PassiveTargetCommunication) {
   }
 }
 
-TEST(MPI_Window, ActiveTargetCommunication) {
+TEST(MPI, WindowActiveTargetCommunication) {
   mpi::communicator world;
   if (world.size() < 2) {
     // Target rank cannot be equal to origin rank (deadlocks), so we need at
@@ -132,23 +132,23 @@ TEST(MPI_Window, ActiveTargetCommunication) {
 
   if (rank == origin_rank) {
     win.start(target_group); // blocks until target_rank calls post()
-    int origin_addr[] = {42};
-    int origin_count = 1;
-    win.put(origin_addr, origin_count, target_rank);
+    auto origin_arr = std::array<int, 1>{42};
+    int origin_count = 1;
+    win.put(origin_arr.data(), origin_count, target_rank);
     win.complete();
   }
 }
 
-TEST(MPI_Window, GetAttrSize) {
+TEST(MPI, WindowGetAttrSize) {
   mpi::communicator world;
-  int buffer;
+  int buffer{};
   mpi::window<int> win{world, &buffer, 1};
 
   MPI_Aint size = win.size();
   EXPECT_EQ(size, sizeof(int));
 }
 
-TEST(MPI_Window, MoveConstructor) {
+TEST(MPI, WindowMoveConstructor) {
   mpi::communicator world;
   int i = 1;
   mpi::window<int> win1{world, &i, 1};
@@ -159,19 +159,19 @@ TEST(MPI_Window, MoveConstructor) {
   EXPECT_EQ(win1.base(), nullptr);
 }
 
-TEST(MPI_Window, NullptrSizeZero) {
+TEST(MPI, WindowNullptrSizeZero) {
   mpi::communicator world;
   mpi::window<int> win{world, nullptr, 0};
 
   EXPECT_EQ(win.base(), nullptr);
   EXPECT_EQ(win.size(), 0);
 }
 
-TEST(MPI_Window, OneSidedGet) {
+TEST(MPI, WindowOneSidedGet) {
   mpi::communicator world;
   int const rank = world.rank();
 
-  int snd_buf, rcv_buf = -1;
+  int snd_buf{}, rcv_buf = -1;
   mpi::window<int> win{world, &snd_buf, 1};
   snd_buf = rank;
 
@@ -182,11 +182,11 @@ TEST(MPI_Window, OneSidedGet) {
   EXPECT_EQ(rcv_buf, rank);
 }
 
-TEST(MPI_Window, OneSidedPut) {
+TEST(MPI, WindowOneSidedPut) {
   mpi::communicator world;
   int const rank = world.rank();
 
-  int snd_buf, rcv_buf = -1;
+  int snd_buf{}, rcv_buf = -1;
   mpi::window<int> win{world, &rcv_buf, 1};
   snd_buf = rank;
 
@@ -197,13 +197,13 @@ TEST(MPI_Window, OneSidedPut) {
   EXPECT_EQ(rcv_buf, rank);
 }
 
-TEST(MPI_Window, RingOneSidedGet) {
+TEST(MPI, WindowRingOneSidedGet) {
   mpi::communicator world;
   int const rank = world.rank();
   int const size = world.size();
   int const left = (rank - 1 + size) % size;
 
-  int snd_buf, rcv_buf;
+  int snd_buf{}, rcv_buf{};
   mpi::window<int> win{world, &snd_buf, 1};
   snd_buf = rank;
 
@@ -219,13 +219,13 @@ TEST(MPI_Window, RingOneSidedGet) {
   EXPECT_EQ(sum, (size * (size - 1)) / 2);
 }
 
-TEST(MPI_Window, RingOneSidedPut) {
+TEST(MPI, WindowRingOneSidedPut) {
   mpi::communicator world;
   int const rank = world.rank();
   int const size = world.size();
   int const right = (rank + 1) % size;
 
-  int snd_buf, rcv_buf;
+  int snd_buf{}, rcv_buf{};
   mpi::window<int> win{world, &rcv_buf, 1};
   snd_buf = rank;
 
@@ -241,7 +241,7 @@ TEST(MPI_Window, RingOneSidedPut) {
   EXPECT_EQ(sum, (size * (size - 1)) / 2);
 }
 
-TEST(MPI_Window, RingOneSidedAllocShared) {
+TEST(MPI, WindowRingOneSidedAllocShared) {
   mpi::communicator world;
   auto shm = world.split_shared();
   int const rank_shm = shm.rank();
@@ -264,7 +264,7 @@ TEST(MPI_Window, RingOneSidedAllocShared) {
   EXPECT_EQ(sum, (size_shm * (size_shm - 1)) / 2);
 }
 
-TEST(MPI_Window, RingOneSidedStoreWinAllocSharedSignal) {
+TEST(MPI, WindowRingOneSidedStoreWinAllocSharedSignal) {
   if (not mpi::has_env) {
     // Test doesn't make sense without MPI
     GTEST_SKIP();
@@ -284,9 +284,9 @@ TEST(MPI_Window, RingOneSidedStoreWinAllocSharedSignal) {
   int sum = 0;
   int snd_buf = rank_shm;
 
-  MPI_Request rq;
+  MPI_Request rq{};
   MPI_Status status;
-  int snd_dummy, rcv_dummy;
+  int snd_dummy{}, rcv_dummy{};
 
   for (int i = 0; i < size_shm; ++i) {
     // ... The local Win_syncs are needed to sync the processor and real memory.
@@ -327,7 +327,7 @@ TEST(MPI_Window, RingOneSidedStoreWinAllocSharedSignal) {
   win.unlock();
 }
 
-TEST(MPI_Window, SharedArray) {
+TEST(MPI, WindowSharedArray) {
   mpi::communicator world;
   auto shm = world.split_shared();
   int const rank_shm = shm.rank();
@@ -339,16 +339,16 @@ TEST(MPI_Window, SharedArray) {
 
   // Fill array with local rank in parallel by chunking the range into the communicator
   win.fence();
-  auto slice = itertools::chunk_range(0, array_view.size(), shm.size(), shm.rank());
-  for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = i; }
+  auto slice = itertools::chunk_range(0, static_cast<std::ptrdiff_t>(array_view.size()), shm.size(), shm.rank());
+  for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = static_cast<int>(i); }
   win.fence();
 
   // Total sum is just sum of numbers in interval [0, array_size)
   int sum = std::accumulate(array_view.begin(), array_view.end(), int{0});
   EXPECT_EQ(sum, (array_size * (array_size - 1)) / 2);
 }
 
-TEST(MPI_Window, DistributedSharedArray) {
+TEST(MPI, WindowDistributedSharedArray) {
   mpi::communicator world;
   auto shm = world.split_shared();
 
@@ -396,8 +396,8 @@ TEST(MPI_Window, DistributedSharedArray) {
   // Fill array with global index (= local index + global offset)
   // We do this in parallel on each shared memory island by chunking the total range
   win.fence();
-  auto slice = itertools::chunk_range(0, array_view.size(), shm.size(), shm.rank());
-  for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = i + offset; }
+  auto slice = itertools::chunk_range(0, static_cast<std::ptrdiff_t>(array_view.size()), shm.size(), shm.rank());
+  for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = static_cast<int>(i + offset); }
   win.fence();
 
   // Calculate partial sum on head node of each shared memory island and