
Commit 5182735

Clean up in mpi_window.cpp
1 parent d2559e0 commit 5182735

1 file changed: +41 −41 lines changed

test/c++/mpi_window.cpp

Lines changed: 41 additions & 41 deletions
@@ -14,10 +14,16 @@
 //
 // Authors: Philipp Dumitrescu, Olivier Parcollet, Nils Wentzell

-#include <mpi/mpi.hpp>
-#include <mpi/vector.hpp>
 #include <gtest/gtest.h>
+#include <mpi/mpi.hpp>
+
+#include <array>
+#include <cstddef>
+#include <iterator>
 #include <numeric>
+#include <span>
+#include <utility>
+#include <vector>

 // Test cases are adapted from slides and exercises of the HLRS course:
 // Introduction to the Message Passing Interface (MPI)
@@ -26,7 +32,7 @@
 // https://fs.hlrs.de/projects/par/par_prog_ws/pdf/mpi_3.1_rab.pdf
 // https://fs.hlrs.de/projects/par/par_prog_ws/practical/MPI31single.tar.gz

-TEST(MPI_Window, CommunicatorMember) {
+TEST(MPI, WindowCommunicatorMember) {
   mpi::communicator world;

   int data = world.rank();
@@ -39,7 +45,7 @@ TEST(MPI_Window, CommunicatorMember) {
   EXPECT_EQ(win_comm.size(), world.size());
 }

-TEST(MPI_Window, SharedCommMember) {
+TEST(MPI, WindowSharedCommunicatorMember) {
   auto shm = mpi::communicator{}.split_shared();

   mpi::shared_window<int> win{shm, 1};
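
The shared_window tested above is backed by MPI-3 shared-memory windows. As a rough sketch of what such a wrapper sits on top of, the same allocation and cross-rank address lookup in raw MPI looks like this (shm here stands for a raw MPI_Comm from a node-local split; the variable names are illustrative, not the library's internals):

    // Allocate one int per rank in a node-local shared window (raw MPI-3).
    int *base = nullptr;
    MPI_Win win;
    MPI_Win_allocate_shared(sizeof(int), sizeof(int), MPI_INFO_NULL, shm, &base, &win);

    // Query the start of rank 0's segment; on a shared-memory node every rank
    // can load/store it directly, given proper synchronization.
    MPI_Aint seg_size;
    int disp_unit;
    int *rank0_base = nullptr;
    MPI_Win_shared_query(win, 0, &seg_size, &disp_unit, &rank0_base);

    MPI_Win_free(&win);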
@@ -50,39 +56,33 @@ TEST(MPI_Window, SharedCommMember) {
   EXPECT_EQ(sh_win_comm.size(), shm.size());
 }

-TEST(MPI_Window, SharedCommunicator) {
+TEST(MPI, WindowGetAttrBase) {
   mpi::communicator world;
-  [[maybe_unused]] auto shm = world.split_shared();
-}
-
-TEST(MPI_Window, GetAttrBase) {
-  mpi::communicator world;
-  int rank = world.rank();

-  int buffer = rank;
+  int buffer = world.rank();
   mpi::window<int> win{world, &buffer, 1};

   void *base_ptr = win.base();
   EXPECT_NE(base_ptr, nullptr);
   EXPECT_EQ(base_ptr, &buffer);
 }

-TEST(MPI_Window, WindowAllocate) {
+TEST(MPI, WindowAllocate) {
   mpi::communicator world;
   int rank = world.rank();

   mpi::window<int> win{world, 1};
   *(win.base()) = rank;

   win.fence();
-  int rcv;
+  int rcv{};
   win.get(&rcv, 1, rank);
   win.fence();

   EXPECT_EQ(rcv, rank);
 }

-TEST(MPI_Window, PassiveTargetCommunication) {
+TEST(MPI, WindowPassiveTargetCommunication) {
   mpi::communicator world;
   if (world.size() < 2) { GTEST_SKIP() << "Test requires at least 2 processes\n"; }
   int rank = world.rank();
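
The value-initializations introduced here (int rcv{}; instead of int rcv;) give the buffers a defined starting value instead of indeterminate memory. The fence/get/fence pattern that WindowAllocate exercises is, schematically (a minimal sketch using only calls that appear in this diff):

    mpi::communicator world;
    mpi::window<int> win{world, 1}; // library-allocated window of one int
    *(win.base()) = world.rank();   // expose our rank in the window

    win.fence();                    // open the access epoch on all ranks
    int rcv{};
    win.get(&rcv, 1, world.rank()); // read back the exposed value
    win.fence();                    // close the epoch; rcv is now valid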
@@ -104,7 +104,7 @@ TEST(MPI_Window, PassiveTargetCommunication) {
   }
 }

-TEST(MPI_Window, ActiveTargetCommunication) {
+TEST(MPI, WindowActiveTargetCommunication) {
   mpi::communicator world;
   if (world.size() < 2) {
     // Target rank cannot be equal to origin rank (deadlocks), so we need at
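
For contrast with the fence-based tests, this test uses MPI's general active-target synchronization (post/start/complete/wait). In raw MPI the handshake looks roughly like the following (origin_grp, target_grp, and win are assumed to be a valid MPI_Group pair and an MPI_Win):

    if (rank == origin_rank) {
      MPI_Win_start(target_grp, 0, win); // blocks until the target posts
      int value = 42;
      MPI_Put(&value, 1, MPI_INT, target_rank, 0, 1, MPI_INT, win);
      MPI_Win_complete(win);             // the put is finished at the origin
    } else if (rank == target_rank) {
      MPI_Win_post(origin_grp, 0, win);  // expose the window to the origin
      MPI_Win_wait(win);                 // returns once the data has arrived
    }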
@@ -132,23 +132,23 @@ TEST(MPI_Window, ActiveTargetCommunication) {

   if (rank == origin_rank) {
     win.start(target_group); // blocks until target_rank calls post()
-    int origin_addr[] = {42};
-    int origin_count = 1;
-    win.put(origin_addr, origin_count, target_rank);
+    auto origin_arr  = std::array<int, 1>{42};
+    int origin_count = 1;
+    win.put(origin_arr.data(), origin_count, target_rank);
     win.complete();
   }
 }

-TEST(MPI_Window, GetAttrSize) {
+TEST(MPI, WindowGetAttrSize) {
   mpi::communicator world;
-  int buffer;
+  int buffer{};
   mpi::window<int> win{world, &buffer, 1};

   MPI_Aint size = win.size();
   EXPECT_EQ(size, sizeof(int));
 }

-TEST(MPI_Window, MoveConstructor) {
+TEST(MPI, WindowMoveConstructor) {
   mpi::communicator world;
   int i = 1;
   mpi::window<int> win1{world, &i, 1};
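
Replacing the C array with std::array is a small hardening: the element count travels with the type, and the pointer hand-off via .data() is explicit. A sketch of the same call written against that idiom:

    auto origin_arr = std::array<int, 1>{42};
    static_assert(origin_arr.size() == 1); // count is part of the type
    win.put(origin_arr.data(), static_cast<int>(origin_arr.size()), target_rank);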
@@ -159,19 +159,19 @@ TEST(MPI_Window, MoveConstructor) {
   EXPECT_EQ(win1.base(), nullptr);
 }

-TEST(MPI_Window, NullptrSizeZero) {
+TEST(MPI, WindowNullptrSizeZero) {
   mpi::communicator world;
   mpi::window<int> win{world, nullptr, 0};

   EXPECT_EQ(win.base(), nullptr);
   EXPECT_EQ(win.size(), 0);
 }

-TEST(MPI_Window, OneSidedGet) {
+TEST(MPI, WindowOneSidedGet) {
   mpi::communicator world;
   int const rank = world.rank();

-  int snd_buf, rcv_buf = -1;
+  int snd_buf{}, rcv_buf = -1;
   mpi::window<int> win{world, &snd_buf, 1};
   snd_buf = rank;

@@ -182,11 +182,11 @@ TEST(MPI_Window, OneSidedGet) {
   EXPECT_EQ(rcv_buf, rank);
 }

-TEST(MPI_Window, OneSidedPut) {
+TEST(MPI, WindowOneSidedPut) {
   mpi::communicator world;
   int const rank = world.rank();

-  int snd_buf, rcv_buf = -1;
+  int snd_buf{}, rcv_buf = -1;
   mpi::window<int> win{world, &rcv_buf, 1};
   snd_buf = rank;

@@ -197,13 +197,13 @@ TEST(MPI_Window, OneSidedPut) {
   EXPECT_EQ(rcv_buf, rank);
 }

-TEST(MPI_Window, RingOneSidedGet) {
+TEST(MPI, WindowRingOneSidedGet) {
   mpi::communicator world;
   int const rank = world.rank();
   int const size = world.size();
   int const left = (rank - 1 + size) % size;

-  int snd_buf, rcv_buf;
+  int snd_buf{}, rcv_buf{};
   mpi::window<int> win{world, &snd_buf, 1};
   snd_buf = rank;

@@ -219,13 +219,13 @@ TEST(MPI_Window, RingOneSidedGet) {
   EXPECT_EQ(sum, (size * (size - 1)) / 2);
 }

-TEST(MPI_Window, RingOneSidedPut) {
+TEST(MPI, WindowRingOneSidedPut) {
   mpi::communicator world;
   int const rank  = world.rank();
   int const size  = world.size();
   int const right = (rank + 1) % size;

-  int snd_buf, rcv_buf;
+  int snd_buf{}, rcv_buf{};
   mpi::window<int> win{world, &rcv_buf, 1};
   snd_buf = rank;
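
The bodies of the ring loops fall outside the hunks shown here. As a rough reconstruction of the get-variant's shape (matching the HLRS exercise the file header cites, not a verbatim copy of the test):

    int sum = 0;
    int snd_buf{}, rcv_buf{};
    mpi::window<int> win{world, &snd_buf, 1};
    snd_buf = rank;
    for (int i = 0; i < size; ++i) {
      win.fence();
      win.get(&rcv_buf, 1, left); // fetch the left neighbour's snd_buf
      win.fence();
      snd_buf = rcv_buf;          // forward the value around the ring
      sum += rcv_buf;
    }
    // After size iterations each rank has accumulated 0 + 1 + ... + (size - 1),
    // which is what the EXPECT_EQ(sum, (size * (size - 1)) / 2) checks verify.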

@@ -241,7 +241,7 @@ TEST(MPI_Window, RingOneSidedPut) {
   EXPECT_EQ(sum, (size * (size - 1)) / 2);
 }

-TEST(MPI_Window, RingOneSidedAllocShared) {
+TEST(MPI, WindowRingOneSidedAllocShared) {
   mpi::communicator world;
   auto shm = world.split_shared();
   int const rank_shm = shm.rank();
@@ -264,7 +264,7 @@ TEST(MPI_Window, RingOneSidedAllocShared) {
   EXPECT_EQ(sum, (size_shm * (size_shm - 1)) / 2);
 }

-TEST(MPI_Window, RingOneSidedStoreWinAllocSharedSignal) {
+TEST(MPI, WindowRingOneSidedStoreWinAllocSharedSignal) {
   if (not mpi::has_env) {
     // Test doesn't make sense without MPI
     GTEST_SKIP();
@@ -284,9 +284,9 @@ TEST(MPI_Window, RingOneSidedStoreWinAllocSharedSignal) {
   int sum = 0;
   int snd_buf = rank_shm;

-  MPI_Request rq;
+  MPI_Request rq{};
   MPI_Status status;
-  int snd_dummy, rcv_dummy;
+  int snd_dummy{}, rcv_dummy{};

   for (int i = 0; i < size_shm; ++i) {
     // ... The local Win_syncs are needed to sync the processor and real memory.
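
The dummy send/receive pair that gets value-initialized here carries no payload; it only orders the ranks, while MPI_Win_sync acts as a memory barrier between the private and public copies of the shared window. Schematically, one iteration of this signal idiom in raw MPI (mpi_win, comm, left, right, and the tag 17 are placeholders, not the test's exact names):

    MPI_Win_sync(mpi_win);                                   // flush local stores
    MPI_Isend(&snd_dummy, 0, MPI_INT, right, 17, comm, &rq); // zero-count signal
    MPI_Recv(&rcv_dummy, 0, MPI_INT, left, 17, comm, &status);
    MPI_Wait(&rq, MPI_STATUS_IGNORE);
    MPI_Win_sync(mpi_win);                                   // observe the neighbour's store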
@@ -327,7 +327,7 @@ TEST(MPI_Window, RingOneSidedStoreWinAllocSharedSignal) {
   win.unlock();
 }

-TEST(MPI_Window, SharedArray) {
+TEST(MPI, WindowSharedArray) {
   mpi::communicator world;
   auto shm = world.split_shared();
   int const rank_shm = shm.rank();
@@ -339,16 +339,16 @@ TEST(MPI_Window, SharedArray) {

   // Fill array with local rank in parallel by chunking the range into the communicator
   win.fence();
-  auto slice = itertools::chunk_range(0, array_view.size(), shm.size(), shm.rank());
-  for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = i; }
+  auto slice = itertools::chunk_range(0, static_cast<std::ptrdiff_t>(array_view.size()), shm.size(), shm.rank());
+  for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = static_cast<int>(i); }
   win.fence();

   // Total sum is just sum of numbers in interval [0, array_size)
   int sum = std::accumulate(array_view.begin(), array_view.end(), int{0});
   EXPECT_EQ(sum, (array_size * (array_size - 1)) / 2);
 }

-TEST(MPI_Window, DistributedSharedArray) {
+TEST(MPI, WindowDistributedSharedArray) {
   mpi::communicator world;
   auto shm = world.split_shared();

@@ -396,8 +396,8 @@ TEST(MPI_Window, DistributedSharedArray) {
   // Fill array with global index (= local index + global offset)
   // We do this in parallel on each shared memory island by chunking the total range
   win.fence();
-  auto slice = itertools::chunk_range(0, array_view.size(), shm.size(), shm.rank());
-  for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = i + offset; }
+  auto slice = itertools::chunk_range(0, static_cast<std::ptrdiff_t>(array_view.size()), shm.size(), shm.rank());
+  for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = static_cast<int>(i + offset); }
   win.fence();

   // Calculate partial sum on head node of each shared memory island and
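
The static_casts added in these two hunks resolve a signed/unsigned mismatch: the view's size() returns an unsigned std::size_t, while itertools::chunk_range and the loop index work in signed arithmetic. A hypothetical 100-element view split over 4 ranks illustrates the effect:

    auto const n     = static_cast<std::ptrdiff_t>(array_view.size()); // e.g. 100
    auto const slice = itertools::chunk_range(0, n, shm.size(), shm.rank());
    // rank 0 gets [0, 25), rank 1 gets [25, 50), and so on; i is signed,
    // hence the static_cast<int> when storing into the int array.
    for (auto i = slice.first; i < slice.second; ++i) { array_view[i] = static_cast<int>(i); }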
