// doctest_mpi.h
#ifndef DOCTEST_MPI_H
#define DOCTEST_MPI_H

#ifdef DOCTEST_CONFIG_IMPLEMENT

#include "doctest/extensions/mpi_sub_comm.h"
#include "mpi_reporter.h"
#include <unordered_map>
#include <cstdlib>  // std::getenv
#include <string>   // std::stoi, std::to_string
#include <iostream> // std::cout

namespace doctest {

// Each time an MPI_TEST_CASE is executed on N procs,
// we need a sub-communicator of N procs to execute it.
// It is registered here so that it can be re-used
// by other tests that require a sub-comm of the same size.
std::unordered_map<int,mpi_sub_comm> sub_comms_by_size;

// Records whether at least one MPI_TEST_CASE was registered as "skipped"
// because there are not enough procs to execute it
int nb_test_cases_skipped_insufficient_procs = 0;


std::string thread_level_to_string(int thread_lvl);
int mpi_init_thread(int argc, char *argv[], int required_thread_support);
void mpi_finalize();


// Can be safely called before MPI_Init()
// This is needed for MPI_TEST_CASE because we use doctest::skip()
// to prevent execution of tests when there are not enough procs,
// but doctest::skip() is called during test registration, that is, before main(), and hence before MPI_Init()
int mpi_comm_world_size() {
#if defined(OPEN_MPI)
  const char* size_str = std::getenv("OMPI_COMM_WORLD_SIZE");
#elif defined(I_MPI_VERSION) || defined(MPI_VERSION) // Intel MPI + MPICH (at least)
  const char* size_str = std::getenv("PMI_SIZE"); // see https://community.intel.com/t5/Intel-oneAPI-HPC-Toolkit/Environment-variables-defined-by-intel-mpirun/td-p/1096703
#else
  #error "Unknown MPI implementation: please submit an issue or a PR to doctest. Meanwhile, you can look at the output of e.g. `mpirun -np 3 env` to search for an environment variable that contains the size of MPI_COMM_WORLD and extend this code accordingly"
#endif
  if (size_str==nullptr) return 1; // not launched with mpirun/mpiexec, so assume only one process
  return std::stoi(size_str);
}
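
// Example (informative): with Open MPI, `mpirun -np 3 ./my_tests` sets
// OMPI_COMM_WORLD_SIZE=3 in each process's environment, so this function
// returns 3 even before MPI_Init_thread() is called
// (`./my_tests` is a placeholder for your test executable).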

// Record size of MPI_COMM_WORLD with mpi_comm_world_size()
int world_size_before_init = mpi_comm_world_size();


std::string thread_level_to_string(int thread_lvl) {
  switch (thread_lvl) {
    case MPI_THREAD_SINGLE: return "MPI_THREAD_SINGLE";
    case MPI_THREAD_FUNNELED: return "MPI_THREAD_FUNNELED";
    case MPI_THREAD_SERIALIZED: return "MPI_THREAD_SERIALIZED";
    case MPI_THREAD_MULTIPLE: return "MPI_THREAD_MULTIPLE";
    default: return "Invalid MPI thread level";
  }
}
int mpi_init_thread(int argc, char *argv[], int required_thread_support) {
  int provided_thread_support;
  MPI_Init_thread(&argc, &argv, required_thread_support, &provided_thread_support);

  int world_size;
  MPI_Comm_size(MPI_COMM_WORLD,&world_size);
  if (world_size_before_init != world_size) {
    DOCTEST_INTERNAL_ERROR(
      "doctest found "+std::to_string(world_size_before_init)+" MPI processes before `MPI_Init_thread`,"
      " but MPI_COMM_WORLD is actually of size "+std::to_string(world_size)+".\n"
      "This is most likely due to your MPI implementation not being well supported by doctest. Please report this issue on GitHub."
    );
  }

  if (provided_thread_support!=required_thread_support) {
    std::cout <<
      "WARNING: " + thread_level_to_string(required_thread_support) + " was requested, "
      + "but " + thread_level_to_string(provided_thread_support) + " is provided by the MPI library\n";
  }
  return provided_thread_support;
}
void mpi_finalize() {
  // We need to destroy all created sub-communicators before calling MPI_Finalize()
  doctest::sub_comms_by_size.clear();
  MPI_Finalize();
}
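
// Typical main() in the translation unit that defines DOCTEST_CONFIG_IMPLEMENT
// (a sketch following the doctest MPI extension docs; the reporter name assumes
// the MpiConsoleReporter declared in mpi_reporter.h):
//
//   int main(int argc, char** argv) {
//     doctest::mpi_init_thread(argc, argv, MPI_THREAD_MULTIPLE);
//
//     doctest::Context ctx;
//     ctx.setOption("reporters", "MpiConsoleReporter"); // MPI-aware console output
//     ctx.applyCommandLine(argc, argv);
//     int test_result = ctx.run();
//
//     doctest::mpi_finalize(); // destroys cached sub-comms, then calls MPI_Finalize()
//     return test_result;
//   }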

} // doctest

#else // DOCTEST_CONFIG_IMPLEMENT

#include "doctest/extensions/mpi_sub_comm.h"
#include <unordered_map>
#include <exception>
#include <cassert>     // assert
#include <tuple>       // std::tie
#include <type_traits> // std::integral_constant
#include <utility>     // std::make_pair

namespace doctest {

extern std::unordered_map<int,mpi_sub_comm> sub_comms_by_size;
extern int nb_test_cases_skipped_insufficient_procs;
extern int world_size_before_init;
int mpi_comm_world_size();

int mpi_init_thread(int argc, char *argv[], int required_thread_support);
void mpi_finalize();

template<int nb_procs, class F>
void execute_mpi_test_case(F func) {
  // Get the sub-communicator of nb_procs processes, creating and caching it on first use
  auto it = sub_comms_by_size.find(nb_procs);
  if (it==end(sub_comms_by_size)) {
    bool was_emplaced = false;
    std::tie(it,was_emplaced) = sub_comms_by_size.emplace(std::make_pair(nb_procs,mpi_sub_comm(nb_procs)));
    assert(was_emplaced);
  }
  const mpi_sub_comm& sub = it->second;
  // Only processes that belong to the sub-communicator run the test
  if (sub.comm != MPI_COMM_NULL) {
    func(sub.rank,nb_procs,sub.comm,std::integral_constant<int,nb_procs>{});
  }
}
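
// For illustration (normally reached through MPI_TEST_CASE, not called directly):
// execute_mpi_test_case<2>(my_test_func) retrieves (or creates and caches) the
// 2-proc sub-communicator and invokes my_test_func only on the ranks that belong
// to it; my_test_func is a placeholder for a function taking the four arguments
// passed by func(...) above.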

inline bool
insufficient_procs(int test_nb_procs) {
  static const int world_size = mpi_comm_world_size();
  bool insufficient = test_nb_procs>world_size;
  if (insufficient) {
    ++nb_test_cases_skipped_insufficient_procs;
  }
  return insufficient;
}

} // doctest


#define DOCTEST_MPI_GEN_ASSERTION(rank_to_test, assertion, ...) \
  static_assert(rank_to_test<test_nb_procs_as_int_constant.value,"Trying to assert on a rank greater than or equal to the number of procs of the test!"); \
  if(rank_to_test == test_rank) assertion(__VA_ARGS__)
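
// For example, MPI_CHECK(1, x==1) static_asserts that rank 1 exists in the test
// (1 < test_nb_procs_as_int_constant.value), then runs DOCTEST_CHECK(x==1) on rank 1 only.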

#define DOCTEST_MPI_WARN(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_WARN,__VA_ARGS__)
#define DOCTEST_MPI_CHECK(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_CHECK,__VA_ARGS__)
#define DOCTEST_MPI_REQUIRE(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_REQUIRE,__VA_ARGS__)
#define DOCTEST_MPI_WARN_FALSE(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_WARN_FALSE,__VA_ARGS__)
#define DOCTEST_MPI_CHECK_FALSE(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_CHECK_FALSE,__VA_ARGS__)
#define DOCTEST_MPI_REQUIRE_FALSE(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_REQUIRE_FALSE,__VA_ARGS__)

#define DOCTEST_CREATE_MPI_TEST_CASE(name,nb_procs,func) \
  static void func(DOCTEST_UNUSED int test_rank, DOCTEST_UNUSED int test_nb_procs, DOCTEST_UNUSED MPI_Comm test_comm, DOCTEST_UNUSED std::integral_constant<int,nb_procs>); \
  TEST_CASE(name * doctest::description("MPI_TEST_CASE") * doctest::skip(doctest::insufficient_procs(nb_procs))) { \
    doctest::execute_mpi_test_case<nb_procs>(func); \
  } \
  static void func(DOCTEST_UNUSED int test_rank, DOCTEST_UNUSED int test_nb_procs, DOCTEST_UNUSED MPI_Comm test_comm, DOCTEST_UNUSED std::integral_constant<int,nb_procs> test_nb_procs_as_int_constant)
// DOC: test_rank, test_nb_procs, and test_comm are available UNDER THESE SPECIFIC NAMES in the body of the unit test
// DOC: test_nb_procs_as_int_constant is equal to test_nb_procs, but as a compile-time value
//      (used in CHECK-like macros to assert that the checked rank exists)
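//
// Example of use (a sketch, adapted from the doctest MPI extension documentation):
//
//   MPI_TEST_CASE("two-process test",2) { // requires a sub-comm of exactly 2 procs
//     int x = test_rank;                  // rank within test_comm: 0 or 1
//     if (test_rank == 0) x = 10;
//     MPI_CHECK( 0, x==10 );              // checked only on rank 0 of test_comm
//     MPI_CHECK( 1, x==1 );               // checked only on rank 1 of test_comm
//   }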

#define DOCTEST_MPI_TEST_CASE(name,nb_procs) \
  DOCTEST_CREATE_MPI_TEST_CASE(name,nb_procs,DOCTEST_ANONYMOUS(DOCTEST_MPI_FUNC))


// == SHORT VERSIONS OF THE MACROS
#if !defined(DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES)
#define MPI_WARN DOCTEST_MPI_WARN
#define MPI_CHECK DOCTEST_MPI_CHECK
#define MPI_REQUIRE DOCTEST_MPI_REQUIRE
#define MPI_WARN_FALSE DOCTEST_MPI_WARN_FALSE
#define MPI_CHECK_FALSE DOCTEST_MPI_CHECK_FALSE
#define MPI_REQUIRE_FALSE DOCTEST_MPI_REQUIRE_FALSE

#define MPI_TEST_CASE DOCTEST_MPI_TEST_CASE
#endif // DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES


#endif // DOCTEST_CONFIG_IMPLEMENT

#endif // DOCTEST_MPI_H