
Commit 2bf471c

Changing define names.
1 parent ae95dc9 commit 2bf471c

72 files changed: 249 additions, 286 deletions
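Every hunk in this commit makes the same mechanical change: the configuration macros USE_OPENMP, USE_CUDA, USE_CHAI, and USE_CALIPER become the project-prefixed LVARRAY_USE_OPENMP, LVARRAY_USE_CUDA, LVARRAY_USE_CHAI, and LVARRAY_USE_CALIPER. The commit message does not state the motivation, but the usual reason for prefixing is to keep LvArray's guards from reacting to identically named macros defined by a host application or another dependency. Below is a minimal sketch of that collision; the downstream USE_CUDA define is a hypothetical example, not anything in this commit.

// Hypothetical illustration of why an unprefixed guard is fragile.
// Assume a downstream application defines USE_CUDA for its own purposes,
// while LvArray itself was configured *without* CUDA support.
#define USE_CUDA 1   // set by the host application, not by LvArray

// Old guard: fires because of the host application's macro and would try to
// compile device-only code paths that were never configured for LvArray.
#if defined(USE_CUDA)
// ... CUDA-specific LvArray code ...
#endif

// New guard: only fires when LvArray's own build defines it.
#if defined(LVARRAY_USE_CUDA)
// ... CUDA-specific LvArray code ...
#endif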


benchmarks/benchmarkArray1DR2TensorMultiplication.cpp

Lines changed: 6 additions & 6 deletions

@@ -218,12 +218,12 @@ void pointerRAJA( benchmark::State & state )

 INDEX_TYPE const SERIAL_SIZE = (2 << 18) - 87;

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 INDEX_TYPE const OMP_SIZE = (2 << 22) - 87;
 #endif

 // The non Array benchmarks could be run without chai, but then what's the point.
-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 constexpr INDEX_TYPE CUDA_SIZE = (2 << 24) - 87;
 #endif

@@ -271,11 +271,11 @@ void registerBenchmarks()
 },
 std::make_tuple( SERIAL_SIZE, RAJA::PERM_IJK {}, serialPolicy {} )
 , std::make_tuple( SERIAL_SIZE, RAJA::PERM_KJI {}, serialPolicy {} )
-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 , std::make_tuple( OMP_SIZE, RAJA::PERM_IJK {}, parallelHostPolicy {} )
 , std::make_tuple( OMP_SIZE, RAJA::PERM_KJI {}, parallelHostPolicy {} )
 #endif
-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 , std::make_tuple( CUDA_SIZE, RAJA::PERM_IJK {}, parallelDevicePolicy< THREADS_PER_BLOCK > {} )
 , std::make_tuple( CUDA_SIZE, RAJA::PERM_KJI {}, parallelDevicePolicy< THREADS_PER_BLOCK > {} )
 #endif

@@ -295,11 +295,11 @@ int main( int argc, char * * argv )
 LVARRAY_LOG( "VALUE_TYPE = " << LvArray::system::demangleType< LvArray::benchmarking::VALUE_TYPE >() );
 LVARRAY_LOG( "Serial problems of size ( " << LvArray::benchmarking::SERIAL_SIZE << ", 3, 3 )." );

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 LVARRAY_LOG( "OMP problems of size ( " << LvArray::benchmarking::OMP_SIZE << ", 3, 3 )." );
 #endif

-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 LVARRAY_LOG( "CUDA problems of size ( " << LvArray::benchmarking::CUDA_SIZE << ", 3, 3 )." );
 #endif

benchmarks/benchmarkArray1DR2TensorMultiplicationKernels.cpp

Lines changed: 2 additions & 2 deletions

@@ -290,12 +290,12 @@ template class ArrayOfR2TensorsNative< RAJA::PERM_KJI >;
 template class ArrayOfR2TensorsRAJA< RAJA::PERM_IJK, serialPolicy >;
 template class ArrayOfR2TensorsRAJA< RAJA::PERM_KJI, serialPolicy >;

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 template class ArrayOfR2TensorsRAJA< RAJA::PERM_IJK, parallelHostPolicy >;
 template class ArrayOfR2TensorsRAJA< RAJA::PERM_KJI, parallelHostPolicy >;
 #endif

-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 template class ArrayOfR2TensorsRAJA< RAJA::PERM_IJK, RAJA::cuda_exec< THREADS_PER_BLOCK > >;
 template class ArrayOfR2TensorsRAJA< RAJA::PERM_KJI, RAJA::cuda_exec< THREADS_PER_BLOCK > >;
 #endif

benchmarks/benchmarkArrayOfArraysNodeToElementMapConstruction.cpp

Lines changed: 1 addition & 1 deletion

@@ -67,7 +67,7 @@ void registerBenchmarks()
 REGISTER_BENCHMARK_TEMPLATE( WRAP( { nx, ny, nz } ), overAllocation, POLICY );
 REGISTER_BENCHMARK_TEMPLATE( WRAP( { nx, ny, nz } ), resizeFromCapacities, POLICY );
 }, std::make_tuple( NX, NY, NZ, serialPolicy {} )
-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 , std::make_tuple( NX, NY, NZ, parallelHostPolicy {} )
 #endif
 );

benchmarks/benchmarkArrayOfArraysNodeToElementMapConstructionKernels.cpp

Lines changed: 1 addition & 1 deletion

@@ -127,7 +127,7 @@ resizeFromCapacities( ArrayView< INDEX_TYPE const, 2, 1, INDEX_TYPE, DEFAULT_BUF
 // Explicit instantiation of NodeToElemMapConstruction.
 template class NodeToElemMapConstruction< serialPolicy >;

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 template class NodeToElemMapConstruction< parallelHostPolicy >;
 #endif

benchmarks/benchmarkArrayOfArraysNodeToElementMapConstructionKernels.hpp

Lines changed: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ class NaiveNodeToElemMapConstruction
 {
 CALI_CXX_MARK_SCOPE( "~NaiveNodeToElemMapConstruction" );

-// #if defined(USE_OPENMP)
+// #if defined(LVARRAY_USE_OPENMP)
 // using EXEC_POLICY = parallelHostPolicy;
 // #else
 using EXEC_POLICY = serialPolicy;
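The guard touched in this hunk is still commented out: NaiveNodeToElemMapConstruction currently hard-codes serialPolicy, and the commented lines only sketch an OpenMP-aware alternative. For clarity, this is what that selection would look like if the comment markers were removed, using the renamed macro; it illustrates the commented-out intent and is not code this commit enables.

// Sketch only: the uncommented form of the policy selection above.
#if defined(LVARRAY_USE_OPENMP)
using EXEC_POLICY = parallelHostPolicy;  // build the naive map with OpenMP threads
#else
using EXEC_POLICY = serialPolicy;        // otherwise fall back to a single thread
#endif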

benchmarks/benchmarkEigendecomposition.cpp

Lines changed: 6 additions & 6 deletions

@@ -38,13 +38,13 @@ void eigenvectors( benchmark::State & state )
 INDEX_TYPE const SERIAL_SIZE_2x2 = (2 << 22) - 87;
 INDEX_TYPE const SERIAL_SIZE_3x3 = (2 << 19) - 87;

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 INDEX_TYPE const OMP_SIZE_2x2 = (2 << 24) - 87;
 INDEX_TYPE const OMP_SIZE_3x3 = (2 << 23) - 87;
 #endif

 // The non Array benchmarks could be run without chai, but then what's the point.
-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 constexpr INDEX_TYPE CUDA_SIZE_2x2 = (2 << 24) - 87;
 constexpr INDEX_TYPE CUDA_SIZE_3x3 = (2 << 24) - 87;
 #endif

@@ -67,13 +67,13 @@ void registerBenchmarks()
 , std::make_tuple( SERIAL_SIZE_2x2, std::integral_constant< int, 2 > {}, RAJA::PERM_JI {}, RAJA::PERM_KJI {}, serialPolicy {} )
 , std::make_tuple( SERIAL_SIZE_3x3, std::integral_constant< int, 3 > {}, RAJA::PERM_IJ {}, RAJA::PERM_IJK {}, serialPolicy {} )
 , std::make_tuple( SERIAL_SIZE_3x3, std::integral_constant< int, 3 > {}, RAJA::PERM_JI {}, RAJA::PERM_KJI {}, serialPolicy {} )
-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 , std::make_tuple( OMP_SIZE_2x2, std::integral_constant< int, 2 > {}, RAJA::PERM_IJ {}, RAJA::PERM_IJK {}, parallelHostPolicy {} )
 , std::make_tuple( OMP_SIZE_2x2, std::integral_constant< int, 2 > {}, RAJA::PERM_JI {}, RAJA::PERM_KJI {}, parallelHostPolicy {} )
 , std::make_tuple( OMP_SIZE_3x3, std::integral_constant< int, 3 > {}, RAJA::PERM_IJ {}, RAJA::PERM_IJK {}, parallelHostPolicy {} )
 , std::make_tuple( OMP_SIZE_3x3, std::integral_constant< int, 3 > {}, RAJA::PERM_JI {}, RAJA::PERM_KJI {}, parallelHostPolicy {} )
 #endif
-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 , std::make_tuple( CUDA_SIZE_2x2, std::integral_constant< int, 2 > {}, RAJA::PERM_IJ {}, RAJA::PERM_IJK {}, parallelDevicePolicy< THREADS_PER_BLOCK > {} )
 , std::make_tuple( CUDA_SIZE_2x2, std::integral_constant< int, 2 > {}, RAJA::PERM_JI {}, RAJA::PERM_KJI {}, parallelDevicePolicy< THREADS_PER_BLOCK > {} )
 , std::make_tuple( CUDA_SIZE_3x3, std::integral_constant< int, 3 > {}, RAJA::PERM_IJ {}, RAJA::PERM_IJK {}, parallelDevicePolicy< THREADS_PER_BLOCK > {} )

@@ -97,12 +97,12 @@ int main( int argc, char * * argv )
 LVARRAY_LOG( "Serial number of 2x2 matrices = " << LvArray::benchmarking::SERIAL_SIZE_2x2 );
 LVARRAY_LOG( "Serial number of 3x3 matrices = " << LvArray::benchmarking::SERIAL_SIZE_3x3 );

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 LVARRAY_LOG( "OMP number of 2x2 matrices = " << LvArray::benchmarking::OMP_SIZE_2x2 );
 LVARRAY_LOG( "OMP number of 3x3 matrices = " << LvArray::benchmarking::OMP_SIZE_3x3 );
 #endif

-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 LVARRAY_LOG( "CUDA number of 2x2 matrices = " << LvArray::benchmarking::CUDA_SIZE_2x2 );
 LVARRAY_LOG( "CUDA number of 3x3 matrices = " << LvArray::benchmarking::CUDA_SIZE_3x3 );
 #endif

benchmarks/benchmarkEigendecompositionKernels.cpp

Lines changed: 2 additions & 2 deletions

@@ -41,14 +41,14 @@ template class Eigendecomposition< 2, RAJA::PERM_JI, RAJA::PERM_KJI, serialPolic
 template class Eigendecomposition< 3, RAJA::PERM_IJ, RAJA::PERM_IJK, serialPolicy >;
 template class Eigendecomposition< 3, RAJA::PERM_JI, RAJA::PERM_KJI, serialPolicy >;

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 template class Eigendecomposition< 2, RAJA::PERM_IJ, RAJA::PERM_IJK, parallelHostPolicy >;
 template class Eigendecomposition< 2, RAJA::PERM_JI, RAJA::PERM_KJI, parallelHostPolicy >;
 template class Eigendecomposition< 3, RAJA::PERM_IJ, RAJA::PERM_IJK, parallelHostPolicy >;
 template class Eigendecomposition< 3, RAJA::PERM_JI, RAJA::PERM_KJI, parallelHostPolicy >;
 #endif

-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 template class Eigendecomposition< 2, RAJA::PERM_IJ, RAJA::PERM_IJK, parallelDevicePolicy< THREADS_PER_BLOCK > >;
 template class Eigendecomposition< 2, RAJA::PERM_JI, RAJA::PERM_KJI, parallelDevicePolicy< THREADS_PER_BLOCK > >;
 template class Eigendecomposition< 3, RAJA::PERM_IJ, RAJA::PERM_IJK, parallelDevicePolicy< THREADS_PER_BLOCK > >;

benchmarks/benchmarkHelpers.hpp

Lines changed: 2 additions & 2 deletions

@@ -18,7 +18,7 @@
 #include <chrono>
 #include <unordered_map>

-#if defined(USE_CALIPER)
+#if defined(LVARRAY_USE_CALIPER)
 #include <caliper/cali.h>
 #define CALI_CXX_MARK_PRETTY_FUNCTION cali::Function __cali_ann ## __func__( __PRETTY_FUNCTION__ )
 #else

@@ -32,7 +32,7 @@ namespace LvArray
 using namespace testing;


-#if defined(USE_CHAI)
+#if defined(LVARRAY_USE_CHAI)
 static_assert( std::is_same< DEFAULT_BUFFER< int >, ChaiBuffer< int > >::value,
 "The default buffer should be ChaiBuffer when chai is enabled." );
 #endif
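benchmarkHelpers.hpp wraps the Caliper annotation macro in the same guard, so the rename has to reach it as well. The diff only shows the first branch; the #else fallback sketched below is an assumption about its shape (a no-op stub so that annotated code still compiles when Caliper is disabled), not text taken from the file.

// Hedged sketch of the guarded Caliper macro; the fallback branch is assumed.
#if defined(LVARRAY_USE_CALIPER)
  #include <caliper/cali.h>
  #define CALI_CXX_MARK_PRETTY_FUNCTION \
    cali::Function __cali_ann ## __func__( __PRETTY_FUNCTION__ )
#else
  #define CALI_CXX_MARK_PRETTY_FUNCTION   // profiling disabled: expands to nothing
#endif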

benchmarks/benchmarkInnerProduct.cpp

Lines changed: 6 additions & 6 deletions

@@ -119,12 +119,12 @@ void pointerRAJA( benchmark::State & state )


 INDEX_TYPE const SERIAL_SIZE = (2 << 20) + 573;
-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 INDEX_TYPE const OMP_SIZE = SERIAL_SIZE;
 #endif

 // The non Array benchmarks could be run without chai, but then what's the point.
-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 INDEX_TYPE const CUDA_SIZE = SERIAL_SIZE;
 #endif

@@ -153,10 +153,10 @@ void registerBenchmarks()
 REGISTER_BENCHMARK_TEMPLATE( { size }, pointerRAJA, POLICY );
 },
 std::make_tuple( SERIAL_SIZE, serialPolicy {} )
-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 , std::make_tuple( OMP_SIZE, parallelHostPolicy {} )
 #endif
-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 , std::make_tuple( CUDA_SIZE, parallelDevicePolicy< THREADS_PER_BLOCK > {} )
 #endif
 );

@@ -178,11 +178,11 @@ int main( int argc, char * * argv )

 LVARRAY_LOG( "Serial problems of size ( " << LvArray::benchmarking::SERIAL_SIZE << " )." );

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 LVARRAY_LOG( "OMP problems of size ( " << LvArray::benchmarking::OMP_SIZE << " )." );
 #endif

-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 LVARRAY_LOG( "CUDA problems of size ( " << LvArray::benchmarking::CUDA_SIZE << " )." );
 #endif

benchmarks/benchmarkInnerProductKernels.cpp

Lines changed: 2 additions & 2 deletions

@@ -109,11 +109,11 @@ pointerKernel( INDEX_TYPE const N,

 template class InnerProductRAJA< serialPolicy >;

-#if defined(USE_OPENMP)
+#if defined(LVARRAY_USE_OPENMP)
 template class InnerProductRAJA< parallelHostPolicy >;
 #endif

-#if defined(USE_CUDA) && defined(USE_CHAI)
+#if defined(LVARRAY_USE_CUDA) && defined(LVARRAY_USE_CHAI)
 template class InnerProductRAJA< RAJA::cuda_exec< THREADS_PER_BLOCK > >;
 #endif
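The *Kernels.cpp files in this commit all follow the pattern seen in the hunk above: kernel templates are explicitly instantiated once per execution policy, and the instantiations for optional backends sit behind the (now prefixed) guards so they are only compiled when that backend is configured. Below is a minimal self-contained sketch of the pattern; the Widget class and policy tags are hypothetical stand-ins, not LvArray types.

// Minimal sketch of guarded explicit instantiation (hypothetical types).
struct serialPolicyTag {};
struct openmpPolicyTag {};

template< typename POLICY >
struct Widget
{
  int run() const { return 42; }   // stand-in for a benchmark kernel
};

// Always instantiated.
template struct Widget< serialPolicyTag >;

// Only instantiated when the build defines the prefixed OpenMP macro.
#if defined(LVARRAY_USE_OPENMP)
template struct Widget< openmpPolicyTag >;
#endif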
