
Commit faf0bea

Implement parallel cuda::std::replace
This implements the `replace` algorithm for the CUDA backend.

* `std::replace`, see https://en.cppreference.com/w/cpp/algorithm/replace.html
* `std::replace_if`, see https://en.cppreference.com/w/cpp/algorithm/replace.html

It provides tests and benchmarks similar to Thrust's, as well as some boilerplate for libcu++. The functionality is not publicly available yet and is implemented in a private internal header.

Fixes #7375
1 parent 0231796 commit faf0bea
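
The overloads are not public yet, so the benchmarks added in this commit, which go through the internal <cuda/std/__pstl_algorithm> header, are the reference usage. The sketch below condenses their call shape into a plain helper: run_replace and equal_to_1337 are illustrative names only, while caching_allocator_t, cuda::execution::__cub_par_unseq, with_memory_resource, and with_stream are taken verbatim from the benchmark sources further down.

// Condensed sketch of how the benchmarks below invoke the new overloads.
// caching_allocator_t comes from the nvbench helper used by those benchmarks;
// run_replace and equal_to_1337 are illustrative names, not part of the commit.
#include <thrust/device_vector.h>

#include <cuda/std/__pstl_algorithm>

#include "nvbench_helper.cuh" // caching_allocator_t

struct equal_to_1337
{
  __device__ constexpr bool operator()(int v) const noexcept
  {
    return v == 1337;
  }
};

void run_replace(thrust::device_vector<int>& in, cudaStream_t stream)
{
  // Attach a caching memory resource and the caller's stream to the
  // experimental CUB-backed policy, exactly as the benchmarks do.
  caching_allocator_t alloc{};
  auto policy = cuda::execution::__cub_par_unseq.with_memory_resource(alloc);

  // Overwrite every 42 with 1337 on the device.
  cuda::std::replace(policy.with_stream(stream), in.begin(), in.end(), 42, 1337);

  // Same pattern, but the elements to overwrite are chosen by a device-callable predicate.
  cuda::std::replace_if(policy.with_stream(stream), in.begin(), in.end(), equal_to_1337{}, 0);
}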

File tree

7 files changed: +489 −0 lines changed

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
//===----------------------------------------------------------------------===//
//
// Part of CUDA Experimental in CUDA C++ Core Libraries,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#include <thrust/device_vector.h>

#include <cuda/memory_pool>
#include <cuda/std/__pstl_algorithm>
#include <cuda/stream_ref>

#include "nvbench_helper.cuh"

template <typename T>
static void basic(nvbench::state& state, nvbench::type_list<T>)
{
  const auto elements = static_cast<std::size_t>(state.get_int64("Elements"));

  thrust::device_vector<T> in = generate(elements, bit_entropy::_1_000, T{0}, T{42});

  state.add_element_count(elements);
  state.add_global_memory_reads<T>(elements);
  state.add_global_memory_writes<T>(elements);

  caching_allocator_t alloc{};
  auto policy = cuda::execution::__cub_par_unseq.with_memory_resource(alloc);

  state.exec(nvbench::exec_tag::gpu | nvbench::exec_tag::no_batch | nvbench::exec_tag::sync,
             [&](nvbench::launch& launch) {
               cuda::std::replace(policy.with_stream(launch.get_stream().get_stream()), in.begin(), in.end(), 42, 1337);
             });
}

NVBENCH_BENCH_TYPES(basic, NVBENCH_TYPE_AXES(fundamental_types))
  .set_name("base")
  .set_type_axes_names({"T{ct}"})
  .add_int64_power_of_two_axis("Elements", nvbench::range(16, 28, 4));
Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
//===----------------------------------------------------------------------===//
//
// Part of CUDA Experimental in CUDA C++ Core Libraries,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#include <thrust/device_vector.h>

#include <cuda/memory_pool>
#include <cuda/std/__pstl_algorithm>
#include <cuda/stream_ref>

#include "nvbench_helper.cuh"

struct equal_to_42
{
  template <class T>
  __device__ constexpr bool operator()(const T& val) const noexcept
  {
    return val == static_cast<T>(42);
  }
};

template <typename T>
static void basic(nvbench::state& state, nvbench::type_list<T>)
{
  const auto elements = static_cast<std::size_t>(state.get_int64("Elements"));

  thrust::device_vector<T> in = generate(elements, bit_entropy::_1_000, T{0}, T{42});

  state.add_element_count(elements);
  state.add_global_memory_reads<T>(elements);
  state.add_global_memory_writes<T>(elements);

  caching_allocator_t alloc{};
  auto policy = cuda::execution::__cub_par_unseq.with_memory_resource(alloc);

  state.exec(nvbench::exec_tag::gpu | nvbench::exec_tag::no_batch | nvbench::exec_tag::sync,
             [&](nvbench::launch& launch) {
               cuda::std::replace_if(
                 policy.with_stream(launch.get_stream().get_stream()), in.begin(), in.end(), equal_to_42{}, 1337);
             });
}

NVBENCH_BENCH_TYPES(basic, NVBENCH_TYPE_AXES(fundamental_types))
  .set_name("base")
  .set_type_axes_names({"T{ct}"})
  .add_int64_power_of_two_axis("Elements", nvbench::range(16, 28, 4));
Lines changed: 127 additions & 0 deletions
@@ -0,0 +1,127 @@
//===----------------------------------------------------------------------===//
//
// Part of libcu++, the C++ Standard Library for your entire system,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#ifndef _CUDA_STD___PSTL_REPLACE_H
#define _CUDA_STD___PSTL_REPLACE_H

#include <cuda/std/detail/__config>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#if !_CCCL_COMPILER(NVRTC)

#  include <cuda/std/__algorithm/replace.h>
#  include <cuda/std/__concepts/concept_macros.h>
#  include <cuda/std/__execution/policy.h>
#  include <cuda/std/__functional/invoke.h>
#  include <cuda/std/__iterator/concepts.h>
#  include <cuda/std/__iterator/iterator_traits.h>
#  include <cuda/std/__pstl/dispatch.h>
#  include <cuda/std/__type_traits/always_false.h>
#  include <cuda/std/__type_traits/is_comparable.h>
#  include <cuda/std/__type_traits/is_execution_policy.h>
#  include <cuda/std/__type_traits/is_nothrow_copy_constructible.h>
#  include <cuda/std/__utility/move.h>

#  if _CCCL_HAS_BACKEND_CUDA()
#    include <cuda/std/__pstl/cuda/transform.h>
#  endif // _CCCL_HAS_BACKEND_CUDA()

#  include <cuda/std/__cccl/prologue.h>

_CCCL_BEGIN_NAMESPACE_CUDA_STD

template <class _Tp>
struct __replace_compare_eq
{
  _Tp __old_value_;

  _CCCL_HOST_API constexpr __replace_compare_eq(const _Tp& __old_value) noexcept(is_nothrow_copy_constructible_v<_Tp>)
      : __old_value_(__old_value)
  {}

  template <class _Up>
  [[nodiscard]] _CCCL_DEVICE_API constexpr bool operator()(const _Up& __value) const
    noexcept(__is_cpp17_nothrow_equality_comparable_v<_Tp, _Up>)
  {
    return static_cast<bool>(__old_value_ == __value);
  }
};

template <class _Tp>
struct __replace_return_value
{
  _Tp __new_value_;

  _CCCL_HOST_API constexpr __replace_return_value(const _Tp& __new_value) noexcept(is_nothrow_copy_constructible_v<_Tp>)
      : __new_value_(__new_value)
  {}

  template <class _Up>
  [[nodiscard]] _CCCL_DEVICE_API constexpr _Tp operator()(const _Up&) const
    noexcept(is_nothrow_copy_constructible_v<_Tp>)
  {
    return __new_value_;
  }
};

_CCCL_BEGIN_NAMESPACE_ARCH_DEPENDENT

_CCCL_TEMPLATE(class _Policy, class _InputIterator, class _Tp = iter_value_t<_InputIterator>)
_CCCL_REQUIRES(__has_forward_traversal<_InputIterator> _CCCL_AND is_execution_policy_v<_Policy>)
_CCCL_HOST_API void replace(
  [[maybe_unused]] const _Policy& __policy,
  _InputIterator __first,
  _InputIterator __last,
  const _Tp& __old_value,
  const _Tp& __new_value)
{
  static_assert(__is_cpp17_equality_comparable_v<_Tp, iter_reference_t<_InputIterator>>,
                "cuda::std::replace requires T to be comparable with iter_reference_t<InputIterator>");

  if (__first == __last)
  {
    return;
  }

  [[maybe_unused]] auto __dispatch =
    ::cuda::std::execution::__pstl_select_dispatch<::cuda::std::execution::__pstl_algorithm::__transform, _Policy>();
  if constexpr (::cuda::std::execution::__pstl_can_dispatch<decltype(__dispatch)>)
  {
    (void) __dispatch(
      __policy,
      __first,
      ::cuda::std::move(__last),
      ::cuda::std::move(__first),
      __replace_return_value{__new_value},
      __replace_compare_eq{__old_value});
  }
  else
  {
    static_assert(__always_false_v<_Policy>, "Parallel cuda::std::replace requires at least one selected backend");
    ::cuda::std::replace(::cuda::std::move(__first), ::cuda::std::move(__last), __old_value, __new_value);
  }
}

_CCCL_END_NAMESPACE_ARCH_DEPENDENT

_CCCL_END_NAMESPACE_CUDA_STD

#  include <cuda/std/__cccl/epilogue.h>

#endif // !_CCCL_COMPILER(NVRTC)

#endif // _CUDA_STD___PSTL_REPLACE_H
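
A note on the dispatch above: `replace` is lowered onto the shared `__transform` backend by pairing `__replace_compare_eq` (selects elements equal to the old value) with `__replace_return_value` (produces the overwrite), writing back into the input range. Below is a serial sketch of what that functor pair computes; `replace_sketch` is an illustrative name, not part of the header.

// Serial equivalent of the functor composition handed to the CUDA transform
// backend above: walk the range once, test each element with the equality
// functor and, where it fires, write the stored new value.
template <class _ForwardIt, class _Tp>
void replace_sketch(_ForwardIt __first, _ForwardIt __last, const _Tp& __old_value, const _Tp& __new_value)
{
  for (; __first != __last; ++__first)
  {
    if (__old_value == *__first) // __replace_compare_eq{__old_value}(*__first)
    {
      *__first = __new_value; // __replace_return_value{__new_value}(*__first)
    }
  }
}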
Lines changed: 93 additions & 0 deletions
@@ -0,0 +1,93 @@
//===----------------------------------------------------------------------===//
//
// Part of libcu++, the C++ Standard Library for your entire system,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#ifndef _CUDA_STD___PSTL_REPLACE_IF_H
#define _CUDA_STD___PSTL_REPLACE_IF_H

#include <cuda/std/detail/__config>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#if !_CCCL_COMPILER(NVRTC)

#  include <cuda/std/__algorithm/replace_if.h>
#  include <cuda/std/__concepts/concept_macros.h>
#  include <cuda/std/__execution/policy.h>
#  include <cuda/std/__functional/invoke.h>
#  include <cuda/std/__iterator/concepts.h>
#  include <cuda/std/__iterator/iterator_traits.h>
#  include <cuda/std/__pstl/dispatch.h>
#  include <cuda/std/__pstl/replace.h>
#  include <cuda/std/__type_traits/always_false.h>
#  include <cuda/std/__type_traits/is_execution_policy.h>
#  include <cuda/std/__utility/move.h>

#  if _CCCL_HAS_BACKEND_CUDA()
#    include <cuda/std/__pstl/cuda/transform.h>
#  endif // _CCCL_HAS_BACKEND_CUDA()

#  include <cuda/std/__cccl/prologue.h>

_CCCL_BEGIN_NAMESPACE_CUDA_STD

_CCCL_BEGIN_NAMESPACE_ARCH_DEPENDENT

_CCCL_TEMPLATE(class _Policy, class _InputIterator, class _UnaryPred, class _Tp = iter_value_t<_InputIterator>)
_CCCL_REQUIRES(__has_forward_traversal<_InputIterator> _CCCL_AND is_execution_policy_v<_Policy>)
_CCCL_HOST_API void replace_if(
  [[maybe_unused]] const _Policy& __policy,
  _InputIterator __first,
  _InputIterator __last,
  _UnaryPred __pred,
  const _Tp& __new_value)
{
  static_assert(indirect_unary_predicate<_UnaryPred, _InputIterator>,
                "cuda::std::replace_if: UnaryPred must satisfy indirect_unary_predicate<InputIterator>");

  if (__first == __last)
  {
    return;
  }

  [[maybe_unused]] auto __dispatch =
    ::cuda::std::execution::__pstl_select_dispatch<::cuda::std::execution::__pstl_algorithm::__transform, _Policy>();
  if constexpr (::cuda::std::execution::__pstl_can_dispatch<decltype(__dispatch)>)
  {
    (void) __dispatch(
      __policy,
      __first,
      ::cuda::std::move(__last),
      ::cuda::std::move(__first),
      __replace_return_value{__new_value},
      __pred);
  }
  else
  {
    static_assert(__always_false_v<_Policy>, "Parallel cuda::std::replace_if requires at least one selected backend");
    ::cuda::std::replace_if(
      ::cuda::std::move(__first), ::cuda::std::move(__last), ::cuda::std::move(__pred), __new_value);
  }
}

_CCCL_END_NAMESPACE_ARCH_DEPENDENT

_CCCL_END_NAMESPACE_CUDA_STD

#  include <cuda/std/__cccl/epilogue.h>

#endif // !_CCCL_COMPILER(NVRTC)

#endif // _CUDA_STD___PSTL_REPLACE_IF_H

libcudacxx/include/cuda/std/__pstl_algorithm

Lines changed: 2 additions & 0 deletions
@@ -24,6 +24,8 @@
 #include <cuda/std/__pstl/for_each.h>
 #include <cuda/std/__pstl/for_each_n.h>
 #include <cuda/std/__pstl/reduce.h>
+#include <cuda/std/__pstl/replace.h>
+#include <cuda/std/__pstl/replace_if.h>
 #include <cuda/std/__pstl/transform.h>

 // [algorithm.syn]
