/* copy(): materialize the virtual array into the destination span, one range at a time. */
src.materialize_to_uninitialized(range, dst.data());

/* copy() restricted to a selection: only materialize the selected indices. */
src.materialize_to_uninitialized(selection.slice(range), dst.data());

/* gather(): pack the values at `indices` densely into the destination. */
src.materialize_compressed_to_uninitialized(indices.slice(range), dst.slice(range).data());
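/*
 * Illustration (not part of the original file): a minimal serial sketch of what the gather call
 * above computes, using plain std::vector instead of GVArray/GMutableSpan. The name
 * `gather_sketch` is made up for this example; the real utility runs in parallel chunks and
 * writes through the type-erased CPPType machinery instead of operator=.
 */
#include <cstddef>
#include <cstdint>
#include <vector>

template<typename T>
void gather_sketch(const std::vector<T> &src,
                   const std::vector<int64_t> &indices,
                   std::vector<T> &dst)
{
  dst.resize(indices.size());
  for (std::size_t i = 0; i < indices.size(); i++) {
    /* The destination is densely packed: element i receives src[indices[i]]. */
    dst[i] = src[indices[i]];
  }
}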
/* copy_group_to_group(): copy the source group of element `i` into its destination group. */
dst.slice(dst_offsets[i]).copy_from(src.slice(src_offsets[i]));
/* count_indices(): parallel histogram; concurrent increments of the same counter need atomics. */
for (const int i : indices.slice(range)) {
  atomic_add_and_fetch_int32(&counts[i], 1);
}

/* invert_booleans(): flip every value in the span. */
for (const int i : range) {
  span[i] = !span[i];
}
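/*
 * Illustration (not part of the original file): the serial equivalent of the histogram loop
 * above. `count_indices_sketch` is a made-up name; the real utility splits `indices` into
 * chunks and uses atomic increments because different chunks may hit the same counter.
 */
#include <vector>

void count_indices_sketch(const std::vector<int> &indices, std::vector<int> &counts)
{
  for (const int i : indices) {
    counts[i]++;
  }
}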
static bool all_equal(const Span<bool> span, const bool test)
{
  return std::all_of(span.begin(), span.end(), [&](const bool value) { return value == test; });
}
BooleanMix booleans_mix_calc(const VArray<bool> &varray, const IndexRange range_to_check)
{
  if (varray.is_empty()) {
    return BooleanMix::None;
  }
  const CommonVArrayInfo info = varray.common_info();
  if (info.type == CommonVArrayInfo::Type::Single) {
    return *static_cast<const bool *>(info.data) ? BooleanMix::AllTrue : BooleanMix::AllFalse;
  }
  if (info.type == CommonVArrayInfo::Type::Span) {
    const Span<bool> span(static_cast<const bool *>(info.data), varray.size());
    return threading::parallel_reduce(
        /* ... */
        [&](const IndexRange range, const BooleanMix init) {
          if (init == BooleanMix::Mixed) {
            return init;
          }
          const Span<bool> slice = span.slice(range);
          const bool compare = (init == BooleanMix::None) ? slice.first() :
                                                            (init == BooleanMix::AllTrue);
          if (all_equal(slice, compare)) {
            return compare ? BooleanMix::AllTrue : BooleanMix::AllFalse;
          }
          return BooleanMix::Mixed;
        },
        /* ... */);
  }
  return threading::parallel_reduce(
      /* ... */
      [&](const IndexRange range, const BooleanMix init) {
        if (init == BooleanMix::Mixed) {
          return init;
        }
        const bool compare = (init == BooleanMix::None) ? varray[range.first()] :
                                                          (init == BooleanMix::AllTrue);
        if (/* every value in `range` equals `compare` (check elided in this view) */) {
          return compare ? BooleanMix::AllTrue : BooleanMix::AllFalse;
        }
        return BooleanMix::Mixed;
      },
      /* ... */);
}
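/*
 * Illustration (not part of the original file): a serial sketch of the BooleanMix classification
 * above, assuming the enum has the four states used in the source (None, AllTrue, AllFalse,
 * Mixed). `booleans_mix_sketch` and `BooleanMixSketch` are made-up names; the real code performs
 * the same classification chunk-wise with threading::parallel_reduce and keeps returning Mixed
 * once any chunk reports it.
 */
#include <vector>

enum class BooleanMixSketch { None, AllFalse, AllTrue, Mixed };

BooleanMixSketch booleans_mix_sketch(const std::vector<bool> &values)
{
  if (values.empty()) {
    return BooleanMixSketch::None;
  }
  const bool first = values.front();
  for (const bool value : values) {
    if (value != first) {
      return BooleanMixSketch::Mixed;
    }
  }
  return first ? BooleanMixSketch::AllTrue : BooleanMixSketch::AllFalse;
}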
int64_t count_booleans(const VArray<bool> &varray)
{
  const CommonVArrayInfo info = varray.common_info();
  if (info.type == CommonVArrayInfo::Type::Single) {
    return *static_cast<const bool *>(info.data) ? varray.size() : 0;
  }
  if (info.type == CommonVArrayInfo::Type::Span) {
    const Span<bool> span(static_cast<const bool *>(info.data), varray.size());
    return threading::parallel_reduce(
        /* ... */
        [&](const IndexRange range, const int64_t init) {
          const Span<bool> slice = span.slice(range);
          return init + std::count(slice.begin(), slice.end(), true);
        },
        /* ... */);
  }
  return threading::parallel_reduce(
      /* ... */
      [&](const IndexRange range, const int64_t init) {
        int64_t value = init;
        for (const int64_t i : range) {
          value += int64_t(varray[i]);
        }
        return value;
      },
      /* ... */);
}
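/*
 * Illustration (not part of the original file): a serial sketch of the chunked-reduce contract
 * that the count above relies on. Each chunk starts from an identity value, accumulates its own
 * partial count, and the partial results are combined by addition; threading::parallel_reduce
 * does the same but evaluates chunks on multiple threads. All names here are made up, and
 * `chunk_size` must be greater than zero.
 */
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

int64_t count_true_chunked_sketch(const std::vector<bool> &values, const std::size_t chunk_size)
{
  int64_t total = 0; /* Identity value of the reduction. */
  for (std::size_t begin = 0; begin < values.size(); begin += chunk_size) {
    const std::size_t end = std::min(values.size(), begin + chunk_size);
    int64_t partial = 0;
    for (std::size_t i = begin; i < end; i++) {
      partial += int64_t(values[i]);
    }
    total += partial; /* Combine step of the reduction. */
  }
  return total;
}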
/* count_booleans() restricted to an index mask: only the selected values are counted. */
if (info.type == CommonVArrayInfo::Type::Single) {
  return *static_cast<const bool *>(info.data) ? mask.size() : 0;
}
/* ... */
mask.foreach_segment([&](const IndexMaskSegment segment) {
  for (const int64_t i : segment) {
    /* ... */
  }
});
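/*
 * Illustration (not part of the original file): the serial semantics of the masked count above,
 * with the index mask represented as a plain list of selected indices. `count_masked_sketch` is
 * a made-up name; the real IndexMask stores its indices in segments, which is why the source
 * iterates with foreach_segment().
 */
#include <cstddef>
#include <cstdint>
#include <vector>

int64_t count_masked_sketch(const std::vector<bool> &values,
                            const std::vector<int64_t> &mask_indices)
{
  int64_t count = 0;
  for (const int64_t i : mask_indices) {
    count += int64_t(values[std::size_t(i)]);
  }
  return count;
}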
bool indices_are_range(Span<int> indices, IndexRange range)
{
  if (indices.size() != range.size()) {
    return false;
  }
  return threading::parallel_reduce(
      /* ... */
      [&](const IndexRange part, const bool is_range) {
        const Span<int> local_indices = indices.slice(part);
        const IndexRange local_range = range.slice(part);
        return is_range &&
               std::equal(local_indices.begin(), local_indices.end(), local_range.begin());
      },
      std::logical_and<>());
}
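/*
 * Illustration (not part of the original file): a serial sketch of the check above, i.e. whether
 * `indices` is exactly the consecutive run starting at the first value of `range`. The parallel
 * version splits the comparison into parts and ANDs the partial results together, which is what
 * the std::logical_and<>() combine function expresses. All names here are made up.
 */
#include <cstddef>
#include <cstdint>
#include <vector>

bool indices_are_range_sketch(const std::vector<int> &indices, const int start, const int64_t size)
{
  if (int64_t(indices.size()) != size) {
    return false;
  }
  for (int64_t i = 0; i < size; i++) {
    if (indices[std::size_t(i)] != start + int(i)) {
      return false;
    }
  }
  return true;
}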