#include "cpu_kernel.hh"
#include <algorithm>
#include <cstring>
#include <execution>

namespace refactor::kernel {
    using K = PadCpu;

    K::PadCpu(PadInfo info_, PadType mode_, size_t value_) noexcept
        : Kernel(), info(std::move(info_)), mode(mode_), valueLength(value_) {}

    auto K::build(PadInfo info, PadType mode, std::optional<std::reference_wrapper<Tensor const>> value_) noexcept -> KernelBox {
        // Only constant-mode padding is supported by this CPU kernel.
        if (mode != PadType::Constant) {
            return nullptr;
        }
        // Byte width of the constant pad value; 0 means "pad with zeros".
        size_t value = value_ ? value_->get().dataType.size() : 0;
        return std::make_unique<K>(std::move(info), mode, value);
    }
    auto K::typeId() noexcept -> size_t {
        static uint8_t ID = 1;
        return reinterpret_cast<size_t>(&ID);
    }

    auto K::kernelTypeId() const noexcept -> size_t {
        return typeId();
    }
    auto K::description() const noexcept -> std::string_view {
        return "Performing pad operation on generic cpu";
    }

    auto K::lower(Resources &) const noexcept -> RoutineWorkspace {
        using namespace runtime;

        return [info = this->info, value = this->valueLength](Resources &, void *workspace, void const *const *inputs, void *const *outputs) {
            auto src = reinterpret_cast<uint8_t const *>(inputs[0]);
            auto dst = reinterpret_cast<uint8_t *>(outputs[0]);
            // One block filled with the padding value (zeros by default).
            std::vector<uint8_t> defaultValue(info.blockSize, 0);
            if (value != 0) {
                // Tile the user-provided constant across the whole block.
                auto constValue = reinterpret_cast<uint8_t const *>(inputs[2]);
                for (auto i : range0_(info.blockSize / value)) {
                    std::memcpy(defaultValue.data() + i * value, constValue, value);
                }
            }
            std::for_each_n(std::execution::par_unseq,
                            natural_t(0), info.blockCount,
                            [=, &info](auto i) {
                                // Map the output block index i back to an input block index j,
                                // dimension by dimension; flag marks positions inside the pad region.
                                long rem = i, j = 0;
                                bool flag = false;
                                for (auto const &dim : info.dims) {
                                    auto pos = rem / dim.strideO - dim.padS;
                                    if (pos < 0 || pos >= dim.dimI) {
                                        flag = true;
                                        break;
                                    }
                                    j += pos * dim.strideI;
                                    rem %= dim.strideO;
                                }
                                if (flag) {
                                    std::memcpy(dst + i * info.blockSize, defaultValue.data(), info.blockSize);
                                } else {
                                    std::memcpy(dst + i * info.blockSize, src + j * info.blockSize, info.blockSize);
                                }
                            });
        };
    }

}// namespace refactor::kernel
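
The output-to-input index mapping used in lower() can be checked in isolation. Below is a minimal, self-contained sketch (not part of the kernel; Dim is a simplified stand-in for the fields read from info.dims, and blockSize is taken as one element) that applies the same stride arithmetic to a 2x3 input padded by one row on top and one column on the left.

#include <array>
#include <cstdio>
#include <vector>

// Simplified stand-in for one entry of info.dims (field names assumed for illustration).
struct Dim {
    long dimI, padS, strideI, strideO;
};

int main() {
    // 2x3 input, padded by one row on top and one column on the left -> 3x4 output.
    std::vector<int> src{1, 2, 3, 4, 5, 6};
    std::array<Dim, 2> dims{{{2, 1, 3, 4}, {3, 1, 1, 1}}};
    long blockCount = 12;// 3 * 4 output elements
    std::vector<int> dst(blockCount);

    for (long i = 0; i < blockCount; ++i) {
        long rem = i, j = 0;
        bool pad = false;
        for (auto const &dim : dims) {
            auto pos = rem / dim.strideO - dim.padS;
            if (pos < 0 || pos >= dim.dimI) {
                pad = true;
                break;
            }
            j += pos * dim.strideI;
            rem %= dim.strideO;
        }
        dst[i] = pad ? 0 : src[j];
    }

    for (long r = 0; r < 3; ++r) {
        for (long c = 0; c < 4; ++c) { std::printf("%d ", dst[r * 4 + c]); }
        std::printf("\n");
    }
    // Prints:
    // 0 0 0 0
    // 0 1 2 3
    // 0 4 5 6
}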