Commit c973b0b9 authored by Ceng23333, committed by thatPepe

issue/691: Fix InfiniLM dependency issue


Signed-off-by: Ceng23333 <441651826@qq.com>
parent 1c25a902
@@ -9,6 +9,11 @@ namespace py = pybind11;
 namespace infinicore::ops {
 inline void bind_rope(py::module &m) {
+    py::enum_<infinicore::nn::RoPE::Algo>(m, "RoPEAlgo")
+        .value("GPT_J", infinicore::nn::RoPE::Algo::GPT_J)
+        .value("GPT_NEOX", infinicore::nn::RoPE::Algo::GPT_NEOX);
     m.def("rope",
           &op::rope,
           py::arg("x"),
......
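
For readers unfamiliar with the binding pattern in the hunk above: py::enum_ registers a C++ enum as a Python enum type on the module. Below is a minimal, self-contained sketch of that pattern; the module name and enum here are illustrative placeholders, not InfiniCore's actual build targets.

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Illustrative placeholder for infinicore::nn::RoPE::Algo.
enum class Algo { GPT_J, GPT_NEOX };

PYBIND11_MODULE(example, m) {
    // Exposes Algo to Python as example.RoPEAlgo; the values are then
    // reachable as RoPEAlgo.GPT_J and RoPEAlgo.GPT_NEOX.
    py::enum_<Algo>(m, "RoPEAlgo")
        .value("GPT_J", Algo::GPT_J)
        .value("GPT_NEOX", Algo::GPT_NEOX);
}

Once built, Python code can pass RoPEAlgo.GPT_NEOX (or GPT_J) to any bound function that accepts an Algo argument, such as the rope op bound just below the enum.
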
@@ -22,21 +22,7 @@ void TensorImpl::copy_from(Tensor src) {
         throw std::runtime_error("Cannot copy from tensor with different shape");
     }
     if (this->device() == src->device()) {
-        // If both tensors are contiguous, use direct memcpy (much faster and avoids rearrange issues)
-        if (this->is_contiguous() && src->is_contiguous()) {
-            // Use nbytes() to get the actual tensor size
-            size_t copy_size = std::min(this->nbytes(), src->nbytes());
-            // For CPU-to-CPU copies, use regular memcpy. For device-to-device, use D2D memcpy
-            if (this->device().getType() == Device::Type::CPU) {
-                context::memcpyH2H(this->data(), src->data(), copy_size);
-            } else {
-                context::memcpyD2D(this->data(), src->data(), copy_size);
-            }
-        } else {
-            op::rearrange_(Tensor(const_cast<TensorImpl *>(this)->shared_from_this()), src);
-        }
     } else {
         if (!src->is_contiguous()) {
             src = src->contiguous();
......
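
The fast path in the hunk above copies raw bytes only when both tensors live on the same device and are contiguous, falling back to an element-wise rearrange otherwise. Here is a standalone sketch of that dispatch, with a hypothetical Buf struct standing in for the tensor metadata and std::memcpy standing in for the project-specific context::memcpyH2H / context::memcpyD2D helpers:

#include <algorithm>
#include <cstddef>
#include <cstring>

// Hypothetical stand-in for a tensor's data pointer, byte size,
// and contiguity flag.
struct Buf {
    void *data;
    std::size_t nbytes;
    bool contiguous;
};

void copy_same_device(Buf dst, const Buf &src) {
    if (dst.contiguous && src.contiguous) {
        // Fast path: one flat byte copy, clamped to the smaller buffer
        // (mirrors the std::min(this->nbytes(), src->nbytes()) guard).
        std::memcpy(dst.data, src.data, std::min(dst.nbytes, src.nbytes));
    } else {
        // Strided layouts need an element-wise rearrange; elided here
        // (op::rearrange_ in the real code).
    }
}

A flat memcpy is only safe when both layouts are contiguous, since it ignores strides entirely; that is why the contiguity check gates the fast path.
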