CANN: Support Opt CONV_TRANSPOSE_1D and ELU (#12786)

* [CANN] Support ELU and CONV_TRANSPOSE_1D

* [CANN] Address review comments

* [CANN]Modification review comments

* [CANN]name adjustment

* [CANN]remove lambda used in template

* [CANN] Use std::function instead of template

* [CANN]Modify the code according to the review comments

---------

Signed-off-by: noemotiovon <noemotiovon@gmail.com>
This commit is contained in:
Chenguang Li 2025-04-09 14:04:14 +08:00 committed by GitHub
parent 0090950f67
commit 6e1c4cebdb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 204 additions and 78 deletions

View file

@ -57,6 +57,8 @@
#include <aclnnop/aclnn_sub.h>
#include <aclnnop/aclnn_mul.h>
#include <aclnnop/aclnn_div.h>
#include <aclnnop/aclnn_convolution.h>
#include <aclnnop/aclnn_elu.h>
#include <float.h>
#include <cmath>
@ -86,6 +88,20 @@ void bcast_shape(ggml_tensor * src0, ggml_tensor * src1, ggml_tensor * dst, aclT
}
}
/**
 * @brief Runs an arbitrary unary ACL operation on dst's first source tensor.
 *
 * Wraps `dst->src[0]` and `dst` as ACL tensors, invokes the supplied
 * callable on them, and releases both wrappers afterwards.
 *
 * @param unary_op Callable performing the actual ACLNN unary kernel launch.
 * @param ctx      CANN backend context forwarded to the callable.
 * @param dst      Destination tensor; its first source is the op input.
 */
void ggml_cann_unary_op(
    std::function<void(ggml_backend_cann_context&, aclTensor*, aclTensor*)> unary_op,
    ggml_backend_cann_context& ctx, ggml_tensor* dst) {
    ggml_tensor* input = dst->src[0];

    // Create ACL views over the ggml tensors, run the op, then drop the views.
    aclTensor* acl_in  = ggml_cann_create_tensor(input);
    aclTensor* acl_out = ggml_cann_create_tensor(dst);

    unary_op(ctx, acl_in, acl_out);

    ACL_CHECK(aclDestroyTensor(acl_in));
    ACL_CHECK(aclDestroyTensor(acl_out));
}
/**
* @brief Repeats elements of a tensor along each dimension according to the
* specified repeat array.
@ -2585,3 +2601,49 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
ACL_CHECK(aclDestroyTensor(acl_src));
ACL_CHECK(aclDestroyTensor(acl_dst));
}
/**
 * @brief Computes a 1D transposed convolution via aclnnConvolution.
 *
 * Uses `dst->src[0]` as the weight tensor and `dst->src[1]` as the input,
 * both viewed in NCL format; the stride is read from `dst->op_params[0]`.
 * Padding and dilation are fixed to 0 and 1 respectively, groups to 1.
 *
 * @param ctx CANN backend context (used by GGML_CANN_CALL_ACLNN_OP).
 * @param dst Destination tensor carrying the op sources and parameters.
 */
void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst){
    ggml_tensor * src0 = dst->src[0];  // convolution weights
    ggml_tensor * src1 = dst->src[1];  // input signal

    // stride
    int64_t s0 = ((const int32_t*)(dst->op_params))[0];

    aclTensor* acl_input  = ggml_cann_create_tensor(src1, src1->ne, src1->nb, 3, ACL_FORMAT_NCL);
    aclTensor* acl_weight = ggml_cann_create_tensor(src0, src0->ne, src0->nb, 3, ACL_FORMAT_NCL);
    aclTensor* acl_dst    = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3, ACL_FORMAT_NCL);

    int64_t strideVal[1];
    strideVal[0] = s0;
    aclIntArray *stride = aclCreateIntArray(strideVal, 1);
    int64_t paddingVal[] = {0};
    aclIntArray *padding = aclCreateIntArray(paddingVal, 1);
    int64_t dilationVal[] = {1};
    aclIntArray *dilation = aclCreateIntArray(dilationVal, 1);
    bool transposed = true;
    int64_t groups = 1;
    int8_t cubeMathType = 0;  // default cube math mode, no forced precision cast

    // `padding` is passed twice: once as padding, once as outputPadding.
    GGML_CANN_CALL_ACLNN_OP(Convolution, acl_input, acl_weight, nullptr, stride,
        padding, dilation, transposed, padding, groups, acl_dst, cubeMathType);

    // Release every ACL resource created above. The original code destroyed
    // only acl_weight and acl_dst, leaking acl_input and the three int arrays.
    ACL_CHECK(aclDestroyTensor(acl_input));
    ACL_CHECK(aclDestroyTensor(acl_weight));
    ACL_CHECK(aclDestroyTensor(acl_dst));
    ACL_CHECK(aclDestroyIntArray(stride));
    ACL_CHECK(aclDestroyIntArray(padding));
    ACL_CHECK(aclDestroyIntArray(dilation));
}
/**
 * @brief Applies the ELU activation to `dst->src[0]` via aclnnElu.
 *
 * aclnnElu takes three scalar coefficients (alpha, scale, inputScale);
 * all three are set to 1.0f here, giving the standard ELU definition.
 *
 * @param ctx CANN backend context (used by GGML_CANN_CALL_ACLNN_OP).
 * @param dst Destination tensor; its first source is the activation input.
 */
void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst){
    ggml_tensor * src0 = dst->src[0];

    aclTensor* acl_input = ggml_cann_create_tensor(src0);
    aclTensor* acl_dst   = ggml_cann_create_tensor(dst);

    float alphaValue = 1.0f;
    aclScalar* alpha = nullptr;
    alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT);

    // The same scalar serves as alpha, scale and inputScale (all 1.0f).
    GGML_CANN_CALL_ACLNN_OP(Elu, acl_input, alpha, alpha, alpha,
        acl_dst);

    // Release all ACL resources; the original code leaked the alpha scalar.
    ACL_CHECK(aclDestroyScalar(alpha));
    ACL_CHECK(aclDestroyTensor(acl_input));
    ACL_CHECK(aclDestroyTensor(acl_dst));
}